| column | dtype |
|---|---|
| hexsha | string (length 40) |
| size | int64 (3 to 1.03M) |
| ext | string (10 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 3 to 972) |
| max_stars_repo_name | string (length 6 to 130) |
| max_stars_repo_head_hexsha | string (length 40 to 78) |
| max_stars_repo_licenses | list (length 1 to 10) |
| max_stars_count | int64 (1 to 191k, nullable) |
| max_stars_repo_stars_event_min_datetime | string (length 24, nullable) |
| max_stars_repo_stars_event_max_datetime | string (length 24, nullable) |
| max_issues_repo_path | string (length 3 to 972) |
| max_issues_repo_name | string (length 6 to 130) |
| max_issues_repo_head_hexsha | string (length 40 to 78) |
| max_issues_repo_licenses | list (length 1 to 10) |
| max_issues_count | int64 (1 to 116k, nullable) |
| max_issues_repo_issues_event_min_datetime | string (length 24, nullable) |
| max_issues_repo_issues_event_max_datetime | string (length 24, nullable) |
| max_forks_repo_path | string (length 3 to 972) |
| max_forks_repo_name | string (length 6 to 130) |
| max_forks_repo_head_hexsha | string (length 40 to 78) |
| max_forks_repo_licenses | list (length 1 to 10) |
| max_forks_count | int64 (1 to 105k, nullable) |
| max_forks_repo_forks_event_min_datetime | string (length 24, nullable) |
| max_forks_repo_forks_event_max_datetime | string (length 24, nullable) |
| content | string (length 3 to 1.03M) |
| avg_line_length | float64 (1.13 to 941k) |
| max_line_length | int64 (2 to 941k) |
| alphanum_fraction | float64 (0 to 1) |
hexsha: 27274806c04272edad8da89e3616218937ff0c8b | size: 18,630 | ext: py | lang: Python
repo: mohitjain97/isilon_sdk_python | path: isi_sdk_7_2/isi_sdk_7_2/models/storagepool_settings_extended.py | head_hexsha: a371f438f542568edb8cda35e929e6b300b1177c | licenses: ["Unlicense"]
max_stars_count: 24 (2018-06-22T14:13:23.000Z to 2022-03-23T01:21:26.000Z) | max_issues_count: 46 (2018-04-30T13:28:22.000Z to 2022-03-21T21:11:07.000Z) | max_forks_count: 29 (2018-06-19T00:14:04.000Z to 2022-02-08T17:51:19.000Z)
content:
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_7_2.models.storagepool_settings_spillover_target import StoragepoolSettingsSpilloverTarget # noqa: F401,E501
class StoragepoolSettingsExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'automatically_manage_io_optimization': 'str',
'automatically_manage_protection': 'str',
'global_namespace_acceleration_enabled': 'bool',
'protect_directories_one_level_higher': 'bool',
'spillover_enabled': 'bool',
'spillover_target': 'StoragepoolSettingsSpilloverTarget',
'ssd_l3_cache_default_enabled': 'bool',
'virtual_hot_spare_deny_writes': 'bool',
'virtual_hot_spare_hide_spare': 'bool',
'virtual_hot_spare_limit_drives': 'int',
'virtual_hot_spare_limit_percent': 'int'
}
attribute_map = {
'automatically_manage_io_optimization': 'automatically_manage_io_optimization',
'automatically_manage_protection': 'automatically_manage_protection',
'global_namespace_acceleration_enabled': 'global_namespace_acceleration_enabled',
'protect_directories_one_level_higher': 'protect_directories_one_level_higher',
'spillover_enabled': 'spillover_enabled',
'spillover_target': 'spillover_target',
'ssd_l3_cache_default_enabled': 'ssd_l3_cache_default_enabled',
'virtual_hot_spare_deny_writes': 'virtual_hot_spare_deny_writes',
'virtual_hot_spare_hide_spare': 'virtual_hot_spare_hide_spare',
'virtual_hot_spare_limit_drives': 'virtual_hot_spare_limit_drives',
'virtual_hot_spare_limit_percent': 'virtual_hot_spare_limit_percent'
}
def __init__(self, automatically_manage_io_optimization=None, automatically_manage_protection=None, global_namespace_acceleration_enabled=None, protect_directories_one_level_higher=None, spillover_enabled=None, spillover_target=None, ssd_l3_cache_default_enabled=None, virtual_hot_spare_deny_writes=None, virtual_hot_spare_hide_spare=None, virtual_hot_spare_limit_drives=None, virtual_hot_spare_limit_percent=None): # noqa: E501
"""StoragepoolSettingsExtended - a model defined in Swagger""" # noqa: E501
self._automatically_manage_io_optimization = None
self._automatically_manage_protection = None
self._global_namespace_acceleration_enabled = None
self._protect_directories_one_level_higher = None
self._spillover_enabled = None
self._spillover_target = None
self._ssd_l3_cache_default_enabled = None
self._virtual_hot_spare_deny_writes = None
self._virtual_hot_spare_hide_spare = None
self._virtual_hot_spare_limit_drives = None
self._virtual_hot_spare_limit_percent = None
self.discriminator = None
if automatically_manage_io_optimization is not None:
self.automatically_manage_io_optimization = automatically_manage_io_optimization
if automatically_manage_protection is not None:
self.automatically_manage_protection = automatically_manage_protection
if global_namespace_acceleration_enabled is not None:
self.global_namespace_acceleration_enabled = global_namespace_acceleration_enabled
if protect_directories_one_level_higher is not None:
self.protect_directories_one_level_higher = protect_directories_one_level_higher
if spillover_enabled is not None:
self.spillover_enabled = spillover_enabled
if spillover_target is not None:
self.spillover_target = spillover_target
if ssd_l3_cache_default_enabled is not None:
self.ssd_l3_cache_default_enabled = ssd_l3_cache_default_enabled
if virtual_hot_spare_deny_writes is not None:
self.virtual_hot_spare_deny_writes = virtual_hot_spare_deny_writes
if virtual_hot_spare_hide_spare is not None:
self.virtual_hot_spare_hide_spare = virtual_hot_spare_hide_spare
if virtual_hot_spare_limit_drives is not None:
self.virtual_hot_spare_limit_drives = virtual_hot_spare_limit_drives
if virtual_hot_spare_limit_percent is not None:
self.virtual_hot_spare_limit_percent = virtual_hot_spare_limit_percent
@property
def automatically_manage_io_optimization(self):
"""Gets the automatically_manage_io_optimization of this StoragepoolSettingsExtended. # noqa: E501
Automatically manage IO optimization settings on files. # noqa: E501
:return: The automatically_manage_io_optimization of this StoragepoolSettingsExtended. # noqa: E501
:rtype: str
"""
return self._automatically_manage_io_optimization
@automatically_manage_io_optimization.setter
def automatically_manage_io_optimization(self, automatically_manage_io_optimization):
"""Sets the automatically_manage_io_optimization of this StoragepoolSettingsExtended.
Automatically manage IO optimization settings on files. # noqa: E501
:param automatically_manage_io_optimization: The automatically_manage_io_optimization of this StoragepoolSettingsExtended. # noqa: E501
:type: str
"""
allowed_values = ["all", "files_at_default", "none"] # noqa: E501
if automatically_manage_io_optimization not in allowed_values:
raise ValueError(
"Invalid value for `automatically_manage_io_optimization` ({0}), must be one of {1}" # noqa: E501
.format(automatically_manage_io_optimization, allowed_values)
)
self._automatically_manage_io_optimization = automatically_manage_io_optimization
@property
def automatically_manage_protection(self):
"""Gets the automatically_manage_protection of this StoragepoolSettingsExtended. # noqa: E501
Automatically manage protection settings on files. # noqa: E501
:return: The automatically_manage_protection of this StoragepoolSettingsExtended. # noqa: E501
:rtype: str
"""
return self._automatically_manage_protection
@automatically_manage_protection.setter
def automatically_manage_protection(self, automatically_manage_protection):
"""Sets the automatically_manage_protection of this StoragepoolSettingsExtended.
Automatically manage protection settings on files. # noqa: E501
:param automatically_manage_protection: The automatically_manage_protection of this StoragepoolSettingsExtended. # noqa: E501
:type: str
"""
allowed_values = ["all", "files_at_default", "none"] # noqa: E501
if automatically_manage_protection not in allowed_values:
raise ValueError(
"Invalid value for `automatically_manage_protection` ({0}), must be one of {1}" # noqa: E501
.format(automatically_manage_protection, allowed_values)
)
self._automatically_manage_protection = automatically_manage_protection
@property
def global_namespace_acceleration_enabled(self):
"""Gets the global_namespace_acceleration_enabled of this StoragepoolSettingsExtended. # noqa: E501
Optimize namespace operations by storing metadata on SSDs. # noqa: E501
:return: The global_namespace_acceleration_enabled of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._global_namespace_acceleration_enabled
@global_namespace_acceleration_enabled.setter
def global_namespace_acceleration_enabled(self, global_namespace_acceleration_enabled):
"""Sets the global_namespace_acceleration_enabled of this StoragepoolSettingsExtended.
Optimize namespace operations by storing metadata on SSDs. # noqa: E501
:param global_namespace_acceleration_enabled: The global_namespace_acceleration_enabled of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._global_namespace_acceleration_enabled = global_namespace_acceleration_enabled
@property
def protect_directories_one_level_higher(self):
"""Gets the protect_directories_one_level_higher of this StoragepoolSettingsExtended. # noqa: E501
Automatically add additional protection level to all directories. # noqa: E501
:return: The protect_directories_one_level_higher of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._protect_directories_one_level_higher
@protect_directories_one_level_higher.setter
def protect_directories_one_level_higher(self, protect_directories_one_level_higher):
"""Sets the protect_directories_one_level_higher of this StoragepoolSettingsExtended.
Automatically add additional protection level to all directories. # noqa: E501
:param protect_directories_one_level_higher: The protect_directories_one_level_higher of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._protect_directories_one_level_higher = protect_directories_one_level_higher
@property
def spillover_enabled(self):
"""Gets the spillover_enabled of this StoragepoolSettingsExtended. # noqa: E501
Spill writes into other pools as needed. # noqa: E501
:return: The spillover_enabled of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._spillover_enabled
@spillover_enabled.setter
def spillover_enabled(self, spillover_enabled):
"""Sets the spillover_enabled of this StoragepoolSettingsExtended.
Spill writes into other pools as needed. # noqa: E501
:param spillover_enabled: The spillover_enabled of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._spillover_enabled = spillover_enabled
@property
def spillover_target(self):
"""Gets the spillover_target of this StoragepoolSettingsExtended. # noqa: E501
Target pool for spilled writes. # noqa: E501
:return: The spillover_target of this StoragepoolSettingsExtended. # noqa: E501
:rtype: StoragepoolSettingsSpilloverTarget
"""
return self._spillover_target
@spillover_target.setter
def spillover_target(self, spillover_target):
"""Sets the spillover_target of this StoragepoolSettingsExtended.
Target pool for spilled writes. # noqa: E501
:param spillover_target: The spillover_target of this StoragepoolSettingsExtended. # noqa: E501
:type: StoragepoolSettingsSpilloverTarget
"""
self._spillover_target = spillover_target
@property
def ssd_l3_cache_default_enabled(self):
"""Gets the ssd_l3_cache_default_enabled of this StoragepoolSettingsExtended. # noqa: E501
The L3 Cache default enabled state. This specifies whether L3 Cache should be enabled on new node pools # noqa: E501
:return: The ssd_l3_cache_default_enabled of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._ssd_l3_cache_default_enabled
@ssd_l3_cache_default_enabled.setter
def ssd_l3_cache_default_enabled(self, ssd_l3_cache_default_enabled):
"""Sets the ssd_l3_cache_default_enabled of this StoragepoolSettingsExtended.
The L3 Cache default enabled state. This specifies whether L3 Cache should be enabled on new node pools # noqa: E501
:param ssd_l3_cache_default_enabled: The ssd_l3_cache_default_enabled of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._ssd_l3_cache_default_enabled = ssd_l3_cache_default_enabled
@property
def virtual_hot_spare_deny_writes(self):
"""Gets the virtual_hot_spare_deny_writes of this StoragepoolSettingsExtended. # noqa: E501
Deny writes into reserved virtual hot spare space. # noqa: E501
:return: The virtual_hot_spare_deny_writes of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._virtual_hot_spare_deny_writes
@virtual_hot_spare_deny_writes.setter
def virtual_hot_spare_deny_writes(self, virtual_hot_spare_deny_writes):
"""Sets the virtual_hot_spare_deny_writes of this StoragepoolSettingsExtended.
Deny writes into reserved virtual hot spare space. # noqa: E501
:param virtual_hot_spare_deny_writes: The virtual_hot_spare_deny_writes of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._virtual_hot_spare_deny_writes = virtual_hot_spare_deny_writes
@property
def virtual_hot_spare_hide_spare(self):
"""Gets the virtual_hot_spare_hide_spare of this StoragepoolSettingsExtended. # noqa: E501
Hide reserved virtual hot spare space from free space counts. # noqa: E501
:return: The virtual_hot_spare_hide_spare of this StoragepoolSettingsExtended. # noqa: E501
:rtype: bool
"""
return self._virtual_hot_spare_hide_spare
@virtual_hot_spare_hide_spare.setter
def virtual_hot_spare_hide_spare(self, virtual_hot_spare_hide_spare):
"""Sets the virtual_hot_spare_hide_spare of this StoragepoolSettingsExtended.
Hide reserved virtual hot spare space from free space counts. # noqa: E501
:param virtual_hot_spare_hide_spare: The virtual_hot_spare_hide_spare of this StoragepoolSettingsExtended. # noqa: E501
:type: bool
"""
self._virtual_hot_spare_hide_spare = virtual_hot_spare_hide_spare
@property
def virtual_hot_spare_limit_drives(self):
"""Gets the virtual_hot_spare_limit_drives of this StoragepoolSettingsExtended. # noqa: E501
The number of drives to reserve for the virtual hot spare, from 0-4. # noqa: E501
:return: The virtual_hot_spare_limit_drives of this StoragepoolSettingsExtended. # noqa: E501
:rtype: int
"""
return self._virtual_hot_spare_limit_drives
@virtual_hot_spare_limit_drives.setter
def virtual_hot_spare_limit_drives(self, virtual_hot_spare_limit_drives):
"""Sets the virtual_hot_spare_limit_drives of this StoragepoolSettingsExtended.
The number of drives to reserve for the virtual hot spare, from 0-4. # noqa: E501
:param virtual_hot_spare_limit_drives: The virtual_hot_spare_limit_drives of this StoragepoolSettingsExtended. # noqa: E501
:type: int
"""
if virtual_hot_spare_limit_drives is not None and virtual_hot_spare_limit_drives > 4: # noqa: E501
raise ValueError("Invalid value for `virtual_hot_spare_limit_drives`, must be a value less than or equal to `4`") # noqa: E501
if virtual_hot_spare_limit_drives is not None and virtual_hot_spare_limit_drives < 0: # noqa: E501
raise ValueError("Invalid value for `virtual_hot_spare_limit_drives`, must be a value greater than or equal to `0`") # noqa: E501
self._virtual_hot_spare_limit_drives = virtual_hot_spare_limit_drives
@property
def virtual_hot_spare_limit_percent(self):
"""Gets the virtual_hot_spare_limit_percent of this StoragepoolSettingsExtended. # noqa: E501
The percent space to reserve for the virtual hot spare, from 0-20. # noqa: E501
:return: The virtual_hot_spare_limit_percent of this StoragepoolSettingsExtended. # noqa: E501
:rtype: int
"""
return self._virtual_hot_spare_limit_percent
@virtual_hot_spare_limit_percent.setter
def virtual_hot_spare_limit_percent(self, virtual_hot_spare_limit_percent):
"""Sets the virtual_hot_spare_limit_percent of this StoragepoolSettingsExtended.
The percent space to reserve for the virtual hot spare, from 0-20. # noqa: E501
:param virtual_hot_spare_limit_percent: The virtual_hot_spare_limit_percent of this StoragepoolSettingsExtended. # noqa: E501
:type: int
"""
if virtual_hot_spare_limit_percent is not None and virtual_hot_spare_limit_percent > 20: # noqa: E501
raise ValueError("Invalid value for `virtual_hot_spare_limit_percent`, must be a value less than or equal to `20`") # noqa: E501
if virtual_hot_spare_limit_percent is not None and virtual_hot_spare_limit_percent < 0: # noqa: E501
raise ValueError("Invalid value for `virtual_hot_spare_limit_percent`, must be a value greater than or equal to `0`") # noqa: E501
self._virtual_hot_spare_limit_percent = virtual_hot_spare_limit_percent
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StoragepoolSettingsExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 44.676259 | max_line_length: 433 | alphanum_fraction: 0.720344
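A brief usage sketch of the model above (assuming the isi_sdk_7_2 package is installed; the field values are illustrative only): the generated setters enforce the enum and 0-20 range constraints, and to_dict() serializes whatever has been set.

```python
from isi_sdk_7_2.models.storagepool_settings_extended import StoragepoolSettingsExtended

# Constructor keywords mirror the swagger_types mapping above.
settings = StoragepoolSettingsExtended(
    automatically_manage_io_optimization="files_at_default",
    spillover_enabled=True,
    virtual_hot_spare_limit_percent=10,
)
print(settings.to_dict())

# Out-of-range values are rejected by the setter (limit_percent must be 0-20).
try:
    settings.virtual_hot_spare_limit_percent = 50
except ValueError as exc:
    print(exc)
```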
hexsha: 11b79395c1788c6813924d4c024d2f5478febe09 | size: 7,042 | ext: py | lang: Python
repo: ngcw/parcelhubpos | path: parcelhubPOS/commons.py | head_hexsha: 25404f9e944fd48fc8b19504d3e19e3beaa3e801 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from .models import User, Branch, UserBranchAccess, Terminal
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.sessions.models import Session
from django.utils import timezone
from django.contrib.auth import login
CONST_branchid = 'branchid'
CONST_terminalid = 'terminalid'
CONST_username = 'Username'
CONST_invoice = '1Invoice'
CONST_custacc = '2Customer Account'
CONST_payment = '3Payment'
CONST_soa = '4Statement Of Account'
CONST_masterdata = '5Information'
CONST_reporting = '6Report'
CONST_system = '7System'
def userselection(request):
sessiondict = []
if 'loggedusers' in request.session:
sessiondict = request.session['loggedusers']
if request.method == "POST":
selecteduser = request.POST.get('userselection')
if selecteduser:
loguser = User.objects.get(id=selecteduser)
if loguser is not None:
login(request, loguser)
request.session['userid'] = loguser.id
name = "%s %s"%(loguser.last_name, loguser.first_name )
request.session[CONST_username] = name
request.session['loggedusers'] = sessiondict
request.session['issuperuser'] = loguser.is_superuser
allloggedusers = User.objects.filter(id__in=request.session['loggedusers'])
return allloggedusers
def branchselection(request):
loguser = User.objects.get(id=request.session.get('userid'))
if loguser.is_superuser:
branches = Branch.objects.all()
else:
allbranchaccess = UserBranchAccess.objects.filter(user=loguser)
branchidlist = allbranchaccess.values_list('branch_id', flat=True)
branches = Branch.objects.filter(id__in=branchidlist)
selectedbranch = request.session.get(CONST_branchid)
if CONST_branchid not in request.session:
branchaccess = branches.first()
if branchaccess:
branchid = branchaccess.id
request.session[CONST_branchid] = branchid
if request.method == "POST" and 'branchselection' in request.POST:
selectedbranch = request.POST.get('branchselection')
if selectedbranch:
request.session[CONST_branchid] = selectedbranch
return branches
def terminalselection(request):
selectedbranch = request.session.get(CONST_branchid)
selectedterminal = request.session.get(CONST_terminalid)
if selectedbranch == '-1':
terminals = None
request.session[CONST_terminalid] = '-1'
else:
branch = Branch.objects.get(id=selectedbranch)
terminals = Terminal.objects.filter(branch=branch)
if CONST_terminalid not in request.session:
terminal = terminals.first()
if terminal:
terminalid = terminal.id
request.session[CONST_terminalid] = terminalid
else:
request.session[CONST_terminalid] = '-1'
elif selectedterminal != '-1':
terminal = Terminal.objects.filter(branch=branch,id=selectedterminal).first()
if terminal:
request.session[CONST_terminalid] = terminal.id
else:
terminal = Terminal.objects.filter(branch=branch).first()
if terminal:
request.session[CONST_terminalid] = terminal.id
else:
request.session[CONST_terminalid] = '-1'
if request.method == "POST" and 'terminalselection' in request.POST:
selectedterminal = request.POST.get('terminalselection')
if selectedterminal:
request.session[CONST_terminalid] = selectedterminal
return terminals
def navbar(request):
loguser = User.objects.get(id=request.session.get('userid'))
branchid = request.session.get(CONST_branchid)
terminalid = request.session.get(CONST_terminalid)
sel_branch = Branch.objects.filter(id=branchid)
branchaccess = UserBranchAccess.objects.filter(user=loguser, branch=sel_branch).first()
menudict = {}
if loguser.is_superuser or branchaccess:
#Everyone access feature
if branchid == '-1' or terminalid == '-1':
menudict[CONST_invoice] =[('New invoice (F9)',''),('Invoice list','/parcelhubPOS/invoice')]
else:
menudict[CONST_invoice] =[('New invoice (F9)','/parcelhubPOS/invoice/editinvoice/?invoiceid='),('Invoice list','/parcelhubPOS/invoice')]
menudict[CONST_custacc] =[]
if terminalid and terminalid != '-1' :
menudict[CONST_payment] =[('Payment overview','/parcelhubPOS/payment/?custid=""'),
('Payment receive','/parcelhubPOS/makepayment'),
]
else:
menudict[CONST_payment] =[('Payment overview','/parcelhubPOS/payment/?custid=""'),
('Payment receive',''),
]
menudict[CONST_soa] =[
('New statement of account','/parcelhubPOS/statementofaccount_new'),
]
menudict[CONST_reporting] =[('Cash up report','/parcelhubPOS/cashupreport'),
]
menudict[CONST_masterdata] = []
#Super admin and branch admin only feature
if loguser.is_superuser or branchaccess.access_level == 'Branch admin':
menudict[CONST_masterdata].append(('Vendor','/parcelhubPOS/vendor'))
menudict[CONST_masterdata].append(('Tax','/parcelhubPOS/tax') )
menudict[CONST_masterdata].append(('Zone domestic','/parcelhubPOS/zonedomestic') )
menudict[CONST_masterdata].append(('Zone international','/parcelhubPOS/zoneinternational') )
menudict[CONST_masterdata].append(('SKU','/parcelhubPOS/sku'))
menudict[CONST_masterdata].append(('SKU pricing','/parcelhubPOS/skubranch'))
menudict[CONST_masterdata].append(('User','/parcelhubPOS/user'))
else:
menudict[CONST_masterdata].append(('Vendor',''))
menudict[CONST_masterdata].append(('Tax','') )
menudict[CONST_masterdata].append(('Zone domestic','') )
menudict[CONST_masterdata].append(('Zone international','') )
menudict[CONST_masterdata].append(('SKU',''))
menudict[CONST_masterdata].append(('SKU pricing',''))
menudict[CONST_masterdata].append(('User',''))
#Super admin only feature
if loguser.is_superuser:
menudict[CONST_masterdata].append(('Branch','/parcelhubPOS/branch'))
menudict[CONST_system] =[('Global parameters','/parcelhubPOS/globalparameter')]
else:
menudict[CONST_masterdata].append(('Branch',''))
menudict[CONST_system] =[('Global parameters','')]
if len(menudict[CONST_masterdata]) == 0:
menudict.pop(CONST_masterdata)
return menudict
avg_line_length: 47.904762 | max_line_length: 148 | alphanum_fraction: 0.625959
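A sketch of how these helpers might be wired into a view (the view name and template path below are hypothetical, not taken from the repository): each helper reads any POST selection, refreshes the corresponding session keys, and returns the queryset for the template, while navbar() builds the menu from the resulting session state.

```python
# Hypothetical view for illustration; assumes it lives in the same Django app as commons.py.
from django.shortcuts import render

from .commons import userselection, branchselection, terminalselection, navbar


def dashboard(request):
    users = userselection(request)          # users available in the user switcher
    branches = branchselection(request)     # branches the logged-in user may access
    terminals = terminalselection(request)  # terminals of the currently selected branch
    context = {
        'users': users,
        'branches': branches,
        'terminals': terminals,
        'menu': navbar(request),
    }
    return render(request, 'parcelhubPOS/dashboard.html', context)
```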
hexsha: 662678d9e8bb08e399a3f42b3f3cf064b7f9d7fe | size: 13,467 | ext: py | lang: Python
repo: wolfoxonly/fpc | path: test/functional/decodescript.py | head_hexsha: bf372369fbbc271330527b5fb83d6285a865332c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Flashpaychain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import FlashpaychainTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(FlashpaychainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
avg_line_length: 74.816667 | max_line_length: 761 | alphanum_fraction: 0.775451
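The scriptSig/scriptPubKey hex assembled in these tests relies on raw push opcodes: for data shorter than 76 bytes the opcode is simply the byte count, which is where the '48' and '21' prefixes come from. A small standalone sketch of that encoding:

```python
# Why the tests prepend '48' to the signature and '21' to the public key.
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'

def push_prefix(hex_data):
    """Single-byte push opcode for data shorter than 76 bytes (longer needs OP_PUSHDATA1/2/4)."""
    n_bytes = len(hex_data) // 2
    assert n_bytes < 76
    return format(n_bytes, '02x')

print(push_prefix(signature))   # '48' -> push 72 bytes (71-byte DER signature + sighash byte)
print(push_prefix(public_key))  # '21' -> push 33 bytes (compressed public key)
```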
hexsha: a514180f4bd738e5500319fa8efc14f62919df06 | size: 201 | ext: py | lang: Python
repo: chunbo777/RINAproject | path: run.py | head_hexsha: 0488ab7d8b3cd9dfc7bcd1d9a4ed08066e000fbd | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import os
from scrapy.cmdline import execute
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
execute(
["scrapy", "crawl", "rinawine"]
)
except SystemExit:
pass
avg_line_length: 16.75 | max_line_length: 53 | alphanum_fraction: 0.641791
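For context, scrapy.cmdline.execute() ends by calling sys.exit(), which is why the wrapper above swallows SystemExit. A roughly equivalent sketch (assuming the scrapy executable is on PATH) that avoids the exception by shelling out instead:

```python
# Run the same spider through the Scrapy CLI in a child process.
import os
import subprocess

project_dir = os.path.dirname(os.path.realpath(__file__))
subprocess.run(["scrapy", "crawl", "rinawine"], cwd=project_dir, check=True)
```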
hexsha: 1f5218e157e0df0311281c9de0898b9adde13b16 | size: 265 | ext: py | lang: Python
repo: asysc2020/Youth-Sports-Booster-Community | path: webminer/entities/domain_model.py | head_hexsha: 14535ab0f596d747a6a84357b826d4009406a67a | licenses: ["MIT"]
max_stars_count: 38 (2018-12-09T10:33:53.000Z to 2022-01-01T05:59:44.000Z) | max_issues_count: 50 (2018-07-21T13:22:35.000Z to 2019-09-17T08:31:47.000Z) | max_forks_count: 16 (2018-10-02T17:06:27.000Z to 2022-03-16T22:12:30.000Z)
content:
""" Create a DomainModel providing a metaclass"""
from abc import ABCMeta
class DomainModel(metaclass=ABCMeta):
"""An ABC can be subclassed directly, and then acts as a mix-in class
metaclass (class, optional): Defaults to ABCMeta.
"""
pass
avg_line_length: 22.083333 | max_line_length: 73 | alphanum_fraction: 0.69434
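A minimal sketch of the intended use, subclassing the ABC directly as the docstring describes (the Advertisement entity is hypothetical, added here purely for illustration; assumes the webminer package is importable):

```python
from webminer.entities.domain_model import DomainModel


class Advertisement(DomainModel):
    """Hypothetical domain entity used only to illustrate subclassing."""

    def __init__(self, url, price):
        self.url = url
        self.price = price


ad = Advertisement("https://example.org/listing/1", 450)
print(isinstance(ad, DomainModel))  # True
```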
hexsha: 9f6d8b11676fbef04ddc1b18bdcff6cd84581f56 | size: 615 | ext: py | lang: Python
repo: JoowonYun/hdacpy | path: tests/test_query.py | head_hexsha: 3107bcc38c06c9b79d9cd4fea046fd8ea435d3e9 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import json
from unittest.mock import Mock
import pytest
from hdacpy.transaction import Transaction
@pytest.mark.skip(reason="only works if RESTful server runs in local")
def test_balance():
tx = Transaction(
host="http://localhost:1317",
privkey="26d167d549a4b2b66f766b0d3f2bdbe1cd92708818c338ff453abde316a2bd59",
account_num=11335,
sequence=0,
gas_price=37000,
chain_id="friday-devtest",
)
resp = tx.balance(address="friday15evpva2u57vv6l5czehyk69s0wnq9hrkqulwfz")
assert resp.status_code == 200
assert resp.json()['value'] == "500000000"
avg_line_length: 30.75 | max_line_length: 83 | alphanum_fraction: 0.712195
hexsha: cc120e517c34d0a37972a2596a9aedc0cd1edb65 | size: 5,013 | ext: py | lang: Python
repo: bongsang/wavenet_generator | path: tacotron-2/tacotron/models/custom_decoder.py | head_hexsha: 3484d81d95161703ab3337e7abffd09571aa45c1 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
from tacotron.models.helpers import TacoTrainingHelper, TacoTestHelper
class CustomDecoderOutput(
collections.namedtuple("CustomDecoderOutput", ("rnn_output", "token_output", "sample_id"))):
pass
class CustomDecoder(decoder.Decoder):
"""Custom sampling decoder.
Allows for stop token prediction at inference time
and returns equivalent loss in training time.
Note:
Only use this decoder with Tacotron 2 as it only accepts tacotron custom helpers
"""
def __init__(self, cell, helper, initial_state, output_layer=None):
"""Initialize CustomDecoder.
Args:
cell: An `RNNCell` instance.
helper: A `Helper` instance.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
The initial state of the RNNCell.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
Raises:
TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
"""
# if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
# raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (output_layer is not None
and not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._helper = helper
self._initial_state = initial_state
self._output_layer = output_layer
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return CustomDecoderOutput(
rnn_output=self._rnn_output_size(),
token_output=self._helper.token_output_size,
sample_id=self._helper.sample_ids_shape)
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and the sample_ids_dtype from the helper.
dtype = nest.flatten(self._initial_state)[0].dtype
return CustomDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
tf.float32,
self._helper.sample_ids_dtype)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
return self._helper.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
"""Perform a custom decoding step.
		Enables dynamic <stop_token> prediction
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "CustomDecoderStep", (time, inputs, state)):
#Call outputprojection wrapper cell
(cell_outputs, stop_token), cell_state = self._cell(inputs, state)
			# apply output_layer (if it exists)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids,
stop_token_prediction=stop_token)
outputs = CustomDecoderOutput(cell_outputs, stop_token, sample_ids)
return (outputs, next_state, next_inputs, finished)
avg_line_length: 36.064748 | max_line_length: 101 | alphanum_fraction: 0.754837
hexsha: 74134b2fd4c8655f3f033ba4ebea579d27f8c637 | size: 163 | ext: py | lang: Python
repo: silvafj/BBK-MSCCS-2017-18 | path: POP1/worksheets/three/ex10/code.py | head_hexsha: d97b0f8e7434d19a1a4006989c32c4c1deb93842 | licenses: ["MIT"]
max_stars_count: 1 (2021-12-29T19:38:56.000Z to 2021-12-29T19:38:56.000Z) | max_issues_count: null | max_forks_count: 2 (2021-04-08T22:58:03.000Z to 2021-04-09T01:16:51.000Z)
content:
words = {}
for _ in range(int(input())):
for w in input().split():
words[w] = words.get(w, 0) + 1
print(*sorted(words, key=lambda w: (-words[w], w)))
avg_line_length: 23.285714 | max_line_length: 51 | alphanum_fraction: 0.552147
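The snippet above reads a line count from stdin, tallies whitespace-separated words, and prints them ordered by descending frequency with alphabetical tie-breaking. A self-contained sketch of the same logic on fixed data, to show the expected ordering:

```python
# Same counting and sorting logic, applied to in-memory lines instead of stdin.
lines = ["the cat sat", "the cat ran", "a dog ran"]
words = {}
for line in lines:
    for w in line.split():
        words[w] = words.get(w, 0) + 1
# Most frequent first; ties broken alphabetically.
print(*sorted(words, key=lambda w: (-words[w], w)))
# -> cat ran the a dog sat
```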
hexsha: 289931cbe82d6a09a06a1e0c6f00d61e4418e03f | size: 1,564 | ext: py | lang: Python
repo: nagesh4193/django-treebeard | path: setup.py | head_hexsha: 15a567b8eb20dc1f1b0db0a527bc749f20327820 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from treebeard import __version__
import codecs
def root_dir():
try:
return os.path.dirname(__file__)
except NameError:
return '.'
setup_args = dict(
name='django-treebeard',
version=__version__,
url='https://github.com/django-treebeard/django-treebeard/',
author='Gustavo Picon',
author_email='tabo@tabo.pe',
license='Apache License 2.0',
packages=find_packages(exclude=['docs']),
include_package_data=True,
description='Efficient tree implementations for Django',
long_description=codecs.open(os.path.join(root_dir(), 'README.rst'), encoding='utf-8').read(),
python_requires='>=3.6',
install_requires=['Django>=2.2'],
tests_require=['pytest'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'])
if __name__ == '__main__':
setup(**setup_args)
avg_line_length: 31.28 | max_line_length: 98 | alphanum_fraction: 0.628517
hexsha: 22a9d9cbb07e53451dc738b51b0a0c6b276cc41a | size: 842 | ext: py | lang: Python
repo: peter-stoyanov/Python | path: Softuni Assignments/exam 11 March/lecture_cdn_tests.py | head_hexsha: 52ddb70537c05caa0d87740493b7b1f974191984 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import unittest
# import io
# import sys
import lecture_cdn as sut
class TestLectureCDN(unittest.TestCase):
def test_get_tokens(self):
tokens = sut.extract_tokens('trainer:housey;course:csharp-oop-basics;lecture:polymorphism;duration:3h05m')
self.assertSequenceEqual(
sorted(['polymorphism', 'housey', 'csharp-oop-basics', '3h05m']),
sorted(list(tokens))
)
def test_get_tokens2(self):
tokens = sut.extract_tokens('lecture:matrices-extra;trainer:bojo;course:csharp-oop-basics;duration:4h35m')
self.assertSequenceEqual(
sorted(['matrices-extra', 'bojo', 'csharp-oop-basics', '4h35m']),
sorted(list(tokens))
)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLectureCDN)
unittest.TextTestRunner(verbosity=2).run(suite)
avg_line_length: 33.68 | max_line_length: 114 | alphanum_fraction: 0.680523
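The module under test (lecture_cdn) is not part of this row; judging from the assertions, extract_tokens splits the header on ';' and returns the value of each 'key:value' segment. A hypothetical implementation consistent with both test cases (an assumption, not the original source):

```python
# Hypothetical lecture_cdn.extract_tokens, reconstructed only from the assertions above.
def extract_tokens(header):
    """Yield the value part of each ';'-separated 'key:value' segment."""
    for segment in header.split(';'):
        _key, _, value = segment.partition(':')
        yield value


print(sorted(extract_tokens(
    'trainer:housey;course:csharp-oop-basics;lecture:polymorphism;duration:3h05m')))
# ['3h05m', 'csharp-oop-basics', 'housey', 'polymorphism']
```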
hexsha: c70f38cdaf4023de01e7ea7b17e832e2fff22225 | size: 7,096 | ext: py | lang: Python
repo: mkrack/sumo | path: sumo/plotting/__init__.py | head_hexsha: e885844de07e03bde552f9e38e2230edd73b10da | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
Subpackage providing helper functions for generating publication ready plots.
"""
from functools import wraps
import numpy as np
import matplotlib.pyplot
from matplotlib.collections import LineCollection
from matplotlib import rc, rcParams
from pkg_resources import resource_filename
colour_cache = {}
sumo_base_style = resource_filename('sumo.plotting', 'sumo_base.mplstyle')
sumo_dos_style = resource_filename('sumo.plotting', 'sumo_dos.mplstyle')
sumo_bs_style = resource_filename('sumo.plotting', 'sumo_bs.mplstyle')
sumo_phonon_style = resource_filename('sumo.plotting', 'sumo_phonon.mplstyle')
sumo_optics_style = resource_filename('sumo.plotting', 'sumo_optics.mplstyle')
def styled_plot(*style_sheets):
"""Return a decorator that will apply matplotlib style sheets to a plot.
``style_sheets`` is a base set of styles, which will be ignored if
``no_base_style`` is set in the decorated function arguments.
The style will further be overwritten by any styles in the ``style``
optional argument of the decorated function.
Args:
style_sheets (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
supported definition of a style sheet. Can be a list of style of
style sheets.
"""
def decorator(get_plot):
@wraps(get_plot)
def wrapper(*args, fonts=None, style=None, no_base_style=False,
**kwargs):
if no_base_style:
list_style = []
else:
list_style = list(style_sheets)
if style is not None:
if isinstance(style, list):
list_style += style
else:
list_style += [style]
if fonts is not None:
list_style += [{'font.family': 'sans-serif',
'font.sans-serif': fonts}]
matplotlib.pyplot.style.use(list_style)
return get_plot(*args, **kwargs)
return wrapper
return decorator
def pretty_plot(width=None, height=None, plt=None, dpi=None):
"""Get a :obj:`matplotlib.pyplot` object with publication ready defaults.
Args:
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot`
object to use for plotting.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the plot.
Returns:
:obj:`matplotlib.pyplot`: A :obj:`matplotlib.pyplot` object with
publication ready defaults set.
"""
if plt is None:
plt = matplotlib.pyplot
if width is None:
width = matplotlib.rcParams['figure.figsize'][0]
if height is None:
height = matplotlib.rcParams['figure.figsize'][1]
if dpi is not None:
matplotlib.rcParams['figure.dpi'] = dpi
fig = plt.figure(figsize=(width, height))
fig.add_subplot(1, 1, 1)
return plt
def pretty_subplot(nrows, ncols, width=None, height=None, sharex=True,
sharey=True, dpi=None, plt=None, gridspec_kw=None):
"""Get a :obj:`matplotlib.pyplot` subplot object with pretty defaults.
Args:
nrows (int): The number of rows in the subplot.
ncols (int): The number of columns in the subplot.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
sharex (:obj:`bool`, optional): All subplots share the same x-axis.
Defaults to ``True``.
sharey (:obj:`bool`, optional): All subplots share the same y-axis.
Defaults to ``True``.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the plot.
plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot`
object to use for plotting.
gridspec_kw (:obj:`dict`, optional): Gridspec parameters. Please see:
:obj:`matplotlib.pyplot.subplot` for more information. Defaults
to ``None``.
Returns:
:obj:`matplotlib.pyplot`: A :obj:`matplotlib.pyplot` subplot object
with publication ready defaults set.
"""
if width is None:
width = rcParams['figure.figsize'][0]
if height is None:
height = rcParams['figure.figsize'][1]
# TODO: Make this work if plt is already set...
if plt is None:
plt = matplotlib.pyplot
plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, dpi=dpi,
figsize=(width, height), facecolor='w',
gridspec_kw=gridspec_kw)
return plt
def curry_power_tick(times_sign=r'\times'):
def f(val, pos):
return power_tick(val, pos, times_sign=times_sign)
return f
def power_tick(val, pos, times_sign=r'\times'):
"""Custom power ticker function. """
if val == 0:
return r'$\mathregular{0}$'
elif val < 0:
exponent = int(np.log10(-val))
else:
exponent = int(np.log10(val))
coeff = val / 10**exponent
return r'$\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff,
times_sign,
exponent)
def rgbline(x, y, red, green, blue, alpha=1, linestyles="solid",
linewidth=2.5):
"""Get a RGB coloured line for plotting.
Args:
x (list): x-axis data.
y (list): y-axis data (can be multidimensional array).
red (list): Red data (must have same shape as ``y``).
green (list): Green data (must have same shape as ``y``).
blue (list): blue data (must have same shape as ``y``).
alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency)
data (must have same shape as ``y`` or be an :obj:`int`).
linestyles (:obj:`str`, optional): Linestyle for plot. Options are
``"solid"`` or ``"dotted"``.
"""
y = np.array(y)
if len(y.shape) == 1:
y = np.array([y])
red = np.array([red])
green = np.array([green])
blue = np.array([blue])
alpha = np.array([alpha])
elif isinstance(alpha, int):
alpha = [alpha] * len(y)
seg = []
colours = []
for yy, rr, gg, bb, aa in zip(y, red, green, blue, alpha):
pts = np.array([x, yy]).T.reshape(-1, 1, 2)
seg.extend(np.concatenate([pts[:-1], pts[1:]], axis=1))
nseg = len(x) - 1
r = [0.5 * (rr[i] + rr[i + 1]) for i in range(nseg)]
g = [0.5 * (gg[i] + gg[i + 1]) for i in range(nseg)]
b = [0.5 * (bb[i] + bb[i + 1]) for i in range(nseg)]
        a = np.ones(nseg, float) * aa  # the np.float alias was removed in NumPy 1.24; use the builtin
colours.extend(list(zip(r, g, b, a)))
lc = LineCollection(seg, colors=colours, rasterized=True,
linewidth=linewidth, linestyles=linestyles)
return lc
avg_line_length: 35.128713 | max_line_length: 78 | alphanum_fraction: 0.590333
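A short usage sketch for the decorator and helpers defined above (assuming sumo and matplotlib are installed; the data and file name are illustrative): styled_plot applies the packaged style sheets plus any per-call overrides before the wrapped plotting function runs.

```python
from sumo.plotting import pretty_plot, styled_plot, sumo_base_style


@styled_plot(sumo_base_style)
def get_line_plot(x, y, width=None, height=None):
    plt = pretty_plot(width=width, height=height)
    plt.plot(x, y)
    return plt


# `style` and `fonts` are consumed by the decorator's wrapper as rcParams overrides.
plt = get_line_plot([0, 1, 2], [0, 1, 4], style={'lines.linewidth': 3.0})
plt.savefig('line.png')
```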
hexsha: 1aab62441fd92d02832602ed85c7a4f74df90cd0 | size: 93,765 | ext: py | lang: Python
repo: marcelosalles/pyidf | path: pyidf/humidifiers_and_dehumidifiers.py | head_hexsha: c2f744211572b5e14e29522aac1421ba88addb0e | licenses: ["Apache-2.0"]
max_stars_count: 19 (2015-12-08T23:33:51.000Z to 2022-01-31T04:41:10.000Z) | max_issues_count: 2 (2019-10-04T10:57:00.000Z to 2021-10-01T06:46:17.000Z) | max_forks_count: 7 (2015-11-04T02:25:01.000Z to 2021-12-08T03:14:28.000Z)
content:
""" Data objects in group "Humidifiers and Dehumidifiers"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class HumidifierSteamElectric(DataObject):
""" Corresponds to IDD object `Humidifier:Steam:Electric`
Electrically heated steam humidifier with fan.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated capacity',
{'name': u'Rated Capacity',
'pyname': u'rated_capacity',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'rated power',
{'name': u'Rated Power',
'pyname': u'rated_power',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'rated fan power',
{'name': u'Rated Fan Power',
'pyname': u'rated_fan_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'standby power',
{'name': u'Standby Power',
'pyname': u'standby_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'air inlet node name',
{'name': u'Air Inlet Node Name',
'pyname': u'air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'air outlet node name',
{'name': u'Air Outlet Node Name',
'pyname': u'air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'water storage tank name',
{'name': u'Water Storage Tank Name',
'pyname': u'water_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Humidifier:Steam:Electric',
'pyname': u'HumidifierSteamElectric',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def rated_capacity(self):
"""field `Rated Capacity`
| Capacity is m3/s of water at 5.05 C
| Units: m3/s
| IP-Units: gal/min
Args:
value (float or "Autosize"): value for IDD Field `Rated Capacity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_capacity` or None if not set
"""
return self["Rated Capacity"]
@rated_capacity.setter
def rated_capacity(self, value=None):
"""Corresponds to IDD field `Rated Capacity`"""
self["Rated Capacity"] = value
@property
def rated_power(self):
"""field `Rated Power`
| if autosized the rated power is calculated from the rated capacity
| and enthalpy rise of water from 20.0C to 100.0C steam and assumes
| electric to thermal energy conversion efficiency of 100.0%
| Units: W
| IP-Units: W
Args:
value (float or "Autosize"): value for IDD Field `Rated Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_power` or None if not set
"""
return self["Rated Power"]
@rated_power.setter
def rated_power(self, value=None):
"""Corresponds to IDD field `Rated Power`"""
self["Rated Power"] = value
@property
def rated_fan_power(self):
"""field `Rated Fan Power`
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rated Fan Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rated_fan_power` or None if not set
"""
return self["Rated Fan Power"]
@rated_fan_power.setter
def rated_fan_power(self, value=None):
"""Corresponds to IDD field `Rated Fan Power`"""
self["Rated Fan Power"] = value
@property
def standby_power(self):
"""field `Standby Power`
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Standby Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `standby_power` or None if not set
"""
return self["Standby Power"]
@standby_power.setter
def standby_power(self, value=None):
"""Corresponds to IDD field `Standby Power`"""
self["Standby Power"] = value
@property
def air_inlet_node_name(self):
"""field `Air Inlet Node Name`
Args:
value (str): value for IDD Field `Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_inlet_node_name` or None if not set
"""
return self["Air Inlet Node Name"]
@air_inlet_node_name.setter
def air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Inlet Node Name`"""
self["Air Inlet Node Name"] = value
@property
def air_outlet_node_name(self):
"""field `Air Outlet Node Name`
Args:
value (str): value for IDD Field `Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_outlet_node_name` or None if not set
"""
return self["Air Outlet Node Name"]
@air_outlet_node_name.setter
def air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Outlet Node Name`"""
self["Air Outlet Node Name"] = value
@property
def water_storage_tank_name(self):
"""field `Water Storage Tank Name`
Args:
value (str): value for IDD Field `Water Storage Tank Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `water_storage_tank_name` or None if not set
"""
return self["Water Storage Tank Name"]
@water_storage_tank_name.setter
def water_storage_tank_name(self, value=None):
"""Corresponds to IDD field `Water Storage Tank Name`"""
self["Water Storage Tank Name"] = value
class HumidifierSteamGas(DataObject):
""" Corresponds to IDD object `Humidifier:Steam:Gas`
Natural gas fired steam humidifier with optional blower fan.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated capacity',
{'name': u'Rated Capacity',
'pyname': u'rated_capacity',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'rated gas use rate',
{'name': u'Rated Gas Use Rate',
'pyname': u'rated_gas_use_rate',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'thermal efficiency',
{'name': u'Thermal Efficiency',
'pyname': u'thermal_efficiency',
'default': 0.8,
'minimum>': 0.0,
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'dimensionless'}),
(u'thermal efficiency modifier curve name',
{'name': u'Thermal Efficiency Modifier Curve Name',
'pyname': u'thermal_efficiency_modifier_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated fan power',
{'name': u'Rated Fan Power',
'pyname': u'rated_fan_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'auxiliary electric power',
{'name': u'Auxiliary Electric Power',
'pyname': u'auxiliary_electric_power',
'default': 0.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'air inlet node name',
{'name': u'Air Inlet Node Name',
'pyname': u'air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'air outlet node name',
{'name': u'Air Outlet Node Name',
'pyname': u'air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'water storage tank name',
{'name': u'Water Storage Tank Name',
'pyname': u'water_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'inlet water temperature option',
{'name': u'Inlet Water Temperature Option',
'pyname': u'inlet_water_temperature_option',
'default': u'FixedInletWaterTemperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'FixedInletWaterTemperature',
u'VariableInletWaterTemperature'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Humidifier:Steam:Gas',
'pyname': u'HumidifierSteamGas',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def rated_capacity(self):
"""field `Rated Capacity`
| Capacity is m3/s of water at 5.05 C
| The nominal full capacity water addition rate in m3/s of water at 5.05 C
| Units: m3/s
| IP-Units: gal/min
Args:
value (float or "Autosize"): value for IDD Field `Rated Capacity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_capacity` or None if not set
"""
return self["Rated Capacity"]
@rated_capacity.setter
def rated_capacity(self, value=None):
"""Corresponds to IDD field `Rated Capacity`"""
self["Rated Capacity"] = value
@property
def rated_gas_use_rate(self):
"""field `Rated Gas Use Rate`
| if auto-sized, the rated gas use rate is calculated from the rated
| capacity and enthalpy rise of water from 20.0C to 100.0C steam and user
| input thermal efficiency value specified in the next input field. If this
| input field is hard-sized and Inlet Water Temperature Option input field is
| selected as FixedInletWaterTemperature, then the thermal efficiency input
| field will not be used or else if the Inlet Water Temperature Option input
| field is selected as VariableInletWaterTemperature, then the thermal efficiency
| input value is overridden by a value calculated from the capacity, rated gas use
| rate and design condition.
| Units: W
| IP-Units: W
Args:
value (float or "Autosize"): value for IDD Field `Rated Gas Use Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_gas_use_rate` or None if not set
"""
return self["Rated Gas Use Rate"]
@rated_gas_use_rate.setter
def rated_gas_use_rate(self, value=None):
"""Corresponds to IDD field `Rated Gas Use Rate`"""
self["Rated Gas Use Rate"] = value
@property
def thermal_efficiency(self):
"""field `Thermal Efficiency`
| Based on the higher heating value of fuel.
| If "Rated Gas Use Rate" in the field above is not auto-sized and the Inlet Water
| Temperature Option input field is specified as FixedInletWaterTemperature, then the
| thermal efficiency specified will not be used in the calculation, or else if the
| Inlet Water Temperature Option input field is selected as VariableInletWaterTemperature,
| then the thermal efficiency value is overridden by a value calculated from the capacity,
| rated gas use rate and design condition.
| Units: dimensionless
| Default value: 0.8
| value <= 1.0
Args:
value (float): value for IDD Field `Thermal Efficiency`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `thermal_efficiency` or None if not set
"""
return self["Thermal Efficiency"]
@thermal_efficiency.setter
def thermal_efficiency(self, value=0.8):
"""Corresponds to IDD field `Thermal Efficiency`"""
self["Thermal Efficiency"] = value
@property
def thermal_efficiency_modifier_curve_name(self):
"""field `Thermal Efficiency Modifier Curve Name`
| Linear, Quadratic and Cubic efficiency curves are solely a function of PLR.
| Linear = C1 + C2*PLR
| Quadratic = C1 + C2*PLR + C3*PLR^2
| Cubic = C1 + C2*PLR + C3*PLR^2 + C4*PLR^3
| This is thermal efficiency modifier curve name of gas fired steam humidifier.
| This curve is normalized, i.e., curve output value at rated condition is 1.0.
Args:
value (str): value for IDD Field `Thermal Efficiency Modifier Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `thermal_efficiency_modifier_curve_name` or None if not set
"""
return self["Thermal Efficiency Modifier Curve Name"]
@thermal_efficiency_modifier_curve_name.setter
def thermal_efficiency_modifier_curve_name(self, value=None):
"""Corresponds to IDD field `Thermal Efficiency Modifier Curve Name`"""
self["Thermal Efficiency Modifier Curve Name"] = value
@property
def rated_fan_power(self):
"""field `Rated Fan Power`
| The nominal full capacity electric power input to the blower fan in Watts. If no
| blower fan is required to inject the dry steam to the supply air stream, then
| this input field is set to zero.
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rated Fan Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rated_fan_power` or None if not set
"""
return self["Rated Fan Power"]
@rated_fan_power.setter
def rated_fan_power(self, value=None):
"""Corresponds to IDD field `Rated Fan Power`"""
self["Rated Fan Power"] = value
@property
def auxiliary_electric_power(self):
"""field `Auxiliary Electric Power`
| The auxiliary electric power input in watts. This amount of power will be consumed
| whenever the unit is available (as defined by the availability schedule). This
| electric power is used for control purpose only.
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Auxiliary Electric Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `auxiliary_electric_power` or None if not set
"""
return self["Auxiliary Electric Power"]
@auxiliary_electric_power.setter
def auxiliary_electric_power(self, value=None):
"""Corresponds to IDD field `Auxiliary Electric Power`"""
self["Auxiliary Electric Power"] = value
@property
def air_inlet_node_name(self):
"""field `Air Inlet Node Name`
Args:
value (str): value for IDD Field `Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_inlet_node_name` or None if not set
"""
return self["Air Inlet Node Name"]
@air_inlet_node_name.setter
def air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Inlet Node Name`"""
self["Air Inlet Node Name"] = value
@property
def air_outlet_node_name(self):
"""field `Air Outlet Node Name`
Args:
value (str): value for IDD Field `Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_outlet_node_name` or None if not set
"""
return self["Air Outlet Node Name"]
@air_outlet_node_name.setter
def air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Outlet Node Name`"""
self["Air Outlet Node Name"] = value
@property
def water_storage_tank_name(self):
"""field `Water Storage Tank Name`
Args:
value (str): value for IDD Field `Water Storage Tank Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `water_storage_tank_name` or None if not set
"""
return self["Water Storage Tank Name"]
@water_storage_tank_name.setter
def water_storage_tank_name(self, value=None):
"""Corresponds to IDD field `Water Storage Tank Name`"""
self["Water Storage Tank Name"] = value
@property
def inlet_water_temperature_option(self):
"""field `Inlet Water Temperature Option`
| The inlet water temperature can be fixed at 20C as it is done for electric steam
| humidifier or it can be allowed to vary with temperature of the water source.
| Currently allowed water sources are main water or water storage tank in water use objects.
| if FixedInletWaterTemperature is specified, then a fixed 20C water temperature will be
| used, or else if VariableInletWaterTemperature is specified, then inlet water will vary
        | depending on the source water temperature. If this input field is left blank, then a fixed
        | inlet water temperature of 20C will be assumed.
| Default value: FixedInletWaterTemperature
Args:
value (str): value for IDD Field `Inlet Water Temperature Option`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `inlet_water_temperature_option` or None if not set
"""
return self["Inlet Water Temperature Option"]
@inlet_water_temperature_option.setter
def inlet_water_temperature_option(
self,
value="FixedInletWaterTemperature"):
"""Corresponds to IDD field `Inlet Water Temperature Option`"""
self["Inlet Water Temperature Option"] = value
class DehumidifierDesiccantNoFans(DataObject):
""" Corresponds to IDD object `Dehumidifier:Desiccant:NoFans`
This object models a solid desiccant dehumidifier. The process
air stream is the air which is dehumidified. The regeneration air
stream is the air which is heated to regenerate the desiccant.
This object determines the process air outlet conditions, the
load on the regeneration heating coil, the electric power consumption
for the wheel rotor motor, and the regeneration air fan mass flow rate.
All other heat exchangers are modeled as separate objects connected
to the inlet and outlet nodes of the dehumidifier. The solid
desiccant dehumidifier is typically used in an AirLoopHVAC:OutdoorAirSystem,
but can also be specified in any AirLoopHVAC.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'process air inlet node name',
{'name': u'Process Air Inlet Node Name',
'pyname': u'process_air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'process air outlet node name',
{'name': u'Process Air Outlet Node Name',
'pyname': u'process_air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration air inlet node name',
{'name': u'Regeneration Air Inlet Node Name',
'pyname': u'regeneration_air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration fan inlet node name',
{'name': u'Regeneration Fan Inlet Node Name',
'pyname': u'regeneration_fan_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'control type',
{'name': u'Control Type',
'pyname': u'control_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'LeavingMaximumHumidityRatioSetpoint',
u'SystemNodeMaximumHumidityRatioSetpoint'],
'autocalculatable': False,
'type': 'alpha'}),
(u'leaving maximum humidity ratio setpoint',
{'name': u'Leaving Maximum Humidity Ratio Setpoint',
'pyname': u'leaving_maximum_humidity_ratio_setpoint',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kgWater/kgDryAir'}),
(u'nominal process air flow rate',
{'name': u'Nominal Process Air Flow Rate',
'pyname': u'nominal_process_air_flow_rate',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'nominal process air velocity',
{'name': u'Nominal Process Air Velocity',
'pyname': u'nominal_process_air_velocity',
'minimum>': 0.0,
'maximum': 6.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm/s'}),
(u'rotor power',
{'name': u'Rotor Power',
'pyname': u'rotor_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'regeneration coil object type',
{'name': u'Regeneration Coil Object Type',
'pyname': u'regeneration_coil_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Heating:Electric',
u'Coil:Heating:Gas',
u'Coil:Heating:Water',
u'Coil:Heating:Steam'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration coil name',
{'name': u'Regeneration Coil Name',
'pyname': u'regeneration_coil_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration fan object type',
{'name': u'Regeneration Fan Object Type',
'pyname': u'regeneration_fan_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Fan:VariableVolume',
u'Fan:ConstantVolume'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration fan name',
{'name': u'Regeneration Fan Name',
'pyname': u'regeneration_fan_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'performance model type',
{'name': u'Performance Model Type',
'pyname': u'performance_model_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Default',
u'UserCurves'],
'autocalculatable': False,
'type': 'alpha'}),
(u'leaving dry-bulb function of entering dry-bulb and humidity ratio curve name',
{'name': u'Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving dry-bulb function of air velocity curve name',
{'name': u'Leaving Dry-Bulb Function of Air Velocity Curve Name',
'pyname': u'leaving_drybulb_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving humidity ratio function of entering dry-bulb and humidity ratio curve name',
{'name': u'Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving humidity ratio function of air velocity curve name',
{'name': u'Leaving Humidity Ratio Function of Air Velocity Curve Name',
'pyname': u'leaving_humidity_ratio_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration energy function of entering dry-bulb and humidity ratio curve name',
{'name': u'Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration energy function of air velocity curve name',
{'name': u'Regeneration Energy Function of Air Velocity Curve Name',
'pyname': u'regeneration_energy_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration velocity function of entering dry-bulb and humidity ratio curve name',
{'name': u'Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration velocity function of air velocity curve name',
{'name': u'Regeneration Velocity Function of Air Velocity Curve Name',
'pyname': u'regeneration_velocity_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'nominal regeneration temperature',
{'name': u'Nominal Regeneration Temperature',
'pyname': u'nominal_regeneration_temperature',
'maximum': 250.0,
'required-field': False,
'autosizable': False,
'minimum': 40.0,
'autocalculatable': False,
'type': u'real',
'unit': u'C'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Dehumidifier:Desiccant:NoFans',
'pyname': u'DehumidifierDesiccantNoFans',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def process_air_inlet_node_name(self):
"""field `Process Air Inlet Node Name`
| This is the node entering the process side of the desiccant wheel.
Args:
value (str): value for IDD Field `Process Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `process_air_inlet_node_name` or None if not set
"""
return self["Process Air Inlet Node Name"]
@process_air_inlet_node_name.setter
def process_air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Process Air Inlet Node Name`"""
self["Process Air Inlet Node Name"] = value
@property
def process_air_outlet_node_name(self):
"""field `Process Air Outlet Node Name`
| This is the node leaving the process side of the desiccant wheel.
Args:
value (str): value for IDD Field `Process Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `process_air_outlet_node_name` or None if not set
"""
return self["Process Air Outlet Node Name"]
@process_air_outlet_node_name.setter
def process_air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Process Air Outlet Node Name`"""
self["Process Air Outlet Node Name"] = value
@property
def regeneration_air_inlet_node_name(self):
"""field `Regeneration Air Inlet Node Name`
| This is the node entering the regeneration side of the desiccant wheel
| after the regeneration coil.
Args:
value (str): value for IDD Field `Regeneration Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_inlet_node_name` or None if not set
"""
return self["Regeneration Air Inlet Node Name"]
@regeneration_air_inlet_node_name.setter
def regeneration_air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Inlet Node Name`"""
self["Regeneration Air Inlet Node Name"] = value
@property
def regeneration_fan_inlet_node_name(self):
"""field `Regeneration Fan Inlet Node Name`
        | Node for air entering the regeneration fan; the mass flow is set
        | by the desiccant dehumidifier module.
Args:
value (str): value for IDD Field `Regeneration Fan Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_inlet_node_name` or None if not set
"""
return self["Regeneration Fan Inlet Node Name"]
@regeneration_fan_inlet_node_name.setter
def regeneration_fan_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Inlet Node Name`"""
self["Regeneration Fan Inlet Node Name"] = value
@property
def control_type(self):
"""field `Control Type`
| Type of setpoint control:
| LeavingMaximumHumidityRatioSetpoint means that the unit is controlled
| to deliver air at the Leaving Max Humidity Ratio Setpoint (see below),
| SystemNodeMaximumHumidityRatioSetpoint means that the leaving humidity
| ratio setpoint is the System Node Humidity Ratio Max property
| of the Process Air Outlet Node. A Setpoint
| object must be used to control this setpoint.
| Both control types use bypass dampers to prevent over drying.
Args:
value (str): value for IDD Field `Control Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_type` or None if not set
"""
return self["Control Type"]
@control_type.setter
def control_type(self, value=None):
"""Corresponds to IDD field `Control Type`"""
self["Control Type"] = value
@property
def leaving_maximum_humidity_ratio_setpoint(self):
"""field `Leaving Maximum Humidity Ratio Setpoint`
| Fixed setpoint for maximum process air leaving humidity ratio
| Applicable only when Control Type = LeavingMaximumHumidityRatioSetpoint.
| Units: kgWater/kgDryAir
Args:
value (float): value for IDD Field `Leaving Maximum Humidity Ratio Setpoint`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `leaving_maximum_humidity_ratio_setpoint` or None if not set
"""
return self["Leaving Maximum Humidity Ratio Setpoint"]
@leaving_maximum_humidity_ratio_setpoint.setter
def leaving_maximum_humidity_ratio_setpoint(self, value=None):
"""Corresponds to IDD field `Leaving Maximum Humidity Ratio
Setpoint`"""
self["Leaving Maximum Humidity Ratio Setpoint"] = value
@property
def nominal_process_air_flow_rate(self):
"""field `Nominal Process Air Flow Rate`
| Process air flow rate at nominal conditions
| Units: m3/s
Args:
value (float): value for IDD Field `Nominal Process Air Flow Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_process_air_flow_rate` or None if not set
"""
return self["Nominal Process Air Flow Rate"]
@nominal_process_air_flow_rate.setter
def nominal_process_air_flow_rate(self, value=None):
"""Corresponds to IDD field `Nominal Process Air Flow Rate`"""
self["Nominal Process Air Flow Rate"] = value
@property
def nominal_process_air_velocity(self):
"""field `Nominal Process Air Velocity`
| Process air velocity at nominal flow
| When using Performance Model Type of Default, must be 2.032 to 4.064 m/s (400 to 800 fpm)
| Units: m/s
| value <= 6.0
Args:
value (float): value for IDD Field `Nominal Process Air Velocity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_process_air_velocity` or None if not set
"""
return self["Nominal Process Air Velocity"]
@nominal_process_air_velocity.setter
def nominal_process_air_velocity(self, value=None):
"""Corresponds to IDD field `Nominal Process Air Velocity`"""
self["Nominal Process Air Velocity"] = value
@property
def rotor_power(self):
"""field `Rotor Power`
| Power input to wheel rotor motor
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rotor Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rotor_power` or None if not set
"""
return self["Rotor Power"]
@rotor_power.setter
def rotor_power(self, value=None):
"""Corresponds to IDD field `Rotor Power`"""
self["Rotor Power"] = value
@property
def regeneration_coil_object_type(self):
"""field `Regeneration Coil Object Type`
| heating coil type
| works with gas, electric, hot water and steam heating coils
Args:
value (str): value for IDD Field `Regeneration Coil Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_coil_object_type` or None if not set
"""
return self["Regeneration Coil Object Type"]
@regeneration_coil_object_type.setter
def regeneration_coil_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Coil Object Type`"""
self["Regeneration Coil Object Type"] = value
@property
def regeneration_coil_name(self):
"""field `Regeneration Coil Name`
| Name of heating coil object for regeneration air
Args:
value (str): value for IDD Field `Regeneration Coil Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_coil_name` or None if not set
"""
return self["Regeneration Coil Name"]
@regeneration_coil_name.setter
def regeneration_coil_name(self, value=None):
"""Corresponds to IDD field `Regeneration Coil Name`"""
self["Regeneration Coil Name"] = value
@property
def regeneration_fan_object_type(self):
"""field `Regeneration Fan Object Type`
| Type of fan object for regeneration air. When using the Default
| Performance Model Type (see below), only Fan:VariableVolume is valid.
Args:
value (str): value for IDD Field `Regeneration Fan Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_object_type` or None if not set
"""
return self["Regeneration Fan Object Type"]
@regeneration_fan_object_type.setter
def regeneration_fan_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Object Type`"""
self["Regeneration Fan Object Type"] = value
@property
def regeneration_fan_name(self):
"""field `Regeneration Fan Name`
| Name of fan object for regeneration air
Args:
value (str): value for IDD Field `Regeneration Fan Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_name` or None if not set
"""
return self["Regeneration Fan Name"]
@regeneration_fan_name.setter
def regeneration_fan_name(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Name`"""
self["Regeneration Fan Name"] = value
@property
def performance_model_type(self):
"""field `Performance Model Type`
| Specifies whether the default performance model or user-specified
| curves should be used to model the performance. The default model
| is a generic solid desiccant wheel using performance curves of the form:
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*vel + C7*vel**2
| + C8*edb*ew + C9*edb**2*ew**2 + C10*edb*vel + C11*edb**2*vel**2
| + C12*ew*vel + C13*ew**2*vel**2 + C14*ALOG(edb) + C15*ALOG(ew) + C16*ALOG(vel)
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
| vel = process air velocity [m/s]
| If UserCurves are specified, then performance is calculated as follows:
| Leaving Dry-Bulb = (Leaving Dry-Bulb fTW Curve) * (Leaving Dry-Bulb fV Curve)
| Leaving Humidity Ratio = (Leaving Humidity Ratio fTW Curve) * (Leaving Humidity Ratio fV Curve)
| Regen Energy = (Regen Energy fTW Curve) * (Regen Energy fV Curve)
| Regen Velocity = (Regen Velocity fTW Curve) * (Regen Velocity fV Curve)
Args:
value (str): value for IDD Field `Performance Model Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `performance_model_type` or None if not set
"""
return self["Performance Model Type"]
@performance_model_type.setter
def performance_model_type(self, value=None):
"""Corresponds to IDD field `Performance Model Type`"""
self["Performance Model Type"] = value
@property
def leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Leaving dry-bulb of process air as a function of entering dry-bulb
| and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def leaving_drybulb_function_of_air_velocity_curve_name(self):
"""field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
| Leaving dry-bulb of process air as a function of air velocity,
| quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_drybulb_function_of_air_velocity_curve_name` or None if not set
"""
return self["Leaving Dry-Bulb Function of Air Velocity Curve Name"]
@leaving_drybulb_function_of_air_velocity_curve_name.setter
def leaving_drybulb_function_of_air_velocity_curve_name(self, value=None):
""" Corresponds to IDD field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
"""
self["Leaving Dry-Bulb Function of Air Velocity Curve Name"] = value
@property
def leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Leaving humidity ratio of process air as a function of entering dry-bulb
| and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def leaving_humidity_ratio_function_of_air_velocity_curve_name(self):
"""field `Leaving Humidity Ratio Function of Air Velocity Curve Name`
| Leaving humidity ratio of process air as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Leaving Humidity Ratio Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_humidity_ratio_function_of_air_velocity_curve_name` or None if not set
"""
return self[
"Leaving Humidity Ratio Function of Air Velocity Curve Name"]
@leaving_humidity_ratio_function_of_air_velocity_curve_name.setter
def leaving_humidity_ratio_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Leaving Humidity Ratio Function of Air
Velocity Curve Name`"""
self[
"Leaving Humidity Ratio Function of Air Velocity Curve Name"] = value
@property
def regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Regeneration energy [J/kg of water removed] as a function of
| entering dry-bulb and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def regeneration_energy_function_of_air_velocity_curve_name(self):
"""field `Regeneration Energy Function of Air Velocity Curve Name`
| Regeneration energy [J/kg of water removed] as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Regeneration Energy Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_energy_function_of_air_velocity_curve_name` or None if not set
"""
return self["Regeneration Energy Function of Air Velocity Curve Name"]
@regeneration_energy_function_of_air_velocity_curve_name.setter
def regeneration_energy_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Regeneration Energy Function of Air
Velocity Curve Name`"""
self["Regeneration Energy Function of Air Velocity Curve Name"] = value
@property
def regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Regeneration velocity [m/s] as a function of
| entering dry-bulb and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def regeneration_velocity_function_of_air_velocity_curve_name(self):
"""field `Regeneration Velocity Function of Air Velocity Curve Name`
| Regeneration velocity [m/s] as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Regeneration Velocity Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_velocity_function_of_air_velocity_curve_name` or None if not set
"""
return self[
"Regeneration Velocity Function of Air Velocity Curve Name"]
@regeneration_velocity_function_of_air_velocity_curve_name.setter
def regeneration_velocity_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Regeneration Velocity Function of Air
Velocity Curve Name`"""
self[
"Regeneration Velocity Function of Air Velocity Curve Name"] = value
@property
def nominal_regeneration_temperature(self):
"""field `Nominal Regeneration Temperature`
| Nominal regen temperature upon which the regen energy modifier
        | curve is based. Not used if Default is chosen for the field Performance Model Type.
| 121 C is a commonly used value.
| Units: C
| value >= 40.0
| value <= 250.0
Args:
value (float): value for IDD Field `Nominal Regeneration Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_regeneration_temperature` or None if not set
"""
return self["Nominal Regeneration Temperature"]
@nominal_regeneration_temperature.setter
def nominal_regeneration_temperature(self, value=None):
"""Corresponds to IDD field `Nominal Regeneration Temperature`"""
self["Nominal Regeneration Temperature"] = value
class DehumidifierDesiccantSystem(DataObject):
""" Corresponds to IDD object `Dehumidifier:Desiccant:System`
This compound object models a desiccant heat exchanger, an optional
heater, and associated fans. The process air stream is the air which
is dehumidified. The regeneration air stream is the air which is
heated to regenerate the desiccant. The desiccant heat exchanger
transfers both sensible and latent energy between the process and
regeneration air streams. The desiccant dehumidifier is typically used
in an AirLoopHVAC:OutdoorAirSystem, but can also be specified in any AirLoopHVAC.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'desiccant heat exchanger object type',
{'name': u'Desiccant Heat Exchanger Object Type',
'pyname': u'desiccant_heat_exchanger_object_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'HeatExchanger:Desiccant:BalancedFlow'],
'autocalculatable': False,
'type': 'alpha'}),
(u'desiccant heat exchanger name',
{'name': u'Desiccant Heat Exchanger Name',
'pyname': u'desiccant_heat_exchanger_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'sensor node name',
{'name': u'Sensor Node Name',
'pyname': u'sensor_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration air fan object type',
{'name': u'Regeneration Air Fan Object Type',
'pyname': u'regeneration_air_fan_object_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Fan:OnOff',
u'Fan:ConstantVolume'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air fan name',
{'name': u'Regeneration Air Fan Name',
'pyname': u'regeneration_air_fan_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration air fan placement',
{'name': u'Regeneration Air Fan Placement',
'pyname': u'regeneration_air_fan_placement',
'default': u'DrawThrough',
'required-field': False,
'autosizable': False,
'accepted-values': [u'BlowThrough',
u'DrawThrough'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air heater object type',
{'name': u'Regeneration Air Heater Object Type',
'pyname': u'regeneration_air_heater_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Heating:Electric',
u'Coil:Heating:Gas',
u'Coil:Heating:Water',
u'Coil:Heating:Steam'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air heater name',
{'name': u'Regeneration Air Heater Name',
'pyname': u'regeneration_air_heater_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration inlet air setpoint temperature',
{'name': u'Regeneration Inlet Air Setpoint Temperature',
'pyname': u'regeneration_inlet_air_setpoint_temperature',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'companion cooling coil object type',
{'name': u'Companion Cooling Coil Object Type',
'pyname': u'companion_cooling_coil_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Cooling:DX:SingleSpeed',
u'Coil:Cooling:DX:TwoStageWithHumidityControlMode'],
'autocalculatable': False,
'type': 'alpha'}),
(u'companion cooling coil name',
{'name': u'Companion Cooling Coil Name',
'pyname': u'companion_cooling_coil_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'companion cooling coil upstream of dehumidifier process inlet',
{'name': u'Companion Cooling Coil Upstream of Dehumidifier Process Inlet',
'pyname': u'companion_cooling_coil_upstream_of_dehumidifier_process_inlet',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'companion coil regeneration air heating',
{'name': u'Companion Coil Regeneration Air Heating',
'pyname': u'companion_coil_regeneration_air_heating',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'exhaust fan maximum flow rate',
{'name': u'Exhaust Fan Maximum Flow Rate',
'pyname': u'exhaust_fan_maximum_flow_rate',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'exhaust fan maximum power',
{'name': u'Exhaust Fan Maximum Power',
'pyname': u'exhaust_fan_maximum_power',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'exhaust fan power curve name',
{'name': u'Exhaust Fan Power Curve Name',
'pyname': u'exhaust_fan_power_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 8,
'name': u'Dehumidifier:Desiccant:System',
'pyname': u'DehumidifierDesiccantSystem',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def desiccant_heat_exchanger_object_type(self):
"""field `Desiccant Heat Exchanger Object Type`
Args:
value (str): value for IDD Field `Desiccant Heat Exchanger Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `desiccant_heat_exchanger_object_type` or None if not set
"""
return self["Desiccant Heat Exchanger Object Type"]
@desiccant_heat_exchanger_object_type.setter
def desiccant_heat_exchanger_object_type(self, value=None):
"""Corresponds to IDD field `Desiccant Heat Exchanger Object Type`"""
self["Desiccant Heat Exchanger Object Type"] = value
@property
def desiccant_heat_exchanger_name(self):
"""field `Desiccant Heat Exchanger Name`
Args:
value (str): value for IDD Field `Desiccant Heat Exchanger Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `desiccant_heat_exchanger_name` or None if not set
"""
return self["Desiccant Heat Exchanger Name"]
@desiccant_heat_exchanger_name.setter
def desiccant_heat_exchanger_name(self, value=None):
"""Corresponds to IDD field `Desiccant Heat Exchanger Name`"""
self["Desiccant Heat Exchanger Name"] = value
@property
def sensor_node_name(self):
"""field `Sensor Node Name`
Args:
value (str): value for IDD Field `Sensor Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `sensor_node_name` or None if not set
"""
return self["Sensor Node Name"]
@sensor_node_name.setter
def sensor_node_name(self, value=None):
"""Corresponds to IDD field `Sensor Node Name`"""
self["Sensor Node Name"] = value
@property
def regeneration_air_fan_object_type(self):
"""field `Regeneration Air Fan Object Type`
Args:
value (str): value for IDD Field `Regeneration Air Fan Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_object_type` or None if not set
"""
return self["Regeneration Air Fan Object Type"]
@regeneration_air_fan_object_type.setter
def regeneration_air_fan_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Air Fan Object Type`"""
self["Regeneration Air Fan Object Type"] = value
@property
def regeneration_air_fan_name(self):
"""field `Regeneration Air Fan Name`
Args:
value (str): value for IDD Field `Regeneration Air Fan Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_name` or None if not set
"""
return self["Regeneration Air Fan Name"]
@regeneration_air_fan_name.setter
def regeneration_air_fan_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Fan Name`"""
self["Regeneration Air Fan Name"] = value
@property
def regeneration_air_fan_placement(self):
"""field `Regeneration Air Fan Placement`
| Default value: DrawThrough
Args:
value (str): value for IDD Field `Regeneration Air Fan Placement`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_placement` or None if not set
"""
return self["Regeneration Air Fan Placement"]
@regeneration_air_fan_placement.setter
def regeneration_air_fan_placement(self, value="DrawThrough"):
"""Corresponds to IDD field `Regeneration Air Fan Placement`"""
self["Regeneration Air Fan Placement"] = value
@property
def regeneration_air_heater_object_type(self):
"""field `Regeneration Air Heater Object Type`
| works with gas, electric, hot water and steam heating coils
Args:
value (str): value for IDD Field `Regeneration Air Heater Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_heater_object_type` or None if not set
"""
return self["Regeneration Air Heater Object Type"]
@regeneration_air_heater_object_type.setter
def regeneration_air_heater_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Air Heater Object Type`"""
self["Regeneration Air Heater Object Type"] = value
@property
def regeneration_air_heater_name(self):
"""field `Regeneration Air Heater Name`
Args:
value (str): value for IDD Field `Regeneration Air Heater Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_heater_name` or None if not set
"""
return self["Regeneration Air Heater Name"]
@regeneration_air_heater_name.setter
def regeneration_air_heater_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Heater Name`"""
self["Regeneration Air Heater Name"] = value
@property
def regeneration_inlet_air_setpoint_temperature(self):
"""field `Regeneration Inlet Air Setpoint Temperature`
| Units: C
Args:
value (float): value for IDD Field `Regeneration Inlet Air Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `regeneration_inlet_air_setpoint_temperature` or None if not set
"""
return self["Regeneration Inlet Air Setpoint Temperature"]
@regeneration_inlet_air_setpoint_temperature.setter
def regeneration_inlet_air_setpoint_temperature(self, value=None):
"""Corresponds to IDD field `Regeneration Inlet Air Setpoint
Temperature`"""
self["Regeneration Inlet Air Setpoint Temperature"] = value
@property
def companion_cooling_coil_object_type(self):
"""field `Companion Cooling Coil Object Type`
Args:
value (str): value for IDD Field `Companion Cooling Coil Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_object_type` or None if not set
"""
return self["Companion Cooling Coil Object Type"]
@companion_cooling_coil_object_type.setter
def companion_cooling_coil_object_type(self, value=None):
"""Corresponds to IDD field `Companion Cooling Coil Object Type`"""
self["Companion Cooling Coil Object Type"] = value
@property
def companion_cooling_coil_name(self):
"""field `Companion Cooling Coil Name`
Args:
value (str): value for IDD Field `Companion Cooling Coil Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_name` or None if not set
"""
return self["Companion Cooling Coil Name"]
@companion_cooling_coil_name.setter
def companion_cooling_coil_name(self, value=None):
"""Corresponds to IDD field `Companion Cooling Coil Name`"""
self["Companion Cooling Coil Name"] = value
@property
def companion_cooling_coil_upstream_of_dehumidifier_process_inlet(self):
"""field `Companion Cooling Coil Upstream of Dehumidifier Process
Inlet`
| Select Yes if the companion cooling coil is located directly upstream
| of the desiccant heat exchanger's process air inlet node.
| Default value: No
Args:
value (str): value for IDD Field `Companion Cooling Coil Upstream of Dehumidifier Process Inlet`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_upstream_of_dehumidifier_process_inlet` or None if not set
"""
return self[
"Companion Cooling Coil Upstream of Dehumidifier Process Inlet"]
@companion_cooling_coil_upstream_of_dehumidifier_process_inlet.setter
def companion_cooling_coil_upstream_of_dehumidifier_process_inlet(
self,
value="No"):
"""Corresponds to IDD field `Companion Cooling Coil Upstream of
Dehumidifier Process Inlet`"""
self[
"Companion Cooling Coil Upstream of Dehumidifier Process Inlet"] = value
@property
def companion_coil_regeneration_air_heating(self):
"""field `Companion Coil Regeneration Air Heating`
| Default value: No
Args:
value (str): value for IDD Field `Companion Coil Regeneration Air Heating`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_coil_regeneration_air_heating` or None if not set
"""
return self["Companion Coil Regeneration Air Heating"]
@companion_coil_regeneration_air_heating.setter
def companion_coil_regeneration_air_heating(self, value="No"):
"""Corresponds to IDD field `Companion Coil Regeneration Air
Heating`"""
self["Companion Coil Regeneration Air Heating"] = value
@property
def exhaust_fan_maximum_flow_rate(self):
"""field `Exhaust Fan Maximum Flow Rate`
| Units: m3/s
Args:
value (float): value for IDD Field `Exhaust Fan Maximum Flow Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `exhaust_fan_maximum_flow_rate` or None if not set
"""
return self["Exhaust Fan Maximum Flow Rate"]
@exhaust_fan_maximum_flow_rate.setter
def exhaust_fan_maximum_flow_rate(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Maximum Flow Rate`"""
self["Exhaust Fan Maximum Flow Rate"] = value
@property
def exhaust_fan_maximum_power(self):
"""field `Exhaust Fan Maximum Power`
| Units: W
Args:
value (float): value for IDD Field `Exhaust Fan Maximum Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `exhaust_fan_maximum_power` or None if not set
"""
return self["Exhaust Fan Maximum Power"]
@exhaust_fan_maximum_power.setter
def exhaust_fan_maximum_power(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Maximum Power`"""
self["Exhaust Fan Maximum Power"] = value
@property
def exhaust_fan_power_curve_name(self):
"""field `Exhaust Fan Power Curve Name`
| Curve object type must be Curve:Quadratic or Curve:Cubic
Args:
value (str): value for IDD Field `Exhaust Fan Power Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `exhaust_fan_power_curve_name` or None if not set
"""
return self["Exhaust Fan Power Curve Name"]
@exhaust_fan_power_curve_name.setter
def exhaust_fan_power_curve_name(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Power Curve Name`"""
self["Exhaust Fan Power Curve Name"] = value
| 42.350949
| 135
| 0.513049
|
6b2a14f521612f3206484e1440e215292b1b9cf4
| 120
|
py
|
Python
|
abc/196/B.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | 2
|
2022-01-22T07:56:58.000Z
|
2022-01-24T00:29:37.000Z
|
abc/196/B.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
abc/196/B.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
import sys
# Read the input token and strip the trailing newline.
S = sys.stdin.readline().strip()
if S.count(".") == 0:
print(S)
else:
print(S.split(".")[0])
| 12
| 28
| 0.558333
|
444fb0a597e84bd2bad47b29c6fe93368fc93336
| 7,757
|
py
|
Python
|
novice_stakes/periodic/quadrature_rs.py
|
nedlrichards/novice_stakes
|
23038da6ee14ad8861938f7307e33b65e3835626
|
[
"MIT"
] | null | null | null |
novice_stakes/periodic/quadrature_rs.py
|
nedlrichards/novice_stakes
|
23038da6ee14ad8861938f7307e33b65e3835626
|
[
"MIT"
] | null | null | null |
novice_stakes/periodic/quadrature_rs.py
|
nedlrichards/novice_stakes
|
23038da6ee14ad8861938f7307e33b65e3835626
|
[
"MIT"
] | null | null | null |
"""
===================================================
Solve for with Fourier coefficents of scatter field
===================================================
Reflection coefficents for a general periodic surface.
"""
import numpy as np
from scipy.special import jv
from scipy.linalg import solve
import numexpr as ne
from math import pi
from . import Bragg
class QuadRs:
"""
Class for the computation of reflection coefficents using DFT
"""
def __init__(self, xaxis, zwave, zp_wave, c=1500, attn=0.):
"""Common parameters for cosine surface"""
self.xaxis = xaxis
self.zwave = zwave
self.zp_wave = zp_wave
self.DX = (xaxis[-1] - xaxis[0]) / (xaxis.size - 1)
self.L = self.DX * self.xaxis.size
self.bragg = Bragg(self.L, c, attn)
def ka(self, theta_inc, qs, facous):
"""
        Reflection coefficients are calculated from the incident field.
        theta_inc: scalar grazing angle of incident plane wave
        facous: frequency of acoustics, 1 / s
        qs: vector of Bragg scattering orders to include in the solution
        rs: vector of reflection coefficients
"""
a0, _, b0, _ = self.bragg.bragg_angles(theta_inc, qs, facous)
kacous = self.bragg.kacous(facous)
# normal derivative of pressure at surface
projection = np.dot(np.array([a0, b0]),
np.array([-self.zp_wave, np.ones_like(self.xaxis)]))
dpinc = -2j * projection * np.exp(-1j * b0 * self.zwave)
        # integrate surface integral for reflection coefficients
rs = self._r_from_dpdn(dpinc, theta_inc, qs, facous)
return rs
def rfm_1st(self, theta_inc, qs, facous):
"""
        Uses the Rayleigh-Fourier method to solve for plane wave coefficients.
        The HIE of the 1st kind is used to derive the system of equations.
        Fourier coefficients are calculated using rectangular quadrature.
        theta_inc: scalar grazing angle of incident plane wave
        facous: frequency of acoustics, 1 / s
        qs: vector of Bragg scattering orders to include in the solution
        rs: vector of reflection coefficients
"""
_, _, b0, bn = self.bragg.bragg_angles(theta_inc, qs, facous)
        # make sure there are enough orders in the calculation to stay below Nyquist
        if self.xaxis.size // 2 <= np.max(np.abs(qs)):
            raise ValueError('Increase sampling of surface')
# compute scattering orders, note this is not standard order!
ncompute = np.arange(self.xaxis.size) - (self.xaxis.size + 1) // 2
nstarti = int(np.where(ncompute == qs[0])[0])
nendi = int(np.where(ncompute == qs[-1])[0])
# compute Fourier series of incident pressure field
pinc = -np.exp(1j * b0 * self.zwave)
pinc_FT = np.fft.fftshift(np.fft.fft(pinc))
pinc_FT /= self.xaxis.size
        # scattered pressure field with unit amplitude reflection coefficients
phase = qs[None, :] * self.bragg.Kper * self.xaxis[:, None] \
- bn * self.zwave[:, None]
psca = np.exp(1j * phase)
psca_FT = np.fft.fftshift(np.fft.fft(psca, axis=0), axes=0)
psca_FT /= self.xaxis.size
# remove high order evanescent waves before inverse computation
pinc_FT = pinc_FT[nstarti: nendi + 1]
psca_FT = psca_FT[nstarti: nendi + 1, :]
rs = solve(psca_FT, pinc_FT)
return rs
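        # Editor's note: solve(psca_FT, pinc_FT) above enforces, order by order in
        # the truncated Fourier basis, psca_FT @ rs = pinc_FT; since pinc is built
        # with a leading minus sign, this is the pressure-release boundary condition
        # p_inc + p_sca = 0 written as a dense linear system for the reflection
        # coefficients rs.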
def psi_hie_1st(self, theta_inc, qs, facous):
"""
        Uses a Helmholtz integral equation (HIE) of the first kind to compute
        the normal derivative of the pressure field at the surface.
        theta_inc: scalar grazing angle of incident plane wave
        facous: frequency of acoustics, 1 / s
        num_n: accelerated sum for HIE includes 2N+1 terms per matrix entry
        phi: normal derivative of pressure field at the surface. Result is not
        scaled by amplitude of surface normal vector.
"""
L = self.L
Kper = 2 * pi / L
a0, an, b0, bn = self.bragg.bragg_angles(theta_inc, qs, facous)
kacous = self.bragg.kacous(facous)
# compute incident pressure field
p_inc = np.exp(1j * b0 * self.zwave)
        # compute position differences
dx = self.xaxis[:, None] - self.xaxis[None, :]
dz = np.abs(self.zwave[:, None] - self.zwave[None, :])
# compute main diagonal contribution
# difference between G and Ginfty
gd = np.sum(1j / (2 * L * bn)) \
- np.sum(1 / (4 * pi * np.abs(qs[qs != 0])))
        # Ginfty continuous contribution
gip_cont = np.log(2 * pi) + np.log(1 + self.zp_wave ** 2) / 2
gip_cont /= -(2 * pi)
# Ginfty singularity contribution
gip_sing = (np.log(np.abs(dx[:, 0]) / L) \
+ np.log(1 - np.abs(dx[:, 0]) / L))
gip_sing /= (2 * pi)
gip_sing = np.sum(gip_sing[1: ]) * self.DX + L / pi
# add all contributions for main diagonal
gdiag = (gd + gip_cont) * self.DX + gip_sing
# limit the size of each matrix
nq = (qs.size - 1) // 10
ier = np.array_split(np.arange(qs.size - 1), nq, axis=-1)
# compute naive series term
xs = dx[:, :, None]
zs = dz[:, :, None]
# treat q zero term as a special case
kzns = bn[None, None, qs != 0]
kxns = an[None, None, qs != 0]
qnes = qs[None, None, qs != 0]
# use qx / abs(qx) as a sign function
#nes = """1j * exp(1j * (bx * zs + qx * Kper * xs)) / (2 * L * bx) \
#- exp(-qx * a0 * zs / abs(qx)) \
#* exp(qx * Kper * (1j * xs - qx * zs / abs(qx))) \
#/ (4 * pi * abs(qx))"""
nes = """1j * exp(1j * (bx * zs + qx * Kper * xs)) / (2 * L * bx)"""
# start Gf with zero-th term
Gf = 1j * np.exp(1j * b0 * dz) / (2 * L * b0)
# Add non zero terms
for ix in ier:
ax = kxns[:, :, ix]
bx = kzns[:, :, ix]
qx = qnes[:, :, ix]
temp = ne.evaluate(nes)
temp = np.sum(temp, axis=-1)
Gf += temp
# compute positive asymptotic sum result
Gp_total = np.exp(-a0 * dz) * np.log(1 - np.exp(Kper * (1j * dx - dz)))
Gp_total /= -4 * pi
Gn_total = np.exp(a0 * dz) * np.log(1 - np.exp(-Kper * (1j * dx + dz)))
Gn_total /= -4 * pi
# sum results together to approximate final matrix
Gf += Gp_total + Gn_total
Gf *= self.DX
# solve for psi_total
Gf[np.diag_indices_from(Gf)] = gdiag
psi_1st = solve(Gf, -p_inc)
# integrate surface integral for reflection coefficents
rs = self._r_from_dpdn(psi_1st, theta_inc, qs, facous)
return rs
def _r_from_dpdn(self, dpdn, theta_inc, qvec, facous):
"""
        reflection coefficients calculated from the pressure normal derivative at the surface
"""
_, _, _, bn = self.bragg.bragg_angles(theta_inc, qvec, facous)
bn_ = bn[:, None]
q_ = qvec[:, None]
z_ = self.zwave[None, :]
x_ = self.xaxis[None, :]
K = self.bragg.Kper
# greens function at surface
#phase = (bn[:, None] * self.zwave[None, :]
#- qvec[:, None] * self.bragg.Kper * self.xaxis[None, :])
#gra = (1j / (2 * self.L)) * np.exp(-1j * phase) / bn[:, None]
#rs = -np.sum(dpdn * gra, axis=1) * self.DX
igrnd = (1j / (2 * self.L)) \
* ne.evaluate("exp(-1j * (bn_ * z_ - q_ * K * x_)) * dpdn / bn_")
        # integrate surface integral for reflection coefficients
rs = -igrnd.sum(axis=1) * self.DX
return rs
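# Editor's hedged usage sketch (assumes QuadRs is importable from the
# novice_stakes.periodic package; surface parameters are illustrative only):
#     import numpy as np
#     L = 1.0                                  # surface period, m
#     x = np.arange(512) * L / 512
#     z = 0.05 * np.cos(2 * np.pi * x / L)     # surface height, m
#     zp = -0.05 * (2 * np.pi / L) * np.sin(2 * np.pi * x / L)   # dz/dx
#     quad = QuadRs(x, z, zp)
#     rs = quad.rfm_1st(np.radians(15.), np.arange(-10, 11), 500.)   # 500 Hz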
| 37.839024
| 80
| 0.559108
|
b9c6e8f45946fcc2196c43d9275ba1b74c47fab1
| 12,449
|
py
|
Python
|
FEP/analysis/harmonic_rest_correction.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | 1
|
2020-04-16T05:10:33.000Z
|
2020-04-16T05:10:33.000Z
|
FEP/analysis/harmonic_rest_correction.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | 7
|
2020-03-16T16:14:28.000Z
|
2020-05-16T16:05:18.000Z
|
FEP/analysis/harmonic_rest_correction.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | null | null | null |
import os, sys, glob
import numpy as np
import xvg_tools
sys.path.append('../scripts')
from expanded_v3 import *
import argparse, textwrap
import pymbar # multistate Bennett acceptance ratio
from pymbar import timeseries # timeseries analysis
def calc_dG_rest(run_clone_datadir, mdp_file, outdir=None, maxlastgens=5, temperature=298.15, verbose=False):
"""
Returns the free energy of harmonic restraint in kT.
INPUT
run_clone_datadir - the pathname of the PROJ*/RUN*/CLONE* data in ~/server2/data.
Example: /home/server/server2/data/SVR2616698070/PROJ14727/RUN0/CLONE0
    mdp_file - the pathname of the *.mdp file that contains the force constant and equilibrium restraint distance
PARAMETERS
maxlastgens - the max last results* gens to use in the calculation. For instance,
if the trajectory goes out to results35, and maxlastgens = 3, then results33-35 will be used.
outdir - If specified, the pathname of a directory to write the energies, states, and distances.
If outdir=None (default), then no output files will be written.
temperature - the temperature in Kelvin
RETURNS
dG_rest - the free energy of harmonic restraint in kT.
dG_rest_sigma - the uncertainty (std dev) of dG_rest in kT
REQUIREMENTS
pymbar - see installation instructions at https://github.com/choderalab/pymbar
    NOTE: as of 2020-05-14, `$ conda install -c omnia pymbar` fails, but `$ pip install pymbar` works
"""
if outdir != None:
# Make an output directory if it doesn't exist
if not os.path.exists(outdir):
os.mkdir(outdir)
# Find all the results* directories in the PROJ*/RUN*/CLONE*
    # for each run/clone, grab the data in the dhdl.xvg and pullx.xvg in each gen
resultdirs = []
resultdirs1 = glob.glob( os.path.join(run_clone_datadir, 'results?') )
resultdirs1.sort()
resultdirs += resultdirs1
resultdirs2 = glob.glob( os.path.join(run_clone_datadir, 'results??') )
resultdirs2.sort()
resultdirs += resultdirs2
resultdirs3 = glob.glob( os.path.join(run_clone_datadir, 'results???') )
resultdirs3.sort()
resultdirs += resultdirs3
# test to see if the last results directory has any *.xvg files in it...
# if not, remove it -- it's empty!
if len(resultdirs) > 0:
if len(glob.glob(os.path.join(resultdirs[-1], '*.xvg'))) == 0:
resultdirs.pop()
# We will only analyze the last maxlastgens results
while len(resultdirs) > maxlastgens:
resultdirs.pop(0)
if verbose:
for resultdir in resultdirs:
print('\t',resultdir)
###### ENERGIES ########
result_dhdlfiles = [os.path.join(resultdir,'dhdl.xvg') for resultdir in resultdirs if os.path.exists(os.path.join(resultdir,'dhdl.xvg'))]
if len(result_dhdlfiles) > 0:
### Scrape and collect all the dhdl energies!
for resultdir in resultdirs[0:1]:
dhdl_xvgfile = os.path.join(resultdir, 'dhdl.xvg')
time_in_ps, states, energies = xvg_tools.get_dhdl(dhdl_xvgfile)
if verbose:
print(resultdir)
print('\ttime_in_ps', time_in_ps)
print('\tstates', states)
print('\tenergies.shape', energies.shape)
print('\tenergies', energies)
for resultdir in resultdirs[1:]:
dhdl_xvgfile = os.path.join(resultdir, 'dhdl.xvg')
more_time_in_ps, more_states, more_energies = xvg_tools.get_dhdl(dhdl_xvgfile)
time_in_ps = np.concatenate( (time_in_ps, more_time_in_ps[1:]), axis=0)
states = np.concatenate( (states, more_states[1:]), axis=0 )
energies = np.concatenate( (energies, more_energies[1:,:]), axis=0 )
if outdir != None:
states_outfile = os.path.join(outdir, 'states.npy')
np.save(states_outfile, states)
print('Wrote:', states_outfile)
energies_outfile = os.path.join(outdir, 'energies.npy')
np.save(energies_outfile, energies)
print('Wrote:', energies_outfile)
### Scrape and collect all the pullx distances!
for resultdir in resultdirs[0:1]:
pullx_xvgfile = os.path.join(resultdir, 'pullx.xvg')
times, distances = xvg_tools.get_distances(pullx_xvgfile)
if verbose:
print('distances', distances)
for resultdir in resultdirs[1:]:
pullx_xvgfile = os.path.join(resultdir, 'pullx.xvg')
more_times, more_distances = xvg_tools.get_distances(pullx_xvgfile)
times = np.concatenate( (times, more_times[1:]), axis=0)
distances = np.concatenate( (distances, more_distances[1:]), axis=0)
if outdir != None:
distances_outfile = os.path.join(outdir, 'distances.npy')
np.save(distances_outfile, distances)
print('Wrote:', distances_outfile)
if verbose:
print('Are all 40 intermediates sampled adequately?')
print('np.histogram(states, bins=np.arange(40))', np.histogram(states, bins=np.arange(40)))
##########################################
### Do the MBAR calculation here!
###########################################
    # Get the force constant and equilibrium restraint distance r0 from the mdp_file
e = expanded_ensemble_mdpfile()
e.read_parms_from_mdpfile(mdp_file, VERBOSE=verbose)
kvalue = float(e.pull_coord1_k)
r0 = float(e.pull_coord1_init)
if verbose:
print('kvalue', kvalue, 'r0', r0)
dG_rest, sigma_dG_rest = estimate_free_energy(states, energies, distances, kvalue=kvalue, r0=r0, temperature=temperature, verbose=verbose)
return dG_rest, sigma_dG_rest
def estimate_free_energy(states, energies, distances,
kvalue=800.0, r0=0.4, temperature=300., verbose=False):
"""Use MBAR to estimate the free energy vs. lambda.
N is the number of samples
K is the number of thermodynamic states
INPUTS
states - state indices in a numpy array of shape (N,)
energies - a numpy array of shape (N, K) with the dhdl info
distances - restraint distances numpy array of shape (N,)
PARAMETERS
    kvalue - the harmonic force constant in kJ/mol/nm^2 (Default: 800.0)
r0 - the restraint distance umbrella center, in nm.
temperature - in K (Default: 300.0)
verbose - print verbose output
    OUTPUT
    dG_rest, sigma_dG_rest - the restraint free energy and its uncertainty, in kT
    """
###########################
# Main
# Physical constants (in kJ)
kB = 1.381e-23 * 6.022e23 / 1000.0 # Boltzmann constant in kJ/mol/K
# In addition to the given K thermo ensembles with indices 0,..K-1,
# there is one more -- the *unbiased* ensemble with no harmonic restraint.
# -- let's make it state index K
K = energies.shape[1] + 1
unbiased_state_index = K - 1
# maximum number of snapshots/simulation:
N_max = energies.shape[0]
### TO DO in the future collect *all* run and clone energies and flatten
    T_k = np.ones(K, float) * temperature  # initial temperatures are all equal
beta = 1.0 / (kB * temperature) # inverse temperature of simulations (in 1/(kJ/mol))
if verbose:
print('beta', beta)
# Allocate storage for simulation data
N_k = np.zeros([K], np.int32) # N_k[k] is the number of snapshots from umbrella simulation k
# rvalues[k] is the spring center location (in nm) for umbrella simulation k
x_kn = np.zeros([K,N_max], np.float64) # x_kn[k,n] is the Val122_CA-TRP_CA distance (in nm) for snapshot n from umbrella simulation k
u_kn = np.zeros([K,N_max], np.float64) # u_kn[k,n] is the reduced potential energy without umbrella restraints of snapshot n of umbrella simulation k
g_k = np.zeros([K],np.float32);
### To do MBAR, we need to convert to data to u_kn format
if verbose:
print('np.argsort(states)', np.argsort(states))
Ind = np.argsort(states)
energies_sorted = energies[Ind,:]
states_sorted = states[Ind]
distances_sorted = distances[Ind]
for k in range(K-1):
# Count how many snapshots belong to each k
N_k[k] = np.where(states_sorted == k, 1, 0).sum()
# fill the energies
u_kn[k, :] = energies_sorted[:, k]
# for the last (unbiased) ensemble (with no samples), subtract the harmonic potential from the
    # state index 0 (totally coupled) energies
u_kn[K-1, :] = u_kn[0, :] - beta * (kvalue/2.0) * (distances_sorted - r0)**2
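    # Editor's note: the line above builds the reduced potential of the *unbiased*
    # state by removing the restraint energy from the fully coupled state,
    #   u_unbiased = u_coupled - beta * (k/2) * (r - r0)**2,
    # with k in kJ/mol/nm^2 (GROMACS pull units) and r, r0 in nm, so the
    # subtracted term is dimensionless like the rest of u_kn.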
if verbose:
print('u_kn', u_kn)
print('N_k', N_k)
# Initialize MBAR.
if verbose:
print('Running MBAR...')
mbar = pymbar.MBAR(u_kn, N_k, verbose=verbose) #, maximum_iterations=100000, initialize='BAR')
# MBAR(u_kn, N_k, maximum_iterations=10000, relative_tolerance=1e-07, verbose=False, initial_f_k=None, solver_protocol=None, initialize='zeros', x_kindices=None, **kwargs))
# Set zero of u_kn -- this is arbitrary.
u_kn -= u_kn.min()
results = mbar.getFreeEnergyDifferences()
Deltaf_ij, dDeltaf_ij = results[0], results[1]
if verbose:
print('Deltaf_ij, dDeltaf_ij', Deltaf_ij, dDeltaf_ij)
df, sigma_df = np.zeros(K), np.zeros(K)
for i in range(K-1):
# print('Deltaf_%d,%d = '%(i,i+1), Deltaf_ij[i,i+1], '+/-', dDeltaf_ij[i,i+1])
df[i+1] = df[i] + Deltaf_ij[i,i+1]
sigma_df[i+1] = dDeltaf_ij[0,i+1]
dG_rest = df[0] - df[-1] # THIS should be the same as +f[-1] of the MBAR object!
sigma_dG_rest = sigma_df[-1]
if verbose:
print('Delta f (norest, lam=1 -> rest, lam=1) =', df[0] - df[-1])
print('dDelta f (norest, lam=1 -> rest, lam=1) =', sigma_df[-1])
return dG_rest, sigma_dG_rest
#############################################
# Main
if __name__ == '__main__':
import argparse, textwrap
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Computes an MBAR estimate of the free energy of *adding* the harmonic restraint, $\Delta G_rest
EXAMPLE
$ python harmonic_rest_correction.py ~/server2/projects/p14727 ~/server2/data/SVR2616698070 14727 0 0
''' ))
    parser.add_argument('setupdir', type=str, help='The setup dir containing the RUN* subdirectories')
    parser.add_argument('datadir', type=str, help='The directory where project data is saved')
parser.add_argument('projnum', type=int, help='The project number')
parser.add_argument('run', type=int, help='The run number')
parser.add_argument('clone', type=int, help='The clone number')
parser.add_argument('-o', '--outdir', required=False)
    parser.add_argument('-n', '--maxlastgens', type=int, required=False, default=5, help='Limit data analyzed to the last n gens. (Default: 5)')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='If specified, print output verbosely')
args = parser.parse_args()
# process the input arguments
    index_file = os.path.join(args.setupdir, 'RUN%d/index.ndx' % args.run)
mdp_file = os.path.join(args.setupdir, 'RUN%d/prod.mdp'%args.run)
run_clone_datadir = os.path.join(args.datadir, 'PROJ%d/RUN%d/CLONE%d'%(args.projnum, args.run, args.clone))
if args.verbose:
print('### INPUTS from argparse ###')
print('args.setupdir', args.setupdir)
print('args.datadir', args.datadir)
print('args.projnum', args.projnum)
print('args.run', args.run)
print('args.clone', args.clone)
print('args.outdir', args.outdir)
print('args.maxlastgens', args.maxlastgens)
print('args.verbose', args.verbose)
print('--------')
print('\tindex_file =', index_file)
print('\tmdp_file =', mdp_file)
print('\trun_clone_datadir =', run_clone_datadir)
if len(sys.argv) < 4:
        parser.print_help()
sys.exit(1)
# Calculate the free energy of restraint
dG_rest, sigma_dG_rest = calc_dG_rest(run_clone_datadir, mdp_file, outdir=args.outdir, verbose=args.verbose, maxlastgens=args.maxlastgens)
print('# PROJ', args.projnum)
print('# RUN', args.run)
print('# CLONE', args.clone)
print('dG_rest = %6.6f +/- %6.6f kT.'%(dG_rest, sigma_dG_rest))
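# Editor's hedged usage sketch (paths are illustrative and mirror the example in
# the argparse description above):
#     $ python harmonic_rest_correction.py ~/server2/projects/p14727 \
#           ~/server2/data/SVR2616698070 14727 0 0 -o ./rest_out -n 5 --verbose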
| 39.025078
| 176
| 0.634669
|
9442e23a6d4681ce4a1b2e57984b386392d9ff71
| 12,417
|
py
|
Python
|
intersight/model/niatelemetry_smart_license.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/niatelemetry_smart_license.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/niatelemetry_smart_license.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.mo_base_complex_type import MoBaseComplexType
from intersight.model.niatelemetry_smart_license_all_of import NiatelemetrySmartLicenseAllOf
globals()['MoBaseComplexType'] = MoBaseComplexType
globals()['NiatelemetrySmartLicenseAllOf'] = NiatelemetrySmartLicenseAllOf
class NiatelemetrySmartLicense(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
},
('object_type',): {
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'active_mode': (str,), # noqa: E501
'auth_status': (str,), # noqa: E501
'license_udi': (str,), # noqa: E501
'smart_account': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'active_mode': 'ActiveMode', # noqa: E501
'auth_status': 'AuthStatus', # noqa: E501
'license_udi': 'LicenseUdi', # noqa: E501
'smart_account': 'SmartAccount', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NiatelemetrySmartLicense - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "niatelemetry.SmartLicense", must be one of ["niatelemetry.SmartLicense", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "niatelemetry.SmartLicense", must be one of ["niatelemetry.SmartLicense", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            active_mode (str): Indicate the mode smart license is currently running.. [optional] # noqa: E501
auth_status (str): Authorization status of the smart license.. [optional] # noqa: E501
license_udi (str): License Udi of the smart license.. [optional] # noqa: E501
            smart_account (str): Smart licensing account name in CSSM and is retrieved from CSSM after registration.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "niatelemetry.SmartLicense")
object_type = kwargs.get('object_type', "niatelemetry.SmartLicense")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseComplexType,
NiatelemetrySmartLicenseAllOf,
],
'oneOf': [
],
}
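# Editor's hedged usage sketch (field values are illustrative only; class_id and
# object_type default to "niatelemetry.SmartLicense" as documented above):
#     from intersight.model.niatelemetry_smart_license import NiatelemetrySmartLicense
#     lic = NiatelemetrySmartLicense(
#         active_mode="Production",
#         auth_status="AUTHORIZED",
#         smart_account="example-account",
#     )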
| 48.694118
| 1,678
| 0.634694
|
6d89ca4b77321eb0d29a7967449463f8e186d1fb
| 3,136
|
py
|
Python
|
Lib/test/test_msilib.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 18
|
2016-03-04T15:44:24.000Z
|
2021-12-31T11:06:25.000Z
|
Lib/test/test_msilib.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 49
|
2016-02-29T17:59:52.000Z
|
2019-05-05T04:59:26.000Z
|
Lib/test/test_msilib.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 5
|
2018-02-21T02:13:36.000Z
|
2019-10-07T02:01:32.000Z
|
""" Test suite for the code in msilib """
import os.path
import unittest
from test.support import TESTFN, import_module, unlink
msilib = import_module('msilib')
import msilib.schema
def init_database():
path = TESTFN + '.msi'
db = msilib.init_database(
path,
msilib.schema,
'Python Tests',
'product_code',
'1.0',
'PSF',
)
return db, path
class MsiDatabaseTestCase(unittest.TestCase):
def test_view_fetch_returns_none(self):
db, db_path = init_database()
properties = []
view = db.OpenView('SELECT Property, Value FROM Property')
view.Execute(None)
while True:
record = view.Fetch()
if record is None:
break
properties.append(record.GetString(1))
view.Close()
db.Close()
self.assertEqual(
properties,
[
'ProductName', 'ProductCode', 'ProductVersion',
'Manufacturer', 'ProductLanguage',
]
)
self.addCleanup(unlink, db_path)
def test_database_open_failed(self):
with self.assertRaises(msilib.MSIError) as cm:
msilib.OpenDatabase('non-existent.msi', msilib.MSIDBOPEN_READONLY)
self.assertEqual(str(cm.exception), 'open failed')
def test_database_create_failed(self):
db_path = os.path.join(TESTFN, 'test.msi')
with self.assertRaises(msilib.MSIError) as cm:
msilib.OpenDatabase(db_path, msilib.MSIDBOPEN_CREATE)
self.assertEqual(str(cm.exception), 'create failed')
def test_get_property_vt_empty(self):
db, db_path = init_database()
summary = db.GetSummaryInformation(0)
self.assertIsNone(summary.GetProperty(msilib.PID_SECURITY))
db.Close()
self.addCleanup(unlink, db_path)
class Test_make_id(unittest.TestCase):
#http://msdn.microsoft.com/en-us/library/aa369212(v=vs.85).aspx
"""The Identifier data type is a text string. Identifiers may contain the
ASCII characters A-Z (a-z), digits, underscores (_), or periods (.).
However, every identifier must begin with either a letter or an
underscore.
"""
def test_is_no_change_required(self):
self.assertEqual(
msilib.make_id("short"), "short")
self.assertEqual(
msilib.make_id("nochangerequired"), "nochangerequired")
self.assertEqual(
msilib.make_id("one.dot"), "one.dot")
self.assertEqual(
msilib.make_id("_"), "_")
self.assertEqual(
msilib.make_id("a"), "a")
#self.assertEqual(
# msilib.make_id(""), "")
def test_invalid_first_char(self):
self.assertEqual(
msilib.make_id("9.short"), "_9.short")
self.assertEqual(
msilib.make_id(".short"), "_.short")
def test_invalid_any_char(self):
self.assertEqual(
msilib.make_id(".s\x82ort"), "_.s_ort")
self.assertEqual (
msilib.make_id(".s\x82o?*+rt"), "_.s_o___rt")
if __name__ == '__main__':
unittest.main()
| 31.049505
| 78
| 0.608099
|
abffae8bce8d84d4d75263261a8083dadb160cc1
| 11,739
|
py
|
Python
|
dmipy/tissue_response/three_tissue_response.py
|
weningerleon/dmipy
|
6eeb4cf803722ba8c7910c67974bff6a6a01a14e
|
[
"MIT"
] | null | null | null |
dmipy/tissue_response/three_tissue_response.py
|
weningerleon/dmipy
|
6eeb4cf803722ba8c7910c67974bff6a6a01a14e
|
[
"MIT"
] | null | null | null |
dmipy/tissue_response/three_tissue_response.py
|
weningerleon/dmipy
|
6eeb4cf803722ba8c7910c67974bff6a6a01a14e
|
[
"MIT"
] | null | null | null |
import warnings
from scipy.optimize import brute
from scipy.stats import pearsonr
import numpy as np
from dipy.reconst import dti
from ..core.acquisition_scheme import gtab_dmipy2dipy
from dipy.segment.mask import median_otsu
from . import white_matter_response
from ..signal_models.tissue_response_models import (
estimate_TR1_isotropic_tissue_response_model)
_white_matter_response_algorithms = {
'tournier07': white_matter_response.white_matter_response_tournier07,
'tournier13': white_matter_response.white_matter_response_tournier13
}
def three_tissue_response_dhollander16(
acquisition_scheme, data, wm_algorithm='tournier13',
wm_N_candidate_voxels=300, gm_perc=0.02, csf_perc=0.1, **kwargs):
"""
Heuristic approach to estimating the white matter, grey matter and CSF
tissue response kernels [1]_, to be used in e.g. Multi-Tissue CSD [2]_. The
    method makes use of so-called 'optimal' thresholds between grey-scale
images and segmentations [3]_, with iteratively refined binary thresholds
based on an ad-hoc 'signal decay metric', to finally find candidate voxels
from which to estimate the three tissue response kernels.
Parameters
----------
acquisition_scheme : DmipyAcquisitionScheme instance,
An acquisition scheme that has been instantiated using dMipy.
data : NDarray,
Measured diffusion signal array.
wm_algorithm : string,
selection of white matter response estimation algorithm:
- 'tournier07': classic FA-based estimation,
- 'tournier13': iterative peak-ratio based estimation.
wm_N_candidate_voxels : positive integer,
number of voxels to be included in the white matter response function.
Default: 300 as done in [4]_.
gm_perc : positive float between [0, 1],
fraction of candidate voxels to use in grey matter response function.
Default: 0.02 as done in [1]_.
csf_perc : positive float between [0, 1],
fraction of candidate voxels to use in CSF response function.
Default: 0.1 as done in [1]_.
kwargs : optional keyword arguments for WM algorithm,
see white matter algorithms themselves for possible arguments.
Returns
-------
S0_wm : float,
white matter S0 response value.
TR2_wm_model : Dmipy TR2AnisotropicTissueResponseModel,
ModelFree representation of white matter response.
S0_gm : float,
grey matter S0 response value.
TR1_gm_model : Dmipy TR1IsotropicTissueResponseModel,
ModelFree representation of grey matter response.
S0_csf : float,
csf S0 response value.
TR1_csf_model : Dmipy TR1IsotropicTissueResponseModel,
ModelFree representation of csf response.
three_tissue_selection: array of size (x, y, z, 3),
RGB mask of selected voxels used for white/grey matter and CSD.
References
----------
.. [1] Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue
response function estimation from single-shell or multi-shell diffusion
MR data without a co-registered T1 image. ISMRM Workshop on Breaking
the Barriers of Diffusion MRI, 2016, 5
.. [2] Tournier, J-Donald, Fernando Calamante, and Alan Connelly.
"Determination of the appropriate b value and number of gradient
directions for high-angular-resolution diffusion-weighted imaging."
NMR in Biomedicine 26.12 (2013): 1775-1786.
.. [3] Ridgway, Gerard R., et al. "Issues with threshold masking in
voxel-based morphometry of atrophied brains." Neuroimage 44.1 (2009):
99-111.
.. [4] Tournier, J-Donald, Fernando Calamante, and Alan Connelly.
"Determination of the appropriate b value and number of gradient
directions for high-angular-resolution diffusion-weighted imaging."
NMR in Biomedicine 26.12 (2013): 1775-1786.
"""
# Create Signal Decay Metric (SDM)
mean_b0 = np.mean(data[..., acquisition_scheme.b0_mask], axis=-1)
SDM = signal_decay_metric(acquisition_scheme, data)
# Make Mask
b0_mask, mask = median_otsu(data, 2, 1)
gtab = gtab_dmipy2dipy(acquisition_scheme)
tenmod = dti.TensorModel(gtab)
tenfit = tenmod.fit(b0_mask)
fa = tenfit.fa
mask_WM = fa > 0.2
# Separate grey and CSF based on optimal threshold
# take FA < 0.2 but inside brain mask.
opt = optimal_threshold(SDM[np.all([fa < 0.2, mean_b0 > 0], axis=0)])
mask_CSF = np.all([mean_b0 > 0, mask, fa < 0.2, SDM > opt], axis=0)
mask_GM = np.all([mean_b0 > 0, mask, fa < 0.2, SDM < opt], axis=0)
    # Refine mask: high WM SDM outliers above Q3 + (Q3 - Q1) are removed.
median_WM = np.median(SDM[mask_WM])
Q1 = (SDM[mask_WM].min() + median_WM) / 2.0
Q3 = (SDM[mask_WM].max() + median_WM) / 2.0
SDM_upper_threshold = Q3 + (Q3 - Q1)
mask_WM_refine = np.all([mask_WM, SDM < SDM_upper_threshold], axis=0)
WM_outlier = np.all([mask_WM, SDM > SDM_upper_threshold], axis=0)
# For both the voxels below and above the GM SDM median, optimal thresholds
# [4] are computed and both parts closer to the initial GM median are
# retained.
SDM_GM = SDM[mask_GM]
median_GM = np.median(SDM_GM)
optimal_threshold_upper = optimal_threshold(SDM_GM[SDM_GM > median_GM])
optimal_threshold_lower = optimal_threshold(SDM_GM[SDM_GM < median_GM])
mask_GM_refine = np.all(
[mask_GM,
SDM > optimal_threshold_lower,
SDM < optimal_threshold_upper], axis=0)
# The high SDM outliers that were removed from the WM are reconsidered for
# the CSF if they have higher SDM than the current minimal CSF SDM.
SDM_CSF_min = SDM[mask_CSF].min()
WM_outlier_to_include = np.all([WM_outlier, SDM > SDM_CSF_min], axis=0)
mask_CSF_updated = np.any([mask_CSF, WM_outlier_to_include], axis=0)
# An optimal threshold [4] is computed for the resulting CSF and only the
# higher SDM valued voxels are retained.
optimal_threshold_CSF = optimal_threshold(SDM[mask_CSF_updated])
mask_CSF_refine = np.all(
[mask_CSF_updated, SDM > optimal_threshold_CSF], axis=0)
data_wm = data[mask_WM_refine]
# for WM we use WM response selection algorithm
response_wm_algorithm = _white_matter_response_algorithms[wm_algorithm]
S0_wm, TR2_wm_model, indices_wm_selected = response_wm_algorithm(
acquisition_scheme, data_wm, N_candidate_voxels=wm_N_candidate_voxels,
**kwargs)
    # for GM, the 2% of voxels closest to the GM SDM median are selected.
median_GM = np.median(SDM[mask_GM_refine])
N_threshold = int(np.sum(mask_GM_refine) * gm_perc)
indices_gm_selected = np.argsort(
np.abs(SDM[mask_GM_refine] - median_GM))[:N_threshold]
S0_gm, TR1_gm_model = estimate_TR1_isotropic_tissue_response_model(
acquisition_scheme, data[mask_GM_refine][indices_gm_selected])
    # for CSF, the 10% highest SDM valued voxels are selected.
N_threshold = int(np.sum(mask_CSF_refine) * csf_perc)
indices_csf_selected = np.argsort(SDM[mask_CSF_refine])[::-1][:N_threshold]
S0_csf, TR1_csf_model = estimate_TR1_isotropic_tissue_response_model(
acquisition_scheme, data[mask_CSF_refine][indices_csf_selected])
# generate selected WM/GM/CSF response function voxels masks.
pos_WM_refine = np.c_[np.where(mask_WM_refine)]
mask_WM_selected = np.zeros_like(mask_WM_refine)
pos_WM_selected = pos_WM_refine[indices_wm_selected]
for pos in pos_WM_selected:
mask_WM_selected[pos[0], pos[1], pos[2]] = 1
pos_GM_refine = np.c_[np.where(mask_GM_refine)]
mask_GM_selected = np.zeros_like(mask_GM_refine)
pos_GM_selected = pos_GM_refine[indices_gm_selected]
for pos in pos_GM_selected:
mask_GM_selected[pos[0], pos[1], pos[2]] = 1
pos_CSF_refine = np.c_[np.where(mask_CSF_refine)]
mask_CSF_selected = np.zeros_like(mask_CSF_refine)
pos_CSF_selected = pos_CSF_refine[indices_csf_selected]
for pos in pos_CSF_selected:
mask_CSF_selected[pos[0], pos[1], pos[2]] = 1
three_tissue_selection = np.array(
[mask_WM_selected, mask_GM_selected, mask_CSF_selected], dtype=float)
three_tissue_selection = np.transpose(three_tissue_selection, (1, 2, 3, 0))
return ([S0_wm, S0_gm, S0_csf],
[TR2_wm_model, TR1_gm_model, TR1_csf_model],
three_tissue_selection)
def signal_decay_metric(acquisition_scheme, data):
"""
Estimation of the Signal Decay Metric (SDM) for the three-tissue tissue
    response kernel estimation [1]_. The metric is the log-ratio of the S0
    signal intensity to each b>0 shell's mean signal intensity, averaged over
    shells when there are multiple shells.
Parameters
----------
acquisition_scheme : DmipyAcquisitionScheme instance,
An acquisition scheme that has been instantiated using dMipy.
data : NDarray,
Measured diffusion signal array.
Returns
-------
SDM : array of size data,
        Estimated Signal Decay Metric (SDM)
References
----------
.. [1] Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue
response function estimation from single-shell or multi-shell diffusion
MR data without a co-registered T1 image. ISMRM Workshop on Breaking
the Barriers of Diffusion MRI, 2016, 5
"""
mean_b0 = np.mean(data[..., acquisition_scheme.b0_mask], axis=-1)
data_shape = data.shape[:-1]
mean_dwi_shells = np.zeros(
np.r_[data_shape, len(acquisition_scheme.unique_dwi_indices)])
for i, index in enumerate(acquisition_scheme.unique_dwi_indices):
shell_mask = acquisition_scheme.shell_indices == index
mean_dwi_shells[..., i] = np.mean(data[..., shell_mask], axis=-1)
SDM = np.zeros(data_shape)
mask = np.min(np.concatenate((np.expand_dims(mean_b0, axis=-1),
mean_dwi_shells), axis=-1), axis=-1)>0
ratio = np.log(mean_b0[mask, None] / mean_dwi_shells[mask])
SDM[mask] = np.mean(ratio, axis=-1)
if np.max(SDM)>10 or np.min(SDM)<0:
warnings.warn(("The signal decay metric reached unrealistically " +
"high or negative values and was clipped to [0, 10]"),
RuntimeWarning)
SDM = np.clip(SDM, 0, 10)
return SDM
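# Editor's worked example (hedged, not part of the original module): for a voxel
# with S0 = 1.0 and two shell means of 0.5 and 0.25, the SDM defined above is the
# mean log-ratio (ln(1.0/0.5) + ln(1.0/0.25)) / 2 = (0.693 + 1.386) / 2 ~= 1.04.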
def optimal_threshold(data):
"""Optimal image threshold based on pearson correlation [1]_. The idea is
that an 'optimal' mask of some arbitrary image data should be found by
thresholding at a value that maximizes the pearson correlation between the
original image and the mask, i.e:
        T* = argmax_T rho(data, data > T)
           = argmin_T -rho(data, data > T)
This function estimates T* based on the second equation on arbitrary input
arrays.
Parameters
----------
    data: 1D array,
scalar array to estimate an 'optimal' threshold on.
Returns
-------
optimal_threshold: float,
optimal threshold value that maximizes correlation between the original
and masked data.
References
----------
.. [1] Ridgway, Gerard R., et al. "Issues with threshold masking in
voxel-based morphometry of atrophied brains." Neuroimage 44.1 (2009):
99-111.
"""
min_bound = data.min()
max_bound = data.max()
eps = 1e-10
optimal_threshold = brute(
func=_cost_function,
Ns=100,
args=(data,),
ranges=([min_bound + eps, max_bound - eps],))[0]
return optimal_threshold
def _cost_function(threshold, image):
"The cost function used by the optimal_threshold function."
rho = -pearsonr(image, image > threshold)[0]
return rho
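# Editor's hedged illustration of the optimal-threshold idea (self-contained, on
# synthetic bimodal data; it does not call the module itself):
#     import numpy as np
#     from scipy.stats import pearsonr
#     rng = np.random.default_rng(0)
#     sample = np.concatenate([rng.normal(0., 1., 500), rng.normal(5., 1., 500)])
#     grid = np.linspace(sample.min() + 1e-6, sample.max() - 1e-6, 100)
#     t_star = max(grid, key=lambda t: pearsonr(sample, sample > t)[0])
#     # t_star falls between the two modes, as optimal_threshold() would return.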
| 42.226619
| 79
| 0.693841
|
e79b1211632cd8e094970b8d8075096aa182b77a
| 164
|
py
|
Python
|
dict.py
|
NickYxy/JustPractice
|
d8b6eacf12da2bef2b3ff4e1f8bc8b53c00bb619
|
[
"MIT"
] | null | null | null |
dict.py
|
NickYxy/JustPractice
|
d8b6eacf12da2bef2b3ff4e1f8bc8b53c00bb619
|
[
"MIT"
] | null | null | null |
dict.py
|
NickYxy/JustPractice
|
d8b6eacf12da2bef2b3ff4e1f8bc8b53c00bb619
|
[
"MIT"
] | null | null | null |
__author__ = 'nickyuan'
dict = [{'a':'a','b':'b'}]
print(dict[0]['a'])
c = [('course_pic', '<FileStorage: 4e6fefdc79b4ee5186fd5534366d7a0b_hd.jpg')]
print(c[0])
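# Editor's note: naming the list `dict` shadows the built-in dict type for the
# rest of the module; a name such as `records` would avoid surprises.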
| 18.222222
| 77
| 0.634146
|
d1cbcd2b36756164b3430cfa02c202c71d6f7d49
| 469
|
py
|
Python
|
crop.py
|
cyitianyou/KingOfGlory-Scripts-IOS
|
91d7a7f75e53505515fa24e0bc3fe21df1443ed9
|
[
"Apache-2.0"
] | 1
|
2018-09-18T05:11:51.000Z
|
2018-09-18T05:11:51.000Z
|
crop.py
|
cyitianyou/KingOfGlory-Scripts-IOS
|
91d7a7f75e53505515fa24e0bc3fe21df1443ed9
|
[
"Apache-2.0"
] | null | null | null |
crop.py
|
cyitianyou/KingOfGlory-Scripts-IOS
|
91d7a7f75e53505515fa24e0bc3fe21df1443ed9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from PIL import Image
if __name__ == '__main__':
name = '6'
img = Image.open('images/bg/{}.png'.format(name))
# region = (920, 80, 1112, 150)
# region = (1180, 680, 1310, 720)
# region = (1220, 697, 1270, 745)
# region = (575, 30, 758, 60)
# region = (1020, 35, 1200, 90)
region = (1134, 412, 1273, 430)
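    # Pillow's crop() box is (left, upper, right, lower), in pixels.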
    # Crop the image
cropImg = img.crop(region)
    # Save the cropped image
cropImg.save('images/0{}.png'.format(name))
| 23.45
| 53
| 0.552239
|
1af6e8c030cfbd929c1262ef410abec286f3a029
| 7,365
|
py
|
Python
|
kitsune/users/tests/test_forms.py
|
turtleloveshoes/kitsune
|
7e5524644eab7f608a44c44c63d242cda3aef7f0
|
[
"BSD-3-Clause"
] | 1
|
2015-03-09T05:48:58.000Z
|
2015-03-09T05:48:58.000Z
|
kitsune/users/tests/test_forms.py
|
rlr/kitsune
|
591e996a3a115a7b235cbca19f5dec58fc9b6249
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T20:33:35.000Z
|
2021-04-30T20:33:35.000Z
|
kitsune/users/tests/test_forms.py
|
rlr/kitsune
|
591e996a3a115a7b235cbca19f5dec58fc9b6249
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from django.forms import ValidationError
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.users.forms import (
AuthenticationForm, ProfileForm, RegisterForm, SetPasswordForm,
ForgotUsernameForm, username_allowed)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import TestCaseBase, user
from kitsune.users.validators import TwitterValidator
class AuthenticationFormTests(TestCaseBase):
"""AuthenticationForm tests."""
def setUp(self):
# create active and inactive users
self.active_user = user(save=True,
username='activeuser',
is_active=True)
self.inactive_user = user(save=True,
username='inactiveuser',
is_active=False)
def test_only_active(self):
# Verify with active user
form = AuthenticationForm(data={'username': self.active_user.username,
'password': 'testpass'})
assert form.is_valid()
# Verify with inactive user
form = AuthenticationForm(data={
'username': self.inactive_user.username,
'password': 'testpass'})
assert not form.is_valid()
def test_allow_inactive(self):
# Verify with active user
form = AuthenticationForm(only_active=False,
data={'username': self.active_user.username,
'password': 'testpass'})
assert form.is_valid()
# Verify with inactive user
form = AuthenticationForm(only_active=False, data={
'username': self.inactive_user.username,
'password': 'testpass'})
assert form.is_valid()
def test_at_in_username(self):
u = user(username='test@example.com', save=True)
form = AuthenticationForm(data={'username': u.username,
'password': 'testpass'})
assert form.is_valid()
FACEBOOK_URLS = (
('https://facebook.com/valid', True),
('http://www.facebook.com/valid', True),
('htt://facebook.com/invalid', False),
('http://notfacebook.com/invalid', False),
('http://facebook.com/', False),
)
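# Editor's hedged illustration: a regex with the same accept/reject behaviour on
# the URLs above (not necessarily the exact pattern the form emits) would be
#     https?://(www\.)?facebook\.com/.+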
class ProfileFormTestCase(TestCaseBase):
form = ProfileForm()
def setUp(self):
self.form.cleaned_data = {}
def test_facebook_pattern_attr(self):
"""Facebook field has the correct pattern attribute."""
fragment = pq(self.form.as_ul())
facebook = fragment('#id_facebook')[0]
assert 'pattern' in facebook.attrib
pattern = re.compile(facebook.attrib['pattern'])
for url, match in FACEBOOK_URLS:
eq_(bool(pattern.match(url)), match)
def test_clean_facebook(self):
clean = self.form.clean_facebook
for url, match in FACEBOOK_URLS:
self.form.cleaned_data['facebook'] = url
if match:
clean() # Should not raise.
else:
self.assertRaises(ValidationError, clean)
class TwitterValidatorTestCase(TestCase):
    def setUp(self):
        pass
def test_valid(self):
TwitterValidator('a_valid_name')
def test_has_number(self):
TwitterValidator('valid123')
def test_has_letter_number_underscore(self):
TwitterValidator('valid_name_123')
def test_has_slash(self):
# Twitter usernames can not have slash "/"
self.assertRaises(ValidationError, lambda: TwitterValidator('x/'))
def test_has_at_sign(self):
# Dont Accept Twitter Username with "@"
self.assertRaises(ValidationError, lambda: TwitterValidator('@x'))
class RegisterFormTests(TestCaseBase):
"""RegisterForm tests."""
def test_common_password(self):
form = RegisterForm({'username': 'newuser',
'password': 'password',
'password2': 'password',
'email': 'newuser@example.com'})
assert not form.is_valid()
def test_strong_password(self):
form = RegisterForm({'username': 'newuser',
'password': 'fksjvaj1',
'password2': 'fksjvaj1',
'email': 'newuser@example.com'})
assert form.is_valid()
def test_bad_username(self):
# Simple match.
form = RegisterForm({'username': 'ass',
'password': 'adssadfsadf1',
'password2': 'adssadfsadf1',
'email': 'newuser@example.com'})
assert not form.is_valid()
# Simple obfuscation.
form = RegisterForm({'username': 'a.s.s',
'password': 'adssadfsadf1',
'password2': 'adssadfsadf1',
'email': 'newuser@example.com'})
assert not form.is_valid()
# Partial match.
form = RegisterForm({'username': 'ass.assassin',
'password': 'adssadfsadf1',
'password2': 'adssadfsadf1',
'email': 'newuser@example.com'})
assert not form.is_valid()
# Plus sign
form = RegisterForm({'username': 'ass+assin',
'password': 'adssadfsadf1',
'password2': 'adssadfsadf1',
'email': 'newuser@example.com'})
assert not form.is_valid()
# No match.
form = RegisterForm({'username': 'assassin',
'password': 'adssadfsadf1',
'password2': 'adssadfsadf1',
'email': 'newuser@example.com'})
assert form.is_valid()
class SetPasswordFormTests(TestCaseBase):
"""SetPasswordForm tests."""
def test_common_password(self):
form = SetPasswordForm(None, data={'new_password1': 'password',
'new_password2': 'password'})
assert not form.is_valid()
class PasswordChangeFormFormTests(TestCaseBase):
"""PasswordChangeForm tests."""
def test_common_password(self):
u = user(save=True)
form = SetPasswordForm(u, data={'new_password1': 'password',
'new_password2': 'password',
'old_password': 'testpass'})
assert not form.is_valid()
class ForgotUsernameFormTests(TestCaseBase):
"""ForgotUsernameForm tests."""
def test_email_doesnt_exist(self):
"""If no account with email exists, form isn't valid."""
form = ForgotUsernameForm({'email': 'a@b.com'})
assert not form.is_valid()
def test_valid_email(self):
""""If an account with email exists, form is valid."""
u = user(save=True, email='a@b.com', is_active=True)
form = ForgotUsernameForm({'email': u.email})
assert form.is_valid()
class Testusername_allowed(TestCase):
def test_good_names(self):
data = [
('ana', True),
('rlr', True),
('anal', False),
]
for name, expected in data:
eq_(username_allowed(name), expected)
| 34.740566
| 78
| 0.559403
|
87303ff5e203384ab11184b2441011b85f26cc4b
| 2,352
|
py
|
Python
|
Regression/AllAtOnce/power.py
|
vnc-edu/machine-learning
|
5a212ae303c5db319c0474077a13eb663aff2c54
|
[
"Apache-2.0"
] | null | null | null |
Regression/AllAtOnce/power.py
|
vnc-edu/machine-learning
|
5a212ae303c5db319c0474077a13eb663aff2c54
|
[
"Apache-2.0"
] | null | null | null |
Regression/AllAtOnce/power.py
|
vnc-edu/machine-learning
|
5a212ae303c5db319c0474077a13eb663aff2c54
|
[
"Apache-2.0"
] | null | null | null |
# import libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Importing the dataset
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
dataset = pd.read_csv("Data.csv")
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# Splitting the dataset into the Training set and Test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# Training the Simple Linear Regression model on the Training set
linear_regressor = LinearRegression()
linear_regressor.fit(x_train, y_train)
# Predicting the Test set results
y_pred = linear_regressor.predict(x_test)
lr = r2_score(y_test, y_pred)
print('R2 for linear regression')
print(lr)
# Training the Polynomial Regression model on the whole dataset
poly_reg = PolynomialFeatures(degree=4)
x_poly = poly_reg.fit_transform(x_train)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(x_poly,y_train)
x_test_poly = poly_reg.transform(x_test)
y_test_pred_poly = lin_reg_2.predict(x_test_poly)
pr = r2_score(y_test, y_test_pred_poly)
print('R2 for polynomial regression')
print(pr)
# support vector
y1 = y.reshape(len(y), 1)
x_train1, x_test1, y_train1, y_test1 = train_test_split(x, y1, test_size=0.2, random_state=0)
sc_x = StandardScaler()
xt = sc_x.fit_transform(x_train1)
sc_y = StandardScaler()
yt = sc_y.fit_transform(y_train1)
svr_regressor = SVR(kernel='rbf')
svr_regressor.fit(xt, yt)
x1t = sc_x.transform(x_test1)
y1t = svr_regressor.predict(x1t)
y_pred_svr = sc_y.inverse_transform(y1t)
svr_r2 = r2_score(y_test1, y_pred_svr)
print('R2 for support vector')
print(svr_r2)
#decision tree
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(x_train, y_train)
y_pred_dcn = regressor.predict(x_test)
dcn_r2 = r2_score(y_test, y_pred_dcn)
print('R2 for decision tree')
print(dcn_r2)
# random forest
regressor = RandomForestRegressor(n_estimators=10, random_state=0)
regressor.fit(x_train, y_train)
y_pred_rndf = regressor.predict(x_test)
rndf_r2 = r2_score(y_test, y_pred_rndf)
print('R2 for random forest')
print(rndf_r2)
| 27.670588
| 93
| 0.796344
|
f214fee22e2cc79f15b3993afe7f035646af25fe
| 7,926
|
py
|
Python
|
google/cloud/compute_v1/services/autoscalers/transports/base.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/autoscalers/transports/base.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/autoscalers/transports/base.py
|
georgiyekkert/python-compute
|
d128efbb3bf10af9b41e55b20aaa8080b3221e77
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AutoscalersTransport(abc.ABC):
"""Abstract transport class for Autoscalers."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
self.update: gapic_v1.method.wrap_method(
self.update, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListAutoscalersRequest],
Union[
compute.AutoscalerAggregatedList,
Awaitable[compute.AutoscalerAggregatedList],
],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetAutoscalerRequest],
Union[compute.Autoscaler, Awaitable[compute.Autoscaler]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListAutoscalersRequest],
Union[compute.AutoscalerList, Awaitable[compute.AutoscalerList]],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def update(
self,
) -> Callable[
[compute.UpdateAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
__all__ = ("AutoscalersTransport",)
| 35.226667
| 101
| 0.643452
|
d845b03f152430d6be5980ff8186838a3b203abd
| 330
|
py
|
Python
|
ridecost_cal.py
|
Tusar6701/python1.0
|
22dbdba3693597e3cc61a585a348702461567ee1
|
[
"Apache-2.0"
] | null | null | null |
ridecost_cal.py
|
Tusar6701/python1.0
|
22dbdba3693597e3cc61a585a348702461567ee1
|
[
"Apache-2.0"
] | null | null | null |
ridecost_cal.py
|
Tusar6701/python1.0
|
22dbdba3693597e3cc61a585a348702461567ee1
|
[
"Apache-2.0"
] | null | null | null |
dist = input("Enter the distance travelled by youto and fro in kms : ")
f_avg = input("Enter the fuel average in your area [km/litre]: ")
cost_of_diesel = input("Enter the cost of diesel [int INR]: ")
f_cons = float(dist) / float(f_avg)
cost = float(f_cons) * float(cost_of_diesel)
print("The cost of travel is : ", cost)
| 41.25
| 72
| 0.690909
|
f2ed0f0a8530c78d7f19a5f9b6a1c8a0f0db7a59
| 1,799
|
py
|
Python
|
cnnlstm.py
|
chaudharyhimanshu/Violence-Detection
|
728931fff012463f71b3011c697741a154aeed42
|
[
"MIT"
] | null | null | null |
cnnlstm.py
|
chaudharyhimanshu/Violence-Detection
|
728931fff012463f71b3011c697741a154aeed42
|
[
"MIT"
] | null | null | null |
cnnlstm.py
|
chaudharyhimanshu/Violence-Detection
|
728931fff012463f71b3011c697741a154aeed42
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 16:03:26 2019
@author: Himanshu
"""
from keras.models import Sequential
from keras.layers import Flatten, Conv3D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers import MaxPooling3D
from keras.layers import Dense
from keras.layers import Dropout
# Initialising
def cnnlstm(frames, channels, pixels_x, pixels_y):
classifier = Sequential()
classifier.add(ConvLSTM2D(filters=60, kernel_size=(3, 3),
input_shape=(frames, channels, pixels_x, pixels_y),
padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(MaxPooling3D(pool_size=(1, 3, 3), padding='same', data_format='channels_first'))
classifier.add(ConvLSTM2D(filters=128, kernel_size=(3, 3),
padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(MaxPooling3D(pool_size=(1, 3, 3), padding='same', data_format='channels_first'))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
'''
classifier.add(BatchNormalization())
classifier.add(Conv3D(filters=1, kernel_size=(3, 3, 3),
activation='sigmoid',
padding='same', data_format = 'channels_first'))'''
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
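# A minimal usage sketch (hypothetical shapes): build the model for 30-frame, 3-channel,
# 64x64 clips and train it with Keras, assuming x_train has shape
# (samples, 30, 3, 64, 64) and y_train holds binary labels:
#   model = cnnlstm(frames=30, channels=3, pixels_x=64, pixels_y=64)
#   model.fit(x_train, y_train, batch_size=8, epochs=10)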
| 33.314815
| 99
| 0.675931
|
a53d310401f0fd065c0f5de63a7417dfae0364e7
| 1,099
|
py
|
Python
|
src/modules/IcmpFilter.py
|
olivervbk/inactmon
|
5ef2c0795f2e146db2311e6ec3f8aed231bfb6cc
|
[
"RSA-MD"
] | null | null | null |
src/modules/IcmpFilter.py
|
olivervbk/inactmon
|
5ef2c0795f2e146db2311e6ec3f8aed231bfb6cc
|
[
"RSA-MD"
] | null | null | null |
src/modules/IcmpFilter.py
|
olivervbk/inactmon
|
5ef2c0795f2e146db2311e6ec3f8aed231bfb6cc
|
[
"RSA-MD"
] | null | null | null |
from impacket import ImpactDecoder
class IcmpFilter():
attributes = None
myIpAddresses = None
logger = None
def __init__(self, attributes, logger, myIpAddresses):
self.attributes = attributes
self.logger = logger
self.myIpAddresses = myIpAddresses
def rule(self):
rule = "icmp[icmptype] == icmp-echo"
#rule = "icmp-echo"
#rule = "icmp"
return rule
def run(self, header, payload):
rip = ImpactDecoder.EthDecoder().decode(payload)
#print rip
proto = -1
try:
proto = rip.child().get_ip_p()
except AttributeError:
pass
# NOT ICMP
if proto != 1:
self.logger.warn('got packet that was not ICMP?!')
return None
icmpType = rip.child().child().get_icmp_type()
if(icmpType == rip.child().child().ICMP_ECHOREPLY):
self.logger.warn('got icmp ECHOREPLY?!')
return None
#if(icmpType == rip.child().child().ICMP_ECHO):
# status = 'echo'
dstAddr = rip.child().get_ip_dst()
srcAddr = rip.child().get_ip_src()
message = 'icmp echo request from '+srcAddr+' to '+dstAddr
self.logger.debug("msg rcvd: "+str(message))
return message
| 21.54902
| 60
| 0.675159
|
6b5046e42a27c05fffcf9cc0239f069983b935ec
| 2,556
|
py
|
Python
|
exercise/tests/test_email_thread.py
|
jugraj/phishtray
|
1fa3cec7c34bcaead8f524fc9cbb2b9bf737eff6
|
[
"MIT"
] | null | null | null |
exercise/tests/test_email_thread.py
|
jugraj/phishtray
|
1fa3cec7c34bcaead8f524fc9cbb2b9bf737eff6
|
[
"MIT"
] | null | null | null |
exercise/tests/test_email_thread.py
|
jugraj/phishtray
|
1fa3cec7c34bcaead8f524fc9cbb2b9bf737eff6
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from exercise.models import *
from exercise.tests.helpers.assert_emal_response import *
class ExerciseEmailThreadTests(TestCase):
def test_exercise_email_thread_not_found(self):
response = self.client.get('/exercise/thread/1/')
self.assertEqual(response.status_code, 404)
self.assertEqual((json.loads(response.content))['detail'], 'Not found.')
def test_emails_thread_by_id(self):
self.create_emails()
response = self.client.get('/exercise/thread/1/')
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content)
self.assertCoverEmail(self, json_data)
self.assertEmailResponseBody(self, json_data['emails'])
@staticmethod
def assertCoverEmail(self, json_data):
self.assertEqual(1, json_data['id'])
self.assertEqual('test email from unit test case', json_data['subject'])
self.assertEqual('test@cybsafe.com', json_data['from_address'])
self.assertEqual('Cybsafe Admin', json_data['from_name'])
@staticmethod
def assertEmailResponseBody(self, json_data):
EmailAssertHelper.assert_first_email(self, json_data[0])
EmailAssertHelper.assert_second_email(self, json_data[1])
@staticmethod
def create_emails():
replies = ExerciseEmailReply(
id=1,
reply_type=1,
message='I have received your email-1'
)
replies.save()
attachment = ExerciseAttachment(
            id=1,
            filename='location of file name'
)
attachment.save()
email1 = ExerciseEmail(
id=1,
subject='test email from unit test case',
from_address='test@cybsafe.com',
from_name='Cybsafe Admin',
to_address='sendTo1@cybsafe.com',
to_name='Cybsafe Admin-1',
phish_type=0,
content="Hello world",
)
email1.save()
email1.replies.add(replies)
email1.attachments.add(attachment)
email2 = ExerciseEmail(
id=2,
subject='test email from unit test case-2',
from_address='test2@cybsafe.com',
from_name='Cybsafe Admin-2',
to_address='sendTo2@cybsafe.com',
to_name='Cybsafe Admin-2',
phish_type=1,
content="Hello world-2",
belongs_to=email1
)
email2.save()
email2.replies.add(replies)
email2.attachments.add(attachment)
| 34.540541
| 80
| 0.622066
|
851909a6bbce97dbe2eeda27c9ffe459e25e9b2c
| 465
|
py
|
Python
|
recipes/jsoncpp/all/test_package/conanfile.py
|
marcvilletard/conan-center-index
|
e8ed6467021c025702ebf3b529972ce39285a650
|
[
"MIT"
] | null | null | null |
recipes/jsoncpp/all/test_package/conanfile.py
|
marcvilletard/conan-center-index
|
e8ed6467021c025702ebf3b529972ce39285a650
|
[
"MIT"
] | null | null | null |
recipes/jsoncpp/all/test_package/conanfile.py
|
marcvilletard/conan-center-index
|
e8ed6467021c025702ebf3b529972ce39285a650
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake_find_package"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| 25.833333
| 58
| 0.63871
|
9a6dbd011a9339d6de3a8aace0649a4b1e9bc943
| 1,924
|
py
|
Python
|
test1.py
|
stableShip/photo_analysis
|
0759df88f95cdc45eaf0e68b9bddb7950a765e6c
|
[
"MIT"
] | 1
|
2016-08-30T15:15:04.000Z
|
2016-08-30T15:15:04.000Z
|
test1.py
|
stableShip/photo_analysis
|
0759df88f95cdc45eaf0e68b9bddb7950a765e6c
|
[
"MIT"
] | null | null | null |
test1.py
|
stableShip/photo_analysis
|
0759df88f95cdc45eaf0e68b9bddb7950a765e6c
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import sys,os
from PIL import Image,ImageDraw
import pytesseract
# Binary check: if a point is confirmed to be noise, replace it with the grey value of the point above it
# This function could also be changed to compare RGB values, depending on the requirements
def getPixel(image,x,y,G,N):
L = image.getpixel((x,y))
if L > G:
L = True
else:
L = False
nearDots = 0
if L == (image.getpixel((x - 1,y - 1)) > G):
nearDots += 1
if L == (image.getpixel((x - 1,y)) > G):
nearDots += 1
if L == (image.getpixel((x - 1,y + 1)) > G):
nearDots += 1
if L == (image.getpixel((x,y - 1)) > G):
nearDots += 1
if L == (image.getpixel((x,y + 1)) > G):
nearDots += 1
if L == (image.getpixel((x + 1,y - 1)) > G):
nearDots += 1
if L == (image.getpixel((x + 1,y)) > G):
nearDots += 1
if L == (image.getpixel((x + 1,y + 1)) > G):
nearDots += 1
if nearDots < N:
return image.getpixel((x,y-1))
else:
return None
# Noise reduction
# Compare the value of a point A with the values of its 8 surrounding points. Given a threshold N (0 < N < 8),
# if the number of neighbours whose value agrees with A is smaller than N, the point is treated as noise
# G: Integer, image binarization threshold
# N: Integer, noise-reduction rate, 0 < N < 8
# Z: Integer, number of noise-reduction passes
# Output
# 0: noise reduction succeeded
# 1: noise reduction failed
def clearNoise(image,G,N,Z):
draw = ImageDraw.Draw(image)
for i in xrange(0,Z):
for x in xrange(1,image.size[0] - 1):
for y in xrange(1,image.size[1] - 1):
color = getPixel(image,x,y,G,N)
if color != None:
draw.point((x,y),color)
# Test code
def main(image_path):
    # Open the image
    image = Image.open(image_path)
    # Convert the image to greyscale
    image = image.convert("L")
    # Denoise with G = 159, N = 4, Z = 4
    clearNoise(image, 159, 4, 4)
    # Return the cleaned image
    return image
if __name__ == '__main__':
image = main("./test/1.png")
image.save("result.jpg")
result = pytesseract.image_to_string(image, config='-psm 7')
print result
| 25.653333
| 78
| 0.50104
|
acd73155fa9dc941edd06f2c4099a739a61fda98
| 6,374
|
py
|
Python
|
fm.py
|
kn45/FactorizatioMachine
|
02df4075aaeb7afb36fdc616f3c4346ded40a881
|
[
"MIT"
] | 9
|
2018-06-06T08:38:49.000Z
|
2019-07-15T02:04:37.000Z
|
fm.py
|
kn45/FactorizatioMachine
|
02df4075aaeb7afb36fdc616f3c4346ded40a881
|
[
"MIT"
] | null | null | null |
fm.py
|
kn45/FactorizatioMachine
|
02df4075aaeb7afb36fdc616f3c4346ded40a881
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import tensorflow as tf
class FMCore(object):
"""Factorization Machine Core
"""
def _sparse_mul(self, sp_x, w):
"""dense_res = sparse_x * dense_w
return dense matrix
"""
# this could achieve sparse gradient
return tf.sparse_tensor_dense_matmul(sp_x, w, name='mul_sparse')
def _build_graph(self, input_dim=None, hidden_dim=8, lambda_w=0.0, lambda_v=0.0, loss=None):
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.inp_x = tf.sparse_placeholder(dtype=tf.float32, name='input_x')
self.inp_y = tf.placeholder(tf.float32, [None, 1], name='input_y')
# forward path
with tf.name_scope('1-way'):
self.w0 = tf.Variable(tf.constant(0.1, shape=[1]), name='w0')
self.W = tf.get_variable(
'W', shape=[input_dim, 1],
initializer=tf.contrib.layers.xavier_initializer())
self.degree1 = self._sparse_mul(self.inp_x, self.W) + self.w0
with tf.name_scope('2-way'):
self.V = tf.get_variable(
'V', shape=[input_dim, hidden_dim],
initializer=tf.contrib.layers.xavier_initializer())
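            # The pairwise interaction term uses the standard FM identity:
            # sum_{i<j} <v_i, v_j> x_i x_j
            #   = 0.5 * sum_f [(sum_i v_{i,f} x_i)^2 - sum_i (v_{i,f} x_i)^2],
            # where "left" below is the squared sum and "right" is the sum of squares.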
with tf.name_scope('2-way_left'):
self.left = tf.pow(
self._sparse_mul(self.inp_x, self.V),
tf.constant(2, dtype=tf.float32, name='const_2')) # (bs, hidden_dim)
with tf.name_scope('2-way_right'):
# use tf.square supporting sparse_pow(x, 2)
self.right = self._sparse_mul(
tf.square(self.inp_x), tf.pow(self.V, 2))
self.degree2 = tf.reduce_sum(tf.subtract(self.left, self.right), 1, keep_dims=True) * \
tf.constant(0.5, dtype=tf.float32, name='const_05')
with tf.name_scope('prediction'):
self.scores = self.degree1 + self.degree2
# loss and opt
with tf.name_scope('loss'):
self.reg_loss = lambda_w * tf.nn.l2_loss(self.w0) + \
lambda_w * tf.nn.l2_loss(self.W) + \
lambda_v * tf.nn.l2_loss(self.V)
if loss == 'cross_entropy':
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.inp_y, logits=self.scores))
if loss == 'rmse':
self.loss = tf.reduce_mean(
tf.square(tf.subtract(self.inp_y, self.scores)))
self.summary_loss = tf.summary.scalar('loss_without_reg', self.loss)
self.total_loss = self.loss + self.reg_loss
with tf.name_scope('opt'):
self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
self.opt = tf.contrib.opt.LazyAdamOptimizer(self.learning_rate).minimize(
self.total_loss, global_step=self.global_step)
# saver and loader
self.ckpt_saver = tf.train.Saver()
self.saver = tf.train.Saver(
var_list=tf.trainable_variables() + [self.global_step],
max_to_keep=1)
# get embedding vector
self.embedding = self._sparse_mul(self.inp_x, self.V)
def train_step(self, sess, inp_x, inp_y, lr=1e-3):
input_dict = {
self.inp_x: inp_x,
self.inp_y: inp_y,
self.learning_rate: lr}
return sess.run([self.summary_loss, self.loss, self.opt], feed_dict=input_dict)
def eval_loss(self, sess, inp_x, inp_y):
eval_dict = {
self.inp_x: inp_x,
self.inp_y: inp_y}
return sess.run([self.summary_loss, self.loss], feed_dict=eval_dict)
def get_embedding(self, sess, inp_x):
input_dict = {
self.inp_x: inp_x}
return sess.run(self.embedding, feed_dict=input_dict)
class FMClassifier(FMCore):
"""Factorization Machine Classifier
"""
def __init__(self, input_dim=None, hidden_dim=16, lambda_w=0.0, lambda_v=0.0):
# init graph from input to predict y_hat
self._task = 'classification'
self._build_graph(input_dim, hidden_dim, lambda_w, lambda_v, loss='cross_entropy')
with tf.name_scope('prediction/'):
self.proba = tf.sigmoid(self.scores)
with tf.name_scope('metrics'):
self.auc, self.update_auc = tf.metrics.auc(
labels=self.inp_y,
predictions=self.proba,
num_thresholds=1000)
self.summary_auc = tf.summary.scalar('AUC', self.auc)
# all summary
self.summary_all = tf.summary.merge_all()
def predict_proba(self, sess, inp_x):
input_dict = {
self.inp_x: inp_x}
return sess.run(self.proba, feed_dict=input_dict)
def eval_auc(self, sess, inp_x, inp_y):
eval_dict = {
self.inp_x: inp_x,
self.inp_y: inp_y}
sess.run(tf.local_variables_initializer())
sess.run(self.update_auc, feed_dict=eval_dict)
return sess.run([self.summary_auc, self.auc])
def eval_metrics(self, sess, inp_x, inp_y):
eval_dict = {
self.inp_x: inp_x,
self.inp_y: inp_y}
sess.run(tf.local_variables_initializer())
sess.run(self.update_auc, feed_dict=eval_dict)
return sess.run([self.summary_all, self.loss, self.auc], feed_dict=eval_dict)
class FMRegressor(FMCore):
"""Factorization Machine Regressor
"""
def __init__(self, input_dim=None, hidden_dim=16, lambda_w=0.0, lambda_v=0.0):
# init graph from input to predict y_hat
self._task = 'regression'
self._build_graph(input_dim, hidden_dim, lambda_w, lambda_v, loss='rmse')
with tf.name_scope('metrics'):
# all summary
self.summary_all = tf.summary.merge_all()
def predict(self, sess, inp_x):
input_dict = {
self.inp_x: inp_x}
return sess.run(self.scores, feed_dict=input_dict)
def eval_metrics(self, sess, inp_x, inp_y):
eval_dict = {
self.inp_x: inp_x,
self.inp_y: inp_y}
return sess.run([self.summary_all, self.loss], feed_dict=eval_dict)
if __name__ == '__main__':
mdl = FMClassifier(5)
sess = tf.Session()
file_writer = tf.summary.FileWriter('./log', sess.graph)
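    # A minimal training sketch (hypothetical data): initialise the variables and feed a
    # tf.SparseTensorValue as inp_x plus a (batch, 1) float array as inp_y, e.g.
    #   sess.run(tf.global_variables_initializer())
    #   mdl.train_step(sess, sparse_batch_x, batch_y, lr=1e-3)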
sess.close()
| 40.08805
| 99
| 0.597741
|
c2009c3b492f2bd4f6be0ae976e7e2ca410baf92
| 4,107
|
py
|
Python
|
dist.py
|
opendxl/opendxl-openc2-client-python
|
ce80246526d49cc8fdbe134e87aa725456d6a376
|
[
"Apache-2.0"
] | 1
|
2020-12-22T17:35:17.000Z
|
2020-12-22T17:35:17.000Z
|
dist.py
|
opendxl/opendxl-openc2-client-python
|
ce80246526d49cc8fdbe134e87aa725456d6a376
|
[
"Apache-2.0"
] | null | null | null |
dist.py
|
opendxl/opendxl-openc2-client-python
|
ce80246526d49cc8fdbe134e87aa725456d6a376
|
[
"Apache-2.0"
] | 1
|
2021-02-02T17:08:09.000Z
|
2021-02-02T17:08:09.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
from tempfile import mkstemp
from shutil import move
from distutils.dir_util import copy_tree, remove_tree
from distutils.file_util import copy_file, move_file
from distutils.core import run_setup
from distutils.archive_util import make_archive
# Run clean
import clean # pylint: disable=unused-import
def replace(file_path, pattern, subst):
# Create temp file
fh, abs_path = mkstemp()
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
os.close(fh)
# Remove original file
os.remove(file_path)
# Move new file
move(abs_path, file_path)
print("Starting dist.\n") # pylint: disable=unused-import
VERSION = __import__('dxlopenc2client').get_version()
RELEASE_NAME = "dxlopenc2client-python-dist-" + str(VERSION)
DIST_PY_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__))
DIST_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dist")
DIST_DOCTMP_DIR = os.path.join(DIST_DIRECTORY, "doctmp")
SETUP_PY = os.path.join(DIST_PY_FILE_LOCATION, "setup.py")
DIST_LIB_DIRECTORY = os.path.join(DIST_DIRECTORY, "lib")
DIST_RELEASE_DIR = os.path.join(DIST_DIRECTORY, RELEASE_NAME)
SAMPLE_RELEASE_DIR = os.path.join(DIST_DIRECTORY, "sample")
# Remove the dist directory if it exists
if os.path.exists(DIST_DIRECTORY):
print("\nRemoving dist directory: " + DIST_DIRECTORY + "\n")
remove_tree(DIST_DIRECTORY, verbose=1)
print("\nMaking dist directory: " + DIST_DIRECTORY + "\n")
os.makedirs(DIST_DIRECTORY)
print("\nCalling sphinx-apidoc\n")
subprocess.check_call(["sphinx-apidoc",
"--force",
"--separate",
"--no-toc",
"--output-dir=" + DIST_DOCTMP_DIR,
os.path.join(DIST_PY_FILE_LOCATION, "dxlopenc2client")])
print("\nCopying conf.py, docutils.conf, and sdk directory\n")
copy_file(os.path.join(DIST_PY_FILE_LOCATION, "doc", "conf.py"),
os.path.join(DIST_DOCTMP_DIR, "conf.py"))
copy_file(os.path.join(DIST_PY_FILE_LOCATION, "doc", "docutils.conf"),
os.path.join(DIST_DOCTMP_DIR, "docutils.conf"))
copy_tree(os.path.join(DIST_PY_FILE_LOCATION, "doc", "sdk"), DIST_DOCTMP_DIR)
print("\nCalling sphinx-build\n")
subprocess.check_call(["sphinx-build",
"-b",
"html",
DIST_DOCTMP_DIR,
os.path.join(DIST_DIRECTORY, "doc")])
replace(os.path.join(DIST_DIRECTORY, "doc", "_static", "classic.css"),
"text-align: justify", "text-align: none")
# Delete .doctrees
remove_tree(os.path.join(os.path.join(DIST_DIRECTORY, "doc"), ".doctrees"), verbose=1)
# Delete .buildinfo
os.remove(os.path.join(os.path.join(DIST_DIRECTORY, "doc"), ".buildinfo"))
print("\nMoving README.html\n")
move_file(os.path.join(DIST_DOCTMP_DIR, "README.html"), DIST_DIRECTORY)
print("\nDeleting doctmp directory\n")
remove_tree(DIST_DOCTMP_DIR)
print("\nRunning setup.py sdist\n")
run_setup(SETUP_PY,
["sdist",
"--format",
"zip",
"--dist-dir",
DIST_LIB_DIRECTORY])
print("\nRunning setup.py bdist_wheel\n")
run_setup(SETUP_PY,
["bdist_wheel",
"--dist-dir",
DIST_LIB_DIRECTORY,
"--python-tag",
"py2.py3"])
print("\nCopying sample into dist directory\n")
copy_tree(os.path.join(DIST_PY_FILE_LOCATION, "sample"), SAMPLE_RELEASE_DIR)
print("\nCopying dist to " + DIST_RELEASE_DIR + "\n")
copy_tree(DIST_DIRECTORY, DIST_RELEASE_DIR)
print("\nRemoving build directory\n")
remove_tree(os.path.join(DIST_PY_FILE_LOCATION, "build"))
print("\nRemoving dxlopenc2client.egg-info\n")
remove_tree(os.path.join(DIST_PY_FILE_LOCATION, "dxlopenc2client.egg-info"))
print("\nMaking dist zip\n")
make_archive(DIST_RELEASE_DIR, "zip", DIST_DIRECTORY, RELEASE_NAME)
print("\nRemoving " + DIST_RELEASE_DIR + "\n")
remove_tree(DIST_RELEASE_DIR)
| 34.805085
| 86
| 0.689311
|
110881a4ade0d28cbd7fb304d8086ee0f64590c1
| 568
|
py
|
Python
|
doc/test_example1.py
|
tmarktaylor/phmdoctest
|
6974c95f8019aeea7e99a0cd7cafb06846291a35
|
[
"MIT"
] | 16
|
2020-06-01T21:50:04.000Z
|
2022-03-10T00:47:47.000Z
|
doc/test_example1.py
|
Borda/phmdoctest
|
36b657fae364b5a6dcf59f8b4d92e5fb6dd713bd
|
[
"MIT"
] | 18
|
2020-06-01T22:08:59.000Z
|
2022-03-30T15:26:10.000Z
|
doc/test_example1.py
|
Borda/phmdoctest
|
36b657fae364b5a6dcf59f8b4d92e5fb6dd713bd
|
[
"MIT"
] | 3
|
2020-06-23T02:04:25.000Z
|
2022-02-10T21:32:19.000Z
|
"""pytest file built from doc/example1.md"""
from phmdoctest.functions import _phm_compare_exact
def session_00001_line_6():
r"""
>>> print("Hello World!")
Hello World!
"""
def test_code_14_output_28(capsys):
from enum import Enum
class Floats(Enum):
APPLES = 1
CIDER = 2
CHERRIES = 3
ADUCK = 4
for floater in Floats:
print(floater)
_phm_expected_str = """\
Floats.APPLES
Floats.CIDER
Floats.CHERRIES
Floats.ADUCK
"""
_phm_compare_exact(a=_phm_expected_str, b=capsys.readouterr().out)
| 18.322581
| 70
| 0.653169
|
79f8bf69cbcecc57c80afc259f70e24d533de352
| 18,516
|
py
|
Python
|
finance_datareader_py/_version.py
|
GuQiangJS/finance-datareader-py
|
4b9307b5ad9ed07907d5188d753d908ec3998316
|
[
"Apache-2.0"
] | 7
|
2018-12-04T02:11:01.000Z
|
2022-02-22T06:11:27.000Z
|
finance_datareader_py/_version.py
|
GuQiangJS/finance-datareader-py
|
4b9307b5ad9ed07907d5188d753d908ec3998316
|
[
"Apache-2.0"
] | 2
|
2018-07-12T08:34:07.000Z
|
2018-07-12T15:06:59.000Z
|
finance_datareader_py/_version.py
|
GuQiangJS/finance-datareader-py
|
4b9307b5ad9ed07907d5188d753d908ec3998316
|
[
"Apache-2.0"
] | null | null | null |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "finance_datareader_py/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
    # "short" may be improved later from the describe output
    pieces = {'long': full_out, 'short': full_out[:7], 'error': None}
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| 35.471264
| 79
| 0.585062
|
aac561c4934023a1db8912322be9e0d32399ceb6
| 2,756
|
py
|
Python
|
reverse_search.py
|
Shinpachi8/passiveDomain
|
e69a933365a216eaa849293b0f786418361c7d61
|
[
"Apache-2.0"
] | null | null | null |
reverse_search.py
|
Shinpachi8/passiveDomain
|
e69a933365a216eaa849293b0f786418361c7d61
|
[
"Apache-2.0"
] | null | null | null |
reverse_search.py
|
Shinpachi8/passiveDomain
|
e69a933365a216eaa849293b0f786418361c7d61
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import os
import json
import time
import sys
FPATH = '/mnt/e/2 tools/5 dnsdata/fdns_a.sort.txt'
MAXLINESIZE = 500
WALKBYTES = 10000
LIMIT = {
'MaxScan': 100,
'MaxOutputLines': 100000
}
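# MaxScan bounds how far we walk backwards from the binary-search hit to find the
# first matching line; MaxOutputLines caps how many lines the forward scan returns.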
def get_string(f, offset):
try:
f.seek(offset, 0)
line = f.read(MAXLINESIZE).split('\n')
if len(line) < 2:
return ''
        return line[1]
    except Exception:
        return ''
def get_line_detail(f, offset, search):
line = get_string(f, offset)
if line == '':
return ''
if len(line) > len(search):
line = line[:len(search)]
return line
def binary_search(f, left, filesize, search):
right = filesize
    # Base case
    if right >= left:
        mid = int(left + (right - left) / 2)
        # The element is exactly at the middle position
        line = get_line_detail(f, mid, search)
        # print(line)
        if line == search:
            return mid
        # The target is smaller than the middle element, so only search the left half
        elif line > search:
            # print('bigger')
            return binary_search(f, left, mid - 1, search)
        # The target is larger than the middle element, so only search the right half
        else:
            # print('lower')
            return binary_search(f, mid + 1, right, search)
    else:
        # Not found
return -1
def get_all_match(f, offset, search):
    # walk backwards to find a line that no longer matches the search prefix
max_scan = LIMIT['MaxScan']
min_search_offset = offset
search_length = len(search)
all_line = []
while True:
if max_scan <= 0:
break
        min_search_offset = offset - WALKBYTES
        if min_search_offset < 0:
            return []
        line = get_line_detail(f, min_search_offset, search)
        if line == '':
            return []
        if line != search:
            break
        # keep stepping backwards while the lines still match
        offset = min_search_offset
        max_scan -= 1
f.seek(min_search_offset)
max_output_line = LIMIT['MaxOutputLines']
first_match = False
while True:
if max_output_line <= 0:
break
line = f.readline().strip()
match_string = line[:len(search)] if len(
line) > search_length else line
if match_string == search:
all_line.append(line)
if first_match is False:
first_match = True
        elif first_match:
            break
        # count the line so MaxOutputLines actually bounds the scan
        max_output_line -= 1
return all_line
def search_rapid7(search):
f = open(FPATH, 'r')
fsize = os.path.getsize(FPATH)
search = search[::-1]
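    # The data file is assumed to store each domain reversed and sorted, so reversing
    # the query turns a suffix match (e.g. ".baidu.com") into a prefix binary search.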
offset = binary_search(f, 0, fsize, search)
all_line = get_all_match(f, offset, search)
all_line = [i[::-1] for i in all_line]
f.close()
# print("total result: {}".format(len(all_line)))
# print(end - start)
return all_line
if __name__ == '__main__':
search_rapid7(".baidu.com")
| 22.77686
| 60
| 0.560958
|
fe65c32f6421f0b4291964a9de414aa6aab9f539
| 1,559
|
py
|
Python
|
pymote/tests/test_network.py
|
darbula/pymote
|
fd581cc12fcd10beec7e4d72e90bf6250ac17699
|
[
"BSD-3-Clause"
] | 16
|
2015-01-22T11:52:48.000Z
|
2021-01-31T23:30:45.000Z
|
pymote/tests/test_network.py
|
nkitic/pymote
|
84d852c922b39afc64950e967b8309ccd8faf6a5
|
[
"BSD-3-Clause"
] | 5
|
2015-10-30T10:45:04.000Z
|
2017-06-28T17:42:04.000Z
|
pymote/tests/test_network.py
|
nkitic/pymote
|
84d852c922b39afc64950e967b8309ccd8faf6a5
|
[
"BSD-3-Clause"
] | 17
|
2015-06-14T08:53:55.000Z
|
2021-08-25T00:53:07.000Z
|
import unittest
from pymote.network import Network
from pymote.node import Node
from pymote.environment import Environment2D
from pymote.channeltype import ChannelType
from pymote.conf import settings
class TestNetworkCreation(unittest.TestCase):
def setUp(self):
self.net = Network()
self.net.environment.im[22, 22] = 0
self.node1 = self.net.add_node(pos=[22.8, 21.8])
self.node2 = self.net.add_node(pos=[21.9, 22.9])
self.node3 = self.net.add_node(pos=[21.7, 21.7])
def test_nodes(self):
"""Make sure the nodes are added."""
self.assertTrue(isinstance(self.node1, Node))
self.assertEqual(len(self.net.node), 3)
if (isinstance(self.net.environment, Environment2D)):
self.assertEqual(self.net.environment.im.shape,
settings.ENVIRONMENT2D_SHAPE,
'incorrect default size')
self.assertTrue(isinstance(self.net.channelType, ChannelType))
def test_visibility(self):
"""
        Pixel 22,22 is not free space, so node1 and node2 should not be
        visible to each other, but node3 is visible.
"""
self.assertFalse(self.net.environment\
.are_visible(self.net.pos[self.node1],
self.net.pos[self.node2]))
self.assertTrue(self.net.environment\
.are_visible(self.net.pos[self.node2],
self.net.pos[self.node3]))
| 39.974359
| 74
| 0.583066
|
00cdb768f52307fc907a0ed4e501a925a132b5a8
| 11,270
|
py
|
Python
|
tests/test_andata.py
|
CHIMEFRB/ch_util
|
37b2f2510ba39def44ecce0e8cb729e2ea108603
|
[
"MIT"
] | 1
|
2021-02-16T15:59:08.000Z
|
2021-02-16T15:59:08.000Z
|
tests/test_andata.py
|
CHIMEFRB/ch_util
|
37b2f2510ba39def44ecce0e8cb729e2ea108603
|
[
"MIT"
] | 20
|
2021-01-22T01:43:14.000Z
|
2022-02-15T05:08:30.000Z
|
tests/test_andata.py
|
CHIMEFRB/ch_util
|
37b2f2510ba39def44ecce0e8cb729e2ea108603
|
[
"MIT"
] | 2
|
2021-03-10T00:47:09.000Z
|
2021-06-03T18:51:23.000Z
|
"""Unit tests for analysis data format."""
from functools import wraps
import h5py
import numpy as np
import os
import pytest
import warnings
from ch_util import andata
from caput.memh5 import MemGroup
import data_paths
# All the test data file names.
acq_fname_list = data_paths.paths1_0
acq_fname = acq_fname_list[0]
acq_fname_root = data_paths.dir1_0
# Old testdata is known to create warnings due to missing gain information.
# Unfortunately, this suppresses warnings for all modules, not just this one.
def ignore_warnings(test_func):
@wraps(test_func)
def do_test(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
return test_func(*args, **kwargs)
return do_test
# Inspect test data to figure out some of the expectations.
with h5py.File(acq_fname, "r") as f:
NTIME1 = len(f["vis"])
if "amtel_adc_therm" in f["cal"]:
ATEL = "amtel_adc_therm"
elif "atmel_adc_therm" in f["cal"]:
ATEL = "atmel_adc_therm"
else:
raise RuntimeError("Funky test data.")
NTIME = 0
for fname in acq_fname_list:
with h5py.File(fname, "r") as f:
NTIME += len(f["vis"])
# Tests for high level data reader.
@pytest.fixture
def reader():
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
yield andata.Reader(acq_fname_list)
@ignore_warnings
def test_select_read(reader):
reader.time_sel = (2, 8)
reader.prod_sel = 9
sl = slice(100, 900, 40)
reader.freq_sel = sl
reader.dataset_sel = ("vis", "timestamp")
data = reader.read()
assert data.vis.shape == (20, 1, 6)
all_data = andata.AnData.from_acq_h5(acq_fname_list)
assert np.allclose(data.vis, all_data.vis[sl, [9], 2:8])
assert set(data.datasets.keys()) == {
"vis",
"timestamp_cpu_s",
"timestamp_cpu_us",
"timestamp_fpga_count",
"gain",
}
@ignore_warnings
def test_select_time_range(reader):
dt = 10.0 # seconds
time = reader.time
start_time = time[0] + 1.6 * dt
stop_time = time[0] + 6.9 * dt
reader.select_time_range(start_time, stop_time)
data = reader.read()
assert data.ntime == 5
assert np.all(data.timestamp >= start_time)
assert np.all(data.timestamp < stop_time)
# Test near edge behaviour.
start_time = time[0] + 0.9999999 * dt
stop_time = time[0] + 5.9999999 * dt
reader.select_time_range(start_time, stop_time)
data = reader.read()
assert data.ntime == 5
assert np.all(data.timestamp >= start_time)
assert np.all(data.timestamp < stop_time)
@ignore_warnings
def test_select_freq_range(reader):
low_freq = 452.0
high_freq = 727.0
reader.select_freq_range(low_freq, high_freq)
data = reader.read()
assert np.all(data.index_map["freq"]["centre"] >= low_freq)
assert np.all(data.index_map["freq"]["centre"] < high_freq)
step_freq = 50.0
reader.select_freq_range(low_freq, high_freq, step_freq)
data = reader.read()
expected_centres = np.arange(low_freq, high_freq, step_freq)[::-1]
diff_centres = data.index_map["freq"]["centre"] - expected_centres
assert np.all(abs(diff_centres) <= 400.0 / 1024)
@ignore_warnings
def test_select_frequencies(reader):
freqs = [784.0, 465.0, 431.0]
reader.select_freq_physical(freqs)
data = reader.read()
diff_centres = data.index_map["freq"]["centre"] - np.array(freqs)
assert np.all(abs(diff_centres) <= 400.0 / 1024 / 2)
with pytest.raises(ValueError):
reader.select_freq_physical([324.0])
# Tests for loading early acquisition data.
@ignore_warnings
def test_load_fname():
data = andata.AnData.from_acq_h5(acq_fname)
check_result(data, NTIME1)
@ignore_warnings
def test_load_file_obj():
F = h5py.File(acq_fname, "r")
data = andata.AnData.from_acq_h5(F)
F.close()
check_result(data, NTIME1)
@ignore_warnings
def test_subset_vis():
data = andata.AnData.from_acq_h5(acq_fname, prod_sel=[1, 3, 9], freq_sel=6)
assert data.nprod == 3
assert data.nfreq == 1
assert data.datasets["vis_flag_rfi"].shape == data.vis.shape
assert data.index_map["freq"]["centre"] == pytest.approx(797.65625)
assert data.index_map["freq"]["width"] == pytest.approx(0.390625)
all_data = andata.AnData.from_acq_h5(acq_fname)
assert np.allclose(data.vis, all_data.vis[6, [1, 3, 9], :])
prod = data.index_map["prod"]
assert list(prod[0]) == [0, 1]
assert list(prod[2]) == [1, 2]
@ignore_warnings
def test_subset_datasets():
data = andata.AnData.from_acq_h5(acq_fname, datasets=("fpga_hk",))
assert list(data.datasets.keys()) == ["fpga_hk"]
# Tests for AnData class.
@pytest.fixture
def corr_data():
yield andata.CorrData()
def test_properties(corr_data):
# Check that the "properties" are read only.
with pytest.raises(AttributeError):
corr_data.__setattr__("datasets", {})
with pytest.raises(AttributeError):
corr_data.__setattr__("cal", {})
with pytest.raises(AttributeError):
corr_data.__setattr__("attrs", {})
def test_vis_shortcuts(corr_data):
# More sophisticated base calculation
def getbase(a):
b = a.base
if b is None:
return a
else:
return getbase(b)
vis = np.arange(60)
vis.shape = (3, 2, 10)
corr_data.create_dataset("vis", data=vis)
corr_data["vis"].attrs["cal"] = "stuff"
assert getbase(corr_data.vis[:]) is vis
assert corr_data.vis.attrs == {"cal": "stuff"}
assert np.allclose(corr_data.vis[0:2:, 0, 1:3:9], vis[0:2:, 0, 1:3:9])
# Tests for loading and saving to/from file.
@pytest.fixture
def test_fname():
return "tmp_test_AnData.hdf5"
@pytest.fixture
def write_data(test_fname):
"""Makes sure there is test file to work with."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
data = andata.AnData.from_acq_h5(acq_fname)
data.save(test_fname, mode="w")
yield
# Remove test data.
if os.path.isfile(test_fname):
os.remove(test_fname)
def test_load_fname2(test_fname, write_data):
data = andata.AnData.from_file(test_fname)
check_result(data, NTIME1)
def test_load_FO(test_fname, write_data):
F = h5py.File(test_fname, mode="r")
data = andata.AnData.from_file(F)
check_result(data, NTIME1)
# Make sure I can write to data, since it should be in memory.
data.vis[0, 0, 0] = 10
data.datasets["timestamp_cpu_s"][10] = 12
assert data.datasets["timestamp_cpu_s"][10] == 12
# Make sure the file is still open.
assert F["timestamp_cpu_s"][10] != 12
F.close()
# Tests for loading multiple acq files.
@ignore_warnings
def test_load_list():
data = andata.AnData.from_acq_h5(acq_fname_list)
check_result(data, NTIME)
@ignore_warnings
def test_load_glob():
data = andata.AnData.from_acq_h5(acq_fname_root + "/*")
check_result(data, NTIME)
@ignore_warnings
def test_load_subsets():
data = andata.AnData.from_acq_h5(acq_fname_list, start=6, stop=-5)
check_result(data, NTIME - 6 - 5)
data = andata.AnData.from_acq_h5(acq_fname_list, start=20, stop=-1)
check_result(data, NTIME - 20 - 1)
data = andata.AnData.from_acq_h5(acq_fname_list, start=2, stop=8)
check_result(data, 6)
# Tests for converting from acq format to analysis format on disk.
@ignore_warnings
def test_stores():
group = MemGroup()
data = andata.AnData.from_acq_h5(acq_fname_list, out_group=group)
assert data._data is group
assert "vis" in group
assert group["vis"].name == data.vis.name
# Basic acq format loading error checking.
@pytest.fixture
def acq_list():
yield [MemGroup.from_hdf5(f) for f in acq_fname_list]
@ignore_warnings
def test_extra_dataset(acq_list):
nt = acq_list[1]["vis"].shape[0]
acq_list[1].create_dataset("stuff", shape=(nt,), dtype=float)
with pytest.raises(ValueError):
andata.AnData.from_acq_h5(acq_list)
@ignore_warnings
def test_missing_dataset(acq_list):
nt = acq_list[0]["vis"].shape[0]
acq_list[0].create_dataset("stuff", shape=(nt,), dtype=float)
with pytest.raises(ValueError):
andata.AnData.from_acq_h5(acq_list)
# Test getting timestamps from acq files.
@pytest.fixture
def data():
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
yield andata.AnData.from_acq_h5(acq_fname)
def test_timestamp(data):
"""Just makes sure timestamps are calculated for acq data without any
validation."""
timestamp = data.timestamp
assert len(timestamp) == NTIME1
@pytest.fixture
def data_list():
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
yield [andata.AnData.from_acq_h5(fname) for fname in acq_fname_list]
@ignore_warnings
def test_works(data_list):
right_answer = andata.AnData.from_acq_h5(acq_fname_list)
merged_data = andata.concatenate(data_list)
assert np.allclose(merged_data.vis, right_answer.vis)
assert len(merged_data.index_map["time"]) == len(right_answer.index_map["time"])
@ignore_warnings
def test_start_end_inds(data_list):
right_answer = andata.AnData.from_acq_h5(acq_fname_list, start=3, stop=26)
merged_data = andata.concatenate(data_list, start=3, stop=26)
assert np.allclose(merged_data.vis, right_answer.vis)
assert len(merged_data.index_map["time"]) == len(right_answer.index_map["time"])
def check_result(data, ntime):
"""Checks that the data more or less looks like it should if converted from
the test data."""
# Make sure the data is there and that it has the expected shape.
assert data.vis.shape == (1024, 36, ntime)
assert data.vis.dtype == np.complex64
    # Check that 'serial_adc' was properly split into 8 datasets.
count_serial_adc = 0
for dset_name in data.datasets.keys():
if dset_name[:10] == "serial_adc":
count_serial_adc += 1
assert "cal" in data.datasets[dset_name].attrs
assert data.datasets[dset_name].attrs["cal"] == ATEL
assert count_serial_adc == 8
    # Make sure the cal data is actually there.
assert ATEL in data.cal
# Check the fpga_hk dataset, as it is the only one that didn't need to be split.
assert data.datasets["fpga_hk"].shape == (1, ntime)
assert data.datasets["fpga_hk"].dtype == np.float32
# Check a few of the attributes for known values.
assert "n_antenna" in data.history["acq"]
assert data.history["acq"]["n_antenna"] == 8
# Check a few of the cal entries.
assert "b" in data.cal[ATEL]
assert data.cal[ATEL]["b"] == "3900"
# This is the only actual check of the content of the datasets. Make sure
# the timestamp is increasing in exactly 10s increments.
assert np.all(np.diff(data.datasets["timestamp_cpu_s"]) == 10)
# Check that the data attributes are correctly calculated.
freq_width = 400.0 / 1024
assert np.allclose(data.index_map["freq"]["width"], freq_width)
freq_centre = np.linspace(800.0, 400.0, 1024, endpoint=False)
assert np.allclose(data.index_map["freq"]["centre"], freq_centre)
| avg_line_length: 31.218837 | max_line_length: 84 | alphanum_fraction: 0.687578 |
hexsha: 89fb02ecce1dc458aa5cc83d0191920b51358fd2 | size: 370 | ext: py | lang: Python
max_stars_repo: pyamg/krylov/setup.py | rsmedleystevenson/pyamg | 72c3b43e0d14223e3849cb7e5f0bc8817b342373 | ["MIT"] | null | null | null
max_issues_repo: pyamg/krylov/setup.py | rsmedleystevenson/pyamg | 72c3b43e0d14223e3849cb7e5f0bc8817b342373 | ["MIT"] | null | null | null
max_forks_repo: pyamg/krylov/setup.py | rsmedleystevenson/pyamg | 72c3b43e0d14223e3849cb7e5f0bc8817b342373 | ["MIT"] | null | null | null
#!/usr/bin/env python
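# numpy.distutils configuration for the pyamg.krylov subpackage.  Typically this
# file is not run directly but is collected by the parent package's setup via
# config.add_subpackage('krylov') (an assumption based on the standard
# numpy.distutils layout, not stated in this file).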
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('krylov', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| avg_line_length: 23.125 | max_line_length: 62 | alphanum_fraction: 0.724324 |
hexsha: f55ed85cd6fca22d1f5b9b13ea30d41ea6d9e1db | size: 116481 | ext: py | lang: Python
max_stars_repo: python/ccxt/async_support/huobi.py | dedeisep/ccxt | b45680acf521f2881733a29161b40d457c935b80 | ["MIT"] | null | null | null
max_issues_repo: python/ccxt/async_support/huobi.py | dedeisep/ccxt | b45680acf521f2881733a29161b40d457c935b80 | ["MIT"] | null | null | null
max_forks_repo: python/ccxt/async_support/huobi.py | dedeisep/ccxt | b45680acf521f2881733a29161b40d457c935b80 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class huobi(Exchange):
def describe(self):
return self.deep_extend(super(huobi, self).describe(), {
'id': 'huobi',
'name': 'Huobi',
'countries': ['CN'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome39'],
'certified': True,
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro', # api.testnet.huobi.pro
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'CORS': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingLimits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
# 'test': {
# 'market': 'https://api.testnet.huobi.pro',
# 'public': 'https://api.testnet.huobi.pro',
# 'private': 'https://api.testnet.huobi.pro',
# },
'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
'hostnames': {
'contract': 'api.hbdm.com',
'spot': 'api.huobi.pro',
# recommended for AWS
# 'contract': 'api.hbdm.vn',
# 'spot': 'api-aws.huobi.pro',
},
'api': {
'contract': 'https://{hostname}',
'spot': 'https://{hostname}',
'market': 'https://{hostname}',
'public': 'https://{hostname}',
'private': 'https://{hostname}',
'v2Public': 'https://{hostname}',
'v2Private': 'https://{hostname}',
},
'www': 'https://www.huobi.com',
# 'referral': {
# 'url': 'https://www.huobi.com/en-us/topic/double-reward/?invite_code=6rmm2223',
# 'discount': 0.15,
# },
'doc': [
'https://huobiapi.github.io/docs/spot/v1/cn/',
'https://huobiapi.github.io/docs/dm/v1/cn/',
'https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/',
'https://huobiapi.github.io/docs/usdt_swap/v1/cn/',
'https://huobiapi.github.io/docs/option/v1/cn/',
],
'fees': 'https://www.huobi.com/about/fee/',
},
'api': {
# ------------------------------------------------------------
# old api definitions
'v2Public': {
'get': {
                        'reference/currencies': 1,  # currency and chain reference information
                        'market-status': 1,  # get current market status
},
},
'v2Private': {
'get': {
'account/ledger': 1,
'account/withdraw/quota': 1,
                        'account/withdraw/address': 1,  # query withdrawal addresses (parent user only)
                        'account/deposit/address': 1,
                        'account/repayment': 5,  # query repayment transaction records
                        'reference/transact-fee-rate': 1,
                        'account/asset-valuation': 0.2,  # get account asset valuation
                        'point/account': 5,  # query point card balance
                        'sub-user/user-list': 1,  # get sub-user list
                        'sub-user/user-state': 1,  # get the status of a specific sub-user
                        'sub-user/account-list': 1,  # get the account list of a specific sub-user
                        'sub-user/deposit-address': 1,  # query sub-user deposit address
                        'sub-user/query-deposit': 1,  # query sub-user deposit history
                        'user/api-key': 1,  # query API key information for parent/sub users
                        'user/uid': 1,  # get the UID for parent/sub users
                        'algo-orders/opening': 1,  # query untriggered (open) conditional orders
                        'algo-orders/history': 1,  # query conditional order history
                        'algo-orders/specific': 1,  # query a specific conditional order
                        'c2c/offers': 1,  # query lending/borrowing offers
                        'c2c/offer': 1,  # query a specific lending/borrowing offer and its transactions
                        'c2c/transactions': 1,  # query lending/borrowing transaction records
                        'c2c/repayment': 1,  # query repayment transaction records
                        'c2c/account': 1,  # query account balance
                        'etp/reference': 1,  # basic reference information
                        'etp/transactions': 5,  # get leveraged ETP creation/redemption records
                        'etp/transaction': 5,  # get a specific leveraged ETP creation/redemption record
                        'etp/rebalance': 1,  # get leveraged ETP rebalance records
                        'etp/limit': 1,  # get ETP holding limits
},
'post': {
'account/transfer': 1,
                        'account/repayment': 5,  # repay borrowed coins (cross and isolated margin)
                        'point/transfer': 5,  # point card transfer
                        'sub-user/management': 1,  # freeze/unfreeze sub-user
                        'sub-user/creation': 1,  # create sub-user
                        'sub-user/tradable-market': 1,  # set sub-user trading permissions
                        'sub-user/transferability': 1,  # set sub-user asset transfer-out permission
                        'sub-user/api-key-generation': 1,  # create sub-user API key
                        'sub-user/api-key-modification': 1,  # modify sub-user API key
                        'sub-user/api-key-deletion': 1,  # delete sub-user API key
                        'sub-user/deduct-mode': 1,  # set sub-user fee deduction mode
                        'algo-orders': 1,  # place a conditional order
                        'algo-orders/cancel-all-after': 1,  # automatically cancel orders after a timeout
                        'algo-orders/cancellation': 1,  # cancel a conditional order (before trigger)
                        'c2c/offer': 1,  # place a lending/borrowing offer
                        'c2c/cancellation': 1,  # cancel a lending/borrowing offer
                        'c2c/cancel-all': 1,  # cancel all lending/borrowing offers
                        'c2c/repayment': 1,  # repay borrowed coins
                        'c2c/transfer': 1,  # asset transfer
                        'etp/creation': 5,  # leveraged ETP creation (subscription)
                        'etp/redemption': 5,  # leveraged ETP redemption
                        'etp/{transactId}/cancel': 10,  # cancel a single leveraged ETP order
                        'etp/batch-cancel': 50,  # batch cancel leveraged ETP orders
},
},
'market': {
'get': {
                        'history/kline': 1,  # get kline (candlestick) data
                        'detail/merged': 1,  # get aggregated ticker
                        'depth': 1,  # get market depth data
                        'trade': 1,  # get trade detail data
                        'history/trade': 1,  # get recent trade records in batch
                        'detail': 1,  # get market detail 24h volume data
                        'tickers': 1,
                        'etp': 1,  # get real-time leveraged ETP net asset value
},
},
'public': {
'get': {
                        'common/symbols': 1,  # query all trading pairs supported by the system
                        'common/currencys': 1,  # query all currencies supported by the system
                        'common/timestamp': 1,  # query current system time
'common/exchange': 1, # order limits
'settings/currencys': 1, # ?language=en-US
},
},
'private': {
'get': {
                        'account/accounts': 0.2,  # query all accounts of the current user (i.e. account-id)
                        'account/accounts/{id}/balance': 0.2,  # query the balance of a specified account
'account/accounts/{sub-uid}': 1,
'account/history': 4,
'cross-margin/loan-info': 1,
                        'margin/loan-info': 1,  # query margin loan interest rates and quotas
'fee/fee-rate/get': 1,
'order/openOrders': 0.4,
'order/orders': 0.4,
                        'order/orders/{id}': 0.4,  # query the details of an order
                        'order/orders/{id}/matchresults': 0.4,  # query the match results of an order
'order/orders/getClientOrder': 0.4,
                        'order/history': 1,  # query current and historical orders
                        'order/matchresults': 1,  # query current and historical trades
                        # 'dw/withdraw-virtual/addresses',  # query crypto withdrawal addresses (deprecated)
'query/deposit-withdraw': 1,
# 'margin/loan-info', # duplicate
                        'margin/loan-orders': 0.2,  # margin loan orders
                        'margin/accounts/balance': 0.2,  # margin account details
                        'cross-margin/loan-orders': 1,  # query cross-margin loan orders
                        'cross-margin/accounts/balance': 1,  # cross-margin account details
'points/actions': 1,
'points/orders': 1,
'subuser/aggregate-balance': 10,
'stable-coin/exchange_rate': 1,
'stable-coin/quote': 1,
},
'post': {
                        'account/transfer': 1,  # asset transfer (generic endpoint for transfers between parent and sub users)
'futures/transfer': 1,
'order/batch-orders': 0.4,
                        'order/orders/place': 0.2,  # create and execute a new order in one step (recommended)
'order/orders/submitCancelClientOrder': 0.2,
'order/orders/batchCancelOpenOrders': 0.4,
                        # 'order/orders',  # create a new order request (create only, does not execute)
                        # 'order/orders/{id}/place',  # execute an order (only executes an already created order)
                        'order/orders/{id}/submitcancel': 0.2,  # request cancellation of an order
                        'order/orders/batchcancel': 0.4,  # batch cancel orders
                        # 'dw/balance/transfer',  # asset transfer
                        'dw/withdraw/api/create': 1,  # request a crypto withdrawal
                        # 'dw/withdraw-virtual/create',  # request a crypto withdrawal
                        # 'dw/withdraw-virtual/{id}/place',  # confirm a crypto withdrawal request (deprecated)
                        'dw/withdraw-virtual/{id}/cancel': 1,  # request cancellation of a crypto withdrawal
                        'dw/transfer-in/margin': 10,  # transfer from spot account into margin account
                        'dw/transfer-out/margin': 10,  # transfer from margin account back to spot account
                        'margin/orders': 10,  # apply for a margin loan
                        'margin/orders/{id}/repay': 10,  # repay a margin loan
                        'cross-margin/transfer-in': 1,  # asset transfer (into cross margin)
                        'cross-margin/transfer-out': 1,  # asset transfer (out of cross margin)
                        'cross-margin/orders': 1,  # apply for a cross-margin loan
                        'cross-margin/orders/{id}/repay': 1,  # repay a cross-margin loan
'stable-coin/exchange': 1,
'subuser/transfer': 10,
},
},
# ------------------------------------------------------------
# new api definitions
# 'https://status.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-dm.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-swap.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-linear-swap.huobigroup.com/api/v2/summary.json': 1,
'spot': {
'public': {
'get': {
'v2/market-status': 1,
'v1/common/symbols': 1,
'v1/common/currencys': 1,
'v2/reference/currencies': 1,
'v1/common/timestamp': 1,
'v1/common/exchange': 1, # order limits
# Market Data
'market/history/kline': 1,
'market/detail/merged': 1,
'market/tickers': 1,
'market/depth': 1,
'market/trade': 1,
'market/history/trade': 1,
'market/detail/': 1,
'market/etp': 1,
# ETP
'v2/etp/reference': 1,
'v2/etp/rebalance': 1,
},
},
'private': {
'get': {
# Account
'v1/account/accounts': 0.2,
'v1/account/accounts/{account-id}/balance': 0.2,
'v2/account/valuation': 1,
'v2/account/asset-valuation': 0.2,
'v1/account/history': 4,
'v2/account/ledger': 1,
'v2/point/account': 5,
# Wallet(Deposit and Withdraw)
'v2/account/deposit/address': 1,
'v2/account/withdraw/quota': 1,
'v2/account/withdraw/address': 1,
'v2/reference/currencies': 1,
'v1/query/deposit-withdraw': 1,
# Sub user management
'v2/user/api-key': 1,
'v2/user/uid': 1,
'v2/sub-user/user-list': 1,
'v2/sub-user/user-state': 1,
'v2/sub-user/account-list': 1,
'v2/sub-user/deposit-address': 1,
'v2/sub-user/query-deposit': 1,
'v1/subuser/aggregate-balance': 10,
'v1/account/accounts/{sub-uid}': 1,
# Trading
'v1/order/openOrders': 0.4,
'v1/order/orders/{order-id}': 0.4,
'v1/order/orders/getClientOrder': 0.4,
'v1/order/orders/{order-id}/matchresults': 0.4,
'v1/order/orders': 0.4,
'v1/order/history': 1,
'v1/order/matchresults': 1,
'v2/reference/transact-fee-rate': 1,
# Conditional Order
'v2/algo-orders/opening': 1,
'v2/algo-orders/history': 1,
'v2/algo-orders/specific': 1,
# Margin Loan(Cross/Isolated)
'v1/margin/loan-info': 1,
'v1/margin/loan-orders': 0.2,
'v1/margin/accounts/balance': 0.2,
'v1/cross-margin/loan-info': 1,
'v1/cross-margin/loan-orders': 1,
'v1/cross-margin/accounts/balance': 1,
'v2/account/repayment': 5,
# Stable Coin Exchange
'v1/stable-coin/quote': 1,
# ETP
'v2/etp/transactions': 5,
'v2/etp/transaction': 5,
'v2/etp/limit': 1,
},
'post': {
# Account
'v1/account/transfer': 1,
'v1/futures/transfer': 1, # future transfers
'v2/point/transfer': 5,
'v2/account/transfer': 1, # swap transfers
# Wallet(Deposit and Withdraw)
'v1/dw/withdraw/api/create': 1,
'v1/dw/withdraw-virtual/{withdraw-id}/cancel': 1,
# Sub user management
'v2/sub-user/deduct-mode': 1,
'v2/sub-user/creation': 1,
'v2/sub-user/management': 1,
'v2/sub-user/tradable-market': 1,
'v2/sub-user/transferability': 1,
'v2/sub-user/api-key-generation': 1,
'v2/sub-user/api-key-modification': 1,
'v2/sub-user/api-key-deletion': 1,
'v1/subuser/transfer': 10,
# Trading
'v1/order/orders/place': 0.2,
'v1/order/batch-orders': 0.4,
'v1/order/orders/{order-id}/submitcancel': 0.2,
'v1/order/orders/submitCancelClientOrder': 0.2,
'v1/order/orders/batchCancelOpenOrders': 0.4,
'v1/order/orders/batchcancel': 0.4,
'v2/algo-orders/cancel-all-after': 1,
# Conditional Order
'v2/algo-orders': 1,
'v2/algo-orders/cancellation': 1,
# Margin Loan(Cross/Isolated)
'v2/account/repayment': 5,
'v1/dw/transfer-in/margin': 10,
'v1/dw/transfer-out/margin': 10,
'v1/margin/orders': 10,
'v1/margin/orders/{order-id}/repay': 10,
'v1/cross-margin/transfer-in': 1,
'v1/cross-margin/transfer-out': 1,
'v1/cross-margin/orders': 1,
'v1/cross-margin/orders/{order-id}/repay': 1,
# Stable Coin Exchange
'v1/stable-coin/exchange': 1,
# ETP
'v2/etp/creation': 5,
'v2/etp/redemption': 5,
'v2/etp/{transactId}/cancel': 10,
'v2/etp/batch-cancel': 50,
},
},
},
'contract': {
'public': {
'get': {
'api/v1/timestamp': 1,
# Future Market Data interface
'api/v1/contract_contract_info': 1,
'api/v1/contract_index': 1,
'api/v1/contract_price_limit': 1,
'api/v1/contract_open_interest': 1,
'api/v1/contract_delivery_price': 1,
'market/depth': 1,
'market/bbo': 1,
'market/history/kline': 1,
'index/market/history/mark_price_kline': 1,
'market/detail/merged': 1,
'market/detail/batch_merged': 1,
'market/trade': 1,
'market/history/trade': 1,
'api/v1/contract_risk_info': 1,
'api/v1/contract_insurance_fund': 1,
'api/v1/contract_adjustfactor': 1,
'api/v1/contract_his_open_interest': 1,
'api/v1/contract_ladder_margin': 1,
'api/v1/contract_api_state': 1,
'api/v1/contract_elite_account_ratio': 1,
'api/v1/contract_elite_position_ratio': 1,
'api/v1/contract_liquidation_orders': 1,
'api/v1/contract_settlement_records': 1,
'index/market/history/index': 1,
'index/market/history/basis': 1,
'api/v1/contract_estimated_settlement_price': 1,
# Swap Market Data interface
'swap-api/v1/swap_contract_info': 1,
'swap-api/v1/swap_index': 1,
'swap-api/v1/swap_price_limit': 1,
'swap-api/v1/swap_open_interest': 1,
'swap-ex/market/depth': 1,
'swap-ex/market/bbo': 1,
'swap-ex/market/history/kline': 1,
'index/market/history/swap_mark_price_kline': 1,
'swap-ex/market/detail/merged': 1,
'swap-ex/market/detail/batch_merged': 1,
'swap-ex/market/trade': 1,
'swap-ex/market/history/trade': 1,
'swap-api/v1/swap_risk_info': 1,
'swap-api/v1/swap_insurance_fund': 1,
'swap-api/v1/swap_adjustfactor': 1,
'swap-api/v1/swap_his_open_interest': 1,
'swap-api/v1/swap_ladder_margin': 1,
'swap-api/v1/swap_api_state': 1,
'swap-api/v1/swap_elite_account_ratio': 1,
'swap-api/v1/swap_elite_position_ratio': 1,
'swap-api/v1/swap_estimated_settlement_price': 1,
'swap-api/v1/swap_liquidation_orders': 1,
'swap-api/v1/swap_settlement_records': 1,
'swap-api/v1/swap_funding_rate': 1,
'swap-api/v1/swap_batch_funding_rate': 1,
'swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/swap_premium_index_kline': 1,
'index/market/history/swap_estimated_rate_kline': 1,
'index/market/history/swap_basis': 1,
                            # Linear Swap Market Data interface
'linear-swap-api/v1/swap_contract_info': 1,
'linear-swap-api/v1/swap_index': 1,
'linear-swap-api/v1/swap_price_limit': 1,
'linear-swap-api/v1/swap_open_interest': 1,
'linear-swap-ex/market/depth': 1,
'linear-swap-ex/market/bbo': 1,
'linear-swap-ex/market/history/kline': 1,
'index/market/history/linear_swap_mark_price_kline': 1,
'linear-swap-ex/market/detail/merged': 1,
'linear-swap-ex/market/detail/batch_merged': 1,
'linear-swap-ex/market/trade': 1,
'linear-swap-ex/market/history/trade': 1,
'linear-swap-api/v1/swap_risk_info': 1,
                            'linear-swap-api/v1/swap_insurance_fund': 1,
'linear-swap-api/v1/swap_adjustfactor': 1,
'linear-swap-api/v1/swap_cross_adjustfactor': 1,
'linear-swap-api/v1/swap_his_open_interest': 1,
'linear-swap-api/v1/swap_ladder_margin': 1,
'linear-swap-api/v1/swap_cross_ladder_margin': 1,
'linear-swap-api/v1/swap_api_state': 1,
'linear-swap-api/v1/swap_cross_transfer_state': 1,
'linear-swap-api/v1/swap_cross_trade_state': 1,
'linear-swap-api/v1/swap_elite_account_ratio': 1,
'linear-swap-api/v1/swap_elite_position_ratio': 1,
'linear-swap-api/v1/swap_liquidation_orders': 1,
'linear-swap-api/v1/swap_settlement_records': 1,
'linear-swap-api/v1/swap_funding_rate': 1,
'linear-swap-api/v1/swap_batch_funding_rate': 1,
'linear-swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/linear_swap_premium_index_kline': 1,
'index/market/history/linear_swap_estimated_rate_kline': 1,
'index/market/history/linear_swap_basis': 1,
'linear-swap-api/v1/swap_estimated_settlement_price': 1,
},
},
'private': {
'get': {
# Future Account Interface
'api/v1/contract_api_trading_status': 1,
# Swap Account Interface
'swap-api/v1/swap_api_trading_status': 1,
                            # Linear Swap Account Interface
'linear-swap-api/v1/swap_api_trading_status': 1,
},
'post': {
# Future Account Interface
'api/v1/contract_balance_valuation': 1,
'api/v1/contract_account_info': 1,
'api/v1/contract_position_info': 1,
'api/v1/contract_sub_auth': 1,
'api/v1/contract_sub_account_list': 1,
'api/v1/contract_sub_account_info_list': 1,
'api/v1/contract_sub_account_info': 1,
'api/v1/contract_sub_position_info': 1,
'api/v1/contract_financial_record': 1,
'api/v1/contract_financial_record_exact': 1,
'api/v1/contract_user_settlement_records': 1,
'api/v1/contract_order_limit': 1,
'api/v1/contract_fee': 1,
'api/v1/contract_transfer_limit': 1,
'api/v1/contract_position_limit': 1,
'api/v1/contract_account_position_info': 1,
'api/v1/contract_master_sub_transfer': 1,
'api/v1/contract_master_sub_transfer_record': 1,
'api/v1/contract_available_level_rate': 1,
# Future Trade Interface
'api/v1/contract_order': 1,
                            'api/v1/contract_batchorder': 1,
'api/v1/contract_cancel': 1,
'api/v1/contract_cancelall': 1,
'api/v1/contract_switch_lever_rate': 1,
'api/v1/lightning_close_position': 1,
'api/v1/contract_order_info': 1,
'api/v1/contract_order_detail': 1,
'api/v1/contract_openorders': 1,
'api/v1/contract_hisorders': 1,
'api/v1/contract_hisorders_exact': 1,
'api/v1/contract_matchresults': 1,
'api/v1/contract_matchresults_exact': 1,
# Contract Strategy Order Interface
'api/v1/contract_trigger_order': 1,
'api/v1/contract_trigger_cancel': 1,
'api/v1/contract_trigger_cancelall': 1,
'api/v1/contract_trigger_openorders': 1,
'api/v1/contract_trigger_hisorders': 1,
'api/v1/contract_tpsl_order': 1,
'api/v1/contract_tpsl_cancel': 1,
'api/v1/contract_tpsl_cancelall': 1,
'api/v1/contract_tpsl_openorders': 1,
'api/v1/contract_tpsl_hisorders': 1,
'api/v1/contract_relation_tpsl_order': 1,
'api/v1/contract_track_order': 1,
'api/v1/contract_track_cancel': 1,
'api/v1/contract_track_cancelall': 1,
'api/v1/contract_track_openorders': 1,
'api/v1/contract_track_hisorders': 1,
# Swap Account Interface
'swap-api/v1/swap_balance_valuation': 1,
'swap-api/v1/swap_account_info': 1,
'swap-api/v1/swap_position_info': 1,
'swap-api/v1/swap_account_position_info': 1,
'swap-api/v1/swap_sub_auth': 1,
'swap-api/v1/swap_sub_account_list': 1,
'swap-api/v1/swap_sub_account_info_list': 1,
'swap-api/v1/swap_sub_account_info': 1,
'swap-api/v1/swap_sub_position_info': 1,
'swap-api/v1/swap_financial_record': 1,
'swap-api/v1/swap_financial_record_exact': 1,
'swap-api/v1/swap_user_settlement_records': 1,
'swap-api/v1/swap_available_level_rate': 1,
'swap-api/v1/swap_order_limit': 1,
'swap-api/v1/swap_fee': 1,
'swap-api/v1/swap_transfer_limit': 1,
'swap-api/v1/swap_position_limit': 1,
'swap-api/v1/swap_master_sub_transfer': 1,
'swap-api/v1/swap_master_sub_transfer_record': 1,
# Swap Trade Interface
'swap-api/v1/swap_order': 1,
'swap-api/v1/swap_batchorder': 1,
'swap-api/v1/swap_cancel': 1,
'swap-api/v1/swap_cancelall': 1,
'swap-api/v1/swap_lightning_close_position': 1,
'swap-api/v1/swap_switch_lever_rate': 1,
'swap-api/v1/swap_order_info': 1,
'swap-api/v1/swap_order_detail': 1,
'swap-api/v1/swap_openorders': 1,
'swap-api/v1/swap_hisorders': 1,
'swap-api/v1/swap_hisorders_exact': 1,
'swap-api/v1/swap_matchresults': 1,
'swap-api/v1/swap_matchresults_exact': 1,
# Swap Strategy Order Interface
'swap-api/v1/swap_trigger_order': 1,
'swap-api/v1/swap_trigger_cancel': 1,
'swap-api/v1/swap_trigger_cancelall': 1,
'swap-api/v1/swap_trigger_openorders': 1,
'swap-api/v1/swap_trigger_hisorders': 1,
'swap-api/v1/swap_tpsl_order': 1,
'swap-api/v1/swap_tpsl_cancel': 1,
'swap-api/v1/swap_tpsl_cancelall': 1,
'swap-api/v1/swap_tpsl_openorders': 1,
'swap-api/v1/swap_tpsl_hisorders': 1,
'swap-api/v1/swap_relation_tpsl_order': 1,
'swap-api/v1/swap_track_order': 1,
'swap-api/v1/swap_track_cancel': 1,
'swap-api/v1/swap_track_cancelall': 1,
'swap-api/v1/swap_track_openorders': 1,
'swap-api/v1/swap_track_hisorders': 1,
                            # Linear Swap Account Interface
'linear-swap-api/v1/swap_balance_valuation': 1,
'linear-swap-api/v1/swap_account_info': 1,
'linear-swap-api/v1/swap_cross_account_info': 1,
'linear-swap-api/v1/swap_position_info': 1,
'linear-swap-api/v1/swap_cross_position_info': 1,
'linear-swap-api/v1/swap_account_position_info': 1,
'linear-swap-api/v1/swap_cross_account_position_info': 1,
'linear-swap-api/v1/swap_sub_auth': 1,
'linear-swap-api/v1/swap_sub_account_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_list': 1,
'linear-swap-api/v1/swap_sub_account_info_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_info_list': 1,
'linear-swap-api/v1/swap_sub_account_info': 1,
'linear-swap-api/v1/swap_cross_sub_account_info': 1,
'linear-swap-api/v1/swap_sub_position_info': 1,
'linear-swap-api/v1/swap_cross_sub_position_info': 1,
'linear-swap-api/v1/swap_financial_record': 1,
'linear-swap-api/v1/swap_financial_record_exact': 1,
'linear-swap-api/v1/swap_user_settlement_records': 1,
'linear-swap-api/v1/swap_cross_user_settlement_records': 1,
'linear-swap-api/v1/swap_available_level_rate': 1,
'linear-swap-api/v1/swap_cross_available_level_rate': 1,
'linear-swap-api/v1/swap_order_limit': 1,
'linear-swap-api/v1/swap_fee': 1,
'linear-swap-api/v1/swap_transfer_limit': 1,
'linear-swap-api/v1/swap_cross_transfer_limit': 1,
'linear-swap-api/v1/swap_position_limit': 1,
'linear-swap-api/v1/swap_cross_position_limit': 1,
'linear-swap-api/v1/swap_master_sub_transfer': 1,
'linear-swap-api/v1/swap_master_sub_transfer_record': 1,
'linear-swap-api/v1/swap_transfer_inner': 1,
                            # Linear Swap Trade Interface
'linear-swap-api/v1/swap_order': 1,
'linear-swap-api/v1/swap_cross_order': 1,
'linear-swap-api/v1/swap_batchorder': 1,
'linear-swap-api/v1/swap_cross_batchorder': 1,
'linear-swap-api/v1/swap_cancel': 1,
'linear-swap-api/v1/swap_cross_cancel': 1,
'linear-swap-api/v1/swap_cancelall': 1,
'linear-swap-api/v1/swap_cross_cancelall': 1,
'linear-swap-api/v1/swap_switch_lever_rate': 1,
'linear-swap-api/v1/swap_cross_switch_lever_rate': 1,
'linear-swap-api/v1/swap_lightning_close_position': 1,
'linear-swap-api/v1/swap_cross_lightning_close_position': 1,
'linear-swap-api/v1/swap_order_info': 1,
'linear-swap-api/v1/swap_cross_order_info': 1,
'linear-swap-api/v1/swap_order_detail': 1,
'linear-swap-api/v1/swap_cross_order_detail': 1,
'linear-swap-api/v1/swap_openorders': 1,
'linear-swap-api/v1/swap_cross_openorders': 1,
'linear-swap-api/v1/swap_hisorders': 1,
'linear-swap-api/v1/swap_cross_hisorders': 1,
'linear-swap-api/v1/swap_hisorders_exact': 1,
'linear-swap-api/v1/swap_cross_hisorders_exact': 1,
'linear-swap-api/v1/swap_matchresults': 1,
'linear-swap-api/v1/swap_cross_matchresults': 1,
'linear-swap-api/v1/swap_matchresults_exact': 1,
'linear-swap-api/v1/swap_cross_matchresults_exact': 1,
                            # Linear Swap Strategy Order Interface
'linear-swap-api/v1/swap_trigger_order': 1,
'linear-swap-api/v1/swap_cross_trigger_order': 1,
'linear-swap-api/v1/swap_trigger_cancel': 1,
'linear-swap-api/v1/swap_cross_trigger_cancel': 1,
'linear-swap-api/v1/swap_trigger_cancelall': 1,
'linear-swap-api/v1/swap_cross_trigger_cancelall': 1,
'linear-swap-api/v1/swap_trigger_openorders': 1,
'linear-swap-api/v1/swap_cross_trigger_openorders': 1,
'linear-swap-api/v1/swap_trigger_hisorders': 1,
'linear-swap-api/v1/swap_cross_trigger_hisorders': 1,
'linear-swap-api/v1/swap_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_tpsl_order': 1,
'linear-swap-api/v1/swap_tpsl_cancel': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancel': 1,
'linear-swap-api/v1/swap_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_tpsl_openorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_openorders': 1,
'linear-swap-api/v1/swap_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_track_order': 1,
'linear-swap-api/v1/swap_cross_track_order': 1,
'linear-swap-api/v1/swap_track_cancel': 1,
'linear-swap-api/v1/swap_cross_track_cancel': 1,
'linear-swap-api/v1/swap_track_cancelall': 1,
'linear-swap-api/v1/swap_cross_track_cancelall': 1,
'linear-swap-api/v1/swap_track_openorders': 1,
'linear-swap-api/v1/swap_cross_track_openorders': 1,
'linear-swap-api/v1/swap_track_hisorders': 1,
'linear-swap-api/v1/swap_cross_track_hisorders': 1,
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'bad-request': BadRequest,
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of self currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for self symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
'require-symbol': BadSymbol, # {"status":"error","err-code":"require-symbol","err-msg":"Parameter `symbol` is required.","data":null}
},
},
'precisionMode': TICK_SIZE,
'options': {
'defaultType': 'spot', # spot, future, swap
'defaultSubType': 'inverse', # inverse, linear
'defaultNetwork': 'ERC20',
'networks': {
'ETH': 'erc20',
'TRX': 'trc20',
'HRC20': 'hrc20',
'HECO': 'hrc20',
'HT': 'hrc20',
'ALGO': 'algo',
'OMNI': '',
},
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'spot_private_get_v1_order_orders', # 'spot_private_get_v1_order_history' # https://github.com/ccxt/ccxt/pull/5392
'fetchOpenOrdersMethod': 'fetch_open_orders_v1', # 'fetch_open_orders_v2' # https://github.com/ccxt/ccxt/issues/5388
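                # either option can be overridden per instance before the call, e.g. (illustrative):
                # exchange.options['fetchOpenOrdersMethod'] = 'fetch_open_orders_v2'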
'createMarketBuyOrderRequiresPrice': True,
'language': 'en-US',
'broker': {
'id': 'AA03022abc',
},
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
async def fetch_time(self, params={}):
options = self.safe_value(self.options, 'fetchTime', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
method = 'spotPublicGetV1CommonTimestamp'
if (type == 'future') or (type == 'swap'):
method = 'contractPublicGetApiV1Timestamp'
response = await getattr(self, method)(params)
#
# spot
#
# {"status":"ok","data":1637504261099}
#
# future, swap
#
# {"status":"ok","ts":1637504164707}
#
return self.safe_integer_2(response, 'data', 'ts')
async def fetch_trading_limits(self, symbols=None, params={}):
        # this method should not be called directly, use loadTradingLimits() instead
        # by default it will try to load the trading limits for all symbols (with separate requests)
        # however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
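        # A minimal usage sketch (hedged; `exchange` is assumed to be a configured
        # instance of this class, and the exact return shape may vary by ccxt version):
        #
        #     limits = await exchange.load_trading_limits(['ETH/BTC', 'LTC/BTC'])
        #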
await self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = await self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
async def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = await self.spotPublicGetV1CommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
async def fetch_markets(self, params={}):
options = self.safe_value(self.options, 'fetchMarkets', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
if (type != 'spot') and (type != 'future') and (type != 'swap'):
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to 'spot', 'future', 'swap'") # eslint-disable-line quotes
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
if (subType != 'inverse') and (subType != 'linear'):
raise ExchangeError(self.id + " does not support '" + subType + "' type, set exchange.options['defaultSubType'] to 'inverse' or 'linear'") # eslint-disable-line quotes
method = 'spotPublicGetV1CommonSymbols'
query = self.omit(params, ['type', 'subType'])
spot = (type == 'spot')
contract = (type != 'spot')
future = (type == 'future')
swap = (type == 'swap')
linear = None
inverse = None
if contract:
linear = (subType == 'linear')
inverse = (subType == 'inverse') or future
if future:
method = 'contractPublicGetApiV1ContractContractInfo'
elif swap:
if inverse:
method = 'contractPublicGetSwapApiV1SwapContractInfo'
elif linear:
method = 'contractPublicGetLinearSwapApiV1SwapContractInfo'
response = await getattr(self, method)(query)
#
# spot
#
# {
# "status":"ok",
# "data":[
# {
# "base-currency":"xrp3s",
# "quote-currency":"usdt",
# "price-precision":4,
# "amount-precision":4,
# "symbol-partition":"innovation",
# "symbol":"xrp3susdt",
# "state":"online",
# "value-precision":8,
# "min-order-amt":0.01,
# "max-order-amt":1616.4353,
# "min-order-value":5,
# "limit-order-min-order-amt":0.01,
# "limit-order-max-order-amt":1616.4353,
# "limit-order-max-buy-amt":1616.4353,
# "limit-order-max-sell-amt":1616.4353,
# "sell-market-min-order-amt":0.01,
# "sell-market-max-order-amt":1616.4353,
# "buy-market-max-order-value":2500,
# "max-order-value":2500,
# "underlying":"xrpusdt",
# "mgmt-fee-rate":0.035000000000000000,
# "charge-time":"23:55:00",
# "rebal-time":"00:00:00",
# "rebal-threshold":-5,
# "init-nav":10.000000000000000000,
# "api-trading":"enabled",
# "tags":"etp,nav,holdinglimit"
# },
# ]
# }
#
# future
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC211126",
# "contract_type":"self_week",
# "contract_size":100.000000000000000000,
# "price_tick":0.010000000000000000,
# "delivery_date":"20211126",
# "delivery_time":"1637913600000",
# "create_date":"20211112",
# "contract_status":1,
# "settlement_time":"1637481600000"
# },
# ],
# "ts":1637474595140
# }
#
# swaps
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "contract_size":0.001000000000000000,
# "price_tick":0.100000000000000000,
# "delivery_time":"",
# "create_date":"20201021",
# "contract_status":1,
# "settlement_date":"1637481600000",
# "support_margin_mode":"all", # isolated
# },
# ],
# "ts":1637474774467
# }
#
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' fetchMarkets() returned an empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = None
quoteId = None
settleId = None
id = None
if contract:
id = self.safe_string(market, 'contract_code')
if swap:
parts = id.split('-')
baseId = self.safe_string(market, 'symbol')
quoteId = self.safe_string(parts, 1)
settleId = baseId if inverse else quoteId
elif future:
baseId = self.safe_string(market, 'symbol')
quoteId = 'USD'
settleId = baseId
else:
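                # spot market ids are the plain concatenation of the base and quote
                # currency ids, e.g. 'eth' + 'btc' -> 'ethbtc'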
baseId = self.safe_string(market, 'base-currency')
quoteId = self.safe_string(market, 'quote-currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote
expiry = None
if contract:
if inverse:
symbol += ':' + base
elif linear:
symbol += ':' + quote
if future:
expiry = self.safe_integer(market, 'delivery_time')
symbol += '-' + self.yymmdd(expiry)
contractSize = self.safe_number(market, 'contract_size')
pricePrecision = None
amountPrecision = None
costPrecision = None
if spot:
pricePrecision = self.safe_string(market, 'price-precision')
pricePrecision = self.parse_number('1e-' + pricePrecision)
amountPrecision = self.safe_string(market, 'amount-precision')
amountPrecision = self.parse_number('1e-' + amountPrecision)
costPrecision = self.safe_string(market, 'value-precision')
costPrecision = self.parse_number('1e-' + costPrecision)
else:
pricePrecision = self.safe_number(market, 'price_tick')
amountPrecision = 1
precision = {
'amount': amountPrecision,
'price': pricePrecision,
'cost': costPrecision,
}
maker = None
taker = None
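            # spot fees default to 0.2% for both maker and taker; OMG pairs are
            # treated as fee-free (hard-coded special case below)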
if spot:
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
minAmount = self.safe_number(market, 'min-order-amt', math.pow(10, -precision['amount']))
maxAmount = self.safe_number(market, 'max-order-amt')
minCost = self.safe_number(market, 'min-order-value', 0)
active = None
if spot:
state = self.safe_string(market, 'state')
active = (state == 'online')
elif contract:
contractStatus = self.safe_integer(market, 'contract_status')
active = (contractStatus == 1)
# 0 Delisting
# 1 Listing
# 2 Pending Listing
# 3 Suspension
# 4 Suspending of Listing
# 5 In Settlement
# 6 Delivering
# 7 Settlement Completed
# 8 Delivered
# 9 Suspending of Trade
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'contract': contract,
'spot': spot,
'future': future,
'swap': swap,
'linear': linear,
'inverse': inverse,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'contractSize': contractSize,
'active': active,
'precision': precision,
'taker': taker,
'maker': maker,
'limits': {
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
'leverage': {
'max': self.safe_number(market, 'leverage-ratio', 1),
'superMax': self.safe_number(market, 'super-margin-leverage-ratio', 1),
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
#
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
marketId = self.safe_string_2(ticker, 'symbol', 'contract_code')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_number(ticker['bid'], 0)
bidVolume = self.safe_number(ticker['bid'], 1)
else:
bid = self.safe_number(ticker, 'bid')
bidVolume = self.safe_value(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_number(ticker['ask'], 0)
askVolume = self.safe_number(ticker['ask'], 1)
else:
ask = self.safe_number(ticker, 'ask')
askVolume = self.safe_value(ticker, 'askSize')
open = self.safe_number(ticker, 'open')
close = self.safe_number(ticker, 'close')
baseVolume = self.safe_number(ticker, 'amount')
quoteVolume = self.safe_number(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
fieldName = 'symbol'
method = 'spotPublicGetMarketDetailMerged'
if market['future']:
method = 'contractPublicGetMarketDetailMerged'
elif market['swap']:
if market['inverse']:
method = 'contractPublicGetSwapExMarketDetailMerged'
elif market['linear']:
method = 'contractPublicGetLinearSwapExMarketDetailMerged'
fieldName = 'contract_code'
request[fieldName] = market['id']
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
# future, swap
#
# {
# "ch":"market.BTC211126.detail.merged",
# "status":"ok",
# "tick":{
# "amount":"669.3385682049668320322569544150680718474",
# "ask":[59117.44,48],
# "bid":[59082,48],
# "close":"59087.97",
# "count":5947,
# "high":"59892.62",
# "id":1637502670,
# "low":"57402.87",
# "open":"57638",
# "ts":1637502670059,
# "vol":"394598"
# },
# "ts":1637502670059
# }
#
tick = self.safe_value(response, 'tick', {})
ticker = self.parse_ticker(tick, market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
async def fetch_tickers(self, symbols=None, params={}):
options = self.safe_value(self.options, 'fetchTickers', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
method = 'spotPublicGetMarketTickers'
query = self.omit(params, ['type', 'subType'])
if type == 'future':
method = 'contractPublicGetMarketDetailBatchMerged'
elif type == 'swap':
if subType == 'inverse':
method = 'contractPublicGetSwapExMarketDetailBatchMerged'
elif subType == 'linear':
method = 'contractPublicGetLinearSwapExMarketDetailBatchMerged'
response = await getattr(self, method)(query)
#
# future
#
# {
# "status":"ok",
# "ticks":[
# {
# "id":1637504679,
# "ts":1637504679372,
# "ask":[0.10644,100],
# "bid":[0.10624,26],
# "symbol":"TRX_CW",
# "open":"0.10233",
# "close":"0.10644",
# "low":"0.1017",
# "high":"0.10725",
# "amount":"2340267.415144052378486261756692535687481566",
# "count":882,
# "vol":"24706"
# }
# ],
# "ts":1637504679376
# }
#
tickers = self.safe_value_2(response, 'data', 'ticks', [])
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': 'step0',
}
response = await self.spotPublicGetMarketDepth(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ts":1583474832008,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
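            # prefer the tick-level timestamp and fall back to the response-level 'ts'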
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
#
# fetchMyTrades(private)
#
# {
# 'symbol': 'swftcbtc',
# 'fee-currency': 'swftc',
# 'filled-fees': '0',
# 'source': 'spot-api',
# 'id': 83789509854000,
# 'type': 'buy-limit',
# 'order-id': 83711103204909,
# 'filled-points': '0.005826843283532154',
# 'fee-deduct-currency': 'ht',
# 'filled-amount': '45941.53',
# 'price': '0.0000001401',
# 'created-at': 1597933260729,
# 'match-id': 100087455560,
# 'role': 'maker',
# 'trade-id': 100050305348
# },
#
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
order = self.safe_string(trade, 'order-id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string(trade, 'role')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
fee = None
feeCost = self.safe_number(trade, 'filled-fees')
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-currency'))
filledPoints = self.safe_number(trade, 'filled-points')
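        # when the fee was deducted in points (HT deduction) instead of the trade
        # currency, 'filled-fees' is zero and 'filled-points' carries the fee, so
        # prefer it and take the currency from 'fee-deduct-currency'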
if filledPoints is not None:
if (feeCost is None) or (feeCost == 0.0):
feeCost = filledPoints
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string(trade, 'id', tradeId)
return {
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'order-id': id,
}
response = await self.spotPrivateGetV1OrderOrdersOrderIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # 1-100 orders, default is 100
if since is not None:
request['start-time'] = since # a date within 120 days from today
# request['end-time'] = self.sum(since, 172800000) # 48 hours window
response = await self.spotPrivateGetV1OrderMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = await self.spotPublicGetMarketHistoryTrade(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = await self.spotPublicGetMarketHistoryKline(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
async def fetch_accounts(self, params={}):
await self.load_markets()
response = await self.spotPrivateGetV1AccountAccounts(params)
return response['data']
async def fetch_currencies(self, params={}):
response = await self.spotPublicGetV2ReferenceCurrencies()
# {
# "code": 200,
# "data": [
# {
# "currency": "sxp",
# "assetType": "1",
# "chains": [
# {
# "chain": "sxp",
# "displayName": "ERC20",
# "baseChain": "ETH",
# "baseChainProtocol": "ERC20",
# "isDynamic": True,
# "numOfConfirmations": "12",
# "numOfFastConfirmations": "12",
# "depositStatus": "allowed",
# "minDepositAmt": "0.23",
# "withdrawStatus": "allowed",
# "minWithdrawAmt": "0.23",
# "withdrawPrecision": "8",
# "maxWithdrawAmt": "227000.000000000000000000",
# "withdrawQuotaPerDay": "227000.000000000000000000",
# "withdrawQuotaPerYear": null,
# "withdrawQuotaTotal": null,
# "withdrawFeeType": "fixed",
# "transactFeeWithdraw": "11.1653",
# "addrWithTag": False,
# "addrDepositTag": False
# }
# ],
# "instStatus": "normal"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(data)):
entry = data[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
chains = self.safe_value(entry, 'chains', [])
networks = {}
instStatus = self.safe_string(entry, 'instStatus')
currencyActive = instStatus == 'normal'
fee = None
minPrecision = None
minWithdraw = None
maxWithdraw = None
for j in range(0, len(chains)):
chain = chains[j]
networkId = self.safe_string(chain, 'chain')
baseChainProtocol = self.safe_string(chain, 'baseChainProtocol')
huobiToken = 'h' + currencyId
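                # if the API omits baseChainProtocol, chains named 'h' + currency
                # (e.g. 'husdt') are treated as ERC20 here, otherwise the chain's
                # displayName is used as the protocol label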
if baseChainProtocol is None:
if huobiToken == networkId:
baseChainProtocol = 'ERC20'
else:
baseChainProtocol = self.safe_string(chain, 'displayName')
network = self.safe_network(baseChainProtocol)
minWithdraw = self.safe_number(chain, 'minWithdrawAmt')
maxWithdraw = self.safe_number(chain, 'maxWithdrawAmt')
withdraw = self.safe_string(chain, 'withdrawStatus')
deposit = self.safe_string(chain, 'depositStatus')
active = (withdraw == 'allowed') and (deposit == 'allowed')
precision = self.safe_string(chain, 'withdrawPrecision')
if precision is not None:
precision = self.parse_number('1e-' + precision)
minPrecision = precision if (minPrecision is None) else max(precision, minPrecision)
fee = self.safe_number(chain, 'transactFeeWithdraw')
networks[network] = {
'info': chain,
'id': networkId,
'network': network,
'limits': {
'withdraw': {
'min': minWithdraw,
'max': maxWithdraw,
},
},
'active': active,
'fee': fee,
'precision': precision,
}
networksKeys = list(networks.keys())
networkLength = len(networksKeys)
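            # a top-level fee and withdraw limits are only reported when the currency
            # has a single chain; with multiple chains they are per-network instead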
result[code] = {
'info': entry,
'code': code,
'id': currencyId,
'active': currencyActive,
'fee': fee if (networkLength <= 1) else None,
'name': None,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': minWithdraw if (networkLength <= 1) else None,
'max': maxWithdraw if (networkLength <= 1) else None,
},
},
'precision': minPrecision,
'networks': networks,
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
request = {
'account-id': self.accounts[0]['id'],
}
response = await self.spotPrivateGetV1AccountAccountsAccountIdBalance(self.extend(request, params))
balances = self.safe_value(response['data'], 'list', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
return self.parse_balance(result)
async def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'spot_private_get_v1_order_orders')
response = await getattr(self, method)(self.extend(request, params))
#
# {status: "ok",
# data: [{ id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 } ]}
#
return self.parse_orders(response['data'], market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order-id': id,
}
response = await self.spotPrivateGetV1OrderOrdersOrderId(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOpenOrdersMethod', 'fetch_open_orders_v1')
return await getattr(self, method)(symbol, since, limit, params)
async def fetch_open_orders_v1(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrdersV1() requires a symbol argument')
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders_v2(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
accountId = self.safe_string(params, 'account-id')
if accountId is None:
                # pick the first spot account
await self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request['account-id'] = accountId
if limit is not None:
request['size'] = limit
omitted = self.omit(params, 'account-id')
response = await self.spotPrivateGetV1OrderOpenOrders(self.extend(request, omitted))
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# { id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
# { id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
id = self.safe_string(order, 'id')
side = None
type = None
status = None
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string(order, 'state'))
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(order, 'created-at')
clientOrderId = self.safe_string(order, 'client-order-id')
amount = self.safe_string(order, 'amount')
filled = self.safe_string_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
price = self.safe_string(order, 'price')
cost = self.safe_string_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
feeCost = self.safe_number_2(order, 'filled-fees', 'field-fees') # typo in their API, filled fees
fee = None
if feeCost is not None:
feeCurrency = None
if market is not None:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': None,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
await self.load_accounts()
market = self.market(symbol)
request = {
'account-id': self.accounts[0]['id'],
'symbol': market['id'],
'type': side + '-' + type,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id'])
if (type == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
                    raise InvalidOrder(self.id + " market buy order requires a price argument to calculate the cost (the total amount of quote currency to spend, amount * price). To switch off this warning and specify the cost in the amount argument directly, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
# despite that cost = amount * price is in quote currency and should have quote precision
# the exchange API requires the cost supplied in 'amount' to be of base precision
# more about it here:
# https://github.com/ccxt/ccxt/pull/4395
# https://github.com/ccxt/ccxt/issues/7611
                    # the resulting cost (in quote currency) is passed through costToPrecision and sent in the 'amount' field below
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
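                    # illustrative sketch with hypothetical numbers: amount=0.5, price=2000
                    # yields request['amount'] = self.cost_to_precision(symbol, 1000.0),
                    # i.e. the quote-currency cost is what gets sent in the 'amount' field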
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
if type == 'limit' or type == 'ioc' or type == 'limit-maker' or type == 'stop-limit' or type == 'stop-limit-fok':
request['price'] = self.price_to_precision(symbol, price)
response = await self.spotPrivatePostV1OrderOrdersPlace(self.extend(request, params))
timestamp = self.milliseconds()
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
async def cancel_order(self, id, symbol=None, params={}):
clientOrderId = self.safe_string_2(params, 'client-order-id', 'clientOrderId')
request = {}
method = 'spotPrivatePostV1OrderOrdersOrderIdSubmitcancel'
if clientOrderId is None:
request['order-id'] = id
else:
request['client-order-id'] = clientOrderId
method = 'spotPrivatePostV1OrderOrdersSubmitCancelClientOrder'
params = self.omit(params, ['client-order-id', 'clientOrderId'])
response = await getattr(self, method)(self.extend(request, params))
#
# {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
clientOrderIds = self.safe_value_2(params, 'clientOrderIds', 'client-order-ids')
params = self.omit(params, ['clientOrderIds', 'client-order-ids'])
request = {}
if clientOrderIds is None:
request['order-ids'] = ids
else:
request['client-order-ids'] = clientOrderIds
response = await self.spotPrivatePostV1OrderOrdersBatchcancel(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
return response
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {
            # 'account-id' string False NA The account id used for this cancel. Refer to GET /v1/account/accounts
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
# 'types' 'string', buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.spotPrivatePostV1OrderOrdersBatchCancelOpenOrders(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def safe_network(self, networkId):
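        # drop a single trailing '1' from the network id (if present) before mapping it;
        # networksById is currently empty, so any other id passes through unchanged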
lastCharacterIndex = len(networkId) - 1
lastCharacter = networkId[lastCharacterIndex]
if lastCharacter == '1':
networkId = networkId[0:lastCharacterIndex]
networksById = {}
return self.safe_string(networksById, networkId, networkId)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "usdt",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "usdterc20", # trc20usdt, hrc20usdt, usdt, algousdt
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
if tag == '':
tag = None
currencyId = self.safe_string(depositAddress, 'currency')
currency = self.safe_currency(currencyId, currency)
code = self.safe_currency_code(currencyId, currency)
networkId = self.safe_string(depositAddress, 'chain')
networks = self.safe_value(currency, 'networks', {})
networksById = self.index_by(networks, 'id')
networkValue = self.safe_value(networksById, networkId, networkId)
network = self.safe_string(networkValue, 'network')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'info': depositAddress,
}
async def fetch_deposit_addresses_by_network(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.spotPrivateGetV2AccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
parsed = self.parse_deposit_addresses(data, [code], False)
return self.index_by(parsed, 'network')
async def fetch_deposit_address(self, code, params={}):
rawNetwork = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(networks, rawNetwork, rawNetwork)
params = self.omit(params, 'network')
response = await self.fetch_deposit_addresses_by_network(code, params)
result = None
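        # fallback order when no network is requested: address keyed by the currency code,
        # then by its alias from options['networks'], then options['defaultNetwork'] (ERC20),
        # then the first address returned; otherwise InvalidAddress is raised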
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find deposit address for ' + code)
return result
result = self.safe_value(response, network)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find ' + network + ' deposit address for ' + code)
return result
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'address'),
'tag': tag,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if network == 'erc20':
request['chain'] = currency['id'] + network
else:
request['chain'] = network + currency['id']
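            # illustrative chain names produced here (assuming currency 'usdt'):
            # network 'erc20' -> 'usdterc20', network 'trc20' -> 'trc20usdt'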
params = self.omit(params, 'network')
response = await self.spotPrivatePostV1DwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
query = self.omit(params, self.extract_params(path))
if isinstance(api, basestring):
# signing implementation for the old endpoints
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, self.hostname, url, auth]) # eslint-disable-line quotes
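                # illustrative payload (placeholder values), one field per line:
                # "GET\napi.huobi.pro\n/v1/account/accounts\nAccessKeyId=...&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=..."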
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if query:
url += '?' + self.urlencode(query)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
else:
# signing implementation for the new endpoints
# type, access = api
type = self.safe_string(api, 0)
access = self.safe_string(api, 1)
url += self.implode_params(path, params)
hostname = self.safe_string(self.urls['hostnames'], type)
if access == 'public':
if query:
url += '?' + self.urlencode(query)
elif access == 'private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, hostname, url, auth]) # eslint-disable-line quotes
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
url = self.implode_params(self.urls['api'][type], {
'hostname': hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_integer(config, 'cost', 1)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
code = self.safe_string(response, 'err-code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string(response, 'err-msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
| 48.655388
| 355
| 0.46927
|
1adefcc5e91479526668e6c42a9429fb644d613e
| 4,969
|
py
|
Python
|
aioyoutube/exceptions.py
|
PlasticStrawActivist/aioyoutube.py
|
08f00cc8f7921c3a2b42c80b932e39287e07ae6b
|
[
"MIT"
] | 3
|
2021-02-25T14:30:17.000Z
|
2021-10-30T03:37:21.000Z
|
aioyoutube/exceptions.py
|
PlasticStrawActivist/aioyoutube.py
|
08f00cc8f7921c3a2b42c80b932e39287e07ae6b
|
[
"MIT"
] | 2
|
2021-02-21T19:17:22.000Z
|
2021-03-11T01:01:22.000Z
|
aioyoutube/exceptions.py
|
PlasticStrawActivist/aioyoutube.py
|
08f00cc8f7921c3a2b42c80b932e39287e07ae6b
|
[
"MIT"
] | 1
|
2021-10-04T08:15:09.000Z
|
2021-10-04T08:15:09.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2021 im-mde
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import ast
from typing import Union, MutableMapping
from .http import YouTubeAPIResponse
from .valid import get_youtube_resources, get_ratings
async def is_http_exception(
status: int,
data: Union[MutableMapping, bytes]
) -> None:
if status < 200 or status >= 300:
        data_ = data
        if isinstance(data_, bytes):
            # error bodies may arrive as raw bytes; convert them back into a mapping
            data_ = ast.literal_eval(data_.decode('UTF8'))
        raise HTTPException(status, data_)
else:
return
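# illustrative call site (names assumed, not part of this module):
#   data = await resp.json()
#   await is_http_exception(resp.status, data)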
class HTTPException(Exception):
"""
HTTP exception for the YouTube Data API.
This exception is raised if the response status is not between 200 and 299.
Parent(s):
Exception
Attribute(s):
        status type(int): status code of the http response
json type(MutableMapping): json of http response
"""
def __init__(self, status: int, json: MutableMapping) -> None:
self.message = 'Status {}: {}'.format(
str(status), json['error']['message'])
super().__init__(self.message)
class YouTubeAPIException(Exception):
"""
Generic exception for errors related to the YouTube Data API.
This exception occurs for API specific errors such as an invalid value
for an argument in a client coroutine.
Parent(s):
Exception
Attribute(s):
message type(str): string displaying information about the error.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class ResourceInvalidException(YouTubeAPIException):
"""
Invalid YouTube resource exception.
This exception occurs when a resource is not valid but is input into
the resource argument of a client class coroutine.
Parent(s):
YouTubeAPIException
Attribute(s):
None
"""
def __init__(self) -> None:
self.message = 'Resource argument must be one of: {}'.format(
get_youtube_resources())
super().__init__(self.message)
class RatingInvalidException(YouTubeAPIException):
"""
Invalid YouTube rating exception.
This exception occurs when a rating value is not valid but is input
into the rating argument of the rate coroutine of a client class.
Parent(s):
YouTubeAPIException
Attribute(s):
None
"""
def __init__(self) -> None:
self.message = 'Rating argument must be one of: {}'.format(
get_ratings())
super().__init__(self.message)
class NoneValueException(Exception):
"""
Generic None value exception.
This exception occurs when a required client class attribute is set
to None.
Parent(s):
Exception
Attribute(s):
None
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class YouTubeKeyNoneException(NoneValueException):
"""
YouTube key None exception.
This exception occurs when the key is set to None instead
of a string when initializing a client class.
Parent(s):
NoneValueException
Attribute(s):
None
"""
def __init__(self) -> None:
self.message = 'YouTube API key is set to "None"'
super().__init__(self.message)
class OAuthTokenNoneException(NoneValueException):
"""
    OAuth token None exception.
This exception occurs when the OAuth token is set to None instead
of a string when initializing a client class.
Parent(s):
        NoneValueException
Attribute(s):
None
"""
def __init__(self) -> None:
self.message = 'OAuth token is set to "None"'
super().__init__(self.message)
| 25.880208
| 83
| 0.64842
|
74e197f9a6b768508f7c720c40294c0ae939c25b
| 3,400
|
py
|
Python
|
configs/topologies/BaseTopology.py
|
caihuayi/gem5
|
96fce476785a834f102ae69a895e661cf08e47cd
|
[
"BSD-3-Clause"
] | 16
|
2020-09-24T00:17:36.000Z
|
2021-08-12T06:11:52.000Z
|
configs/topologies/BaseTopology.py
|
caihuayi/gem5
|
96fce476785a834f102ae69a895e661cf08e47cd
|
[
"BSD-3-Clause"
] | 5
|
2021-01-27T23:09:06.000Z
|
2022-01-07T03:19:39.000Z
|
configs/topologies/BaseTopology.py
|
caihuayi/gem5
|
96fce476785a834f102ae69a895e661cf08e47cd
|
[
"BSD-3-Clause"
] | 15
|
2020-11-18T00:15:28.000Z
|
2021-12-12T03:18:34.000Z
|
# Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from __future__ import absolute_import
import m5
class BaseTopology(object):
description = "BaseTopology"
def __init__(self):
""" When overriding place any objects created in
configs/ruby/<protocol>.py that are needed in
makeTopology (below) here. The minimum is usually
all of the controllers created in the above file.
"""
def makeTopology(self, options, network, IntLink, ExtLink, Router):
""" Called from configs/ruby/Ruby.py
The return value is ( list(Router), list(IntLink), list(ExtLink))
The API of this function cannot change when subclassing!!
Any additional information needed to create this topology should
be passed into the constructor when it's instantiated in
configs/ruby/<protocol>.py
"""
m5.util.fatal("BaseTopology should have been overridden!!")
def registerTopology(self, options):
""" Called from configs/ruby/Ruby.py
There is no return value. This should only be called in
SE mode. It is used by some topology objects to populate
the faux filesystem with accurate file contents.
No need to implement if not using FilesystemRegister
functionality.
"""
class SimpleTopology(BaseTopology):
""" Provides methods needed for the topologies included in Ruby before
topology changes.
These topologies are "simple" in the sense that they only use a flat
list of controllers to construct the topology.
"""
description = "SimpleTopology"
def __init__(self, controllers):
self.nodes = controllers
def addController(self, controller):
self.nodes.append(controller)
def __len__(self):
return len(self.nodes)
| 44.155844
| 77
| 0.722941
|
90b7ee00807ee696610a121f93f81c2f352de845
| 774
|
py
|
Python
|
chat_app/main/helpers.py
|
Jaygaur99/WebChatApp
|
35f5e51de0b51a2fecf9e26d12edc1f987304458
|
[
"MIT"
] | 1
|
2021-09-20T16:44:27.000Z
|
2021-09-20T16:44:27.000Z
|
chat_app/main/helpers.py
|
Jaygaur99/WebChatApp
|
35f5e51de0b51a2fecf9e26d12edc1f987304458
|
[
"MIT"
] | 1
|
2021-10-05T15:49:09.000Z
|
2021-10-05T15:49:09.000Z
|
chat_app/main/helpers.py
|
Jaygaur99/WebChatApp
|
35f5e51de0b51a2fecf9e26d12edc1f987304458
|
[
"MIT"
] | null | null | null |
import random
from django.core.mail import send_mail
from django.conf import settings
def generate_otp():
otp = random.randint(100000, 999999)
return otp
def generate_otp_mail_fields(otp, fname):
subject = "Password Change OTP"
message = f"""Hey {fname}
We noticed that you are trying to change your password.
Your verification otp for changing password is {otp}.
Please do not disclose or share this one time password (otp) with others.
"""
return subject, message
def send_mail_helper(subject, body, mail):
send_mail(subject, body, settings.EMAIL_HOST_USER, [mail, ])
if __name__ == '__main__':
print(generate_otp())
    subject, message = generate_otp_mail_fields(12456, "Jay")
    send_mail_helper(subject, message, 'jaygaur99@gmail.com')
| 29.769231
| 73
| 0.74031
|
b81a284cf86b842e769ea7ce0b61fddb5b6ab183
| 7,005
|
py
|
Python
|
sites/google_codejam/2019/4_DatBae/local_testing_tool.py
|
NoelBird/chocochip
|
7ab9477b9e4153927805c9473ee44a15d809b549
|
[
"MIT"
] | null | null | null |
sites/google_codejam/2019/4_DatBae/local_testing_tool.py
|
NoelBird/chocochip
|
7ab9477b9e4153927805c9473ee44a15d809b549
|
[
"MIT"
] | null | null | null |
sites/google_codejam/2019/4_DatBae/local_testing_tool.py
|
NoelBird/chocochip
|
7ab9477b9e4153927805c9473ee44a15d809b549
|
[
"MIT"
] | null | null | null |
# Usage: `testing_tool.py test_number`, where the argument test_number
# is 0 for Test Set 1 or 1 for Test Set 2.
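# Illustrative invocation (an interactive runner piping this judge to your solution is assumed):
#   python testing_tool.py 0   # judge against Test Set 1 limits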
from __future__ import print_function
import random
import sys
import re
# Use raw_input in Python2.
try:
input = raw_input
except NameError:
pass
_ERROR_MSG_EXTRA_NEW_LINES = "Input has extra newline characters."
_ERROR_MSG_INCORRECT_ARG_NUM = "Answer has wrong number of tokens."
_ERROR_MSG_NOT_SORTED = "Worker IDs in answer must be sorted."
_ERROR_MSG_NOT_UNIQUE = "Worker IDs in answer must be distinct."
_ERROR_MSG_INVALID_TOKEN = "Input has invalid token."
_ERROR_MSG_OUT_OF_RANGE = "Input includes an out-of-range value."
_ERROR_MSG_READ_FAILURE = "Read for input fails."
_QUERY_LIMIT_EXCEEDED_MSG = "Query Limit Exceeded."
_WRONG_ANSWER_MSG = "Wrong Answer."
_ERROR_MSG_INTERNAL_FAILURE = ("The judge failed due to an internal error. "
"This should not happen, please raise an issue "
"to the Code Jam team.")
class Case:
def __init__(self, bad_set, N, F):
self.__bad_set = set(bad_set) # The set of broken computers
self.__N = N # The total number of computers
self.__max_num_tries = F # The number of allowed guesses
self.__raw_input = input
def _parse_contestant_query(self, bitstring):
"""Tries to parse a contestant's input as if it were a query bitstring.
Returns:
(string, string): The first argument is the bitstring, the second is
the error string in case of error.
If the parsing succeeds, the return value should be (str, None).
If the parsing fails, the return value should be (None, str).
"""
# Must be of length exactly N
if len(bitstring) != self.__N:
return (None, _ERROR_MSG_INVALID_TOKEN)
# Bitstring must contain only 0 and 1
if not all([x in '01' for x in bitstring]):
return (None, _ERROR_MSG_INVALID_TOKEN)
return (bitstring, None)
def _parse_contestant_answer(self, tokens):
"""Tries to parse a contestant's input as if it were answering a testcase.
Returns:
      (list, string): The first argument is the answer, the second is
the error string in case of error.
If the parsing succeeds, the return value should be (list, None).
If the parsing fails, the return value should be (None, str).
"""
if len(tokens) != len(self.__bad_set):
return (None, _ERROR_MSG_INCORRECT_ARG_NUM)
try:
contestant_answer = list(map(int, tokens))
except Exception:
return (None, _ERROR_MSG_INVALID_TOKEN)
if sorted(contestant_answer) != contestant_answer:
return (None, _ERROR_MSG_NOT_SORTED)
if len(set(contestant_answer)) != len(contestant_answer):
return (None, _ERROR_MSG_NOT_UNIQUE)
for x in contestant_answer:
if (x < 0) or (x >= self.__N):
return (None, _ERROR_MSG_OUT_OF_RANGE)
return (contestant_answer, None)
def _parse_contestant_input(self, response):
"""Parses contestant's input.
Parse contestant's input which should be either a string of N bits or
a list of len(bad_set) space-separated integers.
Args:
response: (str or list) one-line of input given by the contestant.
Returns:
      (str or list, string): The bitstring sent by the contestant if making
          a query, or a list of ints if the contestant is answering the test case.
          The second argument is an error string in case of error.
          If the parsing succeeds, the return value should be (str or list, None).
If the parsing fails, the return value should be (None, str).
"""
if ("\n" in response) or ("\r" in response):
return None, _ERROR_MSG_EXTRA_NEW_LINES
    if not re.match(r"^[\s0-9-]+$", response):
return None, _ERROR_MSG_INVALID_TOKEN
tokens = response.split()
if len(tokens) == 1 and len(tokens[0]) == self.__N:
# If there is exactly one token and it has length N, it must be a query.
# A number with N digits has to be at least 10**N which is always > N,
# so there is no way for a valid answer to be mistaken as a query.
return self._parse_contestant_query(tokens[0])
else:
# It's not a query, so it must parse as an answer.
return self._parse_contestant_answer(tokens)
def _answer_query(self, bitstring):
answer = ""
for i in range(self.__N):
if i not in self.__bad_set:
answer += bitstring[i]
return answer
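  # illustrative example: with N=4 and bad_set={1, 2, 3}, the query "0110"
  # is answered with "0" -- only the working machine 0 echoes its bit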
def Judge(self):
"""Judge one single case; should only be called once per test case.
Returns:
An error string, or None if the attempt was correct.
"""
print(self.__N, len(self.__bad_set), self.__max_num_tries)
sys.stdout.flush()
# +1 for the answer they have to give
for queries in range(self.__max_num_tries + 1):
try:
contestant_input = self.__raw_input()
except Exception:
return _ERROR_MSG_READ_FAILURE
contestant_input, err = self._parse_contestant_input(contestant_input)
if err is not None:
return err
if type(contestant_input) is str:
# Query
if queries == self.__max_num_tries:
# Too many queries
return _QUERY_LIMIT_EXCEEDED_MSG
else:
print(self._answer_query(contestant_input))
sys.stdout.flush()
else:
# Answer
assert(type(contestant_input) is list)
if set(contestant_input) == self.__bad_set:
# Testcase answered correctly
print(1)
sys.stdout.flush()
return None
else:
return _WRONG_ANSWER_MSG
return _QUERY_LIMIT_EXCEEDED_MSG
def getTestCases(test_number):
F = (10, 5)[test_number]
# You can edit or add your own test cases here.
cases = [Case([1, 2, 3], 4, F), Case([2, 3, 5], 6, F), Case([1000], 1024, F)]
return cases
def JudgeAllCases(test_number):
"""Sends input to contestant and judges contestant output.
In the case of any error (other than extra input after all testcases are
finished), -1 is printed to stdout.
Returns:
An error string, or None if the attempt was correct.
"""
try:
cases = getTestCases(test_number)
except Exception:
return _ERROR_MSG_INTERNAL_FAILURE
print(len(cases))
sys.stdout.flush()
for idx, case in enumerate(cases):
err = case.Judge()
if err is not None:
print(-1)
sys.stdout.flush()
return "Case #{} fails:\n{}".format(idx+1, err)
# Make sure nothing other than EOF is printed after all cases finish.
try:
response = input()
except EOFError:
return None
except:
return "Exception raised while reading input after all cases finish."
return "Additional input after all cases finish: {}".format(response[:1000])
def main():
random.seed(379009)
test_number = int(sys.argv[1])
if test_number != 1:
test_number = 0
result = JudgeAllCases(test_number)
if result is not None:
print(result, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
| 33.042453
| 79
| 0.676089
|
7028e0f1a23d4a7b751d347988e6d940b044f4de
| 1,787
|
py
|
Python
|
desktop/libs/indexer/src/indexer/urls.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/indexer/src/indexer/urls.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/indexer/src/indexer/urls.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('indexer.views',
url(r'^$', 'collections', name='collections'),
url(r'^install_examples$', 'install_examples', name='install_examples'),
)
urlpatterns += patterns('indexer.api',
url(r'^api/fields/parse/$', 'parse_fields', name='api_parse_fields'),
url(r'^api/collections/$', 'collections', name='api_collections'),
url(r'^api/collections/create/$', 'collections_create', name='api_collections_create'),
url(r'^api/collections/import/$', 'collections_import', name='api_collections_import'),
url(r'^api/collections/remove/$', 'collections_remove', name='api_collections_remove'),
url(r'^api/collections/(?P<collection>\w+)/fields/$', 'collections_fields', name='api_collections_fields'),
url(r'^api/collections/(?P<collection>\w+)/update/$', 'collections_update', name='api_collections_update'),
url(r'^api/collections/(?P<collection>\w+)/data/$', 'collections_data', name='api_collections_data')
)
| 51.057143
| 109
| 0.745943
|
ffc2a04334ffcf004cb1903bdc48df7ea7feff5f
| 6,872
|
py
|
Python
|
pandas/tests/tseries/offsets/test_business_day.py
|
hvardhan20/pandas
|
fa28c61e63a887aee303380be914286a3aad711a
|
[
"BSD-3-Clause"
] | 2
|
2021-11-16T06:33:42.000Z
|
2021-11-16T06:33:47.000Z
|
pandas/tests/tseries/offsets/test_business_day.py
|
yuquanl/pandas
|
700be617eb567fb4ab82aa8151d5c4ee02c22b95
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/tseries/offsets/test_business_day.py
|
yuquanl/pandas
|
700be617eb567fb4ab82aa8151d5c4ee02c22b95
|
[
"BSD-3-Clause"
] | 1
|
2021-11-17T06:32:30.000Z
|
2021-11-17T06:32:30.000Z
|
"""
Tests for offsets.BDay
"""
from datetime import (
date,
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
BDay,
BMonthEnd,
)
from pandas import (
DatetimeIndex,
Timedelta,
_testing as tm,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<BusinessDay>"
assert repr(self.offset2) == "<2 * BusinessDays>"
expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize(
"td",
[
Timedelta(hours=2),
Timedelta(hours=2).to_pytimedelta(),
Timedelta(hours=2).to_timedelta64(),
],
ids=lambda x: type(x),
)
def test_with_offset_index(self, reverse, td, request):
if reverse and isinstance(td, np.timedelta64):
mark = pytest.mark.xfail(
reason="need __array_priority__, but that causes other errors"
)
request.node.add_marker(mark)
dti = DatetimeIndex([self.d])
expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
if reverse:
result = dti + (td + self.offset)
else:
result = dti + (self.offset + td)
tm.assert_index_equal(result, expected)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_is_on_offset(self):
tests = [
(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False),
]
for offset, d, expected in tests:
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8),
},
),
(
2 * BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9),
},
),
(
-BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7),
},
),
(
-2 * BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7),
},
),
(
BDay(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
msg = "Only know how to combine business day with datetime or timedelta"
with pytest.raises(ApplyTypeError, match=msg):
BDay().apply(BMonthEnd())
| 29.748918
| 81
| 0.535943
|
c95a5165c7cf83b45f498395d82ce2582162c034
| 830
|
py
|
Python
|
INOl1402.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
INOl1402.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
INOl1402.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
final_destination,tests=[int(x) for x in input().split(" ")]
z=[]
for i in range(tests):
x,y,z1=[int(x) for x in input().split(" ")]
z.append([x,y,z1])
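# each edge is (source, destination, cost); starting from node 1 the loop below
# appears to greedily follow the cheaper of the listed outgoing edges, adding up
# the cost until final_destination is reached, then prints the accumulated total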
p=0
source=1
for imz in range(len(z)):
k=[]
if source!=final_destination:
for i in range(len(z)):
if z[i][0]==source:
k.append(z[i])
if len(k)>1:
for i in range(len(k)-1):
if k[i][2]>k[i+1][2]:
p=p+k[i+1][2]
source=k[i+1][1]
if k[i][2]<k[i+1][2]:
p=p+k[i][2]
source=k[i][1]
if k[i][2]==k[i+1][2]:
p=p+k[i][2]
source=k[i][1]
if len(k)==1:
p=p+k[0][2]
source=k[0][1]
print(p)
| 26.774194
| 61
| 0.363855
|
b6f2caa300e56937bc5ca55e0c0e140842c8358d
| 4,555
|
py
|
Python
|
libs/applus/applus/tests/test_environ.py
|
cnicgpaul123/killNCP
|
aa153919389784354d1efa0c9669393a7ffe7cf7
|
[
"MulanPSL-1.0"
] | 5
|
2020-02-24T06:29:08.000Z
|
2021-03-07T06:46:26.000Z
|
libs/applus/applus/tests/test_environ.py
|
cnicgpaul123/killNCP
|
aa153919389784354d1efa0c9669393a7ffe7cf7
|
[
"MulanPSL-1.0"
] | 3
|
2020-06-06T01:21:19.000Z
|
2021-06-10T22:36:13.000Z
|
libs/applus/applus/tests/test_environ.py
|
cnicgpaul123/killNCP
|
aa153919389784354d1efa0c9669393a7ffe7cf7
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import urllib.parse
import unittest
from applus import environ
__all__ = ['TestEnviron']
class TestEnviron(unittest.TestCase):
def test_environ(self):
gef = environ.get_envfunc()
# read default
self.assertEqual(gef("SECRET_KEY", "AAAAFFFF"), "AAAAFFFF")
self.assertEqual(gef("VERSION", 2, int), 2)
self.assertEqual(gef("DEBUG", False, bool), False)
# update env
os.environ["SECRET_KEY"] = "BBBBEEEE"
os.environ["VERSION"] = "3"
os.environ["DEBUG"] = "true"
# read environ
self.assertEqual(gef("SECRET_KEY", "AAAAFFFF"), "BBBBEEEE")
self.assertEqual(gef("VERSION", 2, int), 3)
self.assertEqual(gef("DEBUG", False, bool), True)
#
self.assertEqual("x x", environ.decode_result("x%20x", "urllib.parse.unquote"))
self.assertEqual("x x", environ.decode_result("x+x", "urllib.parse.unquote_plus"))
self.assertEqual("x+x", environ.decode_result("x+x", "urllib.parse.unquote"))
#
DEF_DB_CONF = {
"username": "root",
"password": "",
"hostname": "127.0.0.1",
"port": 3306,
"sql_mode": "STRICT_TRANS_TABLES",
}
def test_django_database(self):
dbs = {}
uri = "mysql:///db_sample"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["ENGINE"], "django.db.backends.mysql")
self.assertEqual(dbs[0]["USER"], "root")
self.assertEqual(dbs[0]["PASSWORD"], "")
self.assertEqual(dbs[0]["HOST"], "127.0.0.1")
self.assertEqual(dbs[0]["PORT"], 3306)
self.assertEqual(dbs[0]["NAME"], "db_sample")
self.assertEqual(dbs[0]["OPTIONS"]["charset"], "utf8")
self.assertEqual(dbs[0]["OPTIONS"]["sql_mode"], "STRICT_TRANS_TABLES")
#
uri = "mysql:///db_sample?charset=utf8mb4"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["OPTIONS"]["charset"], "utf8mb4")
#
uri = "mysql://@/db_sample"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["USER"], "root")
self.assertEqual(dbs[0]["PASSWORD"], "")
self.assertEqual(dbs[0]["HOST"], "127.0.0.1")
self.assertEqual(dbs[0]["PORT"], 3306)
#
uri = "mysql://user:@/db_sample"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["USER"], "user")
self.assertEqual(dbs[0]["PASSWORD"], "")
#
uri = "mysql://:pwd@/db_sample"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["USER"], "root")
self.assertEqual(dbs[0]["PASSWORD"], "pwd")
#
uri = "mysql://@:8806/db_sample"
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["HOST"], "127.0.0.1")
self.assertEqual(dbs[0]["PORT"], 8806)
        # the password contains special characters: escape them with urllib quoting
password = ":@#?;"
quoted = urllib.parse.quote(password)
uri = "mysql://username:{}@/db_sample".format(quoted)
environ.update_django_db(dbs, 0, uri, **self.DEF_DB_CONF)
self.assertEqual(dbs[0]["PASSWORD"], password)
#
def test_merge_uri(self):
url = "http://u:p@localhost/db_sample?charset=utf8"
parsed = environ.parse_uri(url)
ret = environ.merge_uri(parsed, netloc="localhost")
self.assertEqual(ret, "http://localhost/db_sample?charset=utf8")
#
def test_django_databases(self):
mysql = "django.db.backends.mysql"
sqlite3 = "django.db.backends.sqlite3"
filename = "/path/to/db.sqlite3"
mysql_opt = dict(sql_mode="")
#
dbs = {}
environ.update_django_dbs(dbs, "")
self.assertEqual({}, dbs)
#
dbs = {}
environ.update_django_dbs(dbs, "file://"+filename)
self.assertEqual(sqlite3, dbs["default"]["ENGINE"])
self.assertEqual(filename, dbs["default"]["NAME"])
#
dbs = {}
environ.update_django_dbs(dbs, "[]file://"+filename)
self.assertEqual(filename, dbs["default"]["NAME"])
#
dbs = dict(default=dict(ENGINE=sqlite3, NAME=filename))
environ.update_django_dbs(dbs, "[] [2nd]mysql://localhost/db_sample", **mysql_opt)
self.assertEqual(sqlite3, dbs["default"]["ENGINE"])
self.assertEqual(mysql, dbs["2nd"]["ENGINE"])
#
| 38.277311
| 90
| 0.58573
|
798e9eedbbc6a8880cbe73e583d9638f82245e80
| 4,559
|
py
|
Python
|
protoboard/settings.py
|
mariosky/protoboard
|
e715db9aaa9af8dba31ac7ab775c555cec2973b5
|
[
"Apache-2.0"
] | 4
|
2016-02-27T18:22:29.000Z
|
2019-01-30T15:01:37.000Z
|
protoboard/settings.py
|
mariosky/protoboard
|
e715db9aaa9af8dba31ac7ab775c555cec2973b5
|
[
"Apache-2.0"
] | 37
|
2015-08-17T08:09:39.000Z
|
2022-03-11T23:15:10.000Z
|
protoboard/settings.py
|
mariosky/protoboard
|
e715db9aaa9af8dba31ac7ab775c555cec2973b5
|
[
"Apache-2.0"
] | 7
|
2015-03-27T19:15:07.000Z
|
2021-04-08T00:33:23.000Z
|
# -*- coding: utf-8 -*-
"""
Django settings for protoboard project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import environ
from pathlib import Path
from django.core.management.utils import get_random_secret_key
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
env = environ.Env(
# set casting, default value
DEBUG=(bool, False), TEMPLATE_DEBUG=(bool, False)
)
environ.Env.read_env()
DEBUG = env('DEBUG')
TEMPLATE_DEBUG = env('TEMPLATE_DEBUG')
SECRET_KEY = get_random_secret_key()
MONGO_DB = env('MONGO_DB')
REDIS_URL = env('REDIS_URL')
DATABASES = {
'default': env.db(),
}
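# Illustrative .env consumed by environ.Env.read_env() above (placeholder values, not from this repo):
#   DEBUG=True
#   TEMPLATE_DEBUG=False
#   MONGO_DB=mongodb://localhost:27017/protoboard
#   REDIS_URL=redis://localhost:6379/0
#   DATABASE_URL=postgres://user:password@localhost:5432/protoboard
#   ALLOWED_HOSTS=localhost,127.0.0.1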
#BASE_DIR = os.path.dirname(os.path.dirname(__file__))
BASE_DIR = Path(__file__).resolve().parent.parent
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
)
SESSION_COOKIE_SECURE=True
SESSION_COOKIE_HTTPONLY=True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.staticfiles',
'django.contrib.messages',
# 'channels',
'widget_tweaks',
'activitytree',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'protoboard.urls'
#ASGI_APPLICATION = "protoboard.routing.application"
WSGI_APPLICATION = 'protoboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
from os.path import join
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ join(BASE_DIR, 'templates_local'), join(BASE_DIR, 'templates'),
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'activitytree/locale/'),
)
ugettext = lambda s: s
LANGUAGES = (
('en', ugettext('English')),
('es-mx', ugettext('Español México')),
)
LANGUAGE_CODE = 'es-mx'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(BASE_DIR, 'debug.log'),
},
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
SESSION_COOKIE_SAMESITE = None
LOGOUT_REDIRECT_URL = "/"
| 22.909548
| 91
| 0.670322
|
f55578e1759b3e4e826b33f61c38f0cd109c120d
| 1,667
|
py
|
Python
|
joulescope_ui/widgets/waveform/axis_item_patch.py
|
sophiekovalevsky/pyjoulescope_ui
|
740cb4883795212c4e6b1ee962babf893372f2fb
|
[
"Apache-2.0"
] | 31
|
2018-12-13T16:13:02.000Z
|
2021-09-28T09:57:50.000Z
|
joulescope_ui/widgets/waveform/axis_item_patch.py
|
sophiekovalevsky/pyjoulescope_ui
|
740cb4883795212c4e6b1ee962babf893372f2fb
|
[
"Apache-2.0"
] | 144
|
2019-02-12T14:58:02.000Z
|
2022-03-24T12:06:17.000Z
|
joulescope_ui/widgets/waveform/axis_item_patch.py
|
sophiekovalevsky/pyjoulescope_ui
|
740cb4883795212c4e6b1ee962babf893372f2fb
|
[
"Apache-2.0"
] | 7
|
2019-07-04T16:34:54.000Z
|
2021-03-24T16:25:06.000Z
|
# Copyright 2020 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyqtgraph as pg
# Address issue https://github.com/pyqtgraph/pyqtgraph/issues/732 in 0.11.0
# 2020 Nov 11
class AxisItemPatch(pg.AxisItem):
def __init__(self, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
def drawPicture(self, p, axisSpec, tickSpecs, textSpecs):
p.setRenderHint(p.Antialiasing, False)
p.setRenderHint(p.TextAntialiasing, True)
## draw long line along axis
pen, p1, p2 = axisSpec
p.setPen(pen)
p.drawLine(p1, p2)
p.translate(0.5 ,0) ## resolves some damn pixel ambiguity
## draw ticks
for pen, p1, p2 in tickSpecs:
p.setPen(pen)
p.drawLine(p1, p2)
# Draw all text
if self.style['tickFont'] is not None:
p.setFont(self.style['tickFont'])
p.setPen(self.textPen())
bounding = self.boundingRect()
for rect, flags, text in textSpecs:
# PATCH: only draw text that completely fits
if bounding.contains(rect):
p.drawText(rect, int(flags), text)
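# Hypothetical usage sketch (not part of the original module): the patched axis can be
# handed to pyqtgraph through the standard ``axisItems`` hook, so tick labels that do
# not fully fit are skipped instead of being drawn partly outside the axis.
if __name__ == '__main__':
    app = pg.mkQApp()
    plot = pg.PlotWidget(axisItems={
        'bottom': AxisItemPatch(orientation='bottom'),
        'left': AxisItemPatch(orientation='left'),
    })
    plot.plot([0, 1, 2, 3], [1, 3, 2, 4])
    plot.show()
    app.exec_()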
| 32.686275
| 75
| 0.656269
|
e67bece00650c220e1467e735dcc25a8a8d3f504
| 810
|
py
|
Python
|
bias-detect/bias_backend/bias_backend/bias_backend/bias_backend/urls.py
|
zcabjwu/react-django-ml-app
|
7fc39f39c68653279e103aca5b942672d030405f
|
[
"MIT"
] | null | null | null |
bias-detect/bias_backend/bias_backend/bias_backend/bias_backend/urls.py
|
zcabjwu/react-django-ml-app
|
7fc39f39c68653279e103aca5b942672d030405f
|
[
"MIT"
] | null | null | null |
bias-detect/bias_backend/bias_backend/bias_backend/bias_backend/urls.py
|
zcabjwu/react-django-ml-app
|
7fc39f39c68653279e103aca5b942672d030405f
|
[
"MIT"
] | null | null | null |
"""bias_backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('bias_backend_app.urls'))
]
| 35.217391
| 77
| 0.708642
|
00c0a7128b34366083728c899759fb31ed922217
| 3,374
|
py
|
Python
|
cassiopeia/datastores/kernel/thirdpartycode.py
|
mikaeldui/cassiopeia
|
fb22e0dd2c71ae5e14c046379e49c8a44215e79d
|
[
"MIT"
] | null | null | null |
cassiopeia/datastores/kernel/thirdpartycode.py
|
mikaeldui/cassiopeia
|
fb22e0dd2c71ae5e14c046379e49c8a44215e79d
|
[
"MIT"
] | null | null | null |
cassiopeia/datastores/kernel/thirdpartycode.py
|
mikaeldui/cassiopeia
|
fb22e0dd2c71ae5e14c046379e49c8a44215e79d
|
[
"MIT"
] | null | null | null |
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
#######################
# Verification String #
#######################
_validate_get_verification_string_query = (
Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
)
@get.register(VerificationStringDto)
@validate_query(_validate_get_verification_string_query, convert_region_to_platform)
def get_verification_string(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> VerificationStringDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
summonerId=query["summoner.id"]
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except (ValueError, APINotFoundError) as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = query["platform"].region.value
data["summonerId"] = query["summoner.id"]
return VerificationStringDto(data)
_validate_get_many_verification_string_query = (
Query.has("platforms").as_(Iterable).also.has("summoner.ids").as_(Iterable)
)
@get_many.register(VerificationStringDto)
@validate_query(
_validate_get_many_verification_string_query, convert_region_to_platform
)
def get_many_verification_string(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> Generator[VerificationStringDto, None, None]:
def generator():
parameters = {"platform": query["platform"].value}
for platform, summoner_id in zip(query["platforms"], query["summoner.ids"]):
platform = Platform(platform.upper())
endpoint = (
"lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
summonerId=summoner_id
)
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = platform.region.value
data["summonerId"] = summoner_id
yield VerificationStringDto(data)
return generator()
| 33.405941
| 88
| 0.62626
|
f2d8a1b2524f5071c686f50842e8ff75a2fdfb9c
| 347,561
|
py
|
Python
|
tests/python/unittest/test_operator.py
|
UtileFuzzball/mxnet_test
|
153d2f4bf9611c9d4a8d2d1abaa17732b6e9038a
|
[
"Apache-2.0"
] | null | null | null |
tests/python/unittest/test_operator.py
|
UtileFuzzball/mxnet_test
|
153d2f4bf9611c9d4a8d2d1abaa17732b6e9038a
|
[
"Apache-2.0"
] | 6
|
2021-03-18T23:54:21.000Z
|
2022-01-13T01:11:51.000Z
|
tests/python/unittest/test_operator.py
|
UtileFuzzball/mxnet_test
|
153d2f4bf9611c9d4a8d2d1abaa17732b6e9038a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
        assert mod1.get_input_grads()[0] is None
        assert mod2.get_input_grads()[0] is None
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
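# For reference: with the default temperature, np_softmax(np.array([1., 2., 3.]))
# evaluates to roughly [0.0900, 0.2447, 0.6652]. Subtracting the per-axis max before
# exponentiating only guards against overflow; it does not change the result.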
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
@unittest.skip("Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/12885")
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # the gradient of sign() is zero everywhere, regardless of the incoming gradient
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data')
data2 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1, data2) + mx.sym.minimum(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
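    # For the parameter combinations exercised in test_deconvolution below, the
    # inferred output size follows stride * (in - 1) + kernel - 2 * pad + adj
    # (e.g. 2 * 3 + 3 - 2 + 1 = 8), which matches the explicit target_shape
    # cases as well, so a single default of 8 covers both code paths.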
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
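    # exe.outputs is fed back as the output gradient; nearest upsampling sums the
    # gradient over the block each element was replicated into. arg_k is upsampled
    # by root_scale * scale**k per spatial dim, hence the factor
    # root_scale**2 * scale**(2*k) in the check below.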
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
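        # c is the sub-pixel centre of the kernel; each weight is a separable tent
        # function of the distance to that centre, i.e. a bilinear interpolation filter.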
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
        # Bilinear upsampling takes exactly one data and one weight input;
        # the multi-input mode is not applicable here.
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
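        # Kernel size conventionally used for bilinear upsampling by an integer
        # factor: 4 for root_scale=2, 5 for root_scale=3.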
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
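            # Reference batch norm backward pass (m = elements per channel):
            #   dvar  = sum(dnx * (x - mean)) * (-1/2) * (var + eps)^(-3/2)
            #   dmean = -sum(dnx) / sqrt(var + eps) - 2 * dvar * mean(x - mean)
            #   dX    = dnx / sqrt(var + eps) + 2 * dvar * (x - mean) / m + dmean / m
            #   dW    = sum(ograd * x_hat),  db = sum(ograd)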
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
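        # Each flag decides whether the corresponding dim keeps its size or is
        # broadcast (set to 1); with probability 1/5 a side keeps the full shape so
        # the no-broadcast case is exercised too.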
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
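        # The baseline gradients are produced at the broadcast output shape, so
        # reduce_op sums them back over the broadcast axes before comparing with
        # the input-shaped gradients computed by mxnet.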
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
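    # A unit impulse convolved with an all-ones kernel yields prod(kernel_shape)
    # output entries equal to 1 (spread apart by the dilation), and back-propagating
    # a unit impulse writes the kernel into the input gradient, so both sums below
    # equal np.prod(kernel_shape).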
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
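    # Special shape values exercised by the cases below: 0 copies the corresponding
    # input dim, -1 infers a single dim, -2 copies all remaining dims, -3 merges two
    # consecutive dims, -4 splits one dim into the two values that follow; with
    # reverse=True the special values are matched from the right.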
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
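    # In the cases below, reshape_like replaces the lhs dims in [lhs_begin, lhs_end)
    # with the rhs dims in [rhs_begin, rhs_end); None defaults to the start/end of
    # the shape and negative indices count from the end.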
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
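    # Reference gradients used below: sum broadcasts the output grad; mean also
    # divides by the reduction size; prod uses outdata / data; nansum / nanprod
    # zero the gradient at NaN positions; max / min route the gradient to the
    # (float32-matched) extremal entries; the L2 norm uses data / outdata.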
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
        # The gradients of max and min are sensitive to the precision of the calculation.
        # Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data that has ndim between 1-7 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
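            # Broadcasting backward sums the output gradient over the broadcast
            # axes, which is exactly what np_reduce with np.sum computes here.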
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience, the third and fourth input dims should be 4*x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
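                    # With the conv/fc weights zeroed, the predicted affine theta reduces to
                    # loc_fc_bias = [0.5, 0, 0, 0, 0.5, 0], i.e. a 0.5x scaling, so the sampler
                    # reads the central half of the input; the forward and backward checks below
                    # compare against exactly that central crop.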
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
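                        # Reference: C = A.B per batch element, so dA = dC.B^T and dB = A^T.dC.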
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
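    # One output channel per displacement in the neighbourhood grid
    # (neighborhood_grid_width ** 2 channels); each spatial location correlates a
    # kernel_size x kernel_size patch of data1 with the correspondingly displaced
    # patch of data2 (multiplied or absolute-differenced, then averaged).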
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
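# Backward reference for Correlation: gradients are accumulated into the padded buffers
# (tmp1/tmp2) and the padding border is stripped before returning.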
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
    return tmp1_grad[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]], tmp2_grad[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]]
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
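    # Hinge-loss (L1-SVM) reference gradient: labels are mapped to +/-1 per class via l_mask,
    # and d/dx max(0, 1 - y*x) = -y wherever the margin 1 - y*x is positive.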
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
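    # Squared-hinge (L2-SVM) reference gradient: d/dx max(0, 1 - y*x)**2 = -2*y*max(0, 1 - y*x)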
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
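    # zip(*[iter(pad_width)] * 2) groups the flat pad_width tuple into (before, after)
    # pairs per axis, which is the layout numpy.pad expects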
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
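# Numpy reference for InstanceNorm: mean/variance are computed per sample and per channel
# over the spatial dims, then out = weight * (data - mean) / sqrt(var + eps) + bias.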
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
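# LayerNorm reference: normalize along a single axis and apply per-element affine parameters,
# out = gamma * (data - mean) / sqrt(var + eps) + beta, with mean/var taken along `axis`.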
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32, forward_check_eps=1E-3):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64}
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
print(order, dtype, i, out_dtype, in_shape)
in_data = np.random.uniform(-1, 1, in_shape).astype(acc_type[dtype])
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                    npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                    npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                        npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-3 if dtype is np.float16 else 1e-3,
atol=1e-5 if dtype is np.float16 else 1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for dtype, forward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4]):
for in_shape in [(10, 6, 5), (10, 10)]:
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps)
# Numpy Implementation of Sequence Ops
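# Each helper moves the sequence axis to position 1 ([batch, seqlen, ...]) so that per-example
# lengths index the time dimension directly; mask and reverse move the axis back before returning.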
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data')
data2 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/12901")
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
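        # For descending order, indices -1..-k with mode='wrap' pick the last k entries of the
        # ascending argsort/sort, i.e. the k largest values.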
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
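        # expected input gradient: every occurrence of an index in idx_real adds 1 (the value in
        # grad_out) to the corresponding slice of the input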
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
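        # Analytic gradient: each output coordinate is an affine function of the normalized source
        # grid [xs, ys, 1], so d(loss)/d(affine) = out_grad (2 x H*W) dot [xs, ys, 1]^T; tmp below
        # holds those normalized source coordinates.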
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
def get_data():
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
input_np = np.array(list(get_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
x = mx.sym.Variable('x', dtype=np.float32)
sym = mx.sym.Cast(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x' : mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_compat():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_compat():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
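# softmin(x) == softmax(-x): check forward against the NumPy reference and the backward/numeric
# gradients over random shapes, axes and dtypes (float16/32/64).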
@unittest.skip("Flaky test. Tracked in https://github.com/apache/incubator-mxnet/issues/13600")
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
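# softmax over a random axis: forward compared with np_softmax and gradients checked for all grad_req modes.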
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
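# softmax with a temperature parameter, compared against np_softmax(..., temperature=temp).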
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
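# log_softmax: forward compared against log(np_softmax(...)) and gradient checked numerically.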
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
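# numerical stability: softmax on inputs of extreme magnitude (+/-1e30, +/-3.4e38) must not produce NaN/Inf.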
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
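# mixed precision: softmax/softmin/log_softmax outputs and gradients computed in a lower-precision
# dtype must match a higher-precision reference within the given tolerances.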
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
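# pick: gather one element per row along an axis according to an index array, in 'clip' and 'wrap'
# index modes; values are compared against a NumPy gather and the gradient w.r.t. data is checked.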
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for _ in range(100):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
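# helper: run ctc_loss symbolically, compare train/inference forward passes, optionally compare
# against a ground-truth loss, and check the numeric gradient w.r.t. the input.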
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward with gradient calculation enabled (training mode)
    exe.forward(is_train=True)
    out_train = exe.outputs[0]
    # forward without gradient calculation (inference mode)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # losses computed in both modes must agree
    assert_almost_equal(out_train.asnumpy(), out_test.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_test.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward with gradient calculation enabled (training mode)
    exe.forward(is_train=True)
    out_train = exe.outputs[0]
    # forward without gradient calculation (inference mode)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # losses computed in both modes must agree
    assert_almost_equal(out_train.asnumpy(), out_test.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_test.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
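# CTC loss forward values validated against reference losses (from Torch's WarpCTC, per the inline comments).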
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
    # Test 2: batch entries differ; check against Torch WarpCTC reference
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
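# CTC loss with a large vocabulary (6000 classes) to exercise the kernel on wide alphabets.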
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
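# CTC loss gradients checked against reference probabilities and gradients (taken from TensorFlow's
# CTC tests, per the inline comments), for both 'first' and 'last' blank-label conventions.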
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')  # 0 when blank is first, -1 when blank is last
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')  # 0 when blank is first, -1 when blank is last
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
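# int8 quantize/dequantize round trip compared against precomputed reference values.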
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
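# contrib.index_copy: copy rows of t into x at the given indices; check the output and the
# gradients w.r.t. both x and t.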
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
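# contrib.boolean_mask: keep the rows where the mask is nonzero; check output and gradient.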
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
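# contrib.div_sqrt_dim divides its input by sqrt(last dimension); check forward and numeric gradient.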
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
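# custom operator framework: register CustomOp/CustomOpProp subclasses and check forward/backward
# for dense and CSR inputs, storage-type inference, operators with no inputs, and fork safety.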
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive(), "deadlock may exist in custom operator"
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def _custom_exc3(seed):
def custom_exc3():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
n = int(1e8)
a = mx.nd.zeros((n, 1))
b = mx.nd.zeros((1, n))
# trigger OOM
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def _custom_exc4(seed):
def custom_exc4():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot4')
n = int(1e8)
a = mx.nd.zeros((n, 1))
b = mx.nd.zeros((1, n))
# trigger OOM
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
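# exception propagation from custom operators: failures in Python code, in engine dispatch and
# during execution (OOM) must all surface as MXNetError.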
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
run_in_spawned_process(_custom_exc3, {})
run_in_spawned_process(_custom_exc4, {})
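# contrib.PSROIPooling: numeric gradient check over random feature maps and ROIs for several
# class/group/image-size combinations.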
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
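# contrib.DeformableConvolution: numeric gradient checks w.r.t. data, offsets and weights.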
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
                        # Only a GPU implementation exists for now, so run the check on GPU only
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
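# helper for the deformable PSROIPooling test: nudge the offsets so that no bilinear sampling
# location falls exactly on a grid point, where the operator is not differentiable.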
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
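# contrib.DeformablePSROIPooling: numeric gradient check w.r.t. data and offsets.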
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the input locates on the valid points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # Only a GPU implementation exists for now, so run the check on GPU only
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
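# helper shared by the gemm tests: checks linalg.gemm and linalg.gemm2 for all transpose
# combinations, batched inputs and alternative 'axis' layouts.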
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
        # test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
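# linalg.syrk and linalg.gelqf: forward results and gradients (for gelqf, verify Q*Q^T == I and L*Q == A).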
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as they need cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs cuda8
# and MxNet builds use cuda 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
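        # MXNet's Dropout rescales the kept activations by 1/(1 - p), so the
        # expected sum of the output equals the sum of the input; the checks
        # below compare the two sums up to a loose stochastic tolerance.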
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # Heuristically, the relative error between the input and output sums should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
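# For reference, the two helpers above implement the smooth-L1 function used by
# mx.sym.smooth_l1 and its derivative:
#   f(x)  = 0.5 * (sigma * x)^2     if |x| < 1 / sigma^2
#           |x| - 0.5 / sigma^2     otherwise
#   f'(x) = sigma^2 * x             if |x| < 1 / sigma^2
#           sign(x)                 otherwise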
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
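        # Gradient scaling convention checked below: 'null' applies grad_scale
        # directly, 'batch' additionally divides by the batch size, and 'valid'
        # divides by the number of non-ignored labels; with multi_output the
        # gradient is further divided by H * W unless normalization == 'valid'.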
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
def test_begin_equals_end(shape, begin, end, step):
in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_compat():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
        if out.shape == (1,):  # as an exception, MXNet squeezes an all-ones shape such as (1, 1, 1, 1) to (1,) rather than to a scalar
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
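        # Each output cell (oh, ow) averages the input over the bin
        #   rows [floor(oh * H_in / H_out), ceil((oh + 1) * H_in / H_out))
        #   cols [floor(ow * W_in / W_out), ceil((ow + 1) * W_in / W_out))
        # which is the standard adaptive average pooling partition.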
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
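        # Align-corners style mapping: output row h2 samples the input at
        # h1r = h2 * (H_in - 1) / (H_out - 1) and blends the two nearest rows
        # with weights (1 - h1lambda, h1lambda); the width axis is handled the
        # same way.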
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
                w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_input0', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
@with_seed()
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific GPU count here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert whether the two data type are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
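        # Bilinearly interpolate bottom[y, x] from the four surrounding grid
        # points; also return those points together with their interpolation
        # weights, which act as the local gradients consumed by
        # roialign_forward_backward below.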
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
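        # NumPy reference for depth_to_space: split the channel axis into
        # (blocksize, blocksize, c // blocksize**2) and interleave the two block
        # axes with height and width, i.e.
        # (b, c, h, w) -> (b, c // blocksize**2, h * blocksize, w * blocksize).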
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
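        # NumPy reference for space_to_depth: the inverse of the depth_to_space
        # reshape above, folding each blocksize x blocksize spatial tile into
        # the channel axis, i.e.
        # (b, c, h, w) -> (b, c * blocksize**2, h // blocksize, w // blocksize).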
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D Input
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D Input
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_compat():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_compat():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_compat():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_compat_decorator():
@mx.use_np_compat
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_compat(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_compat
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
if __name__ == '__main__':
import nose
nose.runmodule()
| 42.898173
| 173
| 0.574503
|
d4415074a3f48391fe831be61e3802b9477e2dba
| 1,536
|
py
|
Python
|
plangym/box_2d/env.py
|
Guillemdb/plangym
|
60913bea3aaab23c8ec5caad5b9b65bf74969f44
|
[
"MIT"
] | 8
|
2019-02-05T15:03:04.000Z
|
2020-02-12T20:43:52.000Z
|
plangym/box_2d/env.py
|
Guillemdb/plangym
|
60913bea3aaab23c8ec5caad5b9b65bf74969f44
|
[
"MIT"
] | 2
|
2020-03-07T16:47:32.000Z
|
2020-03-07T19:19:44.000Z
|
plangym/box_2d/env.py
|
Guillemdb/plangym
|
60913bea3aaab23c8ec5caad5b9b65bf74969f44
|
[
"MIT"
] | 3
|
2019-02-05T19:58:40.000Z
|
2020-03-07T16:41:08.000Z
|
import copy
from typing import Union
import pickle
import numpy
from Box2D.Box2D import b2Vec2, b2Transform
from plangym.core import GymEnvironment
class Box2DEnv(GymEnvironment):
def get_state(self) -> numpy.array:
"""
Recover the internal state of the simulation.
        A state must completely describe the Environment at a given moment.
"""
state = get_env_state(self.gym_env) # pickle.dumps(get_env_state(self.gym_env))
# state_vector = numpy.zeros(200, dtype=object)
# state_vector[: len(state)] = tuple(state[:])[:]
# if len(state.shape) == 1:
# state = state[numpy.newaxis, :]
return numpy.array((state, None), dtype=object) # "S250000")
def set_state(self, state: numpy.ndarray) -> None:
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
Returns:
None
"""
# loaded_state = pickle.loads(state[:])
set_env_state(self.gym_env, state[0])
def _lunar_lander_end(self, obs):
if self.gym_env.game_over or abs(obs[0]) >= 1.0:
return True
elif not self.gym_env.lander.awake:
return True
return False
def _step_with_dt(self, action, dt):
obs, reward, _, info = super(Box2DEnv, self)._step_with_dt(action, dt)
terminal = self._lunar_lander_end(obs)
info["oob"] = terminal
return obs, reward, terminal, info
| 28.444444
| 88
| 0.619792
|
54fd287df7f6ee1ebc65a8a4a8e55a5fec3ed20f
| 5,643
|
py
|
Python
|
test/functional/p2p-leaktests.py
|
Palem1988/ion_old
|
2c2b532abf61e2a06231c1d3b4d9b2bd0cdb469a
|
[
"MIT"
] | 2
|
2017-01-16T13:42:19.000Z
|
2017-01-16T17:14:59.000Z
|
test/functional/p2p-leaktests.py
|
ionomy/ion_new
|
759071e12ba2ab889221bf91d99bb052a3b98303
|
[
"MIT"
] | 18
|
2017-01-19T09:19:48.000Z
|
2017-01-27T01:59:30.000Z
|
test/functional/p2p-leaktests.py
|
ionomy/ion_new
|
759071e12ba2ab889221bf91d99bb052a3b98303
|
[
"MIT"
] | 10
|
2017-01-17T19:54:55.000Z
|
2017-02-11T19:26:43.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
"""
from test_framework.mininode import *
from test_framework.test_framework import IonTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(NodeConnCB):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self, conn):
self.connected = True
self.ever_connected = True
def on_version(self, conn, message): self.bad_message(message)
def on_verack(self, conn, message): self.bad_message(message)
def on_reject(self, conn, message): self.bad_message(message)
def on_inv(self, conn, message): self.bad_message(message)
def on_addr(self, conn, message): self.bad_message(message)
def on_alert(self, conn, message): self.bad_message(message)
def on_getdata(self, conn, message): self.bad_message(message)
def on_getblocks(self, conn, message): self.bad_message(message)
def on_tx(self, conn, message): self.bad_message(message)
def on_block(self, conn, message): self.bad_message(message)
def on_getaddr(self, conn, message): self.bad_message(message)
def on_headers(self, conn, message): self.bad_message(message)
def on_getheaders(self, conn, message): self.bad_message(message)
def on_ping(self, conn, message): self.bad_message(message)
    def on_mempool(self, conn, message): self.bad_message(message)
def on_pong(self, conn, message): self.bad_message(message)
def on_feefilter(self, conn, message): self.bad_message(message)
def on_sendheaders(self, conn, message): self.bad_message(message)
def on_sendcmpct(self, conn, message): self.bad_message(message)
def on_cmpctblock(self, conn, message): self.bad_message(message)
def on_getblocktxn(self, conn, message): self.bad_message(message)
def on_blocktxn(self, conn, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if iond ban behavior changes
def on_open(self, conn):
super().on_open(conn)
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, conn, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, conn, message): pass
def on_verack(self, conn, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, conn, message):
self.version_received = True
conn.send_message(msg_ping())
conn.send_message(msg_getaddr())
class P2PLeakTest(IonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
no_version_bannode = CNodeNoVersionBan()
no_version_idlenode = CNodeNoVersionIdle()
no_verack_idlenode = CNodeNoVerackIdle()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
no_version_bannode.add_connection(connections[0])
no_version_idlenode.add_connection(connections[1])
no_verack_idlenode.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
assert wait_until(lambda: no_version_bannode.ever_connected, timeout=10)
assert wait_until(lambda: no_version_idlenode.ever_connected, timeout=10)
assert wait_until(lambda: no_verack_idlenode.version_received, timeout=10)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.connected
[conn.disconnect_node() for conn in connections]
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
if __name__ == '__main__':
P2PLeakTest().main()
| 42.11194
| 118
| 0.716817
|
5d4286a80b77522efdea83cf9c051a2b843c098a
| 21,388
|
py
|
Python
|
edk2toollib/uefi/authenticated_variables_structure_support.py
|
joschock/edk2-pytool-library
|
7281a7c5cff7b0ed273b89717cd1304c3db73e50
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
edk2toollib/uefi/authenticated_variables_structure_support.py
|
joschock/edk2-pytool-library
|
7281a7c5cff7b0ed273b89717cd1304c3db73e50
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
edk2toollib/uefi/authenticated_variables_structure_support.py
|
joschock/edk2-pytool-library
|
7281a7c5cff7b0ed273b89717cd1304c3db73e50
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
##
# UEFI Authenticated Variable Structure Support Library
#
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import logging
import datetime
import struct
import hashlib
import uuid
from edk2toollib.uefi.wincert import WinCert, WinCertUefiGuid
from edk2toollib.utility_functions import PrintByteList
# spell-checker: ignore decodefs, createfs
'''
Structure definitions based on the UEFI specification (UEFI 2.7)
Each object can be created and/or populated from a file stream.
Each object can be written to a file stream as binary and printed to the console as text.
'''
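# A minimal usage sketch (illustrative only; 'db.bin' is a hypothetical file name):
# any of the structures below can be decoded straight from an open binary file
# stream and dumped to the console, e.g. an EFI_SIGNATURE_LIST payload:
#
#   with open('db.bin', 'rb') as f:
#       siglist = EfiSignatureList(filestream=f)
#       siglist.Print()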
# UEFI global Variable Namespace
EfiGlobalVarNamespaceUuid = uuid.UUID('8BE4DF61-93CA-11d2-AA0D-00E098032B8C')
Sha256Oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
#
# EFI_SIGNATURE_DATA Structure for X509 Certs
#
class EfiSignatureDataEfiCertX509(object):
STATIC_STRUCT_SIZE = 16
#
# decodefs is a filestream object of binary content that is the structure encoded
# decodesize is number of bytes to decode as the EFI_SIGNATURE_DATA object (guid + x509 data)
# createfs is a filestream object that is the DER encoded x509 cert
# sigowner is the uuid object of the signature owner guid
def __init__(self, decodefs=None, decodesize=0, createfs=None, sigowner=None):
if(decodefs is not None):
self.PopulateFromFileStream(decodefs, decodesize)
elif(createfs is not None):
# create a new one
self.SignatureOwner = sigowner
start = createfs.tell() # should be 0 but maybe this filestream has other things at the head
createfs.seek(0, 2)
end = createfs.tell()
createfs.seek(start)
self.SignatureDataSize = end - start
if(self.SignatureDataSize < 0):
raise Exception("Create File Stream has invalid size")
self.SignatureData = memoryview(createfs.read(self.SignatureDataSize))
else:
raise Exception("Invalid Parameters - Not Supported")
def PopulateFromFileStream(self, fs, decodesize):
if(fs is None):
raise Exception("Invalid File Steam")
if(decodesize == 0):
raise Exception("Invalid Decode Size")
# only populate from file stream those parts that are complete in the file stream
offset = fs.tell()
fs.seek(0, 2)
end = fs.tell()
fs.seek(offset)
if((end - offset) < EfiSignatureDataEfiCertX509.STATIC_STRUCT_SIZE): # size of the guid
raise Exception("Invalid file stream size")
if((end - offset) < decodesize): # size requested is too big
raise Exception("Invalid file stream size vs decodesize")
self.SignatureOwner = uuid.UUID(bytes_le=fs.read(16))
        # read remaining decode size for x509 data
self.SignatureDataSize = decodesize - EfiSignatureDataEfiCertX509.STATIC_STRUCT_SIZE
self.SignatureData = memoryview(fs.read(self.SignatureDataSize))
def Print(self):
print("EfiSignatureData - EfiSignatureDataEfiCertX509")
print(" Signature Owner: %s" % str(self.SignatureOwner))
print(" Signature Data: ")
if(self.SignatureData is None):
print(" NONE")
else:
sdl = self.SignatureData.tolist()
if(self.SignatureDataSize != len(sdl)):
raise Exception("Invalid Signature Data Size vs Length of data")
PrintByteList(sdl)
def Write(self, fs):
if(fs is None):
raise Exception("Invalid File Output Stream")
if(self.SignatureData is None):
raise Exception("Invalid object")
fs.write(self.SignatureOwner.bytes_le)
fs.write(self.SignatureData)
def GetTotalSize(self):
return EfiSignatureDataEfiCertX509.STATIC_STRUCT_SIZE + self.SignatureDataSize
#
# EFI_SIGNATURE_DATA Structure for Sha256 hash
#
class EfiSignatureDataEfiCertSha256(object):
STATIC_STRUCT_SIZE = 16 + hashlib.sha256().digest_size # has guid and array
#
# decodefs is a filestream object of binary content that is the structure encoded
# createfs is a filestream object of binary that is to be hashed to create the signature data
# digest is a byte array that contains the hash value for new signature data
# sigowner is the uuid object of the signature owner guid
def __init__(self, decodefs=None, createfs=None, digest=None, sigowner=None):
if(decodefs is not None):
self.PopulateFromFileStream(decodefs)
elif(createfs is not None):
# create a new one
self.SignatureOwner = sigowner
self.SignatureData = memoryview(hashlib.sha256(createfs.read()).digest())
elif(digest is not None):
self.SignatureOwner = uuid.UUID(sigowner)
self.SignatureData = memoryview(digest)
else:
raise Exception("Invalid Parameters - Not Supported")
def PopulateFromFileStream(self, fs):
if(fs is None):
raise Exception("Invalid File Steam")
# only populate from file stream those parts that are complete in the file stream
offset = fs.tell()
fs.seek(0, 2)
end = fs.tell()
fs.seek(offset)
if((end - offset) < EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE): # size of the data
raise Exception("Invalid file stream size")
self.SignatureOwner = uuid.UUID(bytes_le=fs.read(16))
self.SignatureData = memoryview(fs.read(hashlib.sha256().digest_size))
def Print(self):
print("EfiSignatureData - EfiSignatureDataEfiCertSha256")
print(" Signature Owner: %s" % str(self.SignatureOwner))
print(" Signature Data: ", end="")
if(self.SignatureData is None):
print(" NONE")
else:
sdl = self.SignatureData.tolist()
for index in range(len(sdl)):
print("%02X" % sdl[index], end='')
print("")
def Write(self, fs):
if(fs is None):
raise Exception("Invalid File Output Stream")
if(self.SignatureData is None):
raise Exception("Invalid object")
fs.write(self.SignatureOwner.bytes_le)
fs.write(self.SignatureData)
def GetTotalSize(self):
return EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE
class EfiSignatureHeader(object):
def __init__(self):
raise Exception("Not Implemented")
class EfiSignatureDataFactory(object):
EFI_CERT_SHA256_GUID = uuid.UUID('c1c41626-504c-4092-aca9-41f936934328')
# EFI_CERT_RSA2048_GUID = uuid.UUID("0x3c5766e8, 0x269c, 0x4e34, 0xaa, 0x14, 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6")
# EFI_CERT_RSA2048_SHA256_GUID = uuid.UUID("0xe2b36190, 0x879b, 0x4a3d, 0xad, 0x8d, 0xf2, 0xe7, 0xbb, 0xa3, 0x27, 0x84") # noqa: E501
# EFI_CERT_SHA1_GUID = uuid.UUID("0x826ca512, 0xcf10, 0x4ac9, 0xb1, 0x87, 0xbe, 0x1, 0x49, 0x66, 0x31, 0xbd")
# EFI_CERT_RSA2048_SHA1_GUID = uuid.UUID("0x67f8444f, 0x8743, 0x48f1, 0xa3, 0x28, 0x1e, 0xaa, 0xb8, 0x73, 0x60, 0x80") # noqa: E501
EFI_CERT_X509_GUID = uuid.UUID("a5c059a1-94e4-4aa7-87b5-ab155c2bf072")
# EFI_CERT_SHA224_GUID = uuid.UUID("0xb6e5233, 0xa65c, 0x44c9, 0x94, 0x7, 0xd9, 0xab, 0x83, 0xbf, 0xc8, 0xbd")
# EFI_CERT_SHA384_GUID = uuid.UUID("0xff3e5307, 0x9fd0, 0x48c9, 0x85, 0xf1, 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x1")
# EFI_CERT_SHA512_GUID = uuid.UUID("0x93e0fae, 0xa6c4, 0x4f50, 0x9f, 0x1b, 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a")
EFI_CERT_X509_SHA256_GUID = uuid.UUID("3bd2a492-96c0-4079-b420-fcf98ef103ed")
# EFI_CERT_X509_SHA384_GUID = uuid.UUID("0x7076876e, 0x80c2, 0x4ee6, 0xaa, 0xd2, 0x28, 0xb3, 0x49, 0xa6, 0x86, 0x5b") # noqa: E501
# EFI_CERT_X509_SHA512_GUID = uuid.UUID("0x446dbf63, 0x2502, 0x4cda, 0xbc, 0xfa, 0x24, 0x65, 0xd2, 0xb0, 0xfe, 0x9d") # noqa: E501
# EFI_CERT_TYPE_PKCS7_GUID = uuid.UUID("0x4aafd29d, 0x68df, 0x49ee, 0x8a, 0xa9, 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7")
#
# This method is a factory for creating the correct Efi Signature Data object
# from the filestream of an existing auth payload
#
@staticmethod
def Factory(fs, type, size):
if(fs is None):
raise Exception("Invalid File stream")
if(type == EfiSignatureDataFactory.EFI_CERT_SHA256_GUID):
if(size != EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE):
raise Exception("Invalid Size 0x%x" % size)
return EfiSignatureDataEfiCertSha256(decodefs=fs)
elif(type == EfiSignatureDataFactory.EFI_CERT_X509_GUID):
return EfiSignatureDataEfiCertX509(decodefs=fs, decodesize=size)
else:
logging.error("GuidType Value: %s" % type)
raise Exception("Not Supported")
return None
#
# Create a new Efi Signature Data object.
    # Type will be based on GUID
# Value will be based on type and Content (content stream opened for reading)
# sigowner is the UUID object for the signature owner guid
@staticmethod
def Create(type, ContentFileStream, sigowner):
if(ContentFileStream is None):
raise Exception("Invalid Content File Stream")
if(type == EfiSignatureDataFactory.EFI_CERT_SHA256_GUID):
return EfiSignatureDataEfiCertSha256(createfs=ContentFileStream, sigowner=sigowner)
elif(type == EfiSignatureDataFactory.EFI_CERT_X509_GUID):
return EfiSignatureDataEfiCertX509(createfs=ContentFileStream, sigowner=sigowner)
else:
raise Exception("Not Supported")
##
# EFI_SIGNATURE_LIST structure
##
class EfiSignatureList(object):
STATIC_STRUCT_SIZE = 16 + 4 + 4 + 4
def __init__(self, filestream=None, typeguid=None):
if(filestream is None):
# Type of the signature. GUID signature types are defined in below.
self.SignatureType = typeguid
# Total size of the signature list, including this header.
self.SignatureListSize = EfiSignatureList.STATIC_STRUCT_SIZE
# Size of the signature header which precedes the array of signatures.
self.SignatureHeaderSize = -1
# Size of each signature.
self.SignatureSize = 0
# Header before the array of signatures. The format of this header is specified by the SignatureType.
self.SignatureHeader = None
# An array of signatures. Each signature is SignatureSize bytes in length.
self.SignatureData_List = None
else:
self.PopulateFromFileStream(filestream)
def PopulateFromFileStream(self, fs):
if(fs is None):
raise Exception("Invalid File Steam")
# only populate from file stream those parts that are complete in the file stream
start = fs.tell()
fs.seek(0, 2)
end = fs.tell()
fs.seek(start)
if((end - start) < EfiSignatureList.STATIC_STRUCT_SIZE): # size of the static header data
raise Exception("Invalid file stream size")
self.SignatureType = uuid.UUID(bytes_le=fs.read(16))
self.SignatureListSize = struct.unpack("<I", fs.read(4))[0]
self.SignatureHeaderSize = struct.unpack("<I", fs.read(4))[0]
self.SignatureSize = struct.unpack("<I", fs.read(4))[0]
# check the total size of this is within the File
if((end - start) < self.SignatureListSize):
logging.debug("SignatureListSize 0x%x" % self.SignatureListSize)
logging.debug("End - Start is 0x%x" % (end - start))
raise Exception("Invalid File Stream. Not enough file content to cover the Sig List Size")
# check that structure is built correctly and there is room within the structure total size to read the header
if((self.SignatureListSize - (fs.tell() - start)) < self.SignatureHeaderSize):
raise Exception("Invalid Sig List. Sizes not correct. "
"SignatureHeaderSize extends beyond end of structure")
# Signature Header is allowed to be nothing (size 0)
self.SignatureHeader = None
if(self.SignatureHeaderSize > 0):
self.SignatureHeader = EfiSignatureHeader(fs, self.SignatureHeaderSize)
if(((self.SignatureListSize - (fs.tell() - start)) % self.SignatureSize) != 0):
raise Exception("Invalid Sig List. Signature Data Array is not a valid size")
self.SignatureData_List = []
while((start + self.SignatureListSize) > fs.tell()):
# double check that everything is adding up correctly.
if((start + self.SignatureListSize - fs.tell() - self.SignatureSize) < 0):
raise Exception("Invalid Signature List Processing. Signature Data not correctly parsed!!")
a = EfiSignatureDataFactory.Factory(fs, self.SignatureType, self.SignatureSize)
self.SignatureData_List.append(a)
def Print(self):
print("EfiSignatureList")
print(" Signature Type: %s" % str(self.SignatureType))
print(" Signature List Size: 0x%x" % self.SignatureListSize)
print(" Signature Header Size: 0x%x" % self.SignatureHeaderSize)
print(" Signature Size: 0x%x" % self.SignatureSize)
if(self.SignatureHeader is not None):
self.SignatureHeader.Print()
else:
print(" Signature Header: NONE")
for a in self.SignatureData_List:
a.Print()
def Write(self, fs):
if(fs is None):
raise Exception("Invalid File Output Stream")
if((self.SignatureHeader is None) and (self.SignatureHeaderSize == -1)):
raise Exception("Invalid object. Uninitialized Sig Header")
if(self.SignatureData_List is None):
raise Exception("Invalid object. No Sig Data")
fs.write(self.SignatureType.bytes_le)
fs.write(struct.pack("<I", self.SignatureListSize))
fs.write(struct.pack("<I", self.SignatureHeaderSize))
fs.write(struct.pack("<I", self.SignatureSize))
if(self.SignatureHeader is not None):
self.SignatureHeader.Write(fs)
for a in self.SignatureData_List:
a.Write(fs)
def AddSignatureHeader(self, SigHeader, SigSize=0):
if(self.SignatureHeader is not None):
raise Exception("Signature Header already set")
if(self.SignatureHeaderSize != -1):
raise Exception("Signature Header already set (size)")
if(self.SignatureSize != 0):
raise Exception("Signature Size already set")
if(self.SignatureData_List is not None):
raise Exception("Signature Data List is already initialized")
if(SigHeader is None) and (SigSize == 0):
raise Exception("Invalid parameters. Can't have no header and 0 Signature Size")
self.SignatureHeader = SigHeader
if(SigHeader is None):
self.SignatureHeaderSize = 0
self.SignatureSize = SigSize
else:
self.SignatureHeaderSize = SigHeader.GetTotalSize()
self.SignatureSize = SigHeader.GetSizeOfSignatureDataEntry()
self.SignatureListSize += self.SignatureHeaderSize
def AddSignatureData(self, SigDataObject):
if(self.SignatureSize == 0):
raise Exception("Before adding Signature Data you must have set the Signature Size")
if(self.SignatureSize != SigDataObject.GetTotalSize()):
raise Exception("Can't add Signature Data of different size")
if(self.SignatureData_List is None):
self.SignatureData_List = []
self.SignatureData_List.append(SigDataObject)
self.SignatureListSize += self.SignatureSize
class EfiTime(object):
STATIC_STRUCT_SIZE = 16
def __init__(self, Time=datetime.datetime.now(), decodefs=None):
if(decodefs is None):
self.Time = Time
else:
self.PopulateFromFileStream(decodefs)
def PopulateFromFileStream(self, fs):
if(fs is None):
raise Exception("Invalid File Steam")
# only populate from file stream those parts that are complete in the file stream
start = fs.tell()
fs.seek(0, 2)
end = fs.tell()
fs.seek(start)
if((end - start) < EfiTime.STATIC_STRUCT_SIZE): # size of the static structure data
raise Exception("Invalid file stream size")
Year = struct.unpack("<H", fs.read(2))[0]
Month = struct.unpack("<B", fs.read(1))[0]
Day = struct.unpack("<B", fs.read(1))[0]
Hour = struct.unpack("<B", fs.read(1))[0]
Minute = struct.unpack("<B", fs.read(1))[0]
Second = struct.unpack("<B", fs.read(1))[0]
fs.seek(1, 1) # seek past pad1
NanoSecond = struct.unpack("<I", fs.read(4))[0]
TimeZone = struct.unpack("<h", fs.read(2))[0]
Daylight = struct.unpack("<B", fs.read(1))[0]
fs.seek(1, 1) # seek past pad2
        self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond // 1000)
logging.debug("I don't know how to deal with TimeZone or Daylight and I don't care at the moment")
logging.debug("Timezone value is: 0x%x" % TimeZone)
logging.debug("Daylight value is: 0x%X" % Daylight)
def Print(self):
print("EfiTime: %s" % datetime.datetime.strftime(self.Time, "%A, %B %d, %Y %I:%M%p"))
def Write(self, fs):
if(fs is None):
raise Exception("Invalid File Output Stream")
fs.write(struct.pack("<H", self.Time.year))
fs.write(struct.pack("<B", self.Time.month))
fs.write(struct.pack("<B", self.Time.day))
fs.write(struct.pack("<B", self.Time.hour))
fs.write(struct.pack("<B", self.Time.minute))
fs.write(struct.pack("<B", self.Time.second))
fs.write(struct.pack("<B", 0)) # Pad1
fs.write(struct.pack("<I", 0)) # Nano Seconds
fs.write(struct.pack("<h", 0)) # TimeZone
fs.write(struct.pack("<B", 0)) # Daylight
fs.write(struct.pack("<B", 0)) # Pad2
class EFiVariableAuthentication2(object):
def __init__(self, Time=datetime.datetime.now(), decodefs=None):
if(decodefs is None):
self.EfiTime = EfiTime(Time=Time)
self.AuthInfo = WinCertUefiGuid()
self.Payload = None
self.PayloadSize = 0
self.SigListPayload = None
else:
self.PopulateFromFileStream(decodefs)
def PopulateFromFileStream(self, fs):
if(fs is None):
raise Exception("Invalid File Steam")
self.EfiTime = EfiTime(decodefs=fs)
self.AuthInfo = WinCert.Factory(fs)
self.Payload = None
self.SigListPayload = None
self.SetPayload(fs)
def Print(self):
print("EFiVariableAuthentication2")
self.EfiTime.Print()
self.AuthInfo.Print()
print("-------------------- VARIABLE PAYLOAD --------------------")
if(self.SigListPayload is not None):
self.SigListPayload.Print()
elif(self.Payload is not None):
print("Raw Data: ")
sdl = self.Payload.tolist()
if(self.PayloadSize != len(sdl)):
raise Exception("Invalid Payload Data Size vs Length of data")
PrintByteList(sdl)
def Write(self, fs):
if(fs is None):
raise Exception("Invalid File Output Stream")
self.EfiTime.Write(fs)
self.AuthInfo.Write(fs)
if(self.Payload is not None):
fs.write(self.Payload)
def SetPayload(self, fs):
if(fs is None):
raise Exception("Invalid File Input Stream")
# Find the payload size
start = fs.tell()
fs.seek(0, 2)
end = fs.tell()
fs.seek(start)
self.PayloadSize = end - start
if(self.PayloadSize == 0):
logging.debug("No Payload for this EfiVariableAuthenticated2 Object")
return
# read as siglist
try:
self.SigListPayload = EfiSignatureList(fs)
except Exception as e:
logging.debug("Exception Trying to parse SigList Payload. \n%s" % str(e))
# reset the file pointer
fs.seek(start)
self.Payload = memoryview(fs.read(self.PayloadSize))
'''
THESE ARE NOT SUPPORTED IN THE TOOL
typedef struct {
///
/// The SHA256 hash of an X.509 certificate's To-Be-Signed contents.
///
EFI_SHA256_HASH ToBeSignedHash;
///
/// The time that the certificate shall be considered to be revoked.
///
EFI_TIME TimeOfRevocation;
} EFI_CERT_X509_SHA256;
typedef struct {
///
/// The SHA384 hash of an X.509 certificate's To-Be-Signed contents.
///
EFI_SHA384_HASH ToBeSignedHash;
///
/// The time that the certificate shall be considered to be revoked.
///
EFI_TIME TimeOfRevocation;
} EFI_CERT_X509_SHA384;
typedef struct {
///
/// The SHA512 hash of an X.509 certificate's To-Be-Signed contents.
///
EFI_SHA512_HASH ToBeSignedHash;
///
/// The time that the certificate shall be considered to be revoked.
///
EFI_TIME TimeOfRevocation;
} EFI_CERT_X509_SHA512;
'''
| 38.816697
| 138
| 0.640453
|
b2ef7b7fff26c049c0881359c0d262af732a22e1
| 358
|
py
|
Python
|
ThumkiStores/admin.py
|
Ashwin-Dev-P/Thumki-Final-Hosting
|
fcfe40000b688d419513831ece5400cb32caaa69
|
[
"MIT"
] | 1
|
2022-01-09T10:23:22.000Z
|
2022-01-09T10:23:22.000Z
|
ThumkiStores/admin.py
|
Ashwin-Dev-P/dev-stores
|
fcfe40000b688d419513831ece5400cb32caaa69
|
[
"MIT"
] | null | null | null |
ThumkiStores/admin.py
|
Ashwin-Dev-P/dev-stores
|
fcfe40000b688d419513831ece5400cb32caaa69
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Dress)
admin.site.register(Category)
admin.site.register(Customer_review)
admin.site.register(Messages)
admin.site.register(FAQ)
admin.site.register(Subscription)
admin.site.register(Profile)
admin.site.register(General_info)
admin.site.register(Sizes)
| 25.571429
| 36
| 0.821229
|
4839e27af44ea72fb6ea43aac20cf9833ba8430d
| 6,592
|
py
|
Python
|
fblldbbase.py
|
zddd/chisel
|
7782bdde3062e15ccbdc5f617aa3a8f096b6751b
|
[
"MIT"
] | 1
|
2020-03-04T20:24:33.000Z
|
2020-03-04T20:24:33.000Z
|
fblldbbase.py
|
zddd/chisel
|
7782bdde3062e15ccbdc5f617aa3a8f096b6751b
|
[
"MIT"
] | null | null | null |
fblldbbase.py
|
zddd/chisel
|
7782bdde3062e15ccbdc5f617aa3a8f096b6751b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lldb
import json
import shlex
class FBCommandArgument:
def __init__(self, short='', long='', arg='', type='', help='', default='', boolean=False):
self.shortName = short
self.longName = long
self.argName = arg
self.argType = type
self.help = help
self.default = default
self.boolean = boolean
class FBCommand:
def name(self):
return None
def options(self):
return []
def args(self):
return []
def description(self):
return ''
def lex(self, commandLine):
return shlex.split(commandLine)
def run(self, arguments, option):
pass
def isSuccess(error):
# When evaluating a `void` expression, the returned value will indicate an
# error. This error is named: kNoResult. This error value does *not* mean
# there was a problem. This logic follows what the builtin `expression`
# command does. See: https://git.io/vwpjl (UserExpression.h)
kNoResult = 0x1001
return error.success or error.value == kNoResult
def importModule(frame, module):
options = lldb.SBExpressionOptions()
options.SetLanguage(lldb.eLanguageTypeObjC)
value = frame.EvaluateExpression('@import ' + module, options)
return isSuccess(value.error)
# evaluates expression in Objective-C++ context, so it will work even for
# Swift projects
def evaluateExpressionValue(expression, printErrors=True, language=lldb.eLanguageTypeObjC_plus_plus, tryAllThreads=False):
frame = lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame()
options = lldb.SBExpressionOptions()
options.SetLanguage(language)
# Allow evaluation that contains a @throw/@catch.
# By default, ObjC @throw will cause evaluation to be aborted. At the time
# of a @throw, it's not known if the exception will be handled by a @catch.
# An exception that's caught, should not cause evaluation to fail.
options.SetTrapExceptions(False)
# Give evaluation more time.
options.SetTimeoutInMicroSeconds(5000000) # 5s
# Most Chisel commands are not multithreaded.
options.SetTryAllThreads(tryAllThreads)
value = frame.EvaluateExpression(expression, options)
error = value.GetError()
# Retry if the error could be resolved by first importing UIKit.
if (error.type == lldb.eErrorTypeExpression and
error.value == lldb.eExpressionParseError and
importModule(frame, 'UIKit')):
value = frame.EvaluateExpression(expression, options)
error = value.GetError()
if printErrors and not isSuccess(error):
print(error)
return value
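# Typical call pattern (illustrative only; the expression shown is an arbitrary
# Objective-C example, not something chisel itself evaluates):
#
#   value = evaluateExpressionValue('(int)[[NSProcessInfo processInfo] processorCount]')
#   if isSuccess(value.GetError()):
#     print(value.GetValue())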
def evaluateInputExpression(expression, printErrors=True):
# HACK
if expression.startswith('(id)'):
return evaluateExpressionValue(expression, printErrors=printErrors).GetValue()
frame = lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame()
options = lldb.SBExpressionOptions()
options.SetTrapExceptions(False)
value = frame.EvaluateExpression(expression, options)
error = value.GetError()
if printErrors and error.Fail():
print(error)
return value.GetValue()
def evaluateIntegerExpression(expression, printErrors=True):
output = evaluateExpression('(int)(' + expression + ')', printErrors).replace('\'', '')
if output.startswith('\\x'): # Booleans may display as \x01 (Hex)
output = output[2:]
elif output.startswith('\\'): # Or as \0 (Dec)
output = output[1:]
return int(output, 0)
def evaluateBooleanExpression(expression, printErrors=True):
return (int(evaluateIntegerExpression('(BOOL)(' + expression + ')', printErrors)) != 0)
def evaluateExpression(expression, printErrors=True):
return evaluateExpressionValue(expression, printErrors=printErrors).GetValue()
def describeObject(expression, printErrors=True):
return evaluateExpressionValue('(id)(' + expression + ')', printErrors).GetObjectDescription()
def evaluateEffect(expression, printErrors=True):
evaluateExpressionValue('(void)(' + expression + ')', printErrors=printErrors)
def evaluateObjectExpression(expression, printErrors=True):
return evaluateExpression('(id)(' + expression + ')', printErrors)
def evaluateCStringExpression(expression, printErrors=True):
ret = evaluateExpression(expression, printErrors)
process = lldb.debugger.GetSelectedTarget().GetProcess()
error = lldb.SBError()
ret = process.ReadCStringFromMemory(int(ret, 16), 256, error)
if error.Success():
return ret
else:
if printErrors:
print(error)
return None
RETURN_MACRO = """
#define IS_JSON_OBJ(obj)\
(obj != nil && ((bool)[NSJSONSerialization isValidJSONObject:obj] ||\
(bool)[obj isKindOfClass:[NSString class]] ||\
(bool)[obj isKindOfClass:[NSNumber class]]))
#define RETURN(ret) ({\
if (!IS_JSON_OBJ(ret)) {\
(void)[NSException raise:@"Invalid RETURN argument" format:@""];\
}\
NSDictionary *__dict = @{@"return":ret};\
NSData *__data = (id)[NSJSONSerialization dataWithJSONObject:__dict options:0 error:NULL];\
NSString *__str = (id)[[NSString alloc] initWithData:__data encoding:4];\
(char *)[__str UTF8String];})
#define RETURNCString(ret)\
({NSString *___cstring_ret = [NSString stringWithUTF8String:ret];\
RETURN(___cstring_ret);})
"""
def check_expr(expr):
return expr.strip().split(';')[-2].find('RETURN') != -1
# evaluate a batch of Objective-C expressions; the last expression must contain a RETURN macro
# and it will automatically transform the Objective-C object to a Python object
# Example:
# >>> fblldbbase.evaluate('NSString *str = @"hello world"; RETURN(@{@"key": str});')
# {u'key': u'hello world'}
def evaluate(expr):
if not check_expr(expr):
raise Exception("Invalid Expression, the last expression not include a RETURN family marco")
command = "({" + RETURN_MACRO + '\n' + expr + "})"
ret = evaluateExpressionValue(command, printErrors=True)
if not ret.GetError().Success():
print(ret.GetError())
return None
else:
process = lldb.debugger.GetSelectedTarget().GetProcess()
error = lldb.SBError()
ret = process.ReadCStringFromMemory(int(ret.GetValue(), 16), 2**20, error)
if not error.Success():
print(error)
return None
else:
ret = json.loads(ret)
return ret['return']
def currentLanguage():
return lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame().GetCompileUnit().GetLanguage()
| 34.694737
| 125
| 0.718902
|
01afd7cdf803b08f3bb537ea966695ff8583771a
| 7,446
|
py
|
Python
|
python/ncnn/model_zoo/rfcn.py
|
fzyzcjy/ncnn
|
42e71609508fde1bd54d9d9de6ca5522ee3bcf37
|
[
"BSD-3-Clause"
] | 14,886
|
2017-07-24T02:58:35.000Z
|
2022-03-31T18:17:04.000Z
|
python/ncnn/model_zoo/rfcn.py
|
fzyzcjy/ncnn
|
42e71609508fde1bd54d9d9de6ca5522ee3bcf37
|
[
"BSD-3-Clause"
] | 3,361
|
2017-07-24T05:56:31.000Z
|
2022-03-31T13:26:35.000Z
|
python/ncnn/model_zoo/rfcn.py
|
fzyzcjy/ncnn
|
42e71609508fde1bd54d9d9de6ca5522ee3bcf37
|
[
"BSD-3-Clause"
] | 3,786
|
2017-07-24T03:09:15.000Z
|
2022-03-31T16:56:40.000Z
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class RFCN:
def __init__(
self,
target_size=224,
max_per_image=100,
confidence_thresh=0.6,
nms_threshold=0.3,
num_threads=1,
use_gpu=False,
):
self.target_size = target_size
self.max_per_image = max_per_image
self.confidence_thresh = confidence_thresh
self.nms_threshold = nms_threshold
self.num_threads = num_threads
self.use_gpu = use_gpu
self.mean_vals = [102.9801, 115.9465, 122.7717]
self.norm_vals = []
self.net = ncnn.Net()
self.net.opt.use_vulkan_compute = self.use_gpu
# original pretrained model from https://github.com/YuwenXiong/py-R-FCN
# https://github.com/YuwenXiong/py-R-FCN/blob/master/models/pascal_voc/ResNet-50/rfcn_end2end/test_agnostic.prototxt
# https://1drv.ms/u/s!AoN7vygOjLIQqUWHpY67oaC7mopf
# resnet50_rfcn_final.caffemodel
# the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
self.net.load_param(get_model_file("rfcn_end2end.param"))
self.net.load_model(get_model_file("rfcn_end2end.bin"))
self.class_names = [
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
def __del__(self):
self.net = None
def __call__(self, img):
h = img.shape[0]
w = img.shape[1]
scale = 1.0
if w < h:
scale = float(self.target_size) / w
w = self.target_size
h = h * scale
else:
scale = float(self.target_size) / h
h = self.target_size
w = w * scale
mat_in = ncnn.Mat.from_pixels_resize(
img,
ncnn.Mat.PixelType.PIXEL_BGR,
img.shape[1],
img.shape[0],
int(w),
int(h),
)
mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)
im_info = ncnn.Mat(3)
im_info[0] = h
im_info[1] = w
im_info[2] = scale
# step1, extract feature and all rois
ex1 = self.net.create_extractor()
ex1.set_num_threads(self.num_threads)
ex1.input("data", mat_in)
ex1.input("im_info", im_info)
ret1, rfcn_cls = ex1.extract("rfcn_cls")
ret2, rfcn_bbox = ex1.extract("rfcn_bbox")
ret3, rois = ex1.extract("rois") # all rois
# step2, extract bbox and score for each roi
class_candidates = []
for i in range(rois.c):
ex2 = self.net.create_extractor()
roi = rois.channel(i) # get single roi
ex2.input("rfcn_cls", rfcn_cls)
ex2.input("rfcn_bbox", rfcn_bbox)
ex2.input("rois", roi)
ret1, bbox_pred = ex2.extract("bbox_pred")
ret2, cls_prob = ex2.extract("cls_prob")
num_class = cls_prob.w
while len(class_candidates) < num_class:
class_candidates.append([])
# find class id with highest score
label = 0
score = 0.0
for j in range(num_class):
class_score = cls_prob[j]
if class_score > score:
label = j
score = class_score
# ignore background or low score
if label == 0 or score <= self.confidence_thresh:
continue
# fprintf(stderr, "%d = %f\n", label, score)
# unscale to image size
x1 = roi[0] / scale
y1 = roi[1] / scale
x2 = roi[2] / scale
y2 = roi[3] / scale
pb_w = x2 - x1 + 1
pb_h = y2 - y1 + 1
# apply bbox regression
dx = bbox_pred[4]
dy = bbox_pred[4 + 1]
dw = bbox_pred[4 + 2]
dh = bbox_pred[4 + 3]
cx = x1 + pb_w * 0.5
cy = y1 + pb_h * 0.5
obj_cx = cx + pb_w * dx
obj_cy = cy + pb_h * dy
obj_w = pb_w * np.exp(dw)
obj_h = pb_h * np.exp(dh)
obj_x1 = obj_cx - obj_w * 0.5
obj_y1 = obj_cy - obj_h * 0.5
obj_x2 = obj_cx + obj_w * 0.5
obj_y2 = obj_cy + obj_h * 0.5
# clip
obj_x1 = np.maximum(np.minimum(obj_x1, float(img.shape[1] - 1)), 0.0)
obj_y1 = np.maximum(np.minimum(obj_y1, float(img.shape[0] - 1)), 0.0)
obj_x2 = np.maximum(np.minimum(obj_x2, float(img.shape[1] - 1)), 0.0)
obj_y2 = np.maximum(np.minimum(obj_y2, float(img.shape[0] - 1)), 0.0)
# append object
obj = Detect_Object()
obj.rect.x = obj_x1
obj.rect.y = obj_y1
obj.rect.w = obj_x2 - obj_x1 + 1
obj.rect.h = obj_y2 - obj_y1 + 1
obj.label = label
obj.prob = score
class_candidates[label].append(obj)
# post process
objects = []
for candidates in class_candidates:
if len(candidates) == 0:
continue
candidates.sort(key=lambda obj: obj.prob, reverse=True)
picked = self.nms_sorted_bboxes(candidates, self.nms_threshold)
for j in range(len(picked)):
z = picked[j]
objects.append(candidates[z])
objects.sort(key=lambda obj: obj.prob, reverse=True)
objects = objects[: self.max_per_image]
return objects
def nms_sorted_bboxes(self, objects, nms_threshold):
picked = []
n = len(objects)
areas = np.zeros((n,), dtype=np.float32)
for i in range(n):
areas[i] = objects[i].rect.area()
for i in range(n):
a = objects[i]
keep = True
for j in range(len(picked)):
b = objects[picked[j]]
# intersection over union
inter_area = a.rect.intersection_area(b.rect)
union_area = areas[i] + areas[picked[j]] - inter_area
# float IoU = inter_area / union_area
if inter_area / union_area > nms_threshold:
keep = False
if keep:
picked.append(i)
return picked
| 30.641975
| 124
| 0.536127
|
98bae88b63690b4f013f1b4a5dbee685c9b15080
| 1,343
|
py
|
Python
|
plugin_office_crypto.py
|
bidhata/maldissect
|
b09cca953342657ada9ff4644013d449c52646e6
|
[
"Apache-2.0"
] | null | null | null |
plugin_office_crypto.py
|
bidhata/maldissect
|
b09cca953342657ada9ff4644013d449c52646e6
|
[
"Apache-2.0"
] | null | null | null |
plugin_office_crypto.py
|
bidhata/maldissect
|
b09cca953342657ada9ff4644013d449c52646e6
|
[
"Apache-2.0"
] | 1
|
2020-09-17T23:17:16.000Z
|
2020-09-17T23:17:16.000Z
|
#!/usr/bin/env python
__description__ = 'Office crypto plugin for oledump.py'
__author__ = 'Didier Stevens'
__version__ = '0.0.1'
__date__ = '2018/05/06'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2018/05/06: start
Todo:
"""
import struct
class cOfficeCrypto(cPluginParent):
macroOnly = False
name = 'Office crypto plugin'
def __init__(self, name, stream, options):
self.streamname = name
self.stream = stream
self.options = options
self.ran = False
def Analyze(self):
result = []
if self.streamname == ['EncryptionInfo']:
self.ran = True
if len(self.stream) >= 4:
dVersions = {'2.2': 'Standard Encryption', '3.2': 'Standard Encryption', '4.2': 'Standard Encryption', '3.3': 'Extensible Encryption', '4.3': 'Extensible Encryption', '4.4': 'Agile Encryption'}
versionMajor, versionMinor = struct.unpack("<HH", self.stream[0:4])
version = '%d.%d' % (versionMajor, versionMinor)
result.append('Crypto version %s: %s' % (version, dVersions.get(version, 'Unknown')))
else:
result.append('EncryptionInfo stream is too short')
return result
AddPlugin(cOfficeCrypto)
| 27.979167
| 209
| 0.619509
|
e0bc5da1209fde5f3e09e3f9c3261ec2e1648f03
| 4,520
|
py
|
Python
|
Game.py
|
mbrookes1304/Dudo
|
b71ad58032d74dc27e166121bfc3adb416143077
|
[
"MIT"
] | 1
|
2016-04-23T06:53:41.000Z
|
2016-04-23T06:53:41.000Z
|
Game.py
|
mbrookes1304/Dudo
|
b71ad58032d74dc27e166121bfc3adb416143077
|
[
"MIT"
] | null | null | null |
Game.py
|
mbrookes1304/Dudo
|
b71ad58032d74dc27e166121bfc3adb416143077
|
[
"MIT"
] | null | null | null |
from ComputerPlayer import ComputerPlayer
from HumanPlayer import HumanPlayer
from Dice import Dice
from queue import Queue
class Game(object):
'''Represents a game of Dudo.'''
def __init__(self, playername):
# Create a list of players, one human and four computer
self._playerList = [HumanPlayer(playername, self)]
for i in range(1, 5):
self._playerList.append(ComputerPlayer("Computer " + str(i), self))
# Get the first player
firstPlayer = self._getFirstPlayer(self._playerList)
print (firstPlayer.getName() + " gets to go first!")
# Start the game
self.newRound(firstPlayer)
def _getFirstPlayer(self, playerList):
''' Returns the first player based on rolling a dice.'''
highestRollers = set()
highestRoll = 0
dice = Dice()
for player in playerList:
print(player.getName() + " rolls a " + str(dice.getTop()))
if dice.getTop() > highestRoll:
highestRollers.clear()
highestRollers.add(player)
highestRoll = dice.getTop()
elif dice.getTop() == highestRoll:
highestRollers.add(player)
dice.roll()
if len(highestRollers) > 1:
return self._getFirstPlayer(highestRollers)
else:
return highestRollers.pop()
def makeBid(self, bid):
'''Change the latest bid.'''
print (bid.getBidder().getName() + " bets " + str(bid))
self._lastBid = bid
def getLastBid(self):
'''Returns the last bid made in the game.'''
return self._lastBid
def callBid(self, caller):
'''The bid made by the previous player is called.'''
# Build a mapping of dice face to total shown
diceFrequencies = dict()
for player in self.getPlayers():
diceset = player.getDiceSet()
print(player.getName() + " reveals: " + str(diceset))
for dice in diceset:
diceFrequencies[dice.getTop()] = diceFrequencies.get(
dice.getTop(), 0) + 1
# Check whether bid met
bidCorrect = False
lastBid = self.getLastBid()
bidFace = lastBid.getFace()
if diceFrequencies.get(bidFace, 0) >= lastBid.getFrequency():
bidCorrect = True
print("The last bid was correct")
else:
print("The last bid was successfully called")
# Remove a dice for the player who lost and start a new round
if bidCorrect:
print(caller.getName() + " loses a dice.")
caller.removeDice()
self.playerQueue = self.createPlayerQueue(caller)
else:
print(lastBid.getBidder().getName() + " loses a dice.")
lastBid.getBidder().removeDice()
self.playerQueue = self.createPlayerQueue(lastBid.getBidder())
def callSpotOn(self):
'''The player believes the last bid to be spot on.'''
pass
def newRound(self, firstPlayer):
'''Start a new round from the given player.'''
self.playerQueue = self.createPlayerQueue(firstPlayer)
# Get the first player to make a bid
firstPlayer = self.playerQueue.get()
self.playerQueue.put(firstPlayer)
firstPlayer.makeFirstBid()
# Game loop
while self.playerQueue.qsize() > 1:
# Pop player from queue and add to back
player = self.playerQueue.get()
self.playerQueue.put(player)
player.takeTurn()
def createPlayerQueue(self, firstPlayer):
'''Creates a queue of players starting with firstPlayer. If any players
are eliminated then they are removed from the list of players. All dice
are rolled.'''
        queue = Queue()
        firstPlayerPosInList = self._playerList.index(firstPlayer)
        # Walk the players in turn order starting from firstPlayer, using a
        # snapshot so that removing eliminated players does not skip anyone.
        ordered = [self._playerList[(firstPlayerPosInList + i) % len(self._playerList)]
                   for i in range(len(self._playerList))]
        for player in ordered:
            # Check whether player has been eliminated
            if not player.isEliminated():
                player.rollAllDice()
                queue.put(player)
            else:
                print(player.getName() + " is eliminated.")
                self._playerList.remove(player)
        return queue
def getPlayers(self):
return self._playerList
| 37.666667
| 79
| 0.582301
|
d0267c6c552c192001abbdc4d33ee6f85a1bedad
| 2,089
|
py
|
Python
|
pyftpd/app/run_pyftpd.py
|
xaled/rpi4-docker
|
5e1f0f68c9b7f4b77211052864e13caa6030cd24
|
[
"MIT"
] | null | null | null |
pyftpd/app/run_pyftpd.py
|
xaled/rpi4-docker
|
5e1f0f68c9b7f4b77211052864e13caa6030cd24
|
[
"MIT"
] | null | null | null |
pyftpd/app/run_pyftpd.py
|
xaled/rpi4-docker
|
5e1f0f68c9b7f4b77211052864e13caa6030cd24
|
[
"MIT"
] | null | null | null |
import os
import logging
from logging.handlers import RotatingFileHandler
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
HOME_DIR = "/data"
BIND_ADDRESS = "0.0.0.0"
LISTEN_PORT = 21
PASSIVE_PORTS = list(range(21100, 21111))
LOG_PATH = "/var/log/pyftpd/pyftpd.log"
logger = logging.getLogger(__name__)
def main(username, password, nat_address, perm):
# Auth options:
authorizer = DummyAuthorizer()
if username is not None and password is not None:
authorizer.add_user(username,
password,
HOME_DIR,
perm=perm)
else:
authorizer.add_anonymous(HOME_DIR, perm=perm)
# Run server:
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = nat_address
handler.passive_ports = PASSIVE_PORTS
server = FTPServer((BIND_ADDRESS, LISTEN_PORT), handler)
try:
server.serve_forever()
finally:
server.close_all()
if __name__ == '__main__':
_debug = os.getenv("PYFTPD_DEBUG", "false").lower() == 'true'
logging.basicConfig(
handlers=[RotatingFileHandler(LOG_PATH, maxBytes=1000000, backupCount=10)],
level=logging.DEBUG if _debug else logging.INFO,
format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
datefmt='%Y-%m-%dT%H:%M:%S')
# Parse env
_username = os.getenv("PYFTPD_USERNAME", "")
_password = os.getenv("PYFTPD_PASSWORD", "")
_username = _username if _username != "" else None
_password = _password if _password != "" else None
_nat_address = os.getenv("PYFTPD_NAT_ADDRESS")
_writable = os.getenv("PYFTPD_READWRITE", "false").lower() == 'true'
_perm = "elradfmwMT" if _writable or (_username is not None and _password is not None) else "elr"
# Run main
logger.info("Starting server (username=%s, perm=%s, nat_address=%s)", _username, _perm, _nat_address)
main(_username, _password, _nat_address, _perm)
| 34.816667
| 105
| 0.672092
|
476841da8db3c984dcd7801e03b476cf97e312b6
| 2,376
|
py
|
Python
|
src/media_player.py
|
Siroj42/wasp-companion
|
d915c49308dd5c4bef47469c7735d0892e012b86
|
[
"MIT"
] | 1
|
2021-09-01T15:54:19.000Z
|
2021-09-01T15:54:19.000Z
|
src/media_player.py
|
Siroj42/wasp-companion
|
d915c49308dd5c4bef47469c7735d0892e012b86
|
[
"MIT"
] | 4
|
2021-09-20T20:03:56.000Z
|
2021-12-24T20:15:14.000Z
|
src/media_player.py
|
Siroj42/wasp-companion
|
d915c49308dd5c4bef47469c7735d0892e012b86
|
[
"MIT"
] | 1
|
2021-09-20T18:25:24.000Z
|
2021-09-20T18:25:24.000Z
|
import threading
import gi
import time
gi.require_version('Playerctl', '2.0')
from gi.repository import Playerctl, GLib
pc_music_commands = {
"play": 'GB({"t":"musicstate","state":"play"})',
"pause": 'GB({"t":"musicstate","state":"pause"})',
"info": 'GB({{"t":"musicinfo","artist":"{artist}","track":"{track}"}})'
}
class MainThread(threading.Thread):
def __init__(self, app_object):
global app
global thread
thread = self
threading.Thread.__init__(self)
app = app_object
self.waspconn_ready_event = threading.Event()
def run(self):
self.waspconn_ready_event.wait()
self.manager = Playerctl.PlayerManager()
self.manager.connect('name-appeared', on_player_appeared)
self.manager.connect('player-vanished', on_player_vanished)
for name in self.manager.props.player_names:
on_player_appeared(self.manager, name)
self.main = GLib.MainLoop()
self.main.run()
def process_watchcmd(self, n):
if n == "pause":
self.current_player.pause()
elif n == "play":
self.current_player.play()
elif n == "next":
self.current_player.next()
elif n == "previous":
self.current_player.previous()
def quit(self):
self.main.quit()
def on_player_appeared(manager, name):
thread.current_player = Playerctl.Player.new_from_name(name)
thread.current_player.connect('playback-status::playing', on_play, thread.manager)
thread.current_player.connect('playback-status::paused', on_pause, thread.manager)
thread.current_player.connect('metadata', on_metadata_change, thread.manager)
on_metadata_change(None, None, None)
if thread.current_player.get_property("playback-status") == Playerctl.PlaybackStatus(0):
on_play(None, None, None)
else:
on_pause(None, None, None)
thread.manager.manage_player(thread.current_player)
def on_player_vanished(manager, player):
return
def on_play(player, status, manager):
app.threadW.run_command(pc_music_commands["play"])
def on_pause(player, status, manager):
app.threadW.run_command(pc_music_commands["pause"])
def on_metadata_change(player, metadata, manager):
artist = thread.current_player.get_artist()
track = thread.current_player.get_title()
if artist and track:
app.threadW.run_command(pc_music_commands["info"].format(artist=artist.replace('"','\\"'), track=track.replace('"','\\"')))
else:
app.threadW.run_command(pc_music_commands["info"].format(artist="", track=""))
| 30.857143
| 125
| 0.738215
|
30d0f908d95f7b057533d1970d769142c54cccb4
| 90
|
py
|
Python
|
main.py
|
automationlogic/default-service
|
3b64db086f98d17e71b36291afdcdd554fc7a0e8
|
[
"MIT"
] | 1
|
2020-01-16T17:19:32.000Z
|
2020-01-16T17:19:32.000Z
|
main.py
|
automationlogic/default-service
|
3b64db086f98d17e71b36291afdcdd554fc7a0e8
|
[
"MIT"
] | 6
|
2020-01-28T23:08:27.000Z
|
2022-02-10T00:36:11.000Z
|
main.py
|
automationlogic/default-service
|
3b64db086f98d17e71b36291afdcdd554fc7a0e8
|
[
"MIT"
] | 1
|
2020-05-13T12:27:09.000Z
|
2020-05-13T12:27:09.000Z
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def ok():
return 'ok'
| 11.25
| 23
| 0.633333
|
bff7865b83b0ff6c1e2202fb5f69efb95382061e
| 10,717
|
py
|
Python
|
nodepool/driver/fake/provider.py
|
leki75/nodepool
|
1f971b03eff582051e6f638fbd2343ced3052b8e
|
[
"Apache-2.0"
] | null | null | null |
nodepool/driver/fake/provider.py
|
leki75/nodepool
|
1f971b03eff582051e6f638fbd2343ced3052b8e
|
[
"Apache-2.0"
] | null | null | null |
nodepool/driver/fake/provider.py
|
leki75/nodepool
|
1f971b03eff582051e6f638fbd2343ced3052b8e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import uuid
import shade
from nodepool import exceptions
from nodepool.driver.openstack.provider import OpenStackProvider
from nodepool.driver.fake.handler import FakeNodeRequestHandler
class Dummy(object):
IMAGE = 'Image'
INSTANCE = 'Instance'
FLAVOR = 'Flavor'
LOCATION = 'Server.Location'
def __init__(self, kind, **kw):
self.__kind = kind
self.__kw = kw
for k, v in kw.items():
setattr(self, k, v)
try:
if self.should_fail:
raise shade.OpenStackCloudException('This image has '
'SHOULD_FAIL set to True.')
if self.over_quota:
raise shade.exc.OpenStackCloudHTTPError(
'Quota exceeded for something', 403)
except AttributeError:
pass
def __repr__(self):
args = []
for k in self.__kw.keys():
args.append('%s=%s' % (k, getattr(self, k)))
args = ' '.join(args)
return '<%s %s %s>' % (self.__kind, id(self), args)
def __getitem__(self, key, default=None):
return getattr(self, key, default)
def __setitem__(self, key, value):
setattr(self, key, value)
def get(self, key, default=None):
return getattr(self, key, default)
def set(self, key, value):
setattr(self, key, value)
class FakeOpenStackCloud(object):
log = logging.getLogger("nodepool.FakeOpenStackCloud")
@staticmethod
def _get_quota():
return 100, 20, 1000000
def __init__(self, images=None, networks=None):
self.pause_creates = False
self._image_list = images
if self._image_list is None:
self._image_list = [
Dummy(
Dummy.IMAGE,
id='fake-image-id',
status='READY',
name='Fake Precise',
metadata={})
]
if networks is None:
networks = [dict(id='fake-public-network-uuid',
name='fake-public-network-name'),
dict(id='fake-private-network-uuid',
name='fake-private-network-name'),
dict(id='fake-ipv6-network-uuid',
name='fake-ipv6-network-name')]
self.networks = networks
self._flavor_list = [
Dummy(Dummy.FLAVOR, id='f1', ram=8192, name='Fake Flavor',
vcpus=4),
Dummy(Dummy.FLAVOR, id='f2', ram=8192, name='Unreal Flavor',
vcpus=4),
]
self._server_list = []
self.max_cores, self.max_instances, self.max_ram = FakeOpenStackCloud.\
_get_quota()
def _get(self, name_or_id, instance_list):
self.log.debug("Get %s in %s" % (name_or_id, repr(instance_list)))
for instance in instance_list:
if instance.name == name_or_id or instance.id == name_or_id:
return instance
return None
def get_network(self, name_or_id, filters=None):
for net in self.networks:
if net['id'] == name_or_id or net['name'] == name_or_id:
return net
return self.networks[0]
def _create(self, instance_list, instance_type=Dummy.INSTANCE,
done_status='ACTIVE', max_quota=-1, **kw):
should_fail = kw.get('SHOULD_FAIL', '').lower() == 'true'
nics = kw.get('nics', [])
security_groups = kw.get('security_groups', [])
addresses = None
# if keyword 'ipv6-uuid' is found in provider config,
# ipv6 address will be available in public addr dict.
for nic in nics:
if nic['net-id'] != 'fake-ipv6-network-uuid':
continue
addresses = dict(
public=[dict(version=4, addr='fake'),
dict(version=6, addr='fake_v6')],
private=[dict(version=4, addr='fake')]
)
public_v6 = 'fake_v6'
public_v4 = 'fake'
private_v4 = 'fake'
interface_ip = 'fake_v6'
break
if not addresses:
addresses = dict(
public=[dict(version=4, addr='fake')],
private=[dict(version=4, addr='fake')]
)
public_v6 = ''
public_v4 = 'fake'
private_v4 = 'fake'
interface_ip = 'fake'
over_quota = False
if (instance_type == Dummy.INSTANCE and
self.max_instances > -1 and
len(instance_list) >= self.max_instances):
over_quota = True
s = Dummy(instance_type,
id=uuid.uuid4().hex,
name=kw['name'],
status='BUILD',
adminPass='fake',
addresses=addresses,
public_v4=public_v4,
public_v6=public_v6,
private_v4=private_v4,
interface_ip=interface_ip,
security_groups=security_groups,
location=Dummy(Dummy.LOCATION, zone=kw.get('az')),
metadata=kw.get('meta', {}),
manager=self,
key_name=kw.get('key_name', None),
should_fail=should_fail,
over_quota=over_quota,
event=threading.Event())
instance_list.append(s)
t = threading.Thread(target=self._finish,
name='FakeProvider create',
args=(s, 0.1, done_status))
t.start()
return s
def _delete(self, name_or_id, instance_list):
self.log.debug("Delete from %s" % (repr(instance_list),))
instance = None
for maybe in instance_list:
if maybe.name == name_or_id or maybe.id == name_or_id:
instance = maybe
if instance:
instance_list.remove(instance)
self.log.debug("Deleted from %s" % (repr(instance_list),))
def _finish(self, obj, delay, status):
self.log.debug("Pause creates %s", self.pause_creates)
if self.pause_creates:
self.log.debug("Pausing")
obj.event.wait()
self.log.debug("Continuing")
else:
time.sleep(delay)
obj.status = status
def create_image(self, **kwargs):
return self._create(
self._image_list, instance_type=Dummy.IMAGE,
done_status='READY', **kwargs)
def get_image(self, name_or_id):
return self._get(name_or_id, self._image_list)
def list_images(self):
return self._image_list
def delete_image(self, name_or_id):
if not name_or_id:
raise Exception('name_or_id is Empty')
self._delete(name_or_id, self._image_list)
def create_image_snapshot(self, name, server, **metadata):
# XXX : validate metadata?
return self._create(
self._image_list, instance_type=Dummy.IMAGE,
name=name, **metadata)
def list_flavors(self, get_extra=False):
return self._flavor_list
def get_openstack_vars(self, server):
server.public_v4 = 'fake'
server.public_v6 = 'fake'
server.private_v4 = 'fake'
server.interface_ip = 'fake'
return server
def create_server(self, **kw):
return self._create(self._server_list, **kw)
def get_server(self, name_or_id):
result = self._get(name_or_id, self._server_list)
return result
def _clean_floating_ip(self, server):
server.public_v4 = ''
server.public_v6 = ''
server.interface_ip = server.private_v4
return server
def wait_for_server(self, server, **kwargs):
while server.status == 'BUILD':
time.sleep(0.1)
auto_ip = kwargs.get('auto_ip')
if not auto_ip:
server = self._clean_floating_ip(server)
return server
def list_servers(self):
return self._server_list
def delete_server(self, name_or_id, delete_ips=True):
self._delete(name_or_id, self._server_list)
def list_availability_zone_names(self):
return ['fake-az1', 'fake-az2']
def get_compute_limits(self):
return Dummy(
'limits',
max_total_cores=self.max_cores,
max_total_instances=self.max_instances,
max_total_ram_size=self.max_ram,
total_cores_used=4 * len(self._server_list),
total_instances_used=len(self._server_list),
total_ram_used=8192 * len(self._server_list)
)
class FakeUploadFailCloud(FakeOpenStackCloud):
log = logging.getLogger("nodepool.FakeUploadFailCloud")
def __init__(self, times_to_fail=None):
super(FakeUploadFailCloud, self).__init__()
self.times_to_fail = times_to_fail
self.times_failed = 0
def create_image(self, **kwargs):
if self.times_to_fail is None:
raise exceptions.BuilderError("Test fail image upload.")
self.times_failed += 1
if self.times_failed <= self.times_to_fail:
raise exceptions.BuilderError("Test fail image upload.")
else:
return super(FakeUploadFailCloud, self).create_image(**kwargs)
class FakeProvider(OpenStackProvider):
fake_cloud = FakeOpenStackCloud
def __init__(self, provider, use_taskmanager):
self.createServer_fails = 0
self.__client = FakeProvider.fake_cloud()
super(FakeProvider, self).__init__(provider, use_taskmanager)
def _getClient(self):
return self.__client
def createServer(self, *args, **kwargs):
while self.createServer_fails:
self.createServer_fails -= 1
raise Exception("Expected createServer exception")
return super(FakeProvider, self).createServer(*args, **kwargs)
def getRequestHandler(self, poolworker, request):
return FakeNodeRequestHandler(poolworker, request)
| 34.349359
| 79
| 0.584119
|
f44b5068a01c0ebf75e367b9e88407629691ee7a
| 3,095
|
py
|
Python
|
services/mls/mls/settings.py
|
PXMYH/mls-scrapy
|
57a102fd8f70a3a2251d8a15cfebcd60d1af0c44
|
[
"MIT"
] | 1
|
2020-01-03T17:35:29.000Z
|
2020-01-03T17:35:29.000Z
|
services/mls/mls/settings.py
|
PXMYH/mls-scrapy
|
57a102fd8f70a3a2251d8a15cfebcd60d1af0c44
|
[
"MIT"
] | 167
|
2018-10-20T15:51:04.000Z
|
2021-06-25T15:18:54.000Z
|
services/mls/mls/settings.py
|
PXMYH/mls-scrapy
|
57a102fd8f70a3a2251d8a15cfebcd60d1af0c44
|
[
"MIT"
] | 3
|
2019-04-23T15:16:17.000Z
|
2019-10-29T01:43:28.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for mls project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mls'
SPIDER_MODULES = ['mls.spiders']
NEWSPIDER_MODULE = 'mls.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mls (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mls.middlewares.MlsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mls.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'mls.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.010989
| 109
| 0.775444
|
059f9560fc470e43c3d83760184714e2354f4a33
| 934
|
py
|
Python
|
my_site/views/course.py
|
sch841466053/shangchunhong
|
a2d54373350c7b2cf9fe8dadf4e393e74cd3f620
|
[
"MIT"
] | null | null | null |
my_site/views/course.py
|
sch841466053/shangchunhong
|
a2d54373350c7b2cf9fe8dadf4e393e74cd3f620
|
[
"MIT"
] | 1
|
2021-06-10T20:54:27.000Z
|
2021-06-10T20:54:27.000Z
|
my_site/views/course.py
|
sch841466053/django-rest-framework-
|
67a7aa61b39c1ef492479134a5cbbd9957513129
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from my_site import models
from my_site.serializers.course_serializer import FreeCourseSerializer, SeniorCourseSerializer
from utils.auth import Auth
from django_redis import get_redis_connection
class GetFreeCourseList(APIView):
def get(self,request,*args,**kwargs):
ret = {"code":1000}
queryset = models.FreeCourse.objects.all()
ser = FreeCourseSerializer(instance=queryset,many=True)
ret["data"] = ser.data
# print(ret)
return Response(ret)
class GetSeniorCourseList(APIView):
authentication_classes = [Auth, ]
def get(self,request,*args,**kwargs):
ret = {"code":1000}
queryset = models.SeniorCourse.objects.all()
ser = SeniorCourseSerializer(instance=queryset,many=True)
ret["data"] = ser.data
# print(ret)
return Response(ret)
| 29.1875
| 94
| 0.703426
|
5f69f983d42077863f7a4036364ea2b37a5e97db
| 3,672
|
py
|
Python
|
python_by_contract_corpus/correct/aoc2020/day_16_ticket_translation.py
|
mristin/python-by-contract-corpus
|
c96ed00389c3811d7d63560ac665d410a7ee8493
|
[
"MIT"
] | 8
|
2021-05-07T17:37:37.000Z
|
2022-02-26T15:08:42.000Z
|
python_by_contract_corpus/correct/aoc2020/day_16_ticket_translation.py
|
mristin/python-by-contract-corpus
|
c96ed00389c3811d7d63560ac665d410a7ee8493
|
[
"MIT"
] | 22
|
2021-04-28T21:55:48.000Z
|
2022-03-04T07:41:37.000Z
|
python_by_contract_corpus/correct/aoc2020/day_16_ticket_translation.py
|
mristin/aocdbc
|
c96ed00389c3811d7d63560ac665d410a7ee8493
|
[
"MIT"
] | 3
|
2021-03-26T22:29:12.000Z
|
2021-04-11T20:45:45.000Z
|
import re
from typing import List, Tuple, Final
from icontract import require, ensure, DBC
# crosshair: on
from python_by_contract_corpus.common import Lines
class RuleParsing:
"""Represent a rule which is not constrained by pre-conditions."""
identifier: Final[str] #: Identifier of the field
ranges: Final[List[Tuple[int, int]]] #: Valid range of values for the field
def __init__(self, identifier: str, ranges: List[Tuple[int, int]]) -> None:
"""Initialize with the given values."""
self.identifier = identifier
self.ranges = ranges
class Rule(DBC):
"""Represent a rule for the ticket field."""
identifier: Final[str] #: identifier of the field
ranges: Final[List[Tuple[int, int]]] #: acceptable ranges for the field
@require(lambda identifier: len(identifier) > 0)
@require(lambda ranges: all(range[0] < range[1] for range in ranges))
def __init__(self, identifier: str, ranges: List[Tuple[int, int]]) -> None:
"""Initialize with the given values."""
self.identifier = identifier
self.ranges = ranges
def applies(rule: Rule, value: int) -> bool:
"""Check whether the ``rule`` applies to the ``value``."""
return any(range[0] <= value <= range[1] for range in rule.ranges)
RULE_RE = re.compile(
r"^([^:]+): "
r"(0|[1-9][0-9]*)-(0|[1-9][0-9]*)"
r"( or (0|[1-9][0-9]*)-(0|[1-9][0-9]*))+\Z"
)
@require(lambda lines: all(RULE_RE.match(line) for line in lines))
def parse_rules(lines: Lines) -> List[RuleParsing]:
"""
Parse the ``lines`` into rules.
    While the parsed rules are syntactically correct, they still have to be
    semantically verified.
"""
result = [] # type: List[RuleParsing]
for line in lines:
identifier, rest = line.split(": ", 1)
ranges = [] # type: List[Tuple[int, int]]
for part in rest.split(" or "):
first, last = part.split("-")
ranges.append((int(first), int(last)))
result.append(RuleParsing(identifier=identifier, ranges=ranges))
return result
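# Illustrative example (an assumption, not part of the original module): a line
# such as "departure location: 27-374 or 395-974" matches RULE_RE and is parsed
# by parse_rules into RuleParsing("departure location", [(27, 374), (395, 974)]).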
# fmt: off
@require(
lambda lines:
all(
re.match(r'^(0|[1-9][0-9]*)(,(0|[1-9][0-9]*))+\Z', line)
for line in lines
)
)
# fmt: on
def parse_nearby_tickets(lines: List[str]) -> List[List[int]]:
"""Parse the nearby tickets from ``lines`` to list of field values."""
# fmt: off
return [
[int(part) for part in line.split(',')]
for line in lines
]
# fmt: on
# fmt: off
@ensure(
lambda ticket, result:
all(
value in ticket
for value in result
)
)
# fmt: on
def invalid_fields(rules: List[Rule], ticket: List[int]) -> List[int]:
"""Select the invalid fields from a ``ticket`` according to ``rules``."""
result = [] # type: List[int]
for value in ticket:
if not any(applies(rule=rule, value=value) for rule in rules):
result.append(value)
return result
# fmt: off
@ensure(
lambda tickets, result:
all(
any(value in ticket for ticket in tickets)
for value in result
)
)
# fmt: on
def list_all_invalid_values(rules: List[Rule], tickets: List[List[int]]) -> List[int]:
"""Select the invalid fields accross all ``tickets`` according to ``rules``."""
result = [] # type: List[int]
for ticket in tickets:
for value in ticket:
if not any(applies(rule=rule, value=value) for rule in rules):
result.append(value)
return result
def compute_error_rate(invalid_values: List[int]) -> int:
"""Compute the error rate as sum of the invalid values."""
return sum(invalid_values)
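# A minimal, illustrative driver (an assumption, not part of the original
# module): it exercises Rule, parse_nearby_tickets, list_all_invalid_values and
# compute_error_rate on a small example that follows the rule format above.
if __name__ == "__main__":
    demo_rules = [
        Rule("class", [(1, 3), (5, 7)]),
        Rule("row", [(6, 11), (33, 44)]),
        Rule("seat", [(13, 40), (45, 50)]),
    ]
    demo_tickets = parse_nearby_tickets(["7,3,47", "40,4,50", "55,2,20", "38,6,12"])
    # The values 4, 55 and 12 match no rule, so the printed error rate is 71.
    print(compute_error_rate(list_all_invalid_values(demo_rules, demo_tickets)))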
| 27.402985
| 87
| 0.614107
|
412b49985db6dc28cab9cf3fc1ebef7a3cf1cfbb
| 20,001
|
py
|
Python
|
statsmodels/tsa/varma_process.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 15
|
2015-03-03T09:47:42.000Z
|
2022-01-05T18:28:31.000Z
|
statsmodels/tsa/varma_process.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 7
|
2015-11-20T08:33:04.000Z
|
2020-07-24T19:34:39.000Z
|
statsmodels/tsa/varma_process.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 14
|
2015-01-06T22:08:34.000Z
|
2021-01-01T16:33:23.000Z
|
# -*- coding: utf-8 -*-
""" Helper and filter functions for VAR and VARMA, and basic VAR class
Created on Mon Jan 11 11:04:23 2010
Author: josef-pktd
License: BSD
This is a new version, I didn't look at the old version again, but similar
ideas.
not copied/cleaned yet:
* fftn based filtering, creating samples with fft
* Tests: I ran examples but did not convert them to tests
examples look good for parameter estimate and forecast, and filter functions
main TODOs:
* result statistics
* see whether Bayesian dummy observation can be included without changing
the single call to linalg.lstsq
* impulse response function does not treat correlation, see Hamilton and jplv
Extensions
* constraints, Bayesian priors/penalization
* Error Correction Form and Cointegration
* Factor Models Stock-Watson, ???
see also VAR section in Notes.txt
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_equal
from scipy import signal
#might not (yet) need the following
from scipy.signal.signaltools import _centered as trim_centered
from statsmodels.tsa.tsatools import lagmat
def varfilter(x, a):
'''apply an autoregressive filter to a series x
Warning: I just found out that convolve doesn't work as I
thought, this likely doesn't work correctly for
nvars>3
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
Note: maybe convert to axis=1, Not
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
#(no, reserar is inverse filter)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
            # could also use np.convolve, but easier for switching to fft
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# #not necessary:
# if np.any(a.shape[1:] != nvar):
# raise ValueError('if 3d shape of a has to be (nobs,nvar,nvar)')
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
return yvalid
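# Illustrative usage sketch (an assumption, not part of the original module):
# case 1 above with a single lag polynomial a(L) = 1 - 0.5 L applied to every
# column of x, so y[t] = x[t] - 0.5 * x[t-1] and the output has
# nobs - nlags + 1 rows:
#     x = np.arange(10.0).reshape(5, 2)
#     y = varfilter(x, np.array([1.0, -0.5]))   # shape (4, 2)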
def varinversefilter(ar, nobs, version=1):
'''creates inverse ar filter (MA representation) recursively
The VAR lag polynomial is defined by ::
ar(L) y_t = u_t or
y_t = -ar_{-1}(L) y_{t-1} + u_t
the returned lagpolynomial is arinv(L)=ar^{-1}(L) in ::
y_t = arinv(L) u_t
Parameters
----------
ar : array, (nlags,nvars,nvars)
matrix lagpolynomial, currently no exog
first row should be identity
Returns
-------
arinv : array, (nobs,nvars,nvars)
Notes
-----
'''
nlags, nvars, nvarsex = ar.shape
if nvars != nvarsex:
        print('exogenous variables not implemented, not tested')
arinv = np.zeros((nobs+1, nvarsex, nvars))
arinv[0,:,:] = ar[0]
arinv[1:nlags,:,:] = -ar[1:]
if version == 1:
for i in range(2,nobs+1):
tmp = np.zeros((nvars,nvars))
for p in range(1,nlags):
tmp += np.dot(-ar[p],arinv[i-p,:,:])
arinv[i,:,:] = tmp
if version == 0:
for i in range(nlags+1,nobs+1):
print(ar[1:].shape, arinv[i-1:i-nlags:-1,:,:].shape)
#arinv[i,:,:] = np.dot(-ar[1:],arinv[i-1:i-nlags:-1,:,:])
#print(np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1])).shape
#arinv[i,:,:] = np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1]))
raise NotImplementedError('waiting for generalized ufuncs or something')
return arinv
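# Illustrative note (an assumption, not part of the original module): for the
# diagonal AR(1) lag polynomial a21 used in the __main__ examples below,
#     arinv = varinversefilter(a21, 3)
# returns the MA representation with arinv[k] = diag(0.8**k, 0.6**k) for
# k = 0..3, i.e. the recursion reproduces (1 - 0.8*L)**-1 = sum_k 0.8**k * L**k.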
def vargenerate(ar, u, initvalues=None):
    '''generate a VAR process with errors u
similar to gauss
uses loop
Parameters
----------
ar : array (nlags,nvars,nvars)
matrix lagpolynomial
u : array (nobs,nvars)
exogenous variable, error term for VAR
Returns
-------
sar : array (1+nobs,nvars)
sample of var process, inverse filtered u
does not trim initial condition y_0 = 0
Examples
--------
# generate random sample of VAR
nobs, nvars = 10, 2
u = numpy.random.randn(nobs,nvars)
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
vargenerate(a21,u)
# Impulse Response to an initial shock to the first variable
imp = np.zeros((nobs, nvars))
imp[0,0] = 1
vargenerate(a21,imp)
'''
nlags, nvars, nvarsex = ar.shape
nlagsm1 = nlags - 1
nobs = u.shape[0]
if nvars != nvarsex:
        print('exogenous variables not implemented, not tested')
if u.shape[1] != nvars:
raise ValueError('u needs to have nvars columns')
if initvalues is None:
sar = np.zeros((nobs+nlagsm1, nvars))
start = nlagsm1
else:
start = max(nlagsm1, initvalues.shape[0])
sar = np.zeros((nobs+start, nvars))
sar[start-initvalues.shape[0]:start] = initvalues
#sar[nlagsm1:] = u
sar[start:] = u
#if version == 1:
for i in range(start,start+nobs):
for p in range(1,nlags):
sar[i] += np.dot(sar[i-p,:],-ar[p])
return sar
def padone(x, front=0, back=0, axis=0, fillvalue=0):
'''pad with zeros along one axis, currently only axis=0
can be used sequentially to pad several axis
Examples
--------
>>> padone(np.ones((2,3)),1,3,axis=1)
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> padone(np.ones((2,3)),1,1, fillvalue=np.nan)
array([[ NaN, NaN, NaN],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ NaN, NaN, NaN]])
'''
#primitive version
shape = np.array(x.shape)
shape[axis] += (front + back)
shapearr = np.array(x.shape)
out = np.empty(shape)
out.fill(fillvalue)
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shapearr
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(out.shape
#print(out[tuple(myslice)].shape
out[tuple(myslice)] = x
return out
def trimone(x, front=0, back=0, axis=0):
'''trim number of array elements along one axis
Examples
--------
>>> xp = padone(np.ones((2,3)),1,3,axis=1)
>>> xp
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> trimone(xp,1,3,1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
'''
shape = np.array(x.shape)
shape[axis] -= (front + back)
#print(shape, front, back
shapearr = np.array(x.shape)
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(shape, endind
#print(x[tuple(myslice)].shape
return x[tuple(myslice)]
def ar2full(ar):
'''make reduced lagpolynomial into a right side lagpoly array
'''
nlags, nvar,nvarex = ar.shape
return np.r_[np.eye(nvar,nvarex)[None,:,:],-ar]
def ar2lhs(ar):
'''convert full (rhs) lagpolynomial into a reduced, left side lagpoly array
this is mainly a reminder about the definition
'''
return -ar[1:]
class _Var(object):
'''obsolete VAR class, use tsa.VAR instead, for internal use only
Examples
--------
>>> v = Var(ar2s)
>>> v.fit(1)
>>> v.arhat
array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.77784898, 0.01726193],
[ 0.10733009, -0.78665335]]])
'''
def __init__(self, y):
self.y = y
self.nobs, self.nvars = y.shape
def fit(self, nlags):
'''estimate parameters using ols
Parameters
----------
nlags : integer
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance
'''
self.nlags = nlags # without current period
nvars = self.nvars
#TODO: ar2s looks like a module variable, bug?
#lmat = lagmat(ar2s, nlags, trim='both', original='in')
lmat = lagmat(self.y, nlags, trim='both', original='in')
self.yred = lmat[:,:nvars]
self.xred = lmat[:,nvars:]
res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
self.estresults = res
self.arlhs = res[0].reshape(nlags, nvars, nvars)
self.arhat = ar2full(self.arlhs)
self.rss = res[1]
self.xredrank = res[2]
def predict(self):
'''calculate estimated timeseries (yhat) for sample
'''
if not hasattr(self, 'yhat'):
self.yhat = varfilter(self.y, self.arhat)
return self.yhat
def covmat(self):
''' covariance matrix of estimate
# not sure it's correct, need to check orientation everywhere
# looks ok, display needs getting used to
>>> v.rss[None,None,:]*np.linalg.inv(np.dot(v.xred.T,v.xred))[:,:,None]
array([[[ 0.37247445, 0.32210609],
[ 0.1002642 , 0.08670584]],
[[ 0.1002642 , 0.08670584],
[ 0.45903637, 0.39696255]]])
>>>
>>> v.rss[0]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.37247445, 0.1002642 ],
[ 0.1002642 , 0.45903637]])
>>> v.rss[1]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.32210609, 0.08670584],
[ 0.08670584, 0.39696255]])
'''
#check if orientation is same as self.arhat
self.paramcov = (self.rss[None,None,:] *
np.linalg.inv(np.dot(self.xred.T, self.xred))[:,:,None])
def forecast(self, horiz=1, u=None):
        '''calculates forecast for horiz number of periods at the end of the sample
Parameters
----------
horiz : int (optional, default=1)
forecast horizon
u : array (horiz, nvars)
error term for forecast periods. If None, then u is zero.
Returns
-------
yforecast : array (nobs+horiz, nvars)
this includes the sample and the forecasts
'''
if u is None:
u = np.zeros((horiz, self.nvars))
return vargenerate(self.arhat, u, initvalues=self.y)
class VarmaPoly(object):
'''class to keep track of Varma polynomial format
Examples
--------
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
'''
def __init__(self, ar, ma=None):
self.ar = ar
self.ma = ma
nlags, nvarall, nvars = ar.shape
self.nlags, self.nvarall, self.nvars = nlags, nvarall, nvars
self.isstructured = not (ar[0,:nvars] == np.eye(nvars)).all()
if self.ma is None:
self.ma = np.eye(nvars)[None,...]
self.isindependent = True
else:
self.isindependent = not (ma[0] == np.eye(nvars)).all()
self.malags = ar.shape[0]
self.hasexog = nvarall > nvars
self.arm1 = -ar[1:]
#@property
def vstack(self, a=None, name='ar'):
'''stack lagpolynomial vertically in 2d array
'''
        if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.reshape(-1, self.nvarall)
#@property
def hstack(self, a=None, name='ar'):
'''stack lagpolynomial horizontally in 2d array
'''
        if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.swapaxes(1,2).reshape(-1, self.nvarall).T
#@property
def stacksquare(self, a=None, name='ar', orientation='vertical'):
'''stack lagpolynomial vertically in 2d square array with eye
'''
        if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
astacked = a.reshape(-1, self.nvarall)
lenpk, nvars = astacked.shape #[0]
amat = np.eye(lenpk, k=nvars)
amat[:,:nvars] = astacked
return amat
#@property
def vstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.reshape(-1, self.nvarall)
#@property
def hstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
this is the Kalman Filter representation, I think
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.swapaxes(1,2).reshape(-1, self.nvarall)
def getisstationary(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isstationary : boolean
*attaches*
areigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
        if a is not None:
a = a
else:
if self.isstructured:
a = -self.reduceform(self.ar)[1:]
else:
a = -self.ar[1:]
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.areigenvalues = ev
return (np.abs(ev) < 1).all()
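    # Illustrative note (an assumption, not part of the original source): for
    # the reduced-form AR(1) example a21 in the __main__ block below, the
    # stacked companion matrix is diag(0.8, 0.6); both eigenvalues lie inside
    # the unit circle, so VarmaPoly(a21).getisstationary() returns True.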
def getisinvertible(self, a=None):
        '''check whether the moving-average lag-polynomial is invertible
Returns
-------
isinvertible : boolean
*attaches*
maeigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
        if a is not None:
a = a
else:
if self.isindependent:
a = self.reduceform(self.ma)[1:]
else:
a = self.ma[1:]
if a.shape[0] == 0:
# no ma lags
            self.maeigenvalues = np.array([], dtype=complex)
return True
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.maeigenvalues = ev
return (np.abs(ev) < 1).all()
def reduceform(self, apoly):
'''
this assumes no exog, todo
'''
if apoly.ndim != 3:
raise ValueError('apoly needs to be 3d')
nlags, nvarsex, nvars = apoly.shape
a = np.empty_like(apoly)
try:
            # invert the contemporaneous coefficient matrix of the input polynomial
            a0inv = np.linalg.inv(apoly[0, :nvars, :])
except np.linalg.LinAlgError:
raise ValueError('matrix not invertible',
'ask for implementation of pinv')
for lag in range(nlags):
a[lag] = np.dot(a0inv, apoly[lag])
return a
if __name__ == "__main__":
# some example lag polynomials
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
a22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0.1, -0.8]]])
a23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0.2],
[ 0.1, -0.6]]])
a24 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
a31 = np.r_[np.eye(3)[None,:,:], 0.8*np.eye(3)[None,:,:]]
a32 = np.array([[[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[ 0.8, 0. , 0. ],
[ 0.1, 0.6, 0. ],
[ 0. , 0. , 0.9]]])
########
ut = np.random.randn(1000,2)
ar2s = vargenerate(a22,ut)
#res = np.linalg.lstsq(lagmat(ar2s,1)[:,1:], ar2s)
res = np.linalg.lstsq(lagmat(ar2s,1), ar2s, rcond=-1)
bhat = res[0].reshape(1,2,2)
arhat = ar2full(bhat)
#print(maxabs(arhat - a22)
v = _Var(ar2s)
v.fit(1)
v.forecast()
v.forecast(25)[-30:]
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
ar23ns = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-1.9, 0. ],
[ 0.4, -0.6]],
[[ 0.3, 0. ],
[ 0.1, -0.1]]])
vp = VarmaPoly(ar23, ma22)
print(vars(vp))
print(vp.vstack())
print(vp.vstack(a24))
print(vp.hstackarma_minus1())
print(vp.getisstationary())
print(vp.getisinvertible())
vp2 = VarmaPoly(ar23ns)
print(vp2.getisstationary())
print(vp2.getisinvertible()) # no ma lags
| 27.702216
| 90
| 0.515074
|
66a93f50ffa97cd0c1aa0e28bf88e25e2c8bdf00
| 1,291
|
py
|
Python
|
setup.py
|
jakebriggs/mopidy-rnz
|
5361f115ab5cffdd6a4bdaefe35f84023923d2f3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jakebriggs/mopidy-rnz
|
5361f115ab5cffdd6a4bdaefe35f84023923d2f3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jakebriggs/mopidy-rnz
|
5361f115ab5cffdd6a4bdaefe35f84023923d2f3
|
[
"Apache-2.0"
] | null | null | null |
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
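# Illustrative note (an assumption, not derived from this file): get_version()
# expects mopidy_rnz/__init__.py to contain a line such as
#     __version__ = '0.1.0'
# which the regular expression above captures as metadata['version'].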
setup(
name='Mopidy-RNZ',
version=get_version('mopidy_rnz/__init__.py'),
url='https://github.com/danbrough/mopidy-rnz',
license='Apache License, Version 2.0',
author='Dan Brough',
author_email='dan@danbrough.org',
description='Mopidy extension for RNZ content',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'Pykka >= 1.1',
'requests-cache >= 0.4.13',
'python_dateutil >= 2.6.1',
],
entry_points={
'mopidy.ext': [
'rnz = mopidy_rnz:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| 28.065217
| 74
| 0.594113
|
38afdc6fe79d25494b4fb9bc18a5465944fc18d0
| 75,724
|
py
|
Python
|
kayobe/cli/commands.py
|
kcl-eresearch/kayobe
|
2445ec740e3445f813ba6b23c086bf113c5d96cf
|
[
"Apache-2.0"
] | 1
|
2021-06-29T12:56:22.000Z
|
2021-06-29T12:56:22.000Z
|
kayobe/cli/commands.py
|
kcl-eresearch/kayobe
|
2445ec740e3445f813ba6b23c086bf113c5d96cf
|
[
"Apache-2.0"
] | null | null | null |
kayobe/cli/commands.py
|
kcl-eresearch/kayobe
|
2445ec740e3445f813ba6b23c086bf113c5d96cf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import json
import os
import sys
from cliff.command import Command
from cliff.hooks import CommandHook
from kayobe import ansible
from kayobe import environment
from kayobe import kolla_ansible
from kayobe import utils
from kayobe import vault
# This is set to an arbitrarily large number to simplify the sorting logic
DEFAULT_SEQUENCE_NUMBER = sys.maxsize
def _build_playbook_list(*playbooks):
"""Return a list of names of playbook files given their basenames."""
return [
_get_playbook_path(playbook)
for playbook in playbooks
]
def _get_playbook_path(playbook):
"""Return the absolute path of a playbook"""
return utils.get_data_files_path("ansible", "%s.yml" % playbook)
class VaultMixin(object):
"""Mixin class for commands requiring Ansible vault."""
def get_parser(self, prog_name):
parser = super(VaultMixin, self).get_parser(prog_name)
group = parser.add_argument_group("Ansible vault")
vault.add_args(group)
return parser
class KayobeAnsibleMixin(object):
"""Mixin class for commands running Kayobe Ansible playbooks."""
def get_parser(self, prog_name):
parser = super(KayobeAnsibleMixin, self).get_parser(prog_name)
group = parser.add_argument_group("Kayobe Ansible")
self.add_kayobe_ansible_args(group)
return parser
def add_kayobe_ansible_args(self, group):
ansible.add_args(group)
def _get_verbosity_args(self):
"""Add quietness and verbosity level arguments."""
# Cliff's default verbosity level is 1, 0 means quiet.
verbosity_args = {}
if self.app.options.verbose_level:
ansible_verbose_level = self.app.options.verbose_level - 1
verbosity_args["verbose_level"] = ansible_verbose_level
else:
verbosity_args["quiet"] = True
return verbosity_args
def run_kayobe_playbooks(self, parsed_args, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return ansible.run_playbooks(parsed_args, *args, **kwargs)
def run_kayobe_playbook(self, parsed_args, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return ansible.run_playbook(parsed_args, *args, **kwargs)
def run_kayobe_config_dump(self, parsed_args, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return ansible.config_dump(parsed_args, *args, **kwargs)
def generate_kolla_ansible_config(self, parsed_args, install=False,
service_config=True,
bifrost_config=False):
"""Generate configuration for kolla-ansible.
:param install: If True, also install kolla-ansible.
:param service_config: If True, generate service configuration.
:param bifrost_config: If True, generate bifrost configuration.
"""
# We use ignore_limit here because all of these plays execute against
# localhost, and are typically necessary for kolla-ansible to function
# correctly. Previously a number of people were caught out by using
# --limit and having these plays skipped.
tags = None if install else "config"
playbooks = _build_playbook_list("kolla-ansible")
self.run_kayobe_playbooks(parsed_args, playbooks, tags=tags,
ignore_limit=True)
if service_config:
playbooks = _build_playbook_list("kolla-openstack")
self.run_kayobe_playbooks(parsed_args, playbooks,
ignore_limit=True)
if bifrost_config:
playbooks = _build_playbook_list("kolla-bifrost")
self.run_kayobe_playbooks(parsed_args, playbooks,
ignore_limit=True)
class KollaAnsibleMixin(object):
"""Mixin class for commands running Kolla Ansible."""
def get_parser(self, prog_name):
parser = super(KollaAnsibleMixin, self).get_parser(prog_name)
group = parser.add_argument_group("Kolla Ansible")
self.add_kolla_ansible_args(group)
return parser
def add_kolla_ansible_args(self, group):
kolla_ansible.add_args(group)
def _get_verbosity_args(self):
"""Add quietness and verbosity level arguments."""
# Cliff's default verbosity level is 1, 0 means quiet.
verbosity_args = {}
if self.app.options.verbose_level:
ansible_verbose_level = self.app.options.verbose_level - 1
verbosity_args["verbose_level"] = ansible_verbose_level
else:
verbosity_args["quiet"] = True
return verbosity_args
def run_kolla_ansible(self, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return kolla_ansible.run(*args, **kwargs)
def run_kolla_ansible_overcloud(self, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return kolla_ansible.run_overcloud(*args, **kwargs)
def run_kolla_ansible_seed(self, *args, **kwargs):
kwargs.update(self._get_verbosity_args())
return kolla_ansible.run_seed(*args, **kwargs)
def _split_hook_sequence_number(hook):
parts = hook.split("-", 1)
if len(parts) < 2:
return (DEFAULT_SEQUENCE_NUMBER, hook)
try:
return (int(parts[0]), parts[1])
except ValueError:
return (DEFAULT_SEQUENCE_NUMBER, hook)
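# Illustrative behaviour (an assumption, not part of the original source):
#     _split_hook_sequence_number("10-my-custom-playbook.yml")  # (10, "my-custom-playbook.yml")
#     _split_hook_sequence_number("fixup.yml")  # (DEFAULT_SEQUENCE_NUMBER, "fixup.yml")
# so unnumbered hooks sort after numbered ones when HookDispatcher.hooks() sorts
# with this key.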
class HookDispatcher(CommandHook):
"""Runs custom playbooks before and after a command"""
# Order of calls: get_epilog, get_parser, before, after
def __init__(self, *args, **kwargs):
self.command = kwargs["command"]
self.logger = self.command.app.LOG
cmd = self.command.cmd_name
# Replace white space with dashes for consistency with ansible
# playbooks. Example cmd: kayobe control host bootstrap
self.name = "-".join(cmd.split())
def get_epilog(self):
pass
def get_parser(self, prog_name):
pass
def _find_hooks(self, config_path, target):
name = self.name
path = os.path.join(config_path, "hooks", name, "%s.d" % target)
self.logger.debug("Discovering hooks in: %s" % path)
        if not os.path.exists(path):
return []
hooks = glob.glob(os.path.join(path, "*.yml"))
self.logger.debug("Discovered the following hooks: %s" % hooks)
return hooks
def hooks(self, config_path, target):
hooks = self._find_hooks(config_path, target)
# Hooks can be prefixed with a sequence number to adjust running order,
        # e.g. 10-my-custom-playbook.yml. Sort by sequence number.
hooks = sorted(hooks, key=_split_hook_sequence_number)
# Resolve symlinks so that we can reference roles.
hooks = [os.path.realpath(hook) for hook in hooks]
return hooks
def run_hooks(self, parsed_args, target):
config_path = parsed_args.config_path
hooks = self.hooks(config_path, target)
if hooks:
self.logger.debug("Running hooks: %s" % hooks)
self.command.run_kayobe_playbooks(parsed_args, hooks)
def before(self, parsed_args):
self.run_hooks(parsed_args, "pre")
return parsed_args
def after(self, parsed_args, return_code):
if return_code == 0:
self.run_hooks(parsed_args, "post")
else:
self.logger.debug("Not running hooks due to non-zero return code")
return return_code
class ControlHostBootstrap(KayobeAnsibleMixin, KollaAnsibleMixin, VaultMixin,
Command):
"""Bootstrap the Kayobe control environment.
* Downloads and installs Ansible roles from Galaxy.
* Generates an SSH key for the Ansible control host, if one does not exist.
* Installs kolla-ansible on the Ansible control host.
* Generates admin-openrc.sh and public-openrc.sh files when passwords.yml
exists.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Bootstrapping Kayobe Ansible control host")
ansible.install_galaxy_roles(parsed_args)
playbooks = _build_playbook_list("bootstrap")
self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True)
passwords_exist = ansible.passwords_yml_exists(parsed_args)
if passwords_exist:
# Install and generate configuration - necessary for post-deploy.
ka_tags = None
else:
ka_tags = "install"
playbooks = _build_playbook_list("kolla-ansible")
self.run_kayobe_playbooks(parsed_args, playbooks, tags=ka_tags,
ignore_limit=True)
if passwords_exist:
# If we are bootstrapping a control host for an existing
# environment, we should also generate the admin-openrc.sh and
# public-openrc.sh scripts that provide admin credentials.
self.run_kolla_ansible_overcloud(parsed_args, "post-deploy")
# Create an environment file for accessing the public API as the
# admin user.
playbooks = _build_playbook_list("public-openrc")
self.run_kayobe_playbooks(parsed_args, playbooks,
ignore_limit=True)
class ControlHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command):
"""Upgrade the Kayobe control environment.
* Downloads and installs updated Ansible roles from Galaxy.
* Generates an SSH key for the Ansible control host, if one does not exist.
* Updates kolla-ansible on the Ansible control host.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading Kayobe Ansible control host")
# Remove roles that are no longer used. Do this before installing new
# ones, just in case a custom role dependency includes any.
ansible.prune_galaxy_roles(parsed_args)
# Use force to upgrade roles.
ansible.install_galaxy_roles(parsed_args, force=True)
playbooks = _build_playbook_list("bootstrap")
self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True)
playbooks = _build_playbook_list("kolla-ansible")
self.run_kayobe_playbooks(parsed_args, playbooks, tags="install",
ignore_limit=True)
class ConfigurationDump(KayobeAnsibleMixin, VaultMixin, Command):
"""Dump Kayobe configuration.
Dumps kayobe Ansible host variables to standard output. The output may be
filtered by selecting one or more hosts, or a specific variable.
"""
def get_parser(self, prog_name):
parser = super(ConfigurationDump, self).get_parser(prog_name)
group = parser.add_argument_group("Configuration Dump")
group.add_argument("--dump-facts", default=False,
help="whether to gather and dump host facts")
group.add_argument("--host",
help="name of a host to dump config for")
group.add_argument("--hosts",
help="name of hosts and/or groups to dump config "
"for")
group.add_argument("--var-name",
help="name of a variable to dump")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Dumping Ansible configuration")
hostvars = self.run_kayobe_config_dump(
parsed_args, host=parsed_args.host, hosts=parsed_args.hosts,
facts=parsed_args.dump_facts, var_name=parsed_args.var_name)
try:
json.dump(hostvars, sys.stdout, sort_keys=True, indent=4)
except TypeError as e:
self.app.LOG.error("Failed to JSON encode configuration: %s",
repr(e))
sys.exit(1)
class PlaybookRun(KayobeAnsibleMixin, VaultMixin, Command):
"""Run a Kayobe Ansible playbook.
Allows a single Kayobe ansible playbook to be run. For advanced users only.
"""
def add_kayobe_ansible_args(self, group):
super(PlaybookRun, self).add_kayobe_ansible_args(group)
group.add_argument("playbook", nargs="+",
help="name of the playbook(s) to run")
def take_action(self, parsed_args):
self.app.LOG.debug("Running Kayobe playbook(s)")
self.run_kayobe_playbooks(parsed_args, parsed_args.playbook)
class KollaAnsibleRun(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Run a Kolla Ansible command.
Allows a single kolla-ansible command to be run. For advanced users only.
"""
def add_kolla_ansible_args(self, group):
super(KollaAnsibleRun, self).add_kolla_ansible_args(group)
group.add_argument("--kolla-inventory-filename", default="overcloud",
choices=["seed", "overcloud"],
help="name of the kolla-ansible inventory file, "
"one of seed or overcloud (default "
"overcloud)")
group.add_argument("command",
help="name of the kolla-ansible command to run")
def take_action(self, parsed_args):
self.app.LOG.debug("Running Kolla Ansible command")
self.run_kolla_ansible(parsed_args, parsed_args.command,
parsed_args.kolla_inventory_filename)
class PhysicalNetworkConfigure(KayobeAnsibleMixin, VaultMixin, Command):
"""Configure a set of physical network devices."""
def get_parser(self, prog_name):
parser = super(PhysicalNetworkConfigure, self).get_parser(
prog_name)
group = parser.add_argument_group("Physical Networking")
group.add_argument("--group", required=True,
help="the Ansible group to apply configuration to")
group.add_argument("--display", action="store_true",
help="display the candidate configuration and exit "
"without applying it")
group.add_argument("--interface-limit",
help="limit the switch interfaces to be configured "
"by interface name")
group.add_argument("--interface-description-limit",
help="limit the switch interfaces to be configured "
"by interface description")
discovery = parser.add_mutually_exclusive_group()
discovery.add_argument("--enable-discovery", action="store_true",
help="configure the network for hardware "
"discovery")
discovery.add_argument("--disable-discovery", action="store_true",
help="deconfigure the network for hardware "
"discovery")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Configuring a physical network")
extra_vars = {}
extra_vars["physical_network_display"] = parsed_args.display
if parsed_args.enable_discovery:
extra_vars["physical_network_enable_discovery"] = True
if parsed_args.disable_discovery:
extra_vars["physical_network_disable_discovery"] = True
if parsed_args.interface_limit:
extra_vars["physical_network_interface_limit"] = (
parsed_args.interface_limit)
if parsed_args.interface_description_limit:
extra_vars["physical_network_interface_description_limit"] = (
parsed_args.interface_description_limit)
self.run_kayobe_playbook(parsed_args,
_get_playbook_path('physical-network'),
limit=parsed_args.group,
extra_vars=extra_vars)
class SeedHypervisorHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Configure the seed hypervisor node host OS and services.
* Allocate IP addresses for all configured networks.
* Add the host to SSH known hosts.
* Configure a user account for use by kayobe for SSH access.
* Configure package repos.
* Configure a PyPI mirror.
* Optionally, create a virtualenv for remote target hosts.
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Configure timezone and ntp.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
* Configure the host as a libvirt hypervisor.
"""
def get_parser(self, prog_name):
parser = super(SeedHypervisorHostConfigure, self).get_parser(prog_name)
group = parser.add_argument_group("Host Configuration")
group.add_argument("--wipe-disks", action='store_true',
help="wipe partition and LVM data from all disks "
"that are not mounted. Warning: this can "
"result in the loss of data")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Configuring seed hypervisor host OS")
# Explicitly request the dump-config tag to ensure this play runs even
# if the user specified tags.
ansible_user = self.run_kayobe_config_dump(
parsed_args, host="seed-hypervisor",
var_name="kayobe_ansible_user", tags="dump-config")
if not ansible_user:
self.app.LOG.error("Could not determine kayobe_ansible_user "
"variable for seed hypervisor host")
sys.exit(1)
# Allocate IP addresses.
playbooks = _build_playbook_list("ip-allocation")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor")
playbooks = _build_playbook_list(
"ssh-known-host", "kayobe-ansible-user",
"dnf", "pip", "kayobe-target-venv")
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "dev-tools", "network", "sysctl", "time",
"mdadm", "luks", "lvm", "seed-hypervisor-libvirt-host")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor")
class SeedHypervisorHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command):
"""Update packages on the seed hypervisor host."""
def get_parser(self, prog_name):
parser = super(SeedHypervisorHostPackageUpdate, self).get_parser(
prog_name)
group = parser.add_argument_group("Host Package Updates")
group.add_argument("--packages", required=True,
help="List of packages to update. Use '*' to "
"update all packages.")
group.add_argument("--security", action='store_true',
help="Only install updates that have been marked "
"security related.")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Updating seed hypervisor host packages")
extra_vars = {
"host_package_update_packages": parsed_args.packages,
"host_package_update_security": parsed_args.security,
}
playbooks = _build_playbook_list("host-package-update")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor",
extra_vars=extra_vars)
class SeedHypervisorHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command):
"""Run command on the seed hypervisor host."""
def get_parser(self, prog_name):
parser = super(SeedHypervisorHostCommandRun, self).get_parser(
prog_name)
group = parser.add_argument_group("Host Command Run")
group.add_argument("--command", required=True,
help="Command to run (required).")
group.add_argument("--show-output", action='store_true',
help="Show command output")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Run command on seed hypervisor host")
extra_vars = {
"host_command_to_run": utils.escape_jinja(parsed_args.command),
"show_output": parsed_args.show_output}
playbooks = _build_playbook_list("host-command-run")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor",
extra_vars=extra_vars)
class SeedHypervisorHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command):
"""Upgrade the seed hypervisor host services.
Performs the changes necessary to make the host services suitable for the
configured OpenStack release.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading seed hypervisor host services")
playbooks = _build_playbook_list("kayobe-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor")
class SeedVMProvision(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Provision the seed VM.
* Allocate IP addresses for all configured networks.
* Provision a virtual machine using libvirt.
* Configure the kolla-ansible inventory for the seed VM.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Provisioning seed VM")
self.run_kayobe_playbook(parsed_args,
_get_playbook_path("ip-allocation"),
limit="seed")
self.run_kayobe_playbook(parsed_args,
_get_playbook_path("seed-vm-provision"))
# Now populate the Kolla Ansible inventory.
self.generate_kolla_ansible_config(parsed_args, service_config=False)
class SeedVMDeprovision(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Deprovision the seed VM.
This will destroy the seed VM and all associated volumes.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Deprovisioning seed VM")
self.run_kayobe_playbook(parsed_args,
_get_playbook_path("seed-vm-deprovision"))
class SeedHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Configure the seed node host OS and services.
* Allocate IP addresses for all configured networks.
* Add the host to SSH known hosts.
* Configure a user account for use by kayobe for SSH access.
* Configure package repos.
* Configure a PyPI mirror.
* Optionally, create a virtualenv for remote target hosts.
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Disable SELinux.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Configure IP routing and source NAT.
* Disable bootstrap interface configuration.
* Configure timezone and ntp.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
* Optionally, create a virtualenv for kolla-ansible.
* Configure a user account for kolla-ansible.
* Configure Docker engine.
* Optionally, deploy a Docker Registry.
"""
def get_parser(self, prog_name):
parser = super(SeedHostConfigure, self).get_parser(prog_name)
group = parser.add_argument_group("Host Configuration")
group.add_argument("--wipe-disks", action='store_true',
help="wipe partition and LVM data from all disks "
"that are not mounted. Warning: this can "
"result in the loss of data")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Configuring seed host OS")
# Allocate IP addresses.
playbooks = _build_playbook_list("ip-allocation")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed")
# Run kayobe playbooks.
playbooks = _build_playbook_list(
"ssh-known-host", "kayobe-ansible-user",
"dnf", "pip", "kayobe-target-venv")
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "dev-tools", "disable-selinux", "network",
"sysctl", "ip-routing", "snat", "disable-glean", "time",
"mdadm", "luks", "lvm", "docker-devicemapper",
"kolla-ansible-user", "kolla-pip", "kolla-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed")
self.generate_kolla_ansible_config(parsed_args, service_config=False)
# Run kolla-ansible bootstrap-servers.
self.run_kolla_ansible_seed(parsed_args, "bootstrap-servers")
# Run final kayobe playbooks.
playbooks = _build_playbook_list(
"kolla-host", "docker")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed")
# Optionally, deploy a Docker Registry.
playbooks = _build_playbook_list("docker-registry")
extra_vars = {"kayobe_action": "deploy"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="seed")
class SeedHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command):
"""Update packages on the seed host."""
def get_parser(self, prog_name):
parser = super(SeedHostPackageUpdate, self).get_parser(prog_name)
group = parser.add_argument_group("Host Package Updates")
group.add_argument("--packages", required=True,
help="List of packages to update. Use '*' to "
"update all packages.")
group.add_argument("--security", action='store_true',
help="Only install updates that have been marked "
"security related.")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Updating seed host packages")
extra_vars = {
"host_package_update_packages": parsed_args.packages,
"host_package_update_security": parsed_args.security,
}
playbooks = _build_playbook_list("host-package-update")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed",
extra_vars=extra_vars)
class SeedHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command):
"""Run command on the seed host."""
def get_parser(self, prog_name):
parser = super(SeedHostCommandRun, self).get_parser(prog_name)
group = parser.add_argument_group("Host Command Run")
group.add_argument("--command", required=True,
help="Command to run (required).")
group.add_argument("--show-output", action='store_true',
help="Show command output")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Run command on seed host")
extra_vars = {
"host_command_to_run": utils.escape_jinja(parsed_args.command),
"show_output": parsed_args.show_output}
playbooks = _build_playbook_list("host-command-run")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed",
extra_vars=extra_vars)
class SeedHostUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Upgrade the seed host services.
Performs the changes necessary to make the host services suitable for the
configured OpenStack release.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading seed host services")
playbooks = _build_playbook_list(
"kayobe-target-venv", "kolla-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed")
class SeedServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Deploy the seed services.
* Deploys user defined containers
* Configures kolla-ansible.
* Configures the bifrost service.
* Deploys the bifrost container using kolla-ansible.
* Builds disk images for the overcloud hosts using Diskimage Builder (DIB).
* Configures ironic inspector introspection rules in the bifrost inspector
service.
* When enabled, configures a Bare Metal Provisioning (BMP) environment for
Dell Force10 switches, hosted by the bifrost dnsmasq and nginx services.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Deploying seed services")
playbooks = _build_playbook_list(
"seed-deploy-containers")
self.run_kayobe_playbooks(parsed_args, playbooks)
self.generate_kolla_ansible_config(parsed_args, service_config=False,
bifrost_config=True)
self.run_kolla_ansible_seed(parsed_args, "deploy-bifrost")
playbooks = _build_playbook_list(
"seed-introspection-rules",
"dell-switch-bmp")
self.run_kayobe_playbooks(parsed_args, playbooks)
class SeedServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Upgrade the seed services.
    * Deploys user-defined containers.
* Configures kolla-ansible.
* Configures the bifrost service.
* Prepares the bifrost service for an upgrade.
* Deploys the bifrost container using kolla-ansible.
* Builds disk images for the overcloud hosts using Diskimage Builder (DIB).
* Configures ironic inspector introspection rules in the bifrost inspector
service.
* When enabled, configures a Bare Metal Provisioning (BMP) environment for
Dell Force10 switches, hosted by the bifrost dnsmasq and nginx services.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading seed services")
playbooks = _build_playbook_list(
"seed-deploy-containers")
self.run_kayobe_playbooks(parsed_args, playbooks)
self.generate_kolla_ansible_config(parsed_args, service_config=False,
bifrost_config=True)
playbooks = _build_playbook_list(
"seed-service-upgrade-prep")
self.run_kayobe_playbooks(parsed_args, playbooks)
self.run_kolla_ansible_seed(parsed_args, "upgrade-bifrost")
playbooks = _build_playbook_list(
"seed-introspection-rules",
"dell-switch-bmp")
self.run_kayobe_playbooks(parsed_args, playbooks)
class SeedContainerImageBuild(KayobeAnsibleMixin, VaultMixin, Command):
"""Build the seed container images.
* Installs and configures kolla build environment on the seed.
* Builds container images for the seed services.
"""
def get_parser(self, prog_name):
parser = super(SeedContainerImageBuild, self).get_parser(
prog_name)
group = parser.add_argument_group("Container Image Build")
group.add_argument("--nocache", action="store_true",
help="whether to not use cache")
group.add_argument("--push", action="store_true",
help="whether to push images to a registry after "
"building")
group.add_argument("regex", nargs='*',
help="regular expression matching names of images "
"to build. Builds all images if unspecified")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Building seed container images")
playbooks = _build_playbook_list(
"container-image-builders-check", "kolla-build",
"container-image-build")
extra_vars = {
"nocache": parsed_args.nocache,
"push_images": parsed_args.push
}
if parsed_args.regex:
regexes = " ".join(parsed_args.regex)
extra_vars["container_image_regexes"] = regexes
else:
extra_vars["container_image_sets"] = (
"{{ seed_container_image_sets }}")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class SeedDeploymentImageBuild(KayobeAnsibleMixin, VaultMixin, Command):
"""Build the seed deployment kernel and ramdisk images.
Builds Ironic Python Agent (IPA) deployment images using Diskimage Builder
(DIB) for use when provisioning and inspecting the overcloud hosts.
"""
def get_parser(self, prog_name):
parser = super(SeedDeploymentImageBuild, self).get_parser(
prog_name)
group = parser.add_argument_group("Deployment Image Build")
group.add_argument("--force-rebuild", action="store_true",
help="whether to force rebuilding the images")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Building seed deployment images")
playbooks = _build_playbook_list("seed-ipa-build")
extra_vars = {}
if parsed_args.force_rebuild:
extra_vars["ipa_image_force_rebuild"] = True
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class OvercloudInventoryDiscover(KayobeAnsibleMixin, VaultMixin, Command):
"""Discover the overcloud inventory from the seed's Ironic service.
* Query the ironic inventory on the seed, and use this to populate kayobe's
ansible inventory.
* Allocate IP addresses for all configured networks.
* Update the kolla-ansible configuration for the new overcloud hosts.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Discovering overcloud inventory")
# Run the inventory discovery playbook separately, else the discovered
# hosts will not be present in the following playbooks in which they
# are used to populate other inventories.
self.run_kayobe_playbook(parsed_args,
_get_playbook_path(
"overcloud-inventory-discover"))
# If necessary, allocate IP addresses for the discovered hosts.
self.run_kayobe_playbook(parsed_args,
_get_playbook_path("ip-allocation"))
# Now populate the Kolla Ansible inventory.
self.generate_kolla_ansible_config(parsed_args, service_config=False)
class OvercloudIntrospectionDataSave(KayobeAnsibleMixin, VaultMixin, Command):
"""Save hardware introspection data for the overcloud.
Save hardware introspection data from the seed's ironic inspector service
to the Ansible control host.
"""
def get_parser(self, prog_name):
parser = super(OvercloudIntrospectionDataSave, self).get_parser(
prog_name)
group = parser.add_argument_group("Introspection data")
# Defaults for these are applied in the playbook.
group.add_argument("--output-dir", type=str,
help="Path to directory in which to save "
"introspection data. Default: "
"$PWD/overcloud-introspection-data")
group.add_argument("--output-format", type=str,
help="Format in which to save output data. One of "
"JSON or YAML. Default: JSON",
choices=["JSON", "YAML"])
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Saving introspection data")
extra_vars = {}
if parsed_args.output_dir:
extra_vars['output_dir'] = parsed_args.output_dir
if parsed_args.output_format:
extra_vars['output_format'] = parsed_args.output_format
playbooks = _build_playbook_list("kolla-bifrost-hostvars",
"overcloud-introspection-data-save")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class OvercloudBIOSRAIDConfigure(KayobeAnsibleMixin, VaultMixin, Command):
"""Configure BIOS and RAID for the overcloud hosts."""
def take_action(self, parsed_args):
self.app.LOG.debug("Configure overcloud BIOS and RAID")
playbooks = _build_playbook_list("overcloud-bios-raid")
self.run_kayobe_playbooks(parsed_args, playbooks)
class OvercloudHardwareInspect(KayobeAnsibleMixin, VaultMixin, Command):
"""Inspect the overcloud hardware using ironic inspector.
Perform hardware inspection of existing ironic nodes in the seed's
ironic inventory.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Inspecting overcloud")
playbooks = _build_playbook_list("kolla-bifrost-hostvars",
"overcloud-hardware-inspect")
self.run_kayobe_playbooks(parsed_args, playbooks)
class OvercloudProvision(KayobeAnsibleMixin, VaultMixin, Command):
"""Provision the overcloud.
Provision the overcloud hosts using the seed host's bifrost service. This
will image the hosts and perform some minimal network configuration using
glean/simple-init.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Provisioning overcloud")
playbooks = _build_playbook_list("kolla-bifrost-hostvars",
"overcloud-provision")
self.run_kayobe_playbooks(parsed_args, playbooks)
class OvercloudDeprovision(KayobeAnsibleMixin, VaultMixin, Command):
"""Deprovision the overcloud.
Deprovision the overcloud hosts using the seed host's bifrost service. This
will clear the instance state of the nodes from the seed's ironic service
and power them off.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Deprovisioning overcloud")
playbooks = _build_playbook_list("overcloud-deprovision")
self.run_kayobe_playbooks(parsed_args, playbooks)
class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Configure the overcloud host OS and services.
* Allocate IP addresses for all configured networks.
* Add the host to SSH known hosts.
* Configure a user account for use by kayobe for SSH access.
* Configure package repos.
* Configure a PyPI mirror.
* Optionally, create a virtualenv for remote target hosts.
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Disable SELinux.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Disable bootstrap interface configuration.
* Configure timezone and ntp.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
* Optionally, create a virtualenv for kolla-ansible.
* Configure a user account for kolla-ansible.
* Configure Docker engine.
"""
def get_parser(self, prog_name):
parser = super(OvercloudHostConfigure, self).get_parser(prog_name)
group = parser.add_argument_group("Host Configuration")
group.add_argument("--wipe-disks", action='store_true',
help="wipe partition and LVM data from all disks "
"that are not mounted. Warning: this can "
"result in the loss of data")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Configuring overcloud host OS")
# Allocate IP addresses.
playbooks = _build_playbook_list("ip-allocation")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
# Kayobe playbooks.
playbooks = _build_playbook_list(
"ssh-known-host", "kayobe-ansible-user",
"dnf", "pip", "kayobe-target-venv")
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "dev-tools", "disable-selinux", "network",
"sysctl", "disable-glean", "disable-cloud-init", "time",
"mdadm", "luks", "lvm", "docker-devicemapper",
"kolla-ansible-user", "kolla-pip", "kolla-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
self.generate_kolla_ansible_config(parsed_args, service_config=False)
# Kolla-ansible bootstrap-servers.
self.run_kolla_ansible_overcloud(parsed_args, "bootstrap-servers")
# Further kayobe playbooks.
playbooks = _build_playbook_list(
"kolla-host", "docker", "swift-block-devices")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
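# Illustrative CLI invocation (assumed for clarity, not part of this module):
# the class above backs the 'kayobe overcloud host configure' command, and the
# optional --wipe-disks flag is the one defined in get_parser above.
#
#   kayobe overcloud host configure [--wipe-disks]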
class OvercloudHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command):
"""Update packages on the overcloud hosts."""
def get_parser(self, prog_name):
parser = super(OvercloudHostPackageUpdate, self).get_parser(prog_name)
group = parser.add_argument_group("Host Package Updates")
group.add_argument("--packages", required=True,
help="List of packages to update. Use '*' to "
"update all packages.")
group.add_argument("--security", action='store_true',
help="Only install updates that have been marked "
"security related.")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Updating overcloud host packages")
extra_vars = {
"host_package_update_packages": parsed_args.packages,
"host_package_update_security": parsed_args.security,
}
playbooks = _build_playbook_list("host-package-update")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud",
extra_vars=extra_vars)
class OvercloudHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command):
"""Run command on the overcloud host."""
def get_parser(self, prog_name):
parser = super(OvercloudHostCommandRun, self).get_parser(prog_name)
group = parser.add_argument_group("Host Command Run")
group.add_argument("--command", required=True,
help="Command to run (required).")
group.add_argument("--show-output", action='store_true',
help="Show command output")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Run command on overcloud host")
extra_vars = {
"host_command_to_run": utils.escape_jinja(parsed_args.command),
"show_output": parsed_args.show_output}
playbooks = _build_playbook_list("host-command-run")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud",
extra_vars=extra_vars)
class OvercloudHostUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Upgrade the overcloud host services.
Performs the changes necessary to make the host services suitable for the
configured OpenStack release.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading overcloud host services")
playbooks = _build_playbook_list(
"kayobe-target-venv", "kolla-target-venv",
"overcloud-docker-sdk-upgrade", "overcloud-etc-hosts-fixup")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
# TODO(mgoddard): Remove this in Y cycle after Kolla Ansible chrony
# container has been dropped for a cycle.
# NOTE(mgoddard): Clean up the chrony container if it exists, and
# deploy a host chrony daemon.
self.generate_kolla_ansible_config(parsed_args, service_config=False)
self.run_kolla_ansible_overcloud(parsed_args, "chrony-cleanup")
playbooks = _build_playbook_list("time")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
class OvercloudDatabaseBackup(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Backup the overcloud database."""
def get_parser(self, prog_name):
parser = super(OvercloudDatabaseBackup, self).get_parser(prog_name)
group = parser.add_argument_group("Overcloud Database Backup")
group.add_argument("--incremental", action='store_true',
help="Whether to perform an incremental database "
"backup. Default is false.")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Performing overcloud database backup")
extra_args = []
if parsed_args.incremental:
extra_args.append('--incremental')
self.run_kolla_ansible_overcloud(parsed_args, "mariadb_backup",
extra_args=extra_args)
class OvercloudDatabaseRecover(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Recover the overcloud database."""
def get_parser(self, prog_name):
parser = super(OvercloudDatabaseRecover, self).get_parser(prog_name)
group = parser.add_argument_group("Overcloud Database Recovery")
group.add_argument("--force-recovery-host",
help="Name of a host to use to perform the "
"recovery. By default kolla-ansible will "
"automatically determine which host to use, "
"and this option should not be used.")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Performing overcloud database recovery")
extra_vars = {}
if parsed_args.force_recovery_host:
extra_vars['mariadb_recover_inventory_name'] = (
parsed_args.force_recovery_host)
self.run_kolla_ansible_overcloud(parsed_args, "mariadb_recovery",
extra_vars=extra_vars)
class OvercloudServiceConfigurationGenerate(KayobeAnsibleMixin,
KollaAnsibleMixin, VaultMixin,
Command):
"""Generate the overcloud service configuration files.
Generates kolla-ansible configuration for the OpenStack control plane
services, without pushing that configuration to the running containers.
This can be used to generate a candidate configuration set for comparison
with the existing configuration. It is recommended to use a directory other
than /etc/kolla for --node-config-dir, to ensure that the running
containers are not affected.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceConfigurationGenerate,
self).get_parser(prog_name)
group = parser.add_argument_group("Service Configuration")
group.add_argument("--node-config-dir", required=True,
help="the directory to store the config files on "
"the remote node (required)")
group.add_argument("--skip-prechecks", action='store_true',
help="skip the kolla-ansible prechecks command")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Generating overcloud service configuration")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run kolla-ansible prechecks before deployment.
if not parsed_args.skip_prechecks:
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
# Generate the configuration.
extra_vars = {}
if parsed_args.node_config_dir:
extra_vars["node_config_directory"] = parsed_args.node_config_dir
self.run_kolla_ansible_overcloud(parsed_args, "genconfig",
extra_vars=extra_vars)
class OvercloudServiceConfigurationSave(KayobeAnsibleMixin, VaultMixin,
Command):
"""Gather and save the overcloud service configuration files.
This can be used to collect the running configuration for inspection (the
default) or a candidate configuration generated via 'kayobe overcloud
    service configuration generate', for comparison with another configuration
set.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceConfigurationSave, self).get_parser(
prog_name)
group = parser.add_argument_group("Service configuration")
group.add_argument("--exclude",
help="optional comma-separated list of patterns "
"matching filenames to exclude")
group.add_argument("--include",
help="optional comma-separated list of patterns "
"matching filenames to include")
group.add_argument("--node-config-dir",
help="the directory to store the config files on "
"the remote node (default /etc/kolla)")
group.add_argument("--output-dir",
help="path to a directory in which to save "
"configuration")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Saving overcloud service configuration")
playbooks = _build_playbook_list("overcloud-service-config-save")
extra_vars = {}
if parsed_args.exclude:
extra_vars["exclude_patterns"] = parsed_args.exclude
if parsed_args.include:
extra_vars["include_patterns"] = parsed_args.include
if parsed_args.output_dir:
extra_vars["config_save_path"] = parsed_args.output_dir
if parsed_args.node_config_dir:
extra_vars["node_config_directory"] = parsed_args.node_config_dir
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
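# Illustrative workflow (assumed for clarity, not part of this module): the two
# commands above can be combined to diff a candidate configuration against the
# running one. The directory names and the diff step are assumptions.
#
#   kayobe overcloud service configuration save --output-dir ./running-config
#   kayobe overcloud service configuration generate --node-config-dir /etc/kolla-candidate
#   kayobe overcloud service configuration save --node-config-dir /etc/kolla-candidate \
#       --output-dir ./candidate-config
#   diff -ru ./running-config ./candidate-config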
class OvercloudServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Deploy the overcloud services.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform kolla-ansible prechecks to verify the system state for
deployment.
* Perform a kolla-ansible deployment of the overcloud services.
* Configure and deploy kayobe extra services.
* Generate openrc files for the admin user.
This can be used in conjunction with the --tags and --kolla-tags arguments
to deploy specific services.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceDeploy, self).get_parser(prog_name)
group = parser.add_argument_group("Service Deployment")
group.add_argument("--skip-prechecks", action='store_true',
help="skip the kolla-ansible prechecks command")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Deploying overcloud services")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run kolla-ansible prechecks before deployment.
if not parsed_args.skip_prechecks:
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
# Perform the kolla-ansible deployment.
self.run_kolla_ansible_overcloud(parsed_args, "deploy")
# Deploy kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "deploy"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
# Post-deployment configuration.
self.run_kolla_ansible_overcloud(parsed_args, "post-deploy")
# Create an environment file for accessing the public API as the admin
# user.
playbooks = _build_playbook_list("public-openrc")
self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True)
class OvercloudServiceDeployContainers(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Deploy the overcloud services without updating configuration.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform kolla-ansible prechecks to verify the system state for
deployment.
* Perform a kolla-ansible deployment of the overcloud service containers.
* Configure and deploy kayobe extra services.
This can be used in conjunction with the --tags and --kolla-tags arguments
to deploy specific services.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceDeployContainers, self).get_parser(
prog_name)
group = parser.add_argument_group("Service Deployment")
group.add_argument("--skip-prechecks", action='store_true',
help="skip the kolla-ansible prechecks command")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Deploying overcloud services (containers only)")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run kolla-ansible prechecks before deployment.
if not parsed_args.skip_prechecks:
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
# Perform the kolla-ansible deployment.
self.run_kolla_ansible_overcloud(parsed_args, "deploy-containers")
# Deploy kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "deploy"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
class OvercloudServicePrechecks(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Run prechecks against overcloud services.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform kolla-ansible prechecks to verify the system state for
deployment.
This can be used in conjunction with the --tags and --kolla-tags arguments
to check specific services.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Running overcloud prechecks")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run the kolla-ansible prechecks.
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
class OvercloudServiceReconfigure(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Reconfigure the overcloud services.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform kolla-ansible prechecks to verify the system state for
deployment.
* Perform a kolla-ansible reconfiguration of the overcloud services.
* Configure and deploy kayobe extra services.
* Generate openrc files for the admin user.
This can be used in conjunction with the --tags and --kolla-tags arguments
to reconfigure specific services.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceReconfigure, self).get_parser(prog_name)
group = parser.add_argument_group("Service Reconfiguration")
group.add_argument("--skip-prechecks", action='store_true',
help="skip the kolla-ansible prechecks command")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Reconfiguring overcloud services")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run kolla-ansible prechecks before reconfiguration.
if not parsed_args.skip_prechecks:
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
# Perform the kolla-ansible reconfiguration.
self.run_kolla_ansible_overcloud(parsed_args, "reconfigure")
# Reconfigure kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "reconfigure"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
# Post-deployment configuration.
self.run_kolla_ansible_overcloud(parsed_args, "post-deploy")
# Create an environment file for accessing the public API as the admin
# user.
playbooks = _build_playbook_list("public-openrc")
self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True)
class OvercloudServiceStop(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
Command):
"""Stop the overcloud services.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform a kolla-ansible stop of the overcloud services.
* Stop kayobe extra services.
This can be used in conjunction with the --tags and --kolla-tags arguments
to stop specific services.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceStop, self).get_parser(prog_name)
group = parser.add_argument_group("Services")
group.add_argument("--yes-i-really-really-mean-it",
action='store_true',
help="confirm that you understand that this will "
"stop running services.")
return parser
def take_action(self, parsed_args):
if not parsed_args.yes_i_really_really_mean_it:
self.app.LOG.error("This will stop running services. Specify "
"--yes-i-really-really-mean-it to confirm that "
"you understand this.")
sys.exit(1)
self.app.LOG.debug("Stopping overcloud services")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Perform the kolla-ansible stop.
extra_args = ["--yes-i-really-really-mean-it"]
self.run_kolla_ansible_overcloud(parsed_args, "stop",
extra_args=extra_args)
# Stop kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "stop"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
class OvercloudServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Upgrade the overcloud services.
* Configure kolla-ansible.
* Configure overcloud services in kolla-ansible.
* Perform kolla-ansible prechecks to verify the system state for
deployment.
* Perform a kolla-ansible upgrade of the overcloud services.
* Configure and upgrade kayobe extra services.
* Regenerate openrc files for the admin user.
This can be used in conjunction with the --tags and --kolla-tags arguments
to upgrade specific services.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceUpgrade, self).get_parser(prog_name)
group = parser.add_argument_group("Service Upgrade")
group.add_argument("--skip-prechecks", action='store_true',
help="skip the kolla-ansible prechecks command")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Upgrading overcloud services")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args, install=True)
# Run kolla-ansible prechecks before upgrade.
if not parsed_args.skip_prechecks:
self.run_kolla_ansible_overcloud(parsed_args, "prechecks")
# Perform the kolla-ansible upgrade.
self.run_kolla_ansible_overcloud(parsed_args, "upgrade")
# Upgrade kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "upgrade"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
# Post-deployment configuration.
self.run_kolla_ansible_overcloud(parsed_args, "post-deploy")
# Create an environment file for accessing the public API as the admin
# user.
playbooks = _build_playbook_list("public-openrc")
self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True)
class OvercloudServiceDestroy(KollaAnsibleMixin, KayobeAnsibleMixin,
VaultMixin, Command):
"""Destroy the overcloud services.
Permanently destroy the overcloud containers, container images, and
container volumes.
"""
def get_parser(self, prog_name):
parser = super(OvercloudServiceDestroy, self).get_parser(prog_name)
group = parser.add_argument_group("Services")
group.add_argument("--yes-i-really-really-mean-it",
action='store_true',
help="confirm that you understand that this will "
"permantently destroy all services and data.")
return parser
def take_action(self, parsed_args):
if not parsed_args.yes_i_really_really_mean_it:
self.app.LOG.error("This will permanently destroy all services "
"and data. Specify "
"--yes-i-really-really-mean-it to confirm that "
"you understand this.")
sys.exit(1)
self.app.LOG.debug("Destroying overcloud services")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args)
# Run kolla-ansible destroy.
extra_args = ["--yes-i-really-really-mean-it"]
self.run_kolla_ansible_overcloud(parsed_args, "destroy",
extra_args=extra_args)
# Destroy kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "destroy"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
class OvercloudContainerImagePull(KayobeAnsibleMixin, KollaAnsibleMixin,
VaultMixin, Command):
"""Pull the overcloud container images from a registry."""
def take_action(self, parsed_args):
self.app.LOG.debug("Pulling overcloud container images")
# First prepare configuration.
self.generate_kolla_ansible_config(parsed_args, service_config=False)
# Pull updated kolla container images.
self.run_kolla_ansible_overcloud(parsed_args, "pull")
# Pull container images for kayobe extra services.
playbooks = _build_playbook_list("overcloud-extras")
extra_vars = {"kayobe_action": "pull"}
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")
class OvercloudContainerImageBuild(KayobeAnsibleMixin, VaultMixin, Command):
"""Build the overcloud container images."""
def get_parser(self, prog_name):
parser = super(OvercloudContainerImageBuild, self).get_parser(
prog_name)
group = parser.add_argument_group("Container Image Build")
group.add_argument("--nocache", action="store_true",
help="whether to not use cache")
group.add_argument("--push", action="store_true",
help="whether to push images to a registry after "
"building")
group.add_argument("regex", nargs='*',
help="regular expression matching names of images "
"to build. Builds all images if unspecified")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Building overcloud container images")
playbooks = _build_playbook_list(
"container-image-builders-check", "kolla-build",
"container-image-build")
extra_vars = {
"nocache": parsed_args.nocache,
"push_images": parsed_args.push
}
if parsed_args.regex:
regexes = " ".join(parsed_args.regex)
extra_vars["container_image_regexes"] = regexes
else:
extra_vars["container_image_sets"] = (
"{{ overcloud_container_image_sets }}")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class OvercloudDeploymentImageBuild(KayobeAnsibleMixin, VaultMixin, Command):
"""Build the overcloud deployment kernel and ramdisk images."""
def get_parser(self, prog_name):
parser = super(OvercloudDeploymentImageBuild, self).get_parser(
prog_name)
group = parser.add_argument_group("Deployment Image Build")
group.add_argument("--force-rebuild", action="store_true",
help="whether to force rebuilding the images")
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Building overcloud deployment images")
playbooks = _build_playbook_list("overcloud-ipa-build")
extra_vars = {}
if parsed_args.force_rebuild:
extra_vars["ipa_image_force_rebuild"] = True
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class OvercloudPostConfigure(KayobeAnsibleMixin, VaultMixin, Command):
"""Perform post-deployment configuration.
* Register Ironic Python Agent (IPA) deployment images using Diskimage
Builder (DIB), if building deployment images locally.
* Register ironic inspector introspection rules with the overcloud
inspector service.
* Register a provisioning network with glance.
* Configure Grafana for control plane.
    * Configure serial consoles for the ironic nodes.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Performing post-deployment configuration")
playbooks = _build_playbook_list(
"overcloud-ipa-images", "overcloud-introspection-rules",
"overcloud-introspection-rules-dell-lldp-workaround",
"provision-net", "overcloud-grafana-configure",
"baremetal-compute-serial-console-post-config")
self.run_kayobe_playbooks(parsed_args, playbooks)
class OvercloudSwiftRingsGenerate(KayobeAnsibleMixin, VaultMixin, Command):
"""Generate Swift rings."""
def take_action(self, parsed_args):
self.app.LOG.debug("Generating Swift rings")
playbooks = _build_playbook_list("swift-rings")
self.run_kayobe_playbooks(parsed_args, playbooks)
class NetworkConnectivityCheck(KayobeAnsibleMixin, VaultMixin, Command):
"""Check network connectivity between hosts in the control plane.
Checks for access to an external IP address, an external hostname, any
configured gateways, and between hosts on the same subnets. The MTU of
each network is validated by sending ping packets of maximum size.
"""
def take_action(self, parsed_args):
self.app.LOG.debug("Performing network connectivity check")
playbooks = _build_playbook_list("network-connectivity")
self.run_kayobe_playbooks(parsed_args, playbooks)
class BaremetalComputeInspect(KayobeAnsibleMixin, VaultMixin, Command):
"""Perform hardware inspection on baremetal compute nodes."""
def take_action(self, parsed_args):
self.app.LOG.debug("Performing hardware inspection on baremetal "
"compute nodes")
playbooks = _build_playbook_list("baremetal-compute-inspect")
self.run_kayobe_playbooks(parsed_args, playbooks)
class BaremetalComputeManage(KayobeAnsibleMixin, VaultMixin, Command):
"""Put baremetal compute nodes into the manageable provision state."""
def take_action(self, parsed_args):
self.app.LOG.debug("Making baremetal compute nodes manageable")
playbooks = _build_playbook_list("baremetal-compute-manage")
self.run_kayobe_playbooks(parsed_args, playbooks)
class BaremetalComputeProvide(KayobeAnsibleMixin, VaultMixin, Command):
"""Put baremetal compute nodes into the available provision state."""
def take_action(self, parsed_args):
self.app.LOG.debug("Making baremetal compute nodes available")
playbooks = _build_playbook_list("baremetal-compute-provide")
self.run_kayobe_playbooks(parsed_args, playbooks)
class BaremetalComputeRename(KayobeAnsibleMixin, VaultMixin, Command):
"""Rename baremetal compute nodes to match inventory hostname"""
def take_action(self, parsed_args):
self.app.LOG.debug("Renaming baremetal compute nodes")
playbooks = _build_playbook_list("baremetal-compute-rename")
self.run_kayobe_playbooks(parsed_args, playbooks)
class BaremetalComputeSerialConsoleBase(KayobeAnsibleMixin, VaultMixin,
Command):
"""Base class for the baremetal serial console commands"""
@staticmethod
def process_limit(parsed_args, extra_vars):
if parsed_args.baremetal_compute_limit:
extra_vars["console_compute_node_limit"] = (
parsed_args.baremetal_compute_limit
)
def get_parser(self, prog_name):
parser = super(BaremetalComputeSerialConsoleBase, self).get_parser(
prog_name)
group = parser.add_argument_group("Baremetal Serial Consoles")
group.add_argument("--baremetal-compute-limit",
help="Limit the change to the hosts specified in "
"this limit"
)
return parser
class BaremetalComputeSerialConsoleEnable(BaremetalComputeSerialConsoleBase):
"""Enable Serial Console for Baremetal Compute Nodes"""
def take_action(self, parsed_args):
self.app.LOG.debug("Enabling serial console for ironic nodes")
extra_vars = {}
BaremetalComputeSerialConsoleBase.process_limit(parsed_args,
extra_vars)
extra_vars["cmd"] = "enable"
playbooks = _build_playbook_list("baremetal-compute-serial-console")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class BaremetalComputeSerialConsoleDisable(BaremetalComputeSerialConsoleBase):
"""Disable Serial Console for Baremetal Compute Nodes"""
def take_action(self, parsed_args):
self.app.LOG.debug("Disable serial console for ironic nodes")
extra_vars = {}
BaremetalComputeSerialConsoleBase.process_limit(parsed_args,
extra_vars)
extra_vars["cmd"] = "disable"
playbooks = _build_playbook_list("baremetal-compute-serial-console")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class BaremetalComputeUpdateDeploymentImage(KayobeAnsibleMixin, VaultMixin,
Command):
"""Update the Ironic nodes to use the new kernel and ramdisk images."""
def get_parser(self, prog_name):
parser = super(BaremetalComputeUpdateDeploymentImage, self).get_parser(
prog_name)
group = parser.add_argument_group("Baremetal Compute Update")
group.add_argument("--baremetal-compute-limit",
help="Limit the upgrade to the hosts specified in "
"this limit"
)
return parser
def take_action(self, parsed_args):
self.app.LOG.debug(
"Upgrading the ironic nodes to use the latest deployment images")
playbooks = _build_playbook_list("overcloud-ipa-images")
extra_vars = {}
extra_vars["ipa_images_update_ironic_nodes"] = True
if parsed_args.baremetal_compute_limit:
extra_vars["ipa_images_compute_node_limit"] = (
parsed_args.baremetal_compute_limit
)
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars)
class EnvironmentCreate(KayobeAnsibleMixin, VaultMixin, Command):
"""Create a new Kayobe environment."""
def get_parser(self, prog_name):
parser = super(EnvironmentCreate, self).get_parser(prog_name)
group = parser.add_argument_group("Kayobe Environments")
environment.add_args(group)
return parser
def take_action(self, parsed_args):
self.app.LOG.debug("Creating new Kayobe environment")
if not parsed_args.environment:
self.app.LOG.error("An environment must be specified")
sys.exit(1)
source_config_path = parsed_args.source_config_path
if source_config_path:
result = utils.is_readable_dir(source_config_path)
if not result["result"]:
self.app.LOG.error("Kayobe configuration %s is invalid: %s",
source_config_path, result["message"])
sys.exit(1)
try:
environment.create_kayobe_environment(parsed_args)
except Exception as e:
self.app.LOG.error("Failed to create environment %s: %s",
parsed_args.environment, repr(e))
sys.exit(1)
| 42.685457
| 79
| 0.64887
|
0830f1d84477d4ad8126f1719d7a647b6faccde0
| 1,325
|
py
|
Python
|
benchmarks/benchmark.py
|
yarray/hpdbscan
|
c01f7a2657e91365fb846bbb492d5b66434160e8
|
[
"MIT"
] | 20
|
2020-01-17T09:39:52.000Z
|
2022-01-11T05:00:58.000Z
|
benchmarks/benchmark.py
|
yarray/hpdbscan
|
c01f7a2657e91365fb846bbb492d5b66434160e8
|
[
"MIT"
] | 3
|
2020-03-24T09:17:37.000Z
|
2021-07-14T19:40:38.000Z
|
benchmarks/benchmark.py
|
yarray/hpdbscan
|
c01f7a2657e91365fb846bbb492d5b66434160e8
|
[
"MIT"
] | 3
|
2021-08-09T14:44:26.000Z
|
2022-03-29T02:23:42.000Z
|
#!/usr/bin/env python
import numpy as np
import subprocess
import sys
import time
DATASET_PARAMETERS = {  # eps, min_points
#'bremen_small.h5': (100, 312),
'iris.h5': (0.32, 3),
#'twitter_small.h5': (0.01, 40),
}
TRIALS = 10
def run_benchmark(command, log_path):
    # Redirect Python-level prints to the log file for the duration of the run.
    # Note that output written directly by the benchmarked subprocesses is not
    # captured by this redirection.
    sys_stdout, sys_stderr = sys.stdout, sys.stderr
    log_handle = open(log_path, 'w')
    sys.stdout, sys.stderr = log_handle, log_handle
for dataset, parameters in DATASET_PARAMETERS.items():
eps, min_points = parameters
timings = np.empty((TRIALS,))
print('Running benchmarks for', dataset)
for i in range(TRIALS):
start = time.perf_counter()
subprocess.run(command.format(dataset=dataset, eps=eps, min_points=min_points), shell=True)
end = time.perf_counter()
timings[i] = end - start
print('\t', i, timings[i])
print('Average:', timings.mean(), ' Deviation:', timings.std())
print('')
    sys.stdout, sys.stderr = sys_stdout, sys_stderr
    log_handle.close()
if __name__ == '__main__':
run_benchmark('./sklearn-dbscan.py {dataset} -e {eps} -m {min_points}', 'sklearn.log')
run_benchmark('../build/hpdbscan -i {dataset} --input-dataset DBSCAN -o output.h5 --output-dataset CLUSTERS -e {eps} -m {min_points}', 'hpdbscan.log')
| 31.547619
| 154
| 0.632453
|
d14279175ca353e6e9a56577e034a22deb30b0fd
| 6,160
|
py
|
Python
|
hydrus/core/HydrusRatingArchive.py
|
thatfuckingbird/hydrus-websocket-server
|
b55454740dca5101448bf92224432f8bdbec7e77
|
[
"WTFPL"
] | 1,417
|
2015-01-22T00:50:30.000Z
|
2022-03-30T18:44:55.000Z
|
hydrus/core/HydrusRatingArchive.py
|
thatfuckingbird/hydrus-websocket-server
|
b55454740dca5101448bf92224432f8bdbec7e77
|
[
"WTFPL"
] | 975
|
2015-01-05T01:41:40.000Z
|
2022-03-31T06:01:50.000Z
|
hydrus/core/HydrusRatingArchive.py
|
thatfuckingbird/hydrus-websocket-server
|
b55454740dca5101448bf92224432f8bdbec7e77
|
[
"WTFPL"
] | 163
|
2015-02-04T13:09:35.000Z
|
2022-03-23T01:00:05.000Z
|
import os
import sqlite3
HASH_TYPE_MD5 = 0 # 16 bytes long
HASH_TYPE_SHA1 = 1 # 20 bytes long
HASH_TYPE_SHA256 = 2 # 32 bytes long
HASH_TYPE_SHA512 = 3 # 64 bytes long
# Please feel free to use this file however you wish.
# None of this is thread-safe, though, so don't try to do anything clever.
# A rating for hydrus is a float from 0.0 to 1.0
# dislike/like are 0.0 and 1.0
# numerical are fractions between 0.0 and 1.0
# for a four-star rating that allows 0 stars, the 5 possibles are: 0.0, 0.25, 0.5, 0.75, 1.0
# for a three-star rating that does not allow 0 stars, the three possibles are: 0.0, 0.5, 1.0
# in truth, at our level:
# a five-star rating that does allow 0 stars is effectively a six-star rating
# a ten-star rating that does not allow 0 stars is a ten-star rating
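# For illustration only (an assumed helper, not part of this file): the mapping
# above can be expressed as a conversion from a star count to the 0.0-1.0 float
# expected by SetRating.
# def stars_to_rating( stars, number_of_stars, allow_zero = True ):
#     if allow_zero:
#         return stars / number_of_stars # e.g. 2 of 4 stars -> 0.5
#     else:
#         return ( stars - 1 ) / ( number_of_stars - 1 ) # e.g. 2 of 3 stars -> 0.5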
# If you want to make a new rating archive for use in hydrus, you want to do something like:
# import HydrusRatingArchive
# hra = HydrusRatingArchive.HydrusRatingArchive( 'my_little_archive.db' )
# hra.SetHashType( HydrusRatingArchive.HASH_TYPE_MD5 )
# hra.SetNumberOfStars( 5 )
# hra.BeginBigJob()
# for ( hash, rating ) in my_rating_generator: hra.AddRating( hash, rating )
# hra.CommitBigJob()
# del hra
# If you are only adding a couple ratings, you can exclude the BigJob stuff. It just makes millions of sequential writes more efficient.
# Also, this manages hashes as bytes, not hex, so if you have something like:
# hash = ab156e87c5d6e215ab156e87c5d6e215
# Then go hash = bytes.fromhex( hash ) before you pass it to Add/Get/Has/SetRating
# And also feel free to contact me directly at hydrus.admin@gmail.com if you need help.
class HydrusRatingArchive( object ):
def __init__( self, path ):
self._path = path
if not os.path.exists( self._path ): create_db = True
else: create_db = False
self._InitDBConnection()
if create_db: self._InitDB()
def _InitDB( self ):
self._c.execute( 'CREATE TABLE hash_type ( hash_type INTEGER );', )
self._c.execute( 'CREATE TABLE number_of_stars ( number_of_stars INTEGER );', )
self._c.execute( 'CREATE TABLE ratings ( hash BLOB PRIMARY KEY, rating REAL );' )
def _InitDBConnection( self ):
self._db = sqlite3.connect( self._path, isolation_level = None, detect_types = sqlite3.PARSE_DECLTYPES )
self._c = self._db.cursor()
def BeginBigJob( self ):
self._c.execute( 'BEGIN IMMEDIATE;' )
def CommitBigJob( self ):
self._c.execute( 'COMMIT;' )
self._c.execute( 'VACUUM;' )
def DeleteRating( self, hash ):
self._c.execute( 'DELETE FROM ratings WHERE hash = ?;', ( sqlite3.Binary( hash ), ) )
def GetHashType( self ):
result = self._c.execute( 'SELECT hash_type FROM hash_type;' ).fetchone()
if result is None:
            # No explicit hash type is stored, so guess it from the length of a stored hash.
            result = self._c.execute( 'SELECT hash FROM ratings;' ).fetchone()
            if result is None:
                raise Exception( 'This archive has no hash type set, and as it has no files, no hash type guess can be made.' )
            ( hash, ) = result
            if len( hash ) == 16: hash_type = HASH_TYPE_MD5
elif len( hash ) == 20: hash_type = HASH_TYPE_SHA1
elif len( hash ) == 32: hash_type = HASH_TYPE_SHA256
elif len( hash ) == 64: hash_type = HASH_TYPE_SHA512
else:
raise Exception( 'This archive has non-standard hashes. Something is wrong.' )
self.SetHashType( hash_type )
return hash_type
else:
( hash_type, ) = result
return hash_type
def GetName( self ):
filename = os.path.basename( self._path )
if '.' in filename:
filename = filename.split( '.', 1 )[0]
return filename
def GetNumberOfStars( self ):
result = self._c.execute( 'SELECT number_of_stars FROM number_of_stars;' ).fetchone()
if result is None:
raise Exception( 'This rating archive has no number of stars set.' )
else:
( number_of_stars, ) = result
return number_of_stars
def GetRating( self, hash ):
result = self._c.execute( 'SELECT rating FROM ratings WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
if result is None:
return None
else:
( rating, ) = result
return rating
def HasHash( self, hash ):
result = self._c.execute( 'SELECT 1 FROM ratings WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
if result is None:
return False
else:
return True
def IterateRatings( self ):
for row in self._c.execute( 'SELECT hash, rating FROM ratings;' ):
yield row
def SetHashType( self, hash_type ):
self._c.execute( 'DELETE FROM hash_type;' )
self._c.execute( 'INSERT INTO hash_type ( hash_type ) VALUES ( ? );', ( hash_type, ) )
def SetNumberOfStars( self, number_of_stars ):
self._c.execute( 'DELETE FROM number_of_stars;' )
self._c.execute( 'INSERT INTO number_of_stars ( number_of_stars ) VALUES ( ? );', ( number_of_stars, ) )
def SetRating( self, hash, rating ):
self._c.execute( 'REPLACE INTO ratings ( hash, rating ) VALUES ( ?, ? );', ( sqlite3.Binary( hash ), rating ) )
| 29.333333
| 136
| 0.548539
|
9fa7f9ee236b04eb8bf55d3b72cf365ab1ed7181
| 3,814
|
py
|
Python
|
eval.py
|
Aralas/icassp19
|
5f54e7d6b9818fabf63e87be22786a45c6b2c9fc
|
[
"MIT"
] | 93
|
2018-12-08T06:11:48.000Z
|
2022-03-31T08:02:32.000Z
|
eval.py
|
DiegoOrtego/icassp19
|
3d7ed3e58c99d4da1e1690401018ee227886d162
|
[
"MIT"
] | 2
|
2019-07-11T14:18:25.000Z
|
2019-08-05T04:20:41.000Z
|
eval.py
|
DiegoOrtego/icassp19
|
3d7ed3e58c99d4da1e1690401018ee227886d162
|
[
"MIT"
] | 27
|
2019-02-27T13:36:31.000Z
|
2021-11-30T22:59:52.000Z
|
import numpy as np
def get_accuracy(actual=None, predicted=None):
"""Computes accuracy, done with strings"""
if predicted == actual:
return 1.0
else:
return 0.0
class Evaluator (object):
def __init__(self, gt=None, predictions=None, list_labels=None, params_ctrl=None, params_files=None):
self.gt = gt
self.predictions = predictions
self.list_labels = list_labels
self.train_data = params_ctrl['train_data']
def evaluate_acc(self):
"""
input two dataframes to compare
:param gt:
:param predictions:
:return:
"""
print('\n=====Evaluating ACCURACY - MICRO on the {0} subset of the training set============================='.
format(self.train_data))
acc = {}
for index, row in self.predictions.iterrows():
pred_per_file = row['label']
temp = self.gt.loc[self.gt['fname'] == row['fname']]
for idx_gt, row_gt in temp.iterrows():
acc[row_gt['fname']] = get_accuracy(actual=row_gt['label'], predicted=pred_per_file)
sum_acc = 0
for f_name, score in acc.items():
sum_acc += score
self.mean_acc = (sum_acc / len(acc))*100
print('Number of files evaluated: %d' % len(acc))
print('Mean Accuracy for files evaluated: %5.2f' % self.mean_acc)
def evaluate_acc_classwise(self):
"""
input two dataframes to compare
:param gt:
:param predictions:
:return:
"""
print('\n=====Evaluating ACCURACY - PER CLASS ======================================================')
scores = {key: {'nb_files': 0, 'acc_cum': 0} for key in self.list_labels}
for idx_gt, row_gt in self.gt.iterrows():
predicted_match = self.predictions.loc[self.predictions['fname'] == row_gt['fname']]
for idx_pred, row_pred in predicted_match.iterrows():
pred_per_file = row_pred['label']
scores[row_gt['label']]['nb_files'] += 1
# computing ACCURACY and saving it in the due class
scores[row_gt['label']]['acc_cum'] += get_accuracy(actual=row_gt['label'], predicted=pred_per_file)
total = 0
perclass_acc = []
for label, v in scores.items():
# If encounter 0 accuracy, don't want program to crash on divide by zero
if v['nb_files'] == 0:
mean_acc = 0
else:
mean_acc = (v['acc_cum'] / v['nb_files'])*100
print('%-21s | number of files in total: %-4d | Accuracy: %6.3f' % (label, v['nb_files'], mean_acc))
perclass_acc.append(mean_acc)
total += v['nb_files']
print('Total number of files: %d' % total)
print('\n=====Printing sorted classes for ACCURACY - PER CLASS ========================================')
perclass_acc_np = np.array(perclass_acc)
idx_sort = np.argsort(-perclass_acc_np)
for i in range(len(self.list_labels)):
print('%-21s | number of files in total: %-4d | Accuracy: %6.3f' %
(self.list_labels[idx_sort[i]], scores[self.list_labels[idx_sort[i]]]['nb_files'],
perclass_acc[idx_sort[i]]))
def print_summary_eval(self):
print('\n=====================================================================================================')
print('=====================================================================================================')
print('SUMMARY of evaluation:')
print('Mean Accuracy for files evaluated: %5.2f' % self.mean_acc)
print('\n=====================================================================================================')
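# Minimal usage sketch (assumed, not part of the original project): gt and
# predictions are pandas DataFrames with 'fname' and 'label' columns, and
# params_ctrl only needs a 'train_data' entry for the summary message.
# evaluator = Evaluator(gt=gt_df, predictions=pred_df, list_labels=labels,
#                       params_ctrl={'train_data': 'all'})
# evaluator.evaluate_acc()
# evaluator.evaluate_acc_classwise()
# evaluator.print_summary_eval()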
| 41.912088
| 120
| 0.509963
|
8fbd20af865117290482fc8ccb3c07dd1b3dd109
| 410
|
py
|
Python
|
dlive/errors.py
|
A-Trash-Coder/dlive.py
|
17246194e326566b47f1d4258e9acf658ef6ea08
|
[
"MIT"
] | 4
|
2020-09-14T09:27:39.000Z
|
2021-07-01T05:33:09.000Z
|
dlive/errors.py
|
A-Trash-Coder/dlive.py
|
17246194e326566b47f1d4258e9acf658ef6ea08
|
[
"MIT"
] | null | null | null |
dlive/errors.py
|
A-Trash-Coder/dlive.py
|
17246194e326566b47f1d4258e9acf658ef6ea08
|
[
"MIT"
] | null | null | null |
class DLivePyException(BaseException):
pass
class ConnectionError(DLivePyException):
pass
class HttpException(DLivePyException):
pass
class Forbidden(DLivePyException):
pass
class CommandError(DLivePyException):
pass
class MissingRequiredArgument(DLivePyException):
pass
class BadArgument(DLivePyException):
pass
class RequiresAuthorization(DLivePyException):
pass
| 13.666667
| 48
| 0.773171
|
c97d8026b58a90a9b2f533e19fa5ce64006a0fa0
| 11,972
|
py
|
Python
|
aesara/d3viz/formatting.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | 1
|
2021-12-30T00:44:32.000Z
|
2021-12-30T00:44:32.000Z
|
aesara/d3viz/formatting.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | null | null | null |
aesara/d3viz/formatting.py
|
sagartomar/aesara
|
477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Functions for formatting Aesara compute graphs.
Author: Christof Angermueller <cangermueller@gmail.com>
"""
import os
from functools import reduce
import numpy as np
import aesara
from aesara.compile import Function, builders
from aesara.graph.basic import Apply, Constant, Variable, graph_inputs
from aesara.graph.fg import FunctionGraph
from aesara.printing import pydot_imported, pydot_imported_msg
try:
from aesara.printing import pd
except ImportError:
pass
class PyDotFormatter:
"""Create `pydot` graph object from Aesara function.
Parameters
----------
compact : bool
if True, will remove intermediate variables without name.
Attributes
----------
node_colors : dict
Color table of node types.
apply_colors : dict
Color table of apply nodes.
shapes : dict
Shape table of node types.
"""
def __init__(self, compact=True):
"""Construct PyDotFormatter object."""
if not pydot_imported:
raise ImportError("Failed to import pydot. " + pydot_imported_msg)
self.compact = compact
self.node_colors = {
"input": "limegreen",
"constant_input": "SpringGreen",
"shared_input": "YellowGreen",
"output": "dodgerblue",
"unused": "lightgrey",
}
self.apply_colors = {
"GpuFromHost": "red",
"HostFromGpu": "red",
"Scan": "yellow",
"Shape": "cyan",
"IfElse": "magenta",
"Elemwise": "#FFAABB", # dark pink
"Subtensor": "#FFAAFF", # purple
"Alloc": "#FFAA22",
} # orange
self.shapes = {"input": "box", "output": "box", "apply": "ellipse"}
self.__node_prefix = "n"
def __add_node(self, node):
"""Add new node to node list and return unique id.
Parameters
----------
node : Aesara graph node
Apply node, tensor variable, or shared variable in compute graph.
Returns
-------
str
Unique node id.
"""
assert node not in self.__nodes
_id = f"{self.__node_prefix}{len(self.__nodes) + 1}"
self.__nodes[node] = _id
return _id
def __node_id(self, node):
"""Return unique node id.
Parameters
----------
node : Aesara graph node
Apply node, tensor variable, or shared variable in compute graph.
Returns
-------
str
Unique node id.
"""
if node in self.__nodes:
return self.__nodes[node]
else:
return self.__add_node(node)
def __call__(self, fct, graph=None):
"""Create pydot graph from function.
Parameters
----------
fct : aesara.compile.function.types.Function
A compiled Aesara function, variable, apply or a list of variables.
graph: pydot.Dot
`pydot` graph to which nodes are added. Creates new one if
undefined.
Returns
-------
pydot.Dot
Pydot graph of `fct`
"""
if graph is None:
graph = pd.Dot()
self.__nodes = {}
profile = None
if isinstance(fct, Function):
profile = getattr(fct, "profile", None)
fgraph = fct.maker.fgraph
elif isinstance(fct, FunctionGraph):
fgraph = fct
else:
if isinstance(fct, Variable):
fct = [fct]
elif isinstance(fct, Apply):
fct = fct.outputs
assert isinstance(fct, (list, tuple))
assert all(isinstance(v, Variable) for v in fct)
fgraph = FunctionGraph(inputs=graph_inputs(fct), outputs=fct)
outputs = fgraph.outputs
topo = fgraph.toposort()
outputs = list(outputs)
# Loop over apply nodes
for node in topo:
nparams = {}
__node_id = self.__node_id(node)
nparams["name"] = __node_id
nparams["label"] = apply_label(node)
nparams["profile"] = apply_profile(fgraph, node, profile)
nparams["node_type"] = "apply"
nparams["apply_op"] = nparams["label"]
nparams["shape"] = self.shapes["apply"]
use_color = None
for opName, color in self.apply_colors.items():
if opName in node.op.__class__.__name__:
use_color = color
if use_color:
nparams["style"] = "filled"
nparams["fillcolor"] = use_color
nparams["type"] = "colored"
pd_node = dict_to_pdnode(nparams)
graph.add_node(pd_node)
# Loop over input nodes
for id, var in enumerate(node.inputs):
var_id = self.__node_id(var.owner if var.owner else var)
if var.owner is None:
vparams = {
"name": var_id,
"label": var_label(var),
"node_type": "input",
}
if isinstance(var, Constant):
vparams["node_type"] = "constant_input"
elif isinstance(var, aesara.tensor.sharedvar.TensorSharedVariable):
vparams["node_type"] = "shared_input"
vparams["dtype"] = type_to_str(var.type)
vparams["tag"] = var_tag(var)
vparams["style"] = "filled"
vparams["fillcolor"] = self.node_colors[vparams["node_type"]]
vparams["shape"] = self.shapes["input"]
pd_var = dict_to_pdnode(vparams)
graph.add_node(pd_var)
edge_params = {}
if node.op.view_map and id in reduce(
list.__add__, node.op.view_map.values(), []
):
edge_params["color"] = self.node_colors["output"]
elif node.op.destroy_map and id in reduce(
list.__add__, node.op.destroy_map.values(), []
):
edge_params["color"] = "red"
edge_label = vparams["dtype"]
if len(node.inputs) > 1:
edge_label = str(id) + " " + edge_label
pdedge = pd.Edge(var_id, __node_id, label=edge_label, **edge_params)
graph.add_edge(pdedge)
# Loop over output nodes
for id, var in enumerate(node.outputs):
var_id = self.__node_id(var)
if var in outputs or len(fgraph.clients[var]) == 0:
vparams = {
"name": var_id,
"label": var_label(var),
"node_type": "output",
"dtype": type_to_str(var.type),
"tag": var_tag(var),
"style": "filled",
}
if len(fgraph.clients[var]) == 0:
vparams["fillcolor"] = self.node_colors["unused"]
else:
vparams["fillcolor"] = self.node_colors["output"]
vparams["shape"] = self.shapes["output"]
pd_var = dict_to_pdnode(vparams)
graph.add_node(pd_var)
graph.add_edge(pd.Edge(__node_id, var_id, label=vparams["dtype"]))
                elif var.name or not self.compact:
                    # vparams is only defined in the branch above, so build the
                    # edge label directly from the variable's type here.
                    graph.add_edge(
                        pd.Edge(__node_id, var_id, label=type_to_str(var.type))
                    )
# Create sub-graph for OpFromGraph nodes
if isinstance(node.op, builders.OpFromGraph):
subgraph = pd.Cluster(__node_id)
gf = PyDotFormatter()
# Use different node prefix for sub-graphs
gf.__node_prefix = __node_id
node.op.prepare_node(node, None, None, "py")
gf(node.op.fn, subgraph)
graph.add_subgraph(subgraph)
pd_node.get_attributes()["subg"] = subgraph.get_name()
def format_map(m):
return str([list(x) for x in m])
# Inputs mapping
ext_inputs = [self.__node_id(x) for x in node.inputs]
int_inputs = [gf.__node_id(x) for x in node.op.local_inputs]
assert len(ext_inputs) == len(int_inputs)
h = format_map(zip(ext_inputs, int_inputs))
pd_node.get_attributes()["subg_map_inputs"] = h
# Outputs mapping
ext_outputs = [self.__node_id(x) for x in node.outputs]
int_outputs = [gf.__node_id(x) for x in node.op.local_outputs]
assert len(ext_outputs) == len(int_outputs)
h = format_map(zip(int_outputs, ext_outputs))
pd_node.get_attributes()["subg_map_outputs"] = h
return graph
def var_label(var, precision=3):
"""Return label of variable node."""
if var.name is not None:
return var.name
elif isinstance(var, Constant):
h = np.asarray(var.data)
is_const = False
if h.ndim == 0:
is_const = True
h = np.array([h])
dstr = np.array2string(h, precision=precision)
if "\n" in dstr:
dstr = dstr[: dstr.index("\n")]
if is_const:
dstr = dstr.replace("[", "").replace("]", "")
return dstr
else:
return type_to_str(var.type)
def var_tag(var):
"""Parse tag attribute of variable node."""
tag = var.tag
if hasattr(tag, "trace") and len(tag.trace) and len(tag.trace[0]) == 4:
if isinstance(tag.trace[0][0], (tuple, list)):
path, line, _, src = tag.trace[0][-1]
else:
path, line, _, src = tag.trace[0]
path = os.path.basename(path)
path = path.replace("<", "")
path = path.replace(">", "")
src = src.encode()
return [path, line, src]
else:
return None
def apply_label(node):
"""Return label of apply node."""
return node.op.__class__.__name__
def apply_profile(fgraph, node, profile):
"""Return apply profiling information."""
if not profile or profile.fct_call_time == 0:
return None
time = profile.apply_time.get((fgraph, node), 0)
call_time = profile.fct_call_time
return [time, call_time]
def broadcastable_to_str(b):
"""Return string representation of broadcastable."""
named_broadcastable = {
(): "scalar",
(False,): "vector",
(False, True): "col",
(True, False): "row",
(False, False): "matrix",
}
if b in named_broadcastable:
bcast = named_broadcastable[b]
else:
bcast = ""
return bcast
def dtype_to_char(dtype):
"""Return character that represents data type."""
dtype_char = {
"complex64": "c",
"complex128": "z",
"float32": "f",
"float64": "d",
"int8": "b",
"int16": "w",
"int32": "i",
"int64": "l",
}
if dtype in dtype_char:
return dtype_char[dtype]
else:
return "X"
def type_to_str(t):
"""Return str of variable type."""
if not hasattr(t, "broadcastable"):
return str(t)
s = broadcastable_to_str(t.broadcastable)
if s == "":
s = str(t.dtype)
else:
s = dtype_to_char(t.dtype) + s
return s
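# Illustrative sketch (not part of the original source): type_to_str() combines
# dtype_to_char() and broadcastable_to_str(), e.g. a type with dtype "float64"
# and broadcastable (False, False) is rendered as "dmatrix", while a type
# without a `broadcastable` attribute falls back to plain str(t).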
def dict_to_pdnode(d):
"""Create pydot node from dict."""
e = dict()
for k, v in d.items():
if v is not None:
if isinstance(v, list):
v = "\t".join([str(x) for x in v])
else:
v = str(v)
v = str(v)
v = v.replace('"', "'")
e[k] = v
pynode = pd.Node(**e)
return pynode
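# Illustrative sketch (not part of the original source): dict_to_pdnode() drops
# None values, joins list values with tabs, and swaps double quotes for single
# quotes so the attributes stay safe for the dot format, e.g.
#   dict_to_pdnode({"name": "n1", "label": 'say "hi"', "tag": ["a.py", 3]})
# yields a pydot Node whose label is "say 'hi'" and whose tag is "a.py\t3".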
| 32.269542 | 87 | 0.520214 |
596fffb4ada77ad3b96a3a02ff70535b32174100 | 12,464 | py | Python |
appengine/findit/findit_v2/services/analysis/compile_failure/compile_analysis_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
appengine/findit/findit_v2/services/analysis/compile_failure/compile_analysis_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
appengine/findit/findit_v2/services/analysis/compile_failure/compile_analysis_api.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Special logic of pre compile analysis.
Build with compile failures will be pre-processed to determine if a new compile
analysis is needed or not.
"""
import logging
from google.appengine.ext import ndb
from google.protobuf.field_mask_pb2 import FieldMask
from services import gerrit
from services import git
from services import deps
from waterfall import waterfall_config
from common.waterfall import buildbucket_client
from findit_v2.model import luci_build
from findit_v2.model import compile_failure
from findit_v2.model.compile_failure import CompileFailure
from findit_v2.model.compile_failure import CompileFailureAnalysis
from findit_v2.model.compile_failure import CompileFailureGroup
from findit_v2.model.compile_failure import CompileRerunBuild
from findit_v2.model.culprit_action import CulpritAction
from findit_v2.services import build_util
from findit_v2.services import constants
from findit_v2.services import projects
from findit_v2.services.analysis.analysis_api import AnalysisAPI
from findit_v2.services.failure_type import StepTypeEnum
class CompileAnalysisAPI(AnalysisAPI):
@property
def step_type(self):
return StepTypeEnum.COMPILE
def _GetMergedFailureKey(self, failure_entities, referred_build_id,
step_ui_name, atomic_failure):
return CompileFailure.GetMergedFailureKey(
failure_entities, referred_build_id, step_ui_name, atomic_failure)
def _GetFailuresInBuild(self, project_api, build, failed_steps):
return project_api.GetCompileFailures(build, failed_steps)
def _GetFailuresWithMatchingFailureGroups(self, project_api, context, build,
first_failures_in_current_build):
return project_api.GetFailuresWithMatchingCompileFailureGroups(
context, build, first_failures_in_current_build)
def _CreateFailure(self, failed_build_key, step_ui_name,
first_failed_build_id, last_passed_build_id,
merged_failure_key, atomic_failure, properties):
"""Creates a CompileFailure entity."""
return CompileFailure.Create(
failed_build_key=failed_build_key,
step_ui_name=step_ui_name,
output_targets=list(atomic_failure or []),
rule=(properties or {}).get('rule'),
first_failed_build_id=first_failed_build_id,
last_passed_build_id=last_passed_build_id,
# Default to first_failed_build_id, will be updated later if matching
# group exists.
failure_group_build_id=first_failed_build_id,
merged_failure_key=merged_failure_key,
properties=properties)
def GetFailureEntitiesForABuild(self, build):
compile_failure_entities = CompileFailure.query(
ancestor=ndb.Key(luci_build.LuciFailedBuild, build.id)).fetch()
assert compile_failure_entities, (
'No compile failure saved in datastore for build {}'.format(build.id))
return compile_failure_entities
def _CreateFailureGroup(self, context, build, compile_failure_keys,
last_passed_gitiles_id, last_passed_commit_position,
first_failed_commit_position):
group_entity = CompileFailureGroup.Create(
luci_project=context.luci_project_name,
luci_bucket=build.builder.bucket,
build_id=build.id,
gitiles_host=context.gitiles_host,
gitiles_project=context.gitiles_project,
gitiles_ref=context.gitiles_ref,
last_passed_gitiles_id=last_passed_gitiles_id,
last_passed_commit_position=last_passed_commit_position,
first_failed_gitiles_id=context.gitiles_id,
first_failed_commit_position=first_failed_commit_position,
compile_failure_keys=compile_failure_keys)
return group_entity
def _CreateFailureAnalysis(
self, luci_project, context, build, last_passed_gitiles_id,
last_passed_commit_position, first_failed_commit_position,
rerun_builder_id, compile_failure_keys):
analysis = CompileFailureAnalysis.Create(
luci_project=luci_project,
luci_bucket=build.builder.bucket,
luci_builder=build.builder.builder,
build_id=build.id,
gitiles_host=context.gitiles_host,
gitiles_project=context.gitiles_project,
gitiles_ref=context.gitiles_ref,
last_passed_gitiles_id=last_passed_gitiles_id,
last_passed_commit_position=last_passed_commit_position,
first_failed_gitiles_id=context.gitiles_id,
first_failed_commit_position=first_failed_commit_position,
rerun_builder_id=rerun_builder_id,
compile_failure_keys=compile_failure_keys)
return analysis
def _GetFailuresInAnalysis(self, analysis):
return ndb.get_multi(analysis.compile_failure_keys)
def _FetchRerunBuildsOfAnalysis(self, analysis):
return CompileRerunBuild.query(ancestor=analysis.key).order(
CompileRerunBuild.gitiles_commit.commit_position).fetch()
def _GetFailureAnalysis(self, analyzed_build_id):
analysis = CompileFailureAnalysis.GetVersion(analyzed_build_id)
assert analysis, 'Failed to get CompileFailureAnalysis for build {}'.format(
analyzed_build_id)
return analysis
def _GetFailuresToRerun(self, failure_entities):
return compile_failure.GetFailedTargets(failure_entities)
def _GetExistingRerunBuild(self, analysis_key, rerun_commit):
return CompileRerunBuild.SearchBuildOnCommit(analysis_key, rerun_commit)
def _CreateRerunBuild(self, rerun_builder, new_build, rerun_commit,
analysis_key):
return CompileRerunBuild.Create(
luci_project=rerun_builder.project,
luci_bucket=rerun_builder.bucket,
luci_builder=rerun_builder.builder,
build_id=new_build.id,
legacy_build_number=new_build.number,
gitiles_host=rerun_commit.gitiles_host,
gitiles_project=rerun_commit.gitiles_project,
gitiles_ref=rerun_commit.gitiles_ref,
gitiles_id=rerun_commit.gitiles_id,
commit_position=rerun_commit.commit_position,
status=new_build.status,
create_time=new_build.create_time.ToDatetime(),
parent_key=analysis_key)
def _GetRerunBuildTags(self, analyzed_build_id):
return [
{
'key': constants.RERUN_BUILD_PURPOSE_TAG_KEY,
'value': constants.COMPILE_RERUN_BUILD_PURPOSE,
},
{
'key': constants.ANALYZED_BUILD_ID_TAG_KEY,
'value': str(analyzed_build_id),
},
]
def _GetRerunBuildInputProperties(self, project_api, rerun_failures,
analyzed_build_id):
return project_api.GetCompileRerunBuildInputProperties(
rerun_failures, analyzed_build_id)
def GetSuspectedCulprits(self, project_api, context, build,
first_failures_in_current_build):
failure_info = project_api.GetCompileFailureInfo(
context, build, first_failures_in_current_build)
# Projects that support heuristic analysis for compile must implement
# GetCompileFailureInfo.
if failure_info:
signals = project_api.ExtractSignalsForCompileFailure(failure_info)
change_logs = git.PullChangeLogs(
first_failures_in_current_build['last_passed_build']['commit_id'],
context.gitiles_id)
deps_info = deps.ExtractDepsInfo(failure_info, change_logs)
return project_api.HeuristicAnalysisForCompile(failure_info, change_logs,
deps_info, signals)
return None
def _GetFailureGroupByContext(self, context):
groups = CompileFailureGroup.query(
CompileFailureGroup.luci_project == context.luci_project_name).filter(
CompileFailureGroup.first_failed_commit.gitiles_id == context
.gitiles_id).fetch()
return groups[0] if groups else None
def OnCulpritFound(self, context, analyzed_build_id, culprit):
"""Decides and executes the action for the found culprit change.
    The possible actions include:
- No action.
- Notify the culprit CL.
    - Create a revert and request that it be reviewed.
- Create a revert and submit it.
Selecting the appropriate action will be based on the project's configured
options and daily limits as well as whether the action can be taken safely.
Refer to the code below for details.
Args:
context (findit_v2.services.context.Context): Scope of the analysis.
analyzed_build_id: Buildbucket id of the continuous build being analyzed.
culprit: The Culprit entity for the change identified as causing the
failures.
Returns:
The CulpritAction entity describing the action taken, None if no action
was performed.
"""
project_api = projects.GetProjectAPI(context.luci_project_name)
project_config = projects.PROJECT_CFG.get(context.luci_project_name, {})
action_settings = waterfall_config.GetActionSettings()
if not action_settings.get('v2_actions', False):
logging.info('V2 auto-action flow globally disabled')
return None
if not project_config.get('auto_actions_enabled_for_project', False):
return self._NoAction(culprit, 'Auto-actions disabled for project')
if not build_util.AllLaterBuildsHaveOverlappingFailure(
context, analyzed_build_id, culprit):
return self._NoAction(culprit, 'Build has recovered')
change_info, gerrit_client = (
project_api.gerrit_actions.ChangeInfoAndClientFromCommit(culprit))
cl_details = gerrit_client.GetClDetails(change_info['review_change_id'])
if bool(cl_details.revert_of):
return self._Notify(project_api, culprit, 'The culprit is a revert')
reverted, by_findit = self._CheckIfReverted(
cl_details, culprit,
project_config.get('auto_actions_service_account', ''))
if reverted and by_findit:
return self._NoAction(culprit,
'We already created a revert for this culprit')
if reverted:
return self._Notify(
project_api,
culprit,
'A revert was manually created for this culprit',
silent=True)
if CulpritAction.GetRecentActionsByType(
CulpritAction.REVERT, revert_committed=False) >= action_settings.get(
'auto_create_revert_daily_threshold_compile', 10):
return self._Notify(project_api, culprit, 'Reached revert creation quota')
if not project_config.get('auto_revert_enabled_for_project', False):
return self._Notify(project_api, culprit,
'Auto-revert disabled for this project')
if cl_details.auto_revert_off:
return self._Notify(project_api, culprit,
'The culprit has been tagged with NOAUTOREVERT=True')
if gerrit.ExistCQedDependingChanges(change_info):
return self._Notify(project_api, culprit,
'Changes already in the CQ depend on culprit')
if not git.ChangeCommittedWithinTime(
culprit.gitiles_id,
repo_url=git.GetRepoUrlFromContext(context),
hours=project_config.get('max_revertible_culprit_age_hours', 24)):
return self._Notify(project_api, culprit,
'Culprit is too old to auto-revert')
if cl_details.owner_email in project_config.get(
'automated_account_whitelist', []):
return self._Notify(project_api, culprit,
'Culprit was created by a whitelisted account')
revert_description = self._ComposeRevertDescription(project_api, culprit)
if project_config.get('auto_commit_enabled_for_project', False):
if CulpritAction.GetRecentActionsByType(
CulpritAction.REVERT, revert_committed=True) < action_settings.get(
'auto_commit_revert_daily_threshold_compile', 4):
action = self._CommitRevert(project_api, revert_description, culprit)
if action:
return action
logging.info(
'Could not land revert automatically, requesting manual review')
else:
logging.info('Reached auto-commit quota, requesting manual review')
else:
logging.info('Auto-committing disabled, requesting manual review')
return self._RequestReview(project_api, revert_description, culprit)
| 41.546667 | 80 | 0.730263 |
e26bd903f28ba37dbbf8629ad1b697cb6e9bfc3f | 29,733 | py | Python |
xcs/bitstrings.py | crim-ca/xcs | 53304d234f667628a1cc4f1a982f9a1cb9b7d401 | ["BSD-3-Clause"] | null | null | null |
xcs/bitstrings.py | crim-ca/xcs | 53304d234f667628a1cc4f1a982f9a1cb9b7d401 | ["BSD-3-Clause"] | null | null | null |
xcs/bitstrings.py | crim-ca/xcs | 53304d234f667628a1cc4f1a982f9a1cb9b7d401 | ["BSD-3-Clause"] | 1 | 2019-02-08T07:48:28.000Z | 2019-02-08T07:48:28.000Z |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# xcs
# ---
# Accuracy-based Classifier Systems for Python 3
#
# http://hosford42.github.io/xcs/
#
# (c) Aaron Hosford 2015, all rights reserved
# Revised (3 Clause) BSD License
#
# Implements the XCS (Accuracy-based Classifier System) algorithm,
# as described in the 2001 paper, "An Algorithmic Description of XCS,"
# by Martin Butz and Stewart Wilson.
#
# -------------------------------------------------------------------------
"""
Accuracy-based Classifier Systems for Python 3
This xcs submodule provides bit-string and bit-condition data types used by
the XCS algorithm.
Pure Python versus numpy:
This submodule has two alternate implementations for the BitString
class. One is based on Python ints, has no external dependencies, and
is used by default. The other is based on numpy arrays, requires numpy
to be installed, and can be activated by calling use_numpy(). If you
change your mind, you can always switch back to the pure Python
implementation by calling use_pure_python(). If you're not sure which
one you're using, you can tell by calling using_numpy(). Before you
call use_numpy(), it is recommended that you verify that numpy is
available by calling numpy_is_available() to avoid an import error.
While it is safe to switch back and forth between implementations as
many times as you like, you must not mix BitString or BitCondition
instances from one implementation with those of the other; to do so may
lead to undefined behavior.
It is worth noting that the Python int-based and numpy array-based
implementations have (somewhat surprisingly) roughly comparable speeds.
In fact, on some systems, the Python-based implementation is visibly
faster. If you are concerned with speed, it is best to actually test
the two implementations on your system to see which is faster. If not,
the pure Python implementation, enabled by default, is recommended.
Copyright (c) 2015, Aaron Hosford
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of xcs nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'Aaron Hosford'
__all__ = [
# Classes
'BitCondition',
'BitString',
'BitConditionBase',
# Functions
'numpy_is_available',
'use_numpy',
'use_pure_python',
'using_numpy',
]
from abc import ABCMeta, abstractmethod
import random
import xcs
def numpy_is_available():
"""Return a Boolean indicating whether numpy can be imported.
Usage:
if numpy_is_available():
import numpy
Arguments: None
Return:
A bool indicating whether numpy can be imported.
"""
return xcs.numpy is not None
# IMPORTANT:
# This class must appear *before* the BitString class is imported. It is
# a rather ugly solution, but _numpy_bitstrings.py and
# _python_bitstrings.py import this module, and then this module imports
# one of them. This lets us switch back and forth as needed.
class BitStringBase(metaclass=ABCMeta):
"""Abstract base class for hashable, immutable sequences of bits
(Boolean values). There are two separate implementations of the
BitString class, each of which inherits from this base class. One is
implemented in pure Python (using Python ints), and the other is
implemented using numpy arrays. Inheriting from this abstract base
class serves to ensure that both implementations provide the same
interface.
Usage:
This is an abstract base class. Use the BitString subclass to
create an instance.
Init Arguments:
bits: The object the implementation uses to represent the bits of
the BitString.
hash_value: None, indicating the hash value will be computed later,
or an int representing the hash value of the BitString.
"""
@classmethod
@abstractmethod
def random(cls, length, bit_prob=.5):
"""Create a bit string of the given length, with the probability of
each bit being set equal to bit_prob, which defaults to .5.
Usage:
# Create a random BitString of length 10 with mostly zeros.
bits = BitString.random(10, bit_prob=.1)
Arguments:
length: An int, indicating the desired length of the result.
bit_prob: A float in the range [0, 1]. This is the probability
of any given bit in the result having a value of 1; default
is .5, giving 0 and 1 equal probabilities of appearance for
each bit's value.
Return:
A randomly generated BitString instance of the requested
length.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def crossover_template(cls, length, points):
"""Create a crossover template with the given number of points. The
crossover template can be used as a mask to crossover two
bitstrings of the same length.
Usage:
assert len(parent1) == len(parent2)
            template = BitString.crossover_template(len(parent1), 2)
inv_template = ~template
child1 = (parent1 & template) | (parent2 & inv_template)
child2 = (parent1 & inv_template) | (parent2 & template)
Arguments:
length: An int, indicating the desired length of the result.
points: An int, the number of crossover points.
Return:
A BitString instance of the requested length which can be used
as a crossover template.
"""
raise NotImplementedError()
def __init__(self, bits, hash_value):
assert hash_value is None or isinstance(hash_value, int)
self._bits = bits
self._hash = hash_value
@abstractmethod
def any(self):
"""Returns True iff at least one bit is set.
Usage:
assert not BitString('0000').any()
assert BitString('0010').any()
Arguments: None
Return:
A bool indicating whether at least one bit has value 1.
"""
raise NotImplementedError()
@abstractmethod
def count(self):
"""Returns the number of bits set to True in the bit string.
Usage:
assert BitString('00110').count() == 2
Arguments: None
Return:
An int, the number of bits with value 1.
"""
raise NotImplementedError()
def __str__(self):
"""Overloads str(bitstring)"""
return ''.join('1' if bit else '0' for bit in self)
def __repr__(self):
"""Overloads repr(bitstring)"""
return type(self).__name__ + '(' + repr(str(self)) + ')'
@abstractmethod
def __int__(self):
"""Overloads int(instance)"""
raise NotImplementedError()
@abstractmethod
def __len__(self):
"""Overloads len(instance)"""
raise NotImplementedError()
@abstractmethod
def __iter__(self):
"""Overloads iter(instance)"""
raise NotImplementedError()
@abstractmethod
def __getitem__(self, index):
raise NotImplementedError()
@abstractmethod
def __hash__(self):
"""Overloads hash(instance)"""
raise NotImplementedError()
@abstractmethod
def __eq__(self, other):
"""Overloads instance1 == instance2"""
raise NotImplementedError()
def __ne__(self, other):
"""Overloads !="""
return not self == other
@abstractmethod
def __and__(self, other):
"""Overloads instance1 & instance2"""
raise NotImplementedError()
@abstractmethod
def __or__(self, other):
"""Overloads instance1 | instance2"""
raise NotImplementedError()
@abstractmethod
def __xor__(self, other):
"""Overloads instance1 ^ instance2"""
raise NotImplementedError()
@abstractmethod
def __invert__(self):
"""Overloads ~instance"""
raise NotImplementedError()
@abstractmethod
def __add__(self, other):
"""Overloads instance1 + instance2"""
raise NotImplementedError()
@abstractmethod
def cover(self, wildcard_probability: float):
"""Returns a condition covering this situation."""
raise NotImplementedError()
# There are two different implementations of BitString, one in
# _numpy_bitstrings and one in _python_bitstrings. The numpy version is
# dependent on numpy being installed, whereas the python one is written in
# pure Python with no external dependencies. By default, the python
# implementation is used, since it is of comparable speed and has no
# external dependencies. The user can override this behavior, if desired,
# by calling use_numpy().
from ._python_bitstrings import BitString
_using_numpy = False
def using_numpy():
"""Return a Boolean indicating whether the numpy implementation is
currently in use.
Usage:
if using_numpy():
use_pure_python()
Arguments: None
Return:
A bool indicating whether the numpy implementation is currently in
use, as opposed to the pure Python implementation.
"""
return _using_numpy
def use_numpy():
"""Force the package to use the numpy-based BitString implementation.
If numpy is not available, this will result in an ImportError.
IMPORTANT: Bitstrings of different implementations cannot be mixed.
Attempting to do so will result in undefined behavior.
Usage:
use_numpy()
assert using_numpy()
Arguments: None
Return: None
"""
global BitString, _using_numpy
from ._numpy_bitstrings import BitString
_using_numpy = True
raise RuntimeError("Implementation of new functionalities in numpy is not complete") # TODO: Luis, fix this.
def use_pure_python():
"""Force the package to use the pure Python BitString implementation.
IMPORTANT: Bitstrings of different implementations cannot be mixed.
Attempting to do so will result in undefined behavior.
Usage:
use_pure_python()
assert not using_numpy()
Arguments: None
Return: None
"""
global BitString, _using_numpy
from ._python_bitstrings import BitString
_using_numpy = False
class BitConditionBase(metaclass=ABCMeta):
"""A pair of bit strings, one indicating the bit values, and the other
indicating the bit mask, which together act as a matching template for
bit strings. Like bit strings, bit conditions are hashable and
immutable. Think of BitConditions as patterns which can match against
BitStrings of the same length. At each index, we can have a 1, a 0, or
a # (wildcard). If the value is 1 or 0, the BitString must have the
same value at that index. If the value is #, the BitString can have any
value at that index.
BitConditions are matched against BitStrings in one of two ways:
Method 1:
result = condition // bitstring
# result now contains a new BitString which contains a 1 for
# each position that violated the pattern, and a 0 for each
# position that did not. This tells us exactly where the
# condition and the bitstring disagree
Method 2:
result = condition(bitstring)
# result now contains a single Boolean value which is True if
# the bitstring fully satisfies the pattern specified by the
# condition, or False if the bitstring disagrees with the
# condition at at least one index
BitConditions can also match against other BitConditions in the same
way that they are matched against BitStrings, with the sole exception
that if the condition being used as the pattern specifies a 1 or 0 at a
particular index, and the condition being used as the substrate
contains an # at that point, the match fails. This means that if
you have two conditions, condition1 and condition2, where condition1
matches a bitstring and condition2 matches condition1, then condition2
is guaranteed to match the bitstring, as well.
Usage:
# A few ways to create a BitCondition instance
condition1 = BitCondition('001###01#1')
condition2 = BitCondition(BitString('0010010111'),
BitString('1110001101'))
assert condition1 == condition2
condition3 = BitCondition.cover('0010010111', .25)
assert condition3(BitString('0010010111')) # It matches
# They print up nicely
assert str(condition1) == '001###01#1'
print(condition1) # Prints: 001###01#1
print(repr(condition1)) # Prints: BitCondition('001###01#1')
# Indexing is from left to right, like an ordinary string.
# (Wildcards are represented as the value None at the given index.)
assert condition1[0] == 0
assert condition1[-1] == 1
assert condition1[4] is None
# They are immutable
condition1[3] = 0 # This will raise a TypeError
# Slicing works
assert condition1[3:-3] == BitCondition('###0')
# You can iterate over them
for bit in condition1:
if bit is None:
print("Found a wildcard!)
# Unlike bitstrings, they cannot be cast as ints
as_int = int(condition1) # This will raise a TypeError
# They can be used in hash-based containers
s = {condition1, condition3}
d = {condition1: "a", condition3: "b"}
# Unlike bitstrings, they do not support the any() method
condition1.any() # This will raise an AttributeError
# Unlike bitstrings, BitCondition.count() returns the number of
# bits that are not wildcards, rather than the number of bits that
# have a value of 1.
assert condition1.count() == condition1.mask.count() == 6
# The bitwise operators for BitConditions work differently from
# those of BitStrings; provided the bits of each condition are
# compatible, i.e. there is no point where their bits disagree
# and neither of them is a wildcard, then &, |, and ~ actually
# represent set operations over the BitStrings that the conditions
# will match.
assert condition1 & condition1 == condition1
assert condition1 | condition1 == condition1
assert (condition1 | ~condition1)(BitString.random(10))
assert condition1(condition1 & condition3) # They are compatible
assert condition3(condition1 & condition3) # They are compatible
assert (condition1 | condition3)(condition1) # They are compatible
assert (condition1 | condition3)(condition3) # They are compatible
# BitConditions can also be concatenated together like strings
concatenation = condition1 + condition3
assert len(concatenation) == 10 * 2
# They support the Genetic Algorithm's crossover operator directly
child1, child2 = condition1.crossover_with(condition3)
Init Arguments:
bits: If mask is provided, a sequence from which the bits of the
condition can be determined. If mask is omitted, a sequence
from which the bits and mask of the condition can be
determined.
mask: None, or a sequence from which the mask can be determined,
having the same length as the sequence provided for bits.
"""
# @classmethod
# @abstractmethod
# def cover(cls, bits, wildcard_probability):
# """Create a new bit condition that matches the provided bit string,
# with the indicated per-index wildcard probability.
#
# Usage:
# condition = BitCondition.cover(bitstring, .33)
# assert condition(bitstring)
#
# Arguments:
# bits: A BitString which the resulting condition must match.
# wildcard_probability: A float in the range [0, 1] which
# indicates the likelihood of any given bit position containing
# a wildcard.
# Return:
# A randomly generated BitCondition which matches the given bits.
# """
# raise NotImplementedError()
def __init__(self, bits, mask=None, mutation_prob=.5):
if mask is None:
if isinstance(bits, str):
bit_list = []
mask = []
for char in bits:
if char == '1':
bit_list.append(True)
mask.append(True)
elif char == '0':
bit_list.append(False)
mask.append(True)
elif char == '#':
bit_list.append(False)
mask.append(False)
else:
raise ValueError("Invalid character: " +
repr(char))
bits = BitString(bit_list)
mask = BitString(mask)
hash_value = None
elif isinstance(bits, BitCondition):
bits, mask, hash_value = bits._bits, bits._mask, bits._hash
else:
if not isinstance(bits, BitString):
bits = BitString(bits)
mask = BitString(~0, len(bits))
hash_value = None
else:
if not isinstance(bits, BitString):
bits = BitString(bits)
if not isinstance(mask, BitString):
mask = BitString(mask)
hash_value = None
assert len(bits) == len(mask)
self._bits = bits & mask
self._mask = mask
self._hash = hash_value
self.mutation_prob = mutation_prob
@property
def bits(self):
"""The bit string indicating the bit values of this bit condition.
Indices that are wildcarded will have a value of False."""
return self._bits
@property
def mask(self):
"""The bit string indicating the bit mask. A value of 1 for a
bit indicates it must match the value bit string. A value of 0
indicates it is masked/wildcarded."""
return self._mask
def count(self):
"""Return the number of bits that are not wildcards.
Usage:
non_wildcard_count = condition.count()
Arguments: None
Return:
An int, the number of positions in the BitCondition which are
not wildcards.
"""
return self._mask.count()
def __str__(self):
"""Overloads str(condition)"""
return ''.join(
'1' if bit else ('#' if bit is None else '0')
for bit in self
)
def __repr__(self):
"""Overloads repr(condition)"""
return type(self).__name__ + '(' + repr(str(self)) + ')'
def __len__(self):
"""Overloads len(condition)"""
return len(self._bits)
def __iter__(self):
"""Overloads iter(condition), and also, for bit in condition. The
values yielded by the iterator are True (1), False (0), or
None (#)."""
for bit, mask in zip(self._bits, self._mask):
yield bit if mask else None
def __getitem__(self, index):
"""Overloads condition[index]. The values yielded by the index
operator are True (1), False (0), or None (#)."""
if isinstance(index, slice):
return BitCondition(self._bits[index], self._mask[index])
return self._bits[index] if self._mask[index] else None
def __hash__(self):
"""Overloads hash(condition)."""
# If we haven't already calculated the hash value, do so now.
if self._hash is None:
self._hash = hash(tuple(self))
return self._hash
def __eq__(self, other):
"""Overloads =="""
# if not isinstance(other, BitCondition):
if not type(self) == type(other):
return False
return (
len(self._bits) == len(other._bits) and
self._bits == other._bits and
self._mask == other._mask
)
def __ne__(self, other):
"""Overloads !="""
return not self == other
def __and__(self, other):
"""Overloads &"""
if not isinstance(other, BitCondition):
return NotImplemented
return type(self)(
(self._bits | ~self._mask) & (other._bits | ~other._mask),
self._mask | other._mask
)
def __or__(self, other):
"""Overloads |"""
if not isinstance(other, BitCondition):
return NotImplemented
return type(self)(
self._bits | other._bits,
self._mask & other._mask & ~(self._bits ^ other._bits)
)
def __invert__(self):
"""Overloads unary ~"""
return type(self)(~self._bits, self._mask)
def __add__(self, other):
"""Overloads +"""
if not isinstance(other, BitCondition):
return NotImplemented
return type(self)(
self._bits + other._bits,
self._mask + other._mask
)
# def __floordiv__(self, other):
# """Overloads the // operator, which we use to find the indices in
# the other value that do/can disagree with this condition."""
# if isinstance(other, BitCondition):
# return ((self._bits ^ other._bits) | ~other._mask) & self._mask
#
# if isinstance(other, int):
# other = BitString.from_int(other, len(self._bits))
# elif not isinstance(other, BitString):
# other = BitString(other)
#
# return (self._bits ^ other) & self._mask
@abstractmethod
def __call__(self, other):
"""Overloads condition(bitstring). Returns a Boolean value that
indicates whether the other value satisfies this condition."""
raise NotImplementedError()
@abstractmethod
def mutate(self, situation):
"""
Mutates the instance and returns a mutated one.
        Each element is mutated with probability ``self.mutation_prob``.
:param situation: the mutated condition must match this situation
:return: Another instance, mutated.
"""
raise NotImplementedError()
@abstractmethod
def crossover_with(self, other, points):
"""Perform 2-point crossover on this bit condition and another of
the same length, returning the two resulting children.
Usage:
offspring1, offspring2 = condition1.crossover_with(condition2)
Arguments:
other: A second BitCondition of the same length as this one.
points: An int, the number of crossover points of the
crossover operation.
Return:
A tuple (condition1, condition2) of BitConditions, where the
value at each position of this BitCondition and the other is
preserved in one or the other of the two resulting conditions.
"""
raise NotImplementedError()
class BitCondition(BitConditionBase):
"""See Documentation of base class."""
# @classmethod
# def cover(cls, bits, wildcard_probability):
# """Create a new bit condition that matches the provided bit string,
# with the indicated per-index wildcard probability.
#
# Usage:
# condition = BitCondition.cover(bitstring, .33)
# assert condition(bitstring)
#
# Arguments:
# bits: A BitString which the resulting condition must match.
# wildcard_probability: A float in the range [0, 1] which
# indicates the likelihood of any given bit position containing
# a wildcard.
# Return:
# A randomly generated BitCondition which matches the given bits.
# """
#
# if not isinstance(bits, BitString):
# bits = BitString(bits)
#
# mask = BitString([
# random.random() > wildcard_probability
# for _ in range(len(bits))
# ])
#
# return cls(bits, mask)
def __floordiv__(self, other):
"""Overloads the // operator, which we use to find the indices in
the other value that do/can disagree with this condition."""
if isinstance(other, BitCondition):
return ((self._bits ^ other._bits) | ~other._mask) & self._mask
if isinstance(other, int):
other = BitString.from_int(other, len(self._bits))
elif not isinstance(other, BitString):
other = BitString(other)
return (self._bits ^ other) & self._mask
def __call__(self, other):
"""Overloads condition(bitstring). Returns a Boolean value that
indicates whether the other value satisfies this condition."""
assert isinstance(other, (BitString, BitCondition))
mismatches = self // other
return not mismatches.any()
def mutate(self, situation: BitString):
"""Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it."""
# Go through each position in the condition, randomly flipping
# whether the position is a value (0 or 1) or a wildcard (#). We do
# this in a new list because the original condition's mask is
# immutable.
mutation_points = xcs.bitstrings.BitString.random(
len(self.mask),
self.mutation_prob
)
mask = self.mask ^ mutation_points
# The bits that aren't wildcards always have the same value as the
# situation, which ensures that the mutated condition still matches
# the situation.
if isinstance(situation, xcs.bitstrings.BitCondition):
mask &= situation.mask
return xcs.bitstrings.BitCondition(situation.bits, mask)
return xcs.bitstrings.BitCondition(situation, mask)
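    # Illustrative sketch (not part of the original source), with made-up random
    # draws: for a parent condition '1#0' (bits=100, mask=101) and mutation_points
    # drawn as 001, the new mask is 101 ^ 001 = 100; matched against the BitString
    # situation '110' this yields BitCondition('110', '100'), i.e. '1##', which
    # still matches the situation by construction.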
def crossover_with(self, other, points):
"""Perform 2-point crossover on this bit condition and another of
the same length, returning the two resulting children.
Usage:
offspring1, offspring2 = condition1.crossover_with(condition2)
Arguments:
other: A second BitCondition of the same length as this one.
points: An int, the number of crossover points of the
crossover operation.
Return:
A tuple (condition1, condition2) of BitConditions, where the
value at each position of this BitCondition and the other is
preserved in one or the other of the two resulting conditions.
"""
assert isinstance(other, BitCondition)
assert len(self) == len(other)
template = BitString.crossover_template(len(self), points)
inv_template = ~template
bits1 = (self._bits & template) | (other._bits & inv_template)
mask1 = (self._mask & template) | (other._mask & inv_template)
bits2 = (self._bits & inv_template) | (other._bits & template)
mask2 = (self._mask & inv_template) | (other._mask & template)
# Convert the modified sequences back into BitConditions
return type(self)(bits1, mask1), type(self)(bits2, mask2)
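    # Illustrative sketch (not part of the original source), with a fixed
    # template instead of a random one: crossing '10#1' with '0##0' using the
    # template 1100 keeps the first two positions from each parent and swaps the
    # rest, giving the children '10#0' and '0##1'.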
| 36.935404 | 127 | 0.637675 |
c228861ecfbbd889147310e811e1bb42f4962098 | 60,385 | py | Python |
PyFlow/UI/Canvas/UINodeBase.py | hellobiek/PyFlow | cde924f3bd1613f36f9312e7313e036ae68a0e34 | ["Apache-2.0"] | null | null | null |
PyFlow/UI/Canvas/UINodeBase.py | hellobiek/PyFlow | cde924f3bd1613f36f9312e7313e036ae68a0e34 | ["Apache-2.0"] | null | null | null |
PyFlow/UI/Canvas/UINodeBase.py | hellobiek/PyFlow | cde924f3bd1613f36f9312e7313e036ae68a0e34 | ["Apache-2.0"] | null | null | null |
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
import logging
from Qt import QtCore
from Qt import QtGui
from Qt import QtSvg
from Qt.QtWidgets import *
from PyFlow.ConfigManager import ConfigManager
from PyFlow.Core.Common import *
from PyFlow.UI.Canvas.UIPinBase import (
UIPinBase,
getUIPinInstance,
PinGroup
)
from PyFlow.UI.EditorHistory import EditorHistory
from PyFlow.UI.Canvas.UICommon import *
from PyFlow.UI.Widgets.InputWidgets import createInputWidget
from PyFlow.UI.Canvas.Painters import NodePainter
from PyFlow.UI.Widgets.PropertiesFramework import CollapsibleFormWidget
from PyFlow.UI.UIInterfaces import IPropertiesViewSupport
from PyFlow.UI.UIInterfaces import IUINode
from PyFlow.UI.Canvas.NodeActionButton import NodeActionButtonBase
from PyFlow.UI.Utils.stylesheet import Colors
from collections import OrderedDict
UI_NODES_FACTORIES = {}
class CollapseNodeActionButton(NodeActionButtonBase):
"""docstring for CollapseNodeActionButton."""
def __init__(self, svgFilePath, action, uiNode):
super(CollapseNodeActionButton, self).__init__(svgFilePath, action, uiNode)
self.svgIcon.setElementId("Collapse")
def mousePressEvent(self, event):
super(CollapseNodeActionButton, self).mousePressEvent(event)
if self.parentItem().collapsed:
self.svgIcon.setElementId("Expand")
else:
self.svgIcon.setElementId("Collapse")
class NodeNameValidator(QtGui.QRegExpValidator):
"""docstring for NodeNameValidator."""
def __init__(self, parent=None):
super(NodeNameValidator, self).__init__(QtCore.QRegExp('^[a-zA-Z][a-zA-Z0-9_]*$'), parent)
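# Illustrative note (not part of the original source): the regular expression
# above accepts identifiers such as "myNode_2" (a letter first, then letters,
# digits or underscores) and rejects names like "2node" or "my node".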
class InputTextField(QGraphicsTextItem):
editingFinished = QtCore.Signal(bool)
startEditing = QtCore.Signal()
def __init__(self, text, node, parent=None, singleLine=False, validator=None):
super(InputTextField, self).__init__(text, parent)
self.node = node
self.setFlags(QGraphicsWidget.ItemSendsGeometryChanges | QGraphicsWidget.ItemIsSelectable)
self.singleLine = singleLine
self.setObjectName("Nothing")
self.origMoveEvent = self.mouseMoveEvent
self.mouseMoveEvent = self.node.mouseMoveEvent
self.validator = validator
self.textBeforeEditing = ""
def keyPressEvent(self, event):
currentKey = event.key()
if self.validator is not None:
keyButtonText = event.text()
doc = QtGui.QTextDocument(self.document().toPlainText())
selectedText = self.textCursor().selectedText()
cursor = doc.find(selectedText)
cursor.insertText(keyButtonText)
futureText = doc.toPlainText()
validatorState, chunk, pos = self.validator.validate(futureText, 0)
if currentKey not in (QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete):
if validatorState == QtGui.QValidator.Invalid:
return
if currentKey == QtCore.Qt.Key_Escape:
# user rejects action. Restore text before editing
self.setPlainText(self.textBeforeEditing)
self.clearFocus()
super(InputTextField, self).keyPressEvent(event)
return
if self.singleLine:
if currentKey in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
if self.toPlainText() == "":
self.setPlainText(self.textBeforeEditing)
event.ignore()
self.editingFinished.emit(False)
self.clearFocus()
else:
event.ignore()
# self.editingFinished.emit(True)
self.clearFocus()
else:
super(InputTextField, self).keyPressEvent(event)
else:
super(InputTextField, self).keyPressEvent(event)
def mousePressEvent(self, event):
if self.objectName() == "MouseLocked":
super(InputTextField, self).mousePressEvent(event)
else:
self.node.mousePressEvent(event)
self.clearFocus()
def mouseReleaseEvent(self, event):
if self.objectName() == "MouseLocked":
super(InputTextField, self).mouseReleaseEvent(event)
else:
self.node.mouseReleaseEvent(event)
self.clearFocus()
def mouseDoubleClickEvent(self, event):
super(InputTextField, self).mouseDoubleClickEvent(event)
self.setFlag(QGraphicsWidget.ItemIsFocusable, True)
self.startEditing.emit()
self.setFocus()
def focusInEvent(self, event):
self.node.canvasRef().disableSortcuts()
self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.setObjectName("MouseLocked")
self.textBeforeEditing = self.toPlainText()
self.mouseMoveEvent = self.origMoveEvent
super(InputTextField, self).focusInEvent(event)
def focusOutEvent(self, event):
self.node.canvasRef().enableSortcuts()
cursor = self.textCursor()
cursor.clearSelection()
self.setTextCursor(cursor)
super(InputTextField, self).focusOutEvent(event)
self.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.setFlag(QGraphicsWidget.ItemIsFocusable, False)
self.setObjectName("Nothing")
if self.toPlainText() == "" and self.validator is not None:
self.setPlainText(self.textBeforeEditing)
self.editingFinished.emit(False)
else:
self.editingFinished.emit(True)
self.mouseMoveEvent = self.node.mouseMoveEvent
def setGeometry(self, rect):
self.prepareGeometryChange()
self.setPos(rect.topLeft())
class NodeName(QGraphicsWidget):
"""docstring for NodeName"""
def __init__(self, parent=None):
super(NodeName, self).__init__(parent)
self.setAcceptHoverEvents(True)
self.setFlag(QGraphicsWidget.ItemSendsGeometryChanges)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
self.labelItem = InputTextField(self.parentItem().getName(), parent, self, singleLine=True, validator=NodeNameValidator())
self.labelItem.setDefaultTextColor(self.parentItem()._labelTextColor)
self.labelItem.setAcceptHoverEvents(True)
self.labelItem.document().contentsChanged.connect(self.parentItem().updateNodeShape)
self.labelItem.editingFinished.connect(self.parentItem().finalizeRename)
self.labelItem.hoverMoveEvent = self.hoverMoveEvent
self._font = QtGui.QFont("Consolas")
self._font.setPointSize(6)
self.labelItem.setFont(self._font)
self.setGraphicsItem(self.labelItem)
self.hovered = False
self.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
def getFont(self):
return self.labelItem.font()
def getPlainText(self):
return self.labelItem.toPlainText()
def getHtml(self):
return self.labelItem.toHtml()
def setHtml(self, html):
self.prepareGeometryChange()
self.labelItem.setHtml(html)
self._font.setPointSize(6)
self.labelItem.setFont(self._font)
self.updateGeometry()
self.update()
def setTextColor(self, color):
self.labelItem.setDefaultTextColor(color)
def mouseDoubleClickEvent(self, event):
super(NodeName, self).mouseDoubleClickEvent(event)
def isRenamable(self):
return self.parentItem().isRenamable()
def hoverEnterEvent(self, event):
super(NodeName, self).hoverEnterEvent(event)
self.hovered = True
self.update()
def hoverMoveEvent(self, event):
self.parentItem().hoverMoveEvent(event)
def hoverLeaveEvent(self, event):
super(NodeName, self).hoverLeaveEvent(event)
self.hovered = False
self.update()
def sizeHint(self, which, constraint):
w = QtGui.QFontMetrics(self.getFont()).width(self.getPlainText())
h = self.labelItem.boundingRect().height() + 5
return QtCore.QSizeF(w, h)
def setGeometry(self, rect):
self.prepareGeometryChange()
super(QGraphicsWidget, self).setGeometry(rect)
self.setPos(rect.topLeft())
self.labelItem.setGeometry(rect)
class UINodeBase(QGraphicsWidget, IPropertiesViewSupport, IUINode):
"""
Default node description
"""
# Event called when node name changes
displayNameChanged = QtCore.Signal(str)
drawlabel = None
def __init__(self, raw_node, w=80, color=Colors.NodeBackgrounds, headColorOverride=None):
super(UINodeBase, self).__init__()
self.setFlag(QGraphicsWidget.ItemIsMovable)
self.setFlag(QGraphicsWidget.ItemIsFocusable)
self.setFlag(QGraphicsWidget.ItemIsSelectable)
self.setFlag(QGraphicsWidget.ItemSendsGeometryChanges)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setAcceptHoverEvents(True)
self.setZValue(NodeDefaults().Z_LAYER)
# Raw Node Definition
self.dirty = True
self.computing = False
self._rawNode = raw_node
self._rawNode.setWrapper(self)
self._rawNode.killed.connect(self.kill)
self._rawNode.tick.connect(self.Tick)
self._rawNode.errorOccured.connect(self.onNodeErrorOccurred)
self._rawNode.errorCleared.connect(self.onNodeErrorCleared)
self._rawNode.setDirty.connect(self.setDirty)
self._rawNode.computing.connect(self.setComputing)
self._rawNode.computed.connect(self.setClean)
self.custom_widget_data = {}
self.heartBeatDelay = 0.5
self.heartBeatTimeDelta = 0.0
# Color and Size Options
self.opt_node_base_color = Colors.NodeBackgrounds
self.opt_selected_pen_color = Colors.NodeSelectedPenColor
self.optPenSelectedType = QtCore.Qt.SolidLine
self.optPenErrorType = QtCore.Qt.DashLine
self._collapsed = False
self._left_stretch = 0
self.color = color
if self.drawlabel is None:
self.drawlabel = True
self.headColorOverride = headColorOverride
self.headColor = NodeDefaults().PURE_NODE_HEAD_COLOR
if raw_node.headerColor is not None:
self.headColorOverride = QtGui.QColor.fromRgb(*raw_node.headerColor)
self._w = 0
self.h = 30
self.minWidth = 50
self.minHeight = self.h
self._labelTextColor = QtCore.Qt.white
# Font Options
self.nodeNameFont = QtGui.QFont("Consolas")
self.nodeNameFont.setPointSize(6)
# GUI Layout
self.drawLayoutsDebug = False
self.nodeLayout = QGraphicsLinearLayout(QtCore.Qt.Vertical)
self.nodeLayout.setContentsMargins(NodeDefaults().CONTENT_MARGINS,
NodeDefaults().CONTENT_MARGINS,
NodeDefaults().CONTENT_MARGINS,
NodeDefaults().CONTENT_MARGINS)
self.nodeLayout.setSpacing(NodeDefaults().LAYOUTS_SPACING)
self.headerLayout = QGraphicsLinearLayout(QtCore.Qt.Horizontal)
self.nodeNameWidget = NodeName(self)
if self.drawlabel:
self.headerLayout.addItem(self.nodeNameWidget)
self.nodeNameWidget.setPos(0, 1)
self.headerLayout.setContentsMargins(0, 0, 0, 0)
self.headerLayout.setSpacing(3)
self.headerLayout.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
self.headerLayout.setMaximumHeight(self.labelHeight)
self.exposedActionButtonsLayout = QGraphicsLinearLayout(QtCore.Qt.Horizontal)
self.exposedActionButtonsLayout.setContentsMargins(0, 0, 0, 0)
self.exposedActionButtonsLayout.setSpacing(2)
self.exposedActionButtonsLayout.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
self.headerLayout.addItem(self.exposedActionButtonsLayout)
self.headerLayout.setAlignment(self.exposedActionButtonsLayout, QtCore.Qt.AlignRight)
self.customLayout = QGraphicsLinearLayout(QtCore.Qt.Vertical)
self.customLayout.setContentsMargins(0, 0, 0, 0)
self.customLayout.setSpacing(NodeDefaults().LAYOUTS_SPACING)
self.customLayout.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
self.hasCustomLayout = False
self.pinsLayout = QGraphicsLinearLayout(QtCore.Qt.Horizontal)
self.pinsLayout.setContentsMargins(0, 0, 0, 0)
self.pinsLayout.setSpacing(NodeDefaults().LAYOUTS_SPACING)
self.pinsLayout.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
self.inputsLayout = QGraphicsLinearLayout(QtCore.Qt.Vertical)
self.inputsLayout.setContentsMargins(0, 0, 0, 0)
self.inputsLayout.setSpacing(NodeDefaults().LAYOUTS_SPACING)
self.inputsLayout.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
self.outputsLayout = QGraphicsLinearLayout(QtCore.Qt.Vertical)
self.outputsLayout.setContentsMargins(0, 0, 0, 0)
self.outputsLayout.setSpacing(NodeDefaults().LAYOUTS_SPACING)
self.outputsLayout.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
self.pinsLayout.addItem(self.inputsLayout)
self.pinsLayout.addItem(self.outputsLayout)
self.pinsLayout.setAlignment(self.inputsLayout, QtCore.Qt.AlignLeft)
self.pinsLayout.setAlignment(self.outputsLayout, QtCore.Qt.AlignRight)
self.pinsLayout.setPreferredWidth(self.nodeLayout.preferredWidth())
self.nodeLayout.addItem(self.headerLayout)
# self.nodeLayout.addItem(self.customLayout)
self.nodeLayout.addItem(self.pinsLayout)
self.setLayout(self.nodeLayout)
self.svgIcon = QtSvg.QGraphicsSvgItem(self)
self.svgIcon.setPos(-6, -6)
self._image = None
self.canvasRef = None
self._menu = QMenu()
# Resizing Options
self.initialRectWidth = self.minWidth
self.initialRectHeight = self.minHeight
self.expanded = True
self.resizable = False
self.bResize = False
self.resizeDirection = (0, 0)
self.resizeStripsSize = 2
self.resizeStrips = [0, 0, 0, 0, # Left, Top, Right, Bottom
0, 0, 0, 0] # BottomRight, BottomLeft, TopLeft, TopRight
self.roundness = NodeDefaults().CORNERS_ROUND_FACTOR
# Hiding/Moving By Group/collapse/By Pin
self.pressedCommentNode = None
self.owningCommentNode = None
self._rect = QtCore.QRectF(0, 0, self.minWidth, self.minHeight)
self.mousePressPos = QtCore.QPointF()
# Group pins
self.inputGroupPins = {}
self.outputGroupPins = {}
# Action buttons
self._actionButtons = set()
# Core nodes support
self.isTemp = False
self.isCommentNode = False
self.bExposeInputsToCompound = False
self.originalPropertyIndexes = {}
self.editedPropertyIndexes = {}
# collapse action
self._groups = {"input": {}, "output": {}}
self.actionToggleCollapse = self._menu.addAction("ToggleCollapse")
self.actionToggleCollapse.setToolTip("Toggles node's body collapsed or not")
self.actionToggleCollapse.triggered.connect(self.toggleCollapsed)
self.actionToggleCollapse.setData(NodeActionButtonInfo(":/nodeCollapse.svg", CollapseNodeActionButton))
self.actionRefresh = self._menu.addAction("Refresh")
self.actionRefresh.triggered.connect(self.onRefresh)
self.actionToggleExposeWidgetsToCompound = self._menu.addAction("Expose properties")
self.actionToggleExposeWidgetsToCompound.triggered.connect(self.onToggleExposeProperties)
self.actionCopyPath = self._menu.addAction("Copy path")
self.actionCopyPath.triggered.connect(self.onCopyPathToClipboard)
self._rawNode.computed.connect(self.onComputed)
def onRefresh(self):
self._rawNode.processNode()
def onCopyPathToClipboard(self):
QApplication.clipboard().clear()
QApplication.clipboard().setText(self.path())
def getLastErrorMessage(self):
return self._rawNode.getLastErrorMessage()
def hoverEnterEvent(self, event):
super(UINodeBase, self).hoverEnterEvent(event)
if not self.isValid():
self.setToolTip(self.getLastErrorMessage())
else:
self.setToolTip("%s\nComputingTime: %s"%(rst2html(self.description()),self._rawNode._computingTime))
def eventDropOnCanvas(self):
pass
def setSingleLineName(self, bSingleLine=True):
self.nodeNameWidget.labelItem.singleLine = bSingleLine
def setNameValidationEnabled(self, bEnabled=True):
self.nodeNameWidget.labelItem.validator = None if not bEnabled else NodeNameValidator()
def isNameValidationEnabled(self):
return self.nodeNameWidget.labelItem.validator is not None
def onToggleExposeProperties(self):
self.setExposePropertiesToCompound(not self.bExposeInputsToCompound)
EditorHistory().saveState("{} exposing widgets".format("Start" if self.bExposeInputsToCompound else "Stop"), modify=True)
def setExposePropertiesToCompound(self, bExpose):
self.bExposeInputsToCompound = bExpose
self.update()
def __repr__(self):
return self._rawNode.__repr__()
def __str__(self):
return self._rawNode.__str__()
@property
def packageName(self):
return self._rawNode.packageName
@property
def uid(self):
return self._rawNode._uid
@uid.setter
def uid(self, value):
self._rawNode._uid = value
@property
def name(self):
return self._rawNode.name
@name.setter
def name(self, value):
self._rawNode.setName(value)
@property
def pins(self):
return self._rawNode.pins
@property
def orderedInputs(self):
return self._rawNode.orderedInputs
@property
def orderedOutputs(self):
return self._rawNode.orderedOutputs
@property
def isCompoundNode(self):
return self._rawNode.isCompoundNode
def isValid(self):
return self._rawNode.isValid()
def setError(self, errorString):
self._rawNode.setError(errorString)
def clearError(self):
self._rawNode.clearError()
def getName(self):
return self._rawNode.getName()
def setName(self, name):
self._rawNode.setName(name)
def serialize(self):
return self._rawNode.serialize()
def location(self):
return self._rawNode.location()
def path(self):
return self._rawNode.path()
def graph(self):
return self._rawNode.graph()
def isUnderActiveGraph(self):
return self._rawNode.isUnderActiveGraph()
def autoAffectPins(self):
self._rawNode.autoAffectPins()
def isCallable(self):
return self._rawNode.isCallable()
def category(self):
return self._rawNode.category()
def description(self):
return self._rawNode.description()
def call(self, name):
self._rawNode.call(name)
@property
def groups(self):
return self._groups
@property
def collapsed(self):
return self._collapsed
@collapsed.setter
def collapsed(self, bCollapsed):
if bCollapsed != self._collapsed:
self._collapsed = bCollapsed
self.aboutToCollapse(self._collapsed)
for i in range(0, self.inputsLayout.count()):
inp = self.inputsLayout.itemAt(i)
inp.setVisible(not bCollapsed)
for o in range(0, self.outputsLayout.count()):
out = self.outputsLayout.itemAt(o)
out.setVisible(not bCollapsed)
for cust in range(0, self.customLayout.count()):
out = self.customLayout.itemAt(cust)
out.setVisible(not bCollapsed)
self.updateNodeShape()
@property
def image(self):
return self._image
@image.setter
def image(self, value):
self._image = value
self.svgIcon.renderer().load(value)
elementName = QtCore.QFileInfo(value).baseName()
self.svgIcon.setElementId(elementName)
# self.svgIcon.setPos(self.geometry().topRight())
@property
def labelTextColor(self):
return self._labelTextColor
@labelTextColor.setter
def labelTextColor(self, value):
self._labelTextColor = value
self.nodeNameWidget.setTextColor(self._labelTextColor)
@property
def UIPins(self):
result = OrderedDict()
for rawPin in self._rawNode.pins:
uiPinRef = rawPin.getWrapper()
if uiPinRef is not None:
result[rawPin.uid] = uiPinRef()
return result
@property
def UIinputs(self):
result = OrderedDict()
for rawPin in self._rawNode.orderedInputs.values():
wrapper = rawPin.getWrapper()
if wrapper is not None:
result[rawPin.uid] = wrapper()
return result
@property
def UIoutputs(self):
result = OrderedDict()
for rawPin in self._rawNode.orderedOutputs.values():
wrapper = rawPin.getWrapper()
if wrapper is not None:
result[rawPin.uid] = wrapper()
return result
@property
def namePinOutputsMap(self):
result = OrderedDict()
for rawPin in self._rawNode.pins:
if rawPin.direction == PinDirection.Output:
wrapper = rawPin.getWrapper()
if wrapper is not None:
result[rawPin.name] = wrapper()
return result
@property
def namePinInputsMap(self):
result = OrderedDict()
for rawPin in self._rawNode.pins:
if rawPin.direction == PinDirection.Input:
result[rawPin.name] = rawPin.getWrapper()()
return result
@property
def w(self):
return self._w
@w.setter
def w(self, value):
self._w = value
@property
def labelHeight(self):
return self.nodeNameWidget.sizeHint(None, None).height()
@property
def labelWidth(self):
headerWidth = self.nodeNameWidget.sizeHint(None, None).width()
headerWidth += self.buttonsWidth()
return max(headerWidth, self.minWidth)
def getData(self, pinName):
if pinName in [p.name for p in self.inputs.values()]:
p = self.getPinSG(pinName, PinSelectionGroup.Inputs)
return p.getData()
def setData(self, pinName, data):
if pinName in [p.name for p in self.outputs.values()]:
p = self.getPinSG(pinName, PinSelectionGroup.Outputs)
p.setData(data)
def getPinSG(self, name, pinsGroup=PinSelectionGroup.BothSides):
pin = self._rawNode.getPinSG(str(name), pinsGroup)
if pin is not None:
if pin.getWrapper() is not None:
return pin.getWrapper()()
return None
def isRenamable(self):
return True
def finalizeRename(self, accepted=False):
"""Called by :class:`~PyFlow.UI.Canvas.UINodeBase.NodeName`
If user pressed :kbd:`escape` name before editing will be restored. If User pressed :kbd:`enter` or removed focus
rename action will be accepted and node will be renamed and name will be checked for uniqueness.
:param accepted: Wheter user accepted editing or not
:type accepted: :class:`bool`
"""
if accepted:
name = self.nodeNameWidget.getPlainText()
if self.isNameValidationEnabled():
name = name.replace(" ", "")
newName = self.canvasRef().graphManager.getUniqNodeName(name)
self.setName(newName)
self.setHeaderHtml(newName)
self.canvasRef().requestFillProperties.emit(self.createPropertiesWidget)
def onNodeErrorOccurred(self, *args, **kwargs):
# change node ui to invalid
errorString = args[0]
error = {"Node": self._rawNode.name, "Error": errorString}
if ConfigManager().shouldRedirectOutput():
errorLink = """<a href=%s><span style=" text-decoration: underline; color:red;">%s</span></a></p>""" % (self._rawNode.name, str(error))
logging.error(errorLink)
else:
logging.error(errorString)
self.setToolTip(errorString)
self.update()
def onNodeErrorCleared(self, *args, **kwargs):
# restore node ui to clean
self.setToolTip("%s\nComputingTime: %s"%(rst2html(self.description()),self._rawNode._computingTime))
self.update()
def onComputed(self, *args, **kwargs):
self.setToolTip("%s\nComputingTime: %s"%(rst2html(self.description()),self._rawNode._computingTime))
self.update()
def toggleCollapsed(self):
self.collapsed = not self.collapsed
def aboutToCollapse(self, futureCollapseState):
"""Called before collapsing or expanding."""
pass
def setHeaderHtml(self, html):
self.nodeNameWidget.setHtml(html)
def getHeaderText(self):
return self.nodeNameWidget.getPlainText()
def getHeaderHtml(self):
return self.nodeNameWidget.getHtml()
def serializationHook(self):
# this will be called by raw node
# to gather ui specific info
template = {}
if self.resizable:
template['resize'] = {'w': self._rect.right(), 'h': self._rect.bottom()}
template['collapsed'] = self.collapsed
template['headerHtml'] = self.nodeNameWidget.getHtml()
template['exposeInputsToCompound'] = self.bExposeInputsToCompound
if len(self.groups) > 0:
template['groups'] = {'input': {}, 'output': {}}
for name, grp in self.groups['input'].items():
template['groups']['input'][name] = grp.expanded
for name, grp in self.groups['output'].items():
template['groups']['output'][name] = grp.expanded
return template
    def buttonsWidth(self):
        # Width occupied by the action buttons. 10 is the svg icon size; this value
        # should probably be moved to preferences.
        try:
            headerWidth = 0
            numActions = len(self._actionButtons)
            headerWidth += numActions * 10
            headerWidth += self.headerLayout.spacing() * 2 + NodeDefaults().CONTENT_MARGINS * 2
            return headerWidth
        except Exception:
            return 0
def getNodeWidth(self):
width = self.getPinsWidth() + self.pinsLayout.spacing() * 2
if self.resizable:
width = max(self._rect.width(), width)
width = max(width, self.labelWidth)
return width
def getNodeHeight(self):
h = self.nodeNameWidget.sizeHint(None, None).height()
h += self.nodeLayout.spacing()
try:
numInputs = len(self.UIinputs)
numOutputs = len(self.UIoutputs)
ipins = self.UIinputs.values()
opins = self.UIoutputs.values()
h += NodeDefaults().CONTENT_MARGINS * 2
iPinsHeight = 0
for pin in ipins:
if pin.isVisible():
iPinsHeight += pin.sizeHint(None, None).height() + NodeDefaults().LAYOUTS_SPACING
oPinsHeight = 0
for pin in opins:
if pin.isVisible():
oPinsHeight += pin.sizeHint(None, None).height() + NodeDefaults().LAYOUTS_SPACING
h += max(iPinsHeight, oPinsHeight)
igrhHeight = 0
ogrhHeight = 0
for grp in self.groups["input"].values():
igrhHeight += grp.sizeHint(None, None).height() + NodeDefaults().LAYOUTS_SPACING
for grp in self.groups["output"].values():
ogrhHeight += grp.sizeHint(None, None).height() + NodeDefaults().LAYOUTS_SPACING
h += max(igrhHeight, ogrhHeight)
        except Exception as e:
            print(e)
custCount = 0
for cust in range(0, self.customLayout.count()):
out = self.customLayout.itemAt(cust)
if out.isVisible():
h += out.minimumHeight()
custCount += 1
if custCount > 0:
h += self.customLayout.spacing() * self.customLayout.count()
if h < self.minHeight:
h = self.minHeight
if self.resizable:
h = max(self._rect.height(), h)
if self.collapsed:
h = min(self.minHeight, self.labelHeight + self.nodeLayout.spacing() * 2)
return h
def getPinsWidth(self):
iwidth = 0
owidth = 0
pinwidth = 0
pinwidth2 = 0
for i in self.UIPins.values():
if i.direction == PinDirection.Input:
iwidth = max(iwidth, i.sizeHint(None, None).width())
else:
owidth = max(owidth, i.sizeHint(None, None).width())
for igrp in self.groups["input"].values():
w = igrp.geometry().width()
iwidth = max(iwidth, w)
for ogrp in self.groups["output"].values():
w = ogrp.geometry().width()
owidth = max(owidth, w)
return iwidth + owidth + pinwidth + pinwidth2 + Spacings.kPinOffset
def setGeometry(self, rect):
self.prepareGeometryChange()
super(QGraphicsWidget, self).setGeometry(rect)
self.setPos(rect.topLeft())
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionChange:
self._rawNode.setPosition(value.x(), value.y())
if change == QGraphicsItem.ItemVisibleChange:
if self.owningCommentNode is not None:
if self.owningCommentNode.collapsed:
self.onVisibilityChanged(False)
else:
self.onVisibilityChanged(bool(value))
if change == QGraphicsItem.ItemSelectedChange:
if not value:
self.nodeNameWidget.labelItem.clearFocus()
return super(UINodeBase, self).itemChange(change, value)
def updateNodeShape(self):
self.prepareGeometryChange()
self.invalidateNodeLayouts()
self.updateGeometry()
self.update()
if self.canvasRef is not None:
self.canvasRef().update()
self.nodeNameWidget.updateGeometry()
self.nodeNameWidget.update()
self.pinsLayout.setPreferredWidth(self.getNodeWidth() - self.nodeLayout.spacing())
self.headerLayout.setPreferredWidth(self.getNodeWidth() - self.nodeLayout.spacing())
self.customLayout.setPreferredWidth(self.getNodeWidth() - self.nodeLayout.spacing())
def onVisibilityChanged(self, bVisible):
pass
def translate(self, x, y):
super(UINodeBase, self).moveBy(x, y)
def sizeHint(self, which, constraint):
return QtCore.QSizeF(self.getNodeWidth(), self.getNodeHeight())
def getImageDrawRect(self):
topRight = self.boundingRect().topRight()
topRight.setY(-12)
topRight.setX(self.boundingRect().width() - 12)
r = self.boundingRect()
r.setWidth(24)
r.setHeight(24)
r.translate(topRight)
return r
def onChangeColor(self, label=False):
res = QColorDialog.getColor(self.color, None, 'Node color setup')
if res.isValid():
res.setAlpha(80)
self.color = res
if label:
self.update()
def updateNodeHeaderColor(self):
if self.headColorOverride is None:
if self.isCallable():
self.headColor = NodeDefaults().CALLABLE_NODE_HEAD_COLOR
else:
self.headColor = NodeDefaults().PURE_NODE_HEAD_COLOR
else:
self.headColor = self.headColorOverride
def postCreate(self, jsonTemplate=None):
self.updateNodeHeaderColor()
# create ui pin wrappers
for i in self._rawNode.getOrderedPins():
self._createUIPinWrapper(i)
self.updateNodeShape()
self.setPos(self._rawNode.x, self._rawNode.y)
if self._rawNode.graph is None:
print(self._rawNode.getName())
assert(self._rawNode.graph() is not None), "NODE GRAPH IS NONE"
if self.canvasRef is not None:
if self.canvasRef().graphManager.activeGraph() != self._rawNode.graph():
self.hide()
if not self.drawlabel:
self.nodeNameWidget.hide()
self.createActionButtons()
headerHtml = self.name
if jsonTemplate is not None and jsonTemplate["wrapper"] is not None:
if "exposeInputsToCompound" in jsonTemplate["wrapper"]:
self.setExposePropertiesToCompound(jsonTemplate["wrapper"]["exposeInputsToCompound"])
if "collapsed" in jsonTemplate["wrapper"]:
self.collapsed = jsonTemplate["wrapper"]["collapsed"]
if "headerHtml" in jsonTemplate["wrapper"]:
headerHtml = jsonTemplate["wrapper"]["headerHtml"]
if "groups" in jsonTemplate["wrapper"]:
try:
for groupName, expanded in jsonTemplate["wrapper"]["groups"]["input"].items():
self.groups["input"][groupName].setExpanded(expanded)
for groupName, expanded in jsonTemplate["wrapper"]["groups"]["output"].items():
self.groups["output"][groupName].setExpanded(expanded)
except Exception as e:
pass
description = self.description()
if self.isDeprecated():
description = self.deprecationMessage()
if description:
self.setToolTip("%s\nComputingTime: %s"%(rst2html(self.description()),self._rawNode._computingTime))
else:
self.setToolTip("\nComputingTime: %s"%self._rawNode._computingTime)
if self.resizable:
w = self.getNodeWidth()
h = self.getNodeHeight()
if jsonTemplate is not None:
if "resize" in jsonTemplate["wrapper"]:
w = jsonTemplate["wrapper"]["resize"]["w"]
h = jsonTemplate["wrapper"]["resize"]["h"]
self._rect.setWidth(w)
self._rect.setHeight(h)
self.updateNodeShape()
self.setHeaderHtml(headerHtml)
def getMetaData(self):
return self._rawNode.getMetaData()
def createActionButtons(self):
# NOTE: actions with action button class specified will be added next to node name
for action in self._menu.actions():
actionData = action.data()
if isinstance(actionData, NodeActionButtonInfo):
actionButtonClass = actionData.actionButtonClass()
svgFilePath = actionData.filePath()
if actionButtonClass is None:
actionButtonClass = NodeActionButtonBase
butt = actionButtonClass(svgFilePath, action, self)
self.exposedActionButtonsLayout.insertItem(0, butt)
self.exposedActionButtonsLayout.setAlignment(butt, QtCore.Qt.AlignRight)
action.setVisible(False)
def addWidget(self, widget):
if not self.hasCustomLayout:
self.nodeLayout.insertItem(1, self.customLayout)
self.hasCustomLayout = True
        proxyWidget = QGraphicsProxyWidget()
        proxyWidget.setWidget(widget)
        self.customLayout.addItem(proxyWidget)
def invalidateNodeLayouts(self):
self.inputsLayout.invalidate()
self.outputsLayout.invalidate()
self.pinsLayout.invalidate()
self.headerLayout.invalidate()
self.exposedActionButtonsLayout.invalidate()
self.nodeLayout.invalidate()
self.customLayout.invalidate()
def isUnderCollapsedComment(self):
if self.owningCommentNode is None:
return False
else:
if self.owningCommentNode.collapsed:
return True
parent = self.owningCommentNode.owningCommentNode
while parent is not None:
upperComment = parent
if upperComment.collapsed:
return True
parent = upperComment.owningCommentNode
return False
def getTopMostOwningCollapsedComment(self):
"""Returns top most owning comment. If bCollapsed=True, it will stop when first collapsed comment is found.
"""
if self.owningCommentNode is None:
return None
# build chain of comments collapse states
topMostComment = self.owningCommentNode
parent = topMostComment.owningCommentNode
chain = OrderedDict()
chain[topMostComment] = topMostComment.collapsed
while parent is not None:
topMostComment = parent
chain[topMostComment] = topMostComment.collapsed
parent = topMostComment.owningCommentNode
last = None
for comment, collapsed in chain.items():
if not comment.isVisible():
continue
if last is not None:
if collapsed + last.collapsed == 1:
topMostComment = last
break
last = comment
else:
last = comment
return topMostComment
def isDeprecated(self):
return self._rawNode.isDeprecated()
def isExperimental(self):
return self._rawNode.isExperimental()
def deprecationMessage(self):
return self._rawNode.deprecationMessage()
def updateOwningCommentNode(self):
if self.owningCommentNode is not None and self.owningCommentNode.collapsed:
return
collidingItems = self.collidingItems(QtCore.Qt.ContainsItemShape)
collidingNodes = set()
for item in collidingItems:
if item.sceneBoundingRect().contains(self.sceneBoundingRect()) and isinstance(item, UINodeBase):
if item.isCommentNode:
collidingNodes.add(item)
owningCommentNode = None
if len(collidingNodes) == 1:
owningCommentNode = list(collidingNodes)[0]
elif len(collidingNodes) > 1:
# find smallest rect
smallest = list(collidingNodes)[0]
for commentNode in collidingNodes:
s1 = smallest.boundingRect().size()
s2 = commentNode.boundingRect().size()
if s1.width() > s2.width() and s1.height() > s2.height():
smallest = commentNode
if self in commentNode.owningNodes:
commentNode.owningNodes.remove(self)
owningCommentNode = smallest
self.owningCommentNode = owningCommentNode
if self.owningCommentNode is not None:
if owningCommentNode._rawNode.graph() == self.canvasRef().graphManager.activeGraph():
self.owningCommentNode.owningNodes.add(self)
def getCollidedNodes(self, bFullyCollided=True, classNameFilters=set()):
collidingItems = self.collidingItems()
collidingNodes = set()
for item in collidingItems:
node = item.topLevelItem()
if bFullyCollided:
if self.sceneBoundingRect().contains(node.sceneBoundingRect()):
if node is not self and isinstance(node, UINodeBase):
if classNameFilters:
if node.__class__.__name__ not in classNameFilters:
continue
if node._rawNode.graph() != self.canvasRef().graphManager.activeGraph():
continue
collidingNodes.add(node)
else:
if node is not self and isinstance(node, UINodeBase):
if classNameFilters:
if node.__class__.__name__ not in classNameFilters:
continue
if node._rawNode.graph() != self.canvasRef().graphManager.activeGraph():
continue
collidingNodes.add(node)
return collidingNodes
    def setDirty(self, *args, **kwargs):
        self.computing = False
        self.dirty = True
        self.update()
    def setComputing(self, *args, **kwargs):
        self.computing = True
        self.update()
    def setClean(self, *args, **kwargs):
        self.computing = False
        self.dirty = False
        self.update()
def paint(self, painter, option, widget):
NodePainter.default(self, painter, option, widget)
if self.drawLayoutsDebug:
painter.setPen(QtGui.QPen(QtCore.Qt.green, 0.75))
painter.drawRect(self.headerLayout.geometry())
painter.setPen(QtGui.QPen(QtCore.Qt.black, 0.75))
painter.drawRect(self.nodeNameWidget.geometry())
painter.drawRect(self.exposedActionButtonsLayout.geometry())
painter.setPen(QtGui.QPen(QtCore.Qt.red, 0.75))
painter.drawRect(self.pinsLayout.geometry())
painter.setPen(QtGui.QPen(QtCore.Qt.green, 0.75))
painter.drawRect(self.inputsLayout.geometry())
painter.drawRect(self.outputsLayout.geometry())
painter.setPen(QtGui.QPen(QtCore.Qt.blue, 0.75))
painter.drawRect(self.customLayout.geometry())
def shouldResize(self, cursorPos):
result = {"resize": False, "direction": self.resizeDirection}
if self.resizeStrips[0] == 1: # left
result["resize"] = True
result["direction"] = (-1, 0)
if self.resizeStrips[1] == 1: # top
result["resize"] = True
result["direction"] = (0, -1)
if self.resizeStrips[2] == 1: # right
result["resize"] = True
result["direction"] = (1, 0)
if self.resizeStrips[3] == 1: # bottom
result["resize"] = True
result["direction"] = (0, 1)
if self.resizeStrips[4] == 1: # bottom right
result["resize"] = True
result["direction"] = (1, 1)
if self.resizeStrips[5] == 1: # bottom left
result["resize"] = True
result["direction"] = (-1, 1)
if self.resizeStrips[6] == 1: # top left
result["resize"] = True
result["direction"] = (-1, -1)
if self.resizeStrips[7] == 1: # top right
result["resize"] = True
result["direction"] = (1, -1)
return result
def mousePressEvent(self, event):
self.update()
self.mousePressPos = event.pos()
self.pressedCommentNode = self.owningCommentNode
super(UINodeBase, self).mousePressEvent(event)
self.mousePressPos = event.scenePos()
self.origPos = self.pos()
self.initPos = self.pos()
self.initialRect = self.boundingRect()
if self.expanded and self.resizable:
resizeOpts = self.shouldResize(self.mapToScene(event.pos()))
if resizeOpts["resize"]:
self.resizeDirection = resizeOpts["direction"]
self.initialRectWidth = self.initialRect.width()
self.initialRectHeight = self.initialRect.height()
self.setFlag(QGraphicsItem.ItemIsMovable, False)
self.bResize = True
def mouseMoveEvent(self, event):
super(UINodeBase, self).mouseMoveEvent(event)
# resize
if self.bResize:
delta = event.scenePos() - self.mousePressPos
if self.resizeDirection == (-1, 0): # left
posdelta = self.mapToScene(event.pos()) - self.origPos
posdelta2 = self.mapToScene(event.pos()) - self.initPos
newWidth = -posdelta2.x() + self.initialRectWidth
if newWidth > self.minWidth:
self.translate(posdelta.x(), 0)
self.origPos = self.pos()
self._rect.setWidth(newWidth)
self.updateNodeShape()
elif self.resizeDirection == (0, -1): # top
posdelta = self.mapToScene(event.pos()) - self.origPos
posdelta2 = self.mapToScene(event.pos()) - self.initPos
minHeight = -posdelta2.y() + self.initialRectHeight
if minHeight > self.minHeight:
self.translate(0, posdelta.y())
self.origPos = self.pos()
self._rect.setHeight(minHeight)
self.updateNodeShape()
elif self.resizeDirection == (1, 0): # right
newWidth = delta.x() + self.initialRectWidth
if newWidth > self.minWidth:
self._rect.setWidth(newWidth)
self.w = newWidth
self.updateNodeShape()
elif self.resizeDirection == (0, 1): # bottom
newHeight = delta.y() + self.initialRectHeight
if newHeight > self.minHeight:
self._rect.setHeight(newHeight)
self.updateNodeShape()
elif self.resizeDirection == (1, 1): # bottom right
newWidth = delta.x() + self.initialRectWidth
newHeight = delta.y() + self.initialRectHeight
if newWidth > self.minWidth:
self._rect.setWidth(newWidth)
self.w = newWidth
self.updateNodeShape()
if newHeight > self.minHeight:
self._rect.setHeight(newHeight)
self.updateNodeShape()
elif self.resizeDirection == (-1, 1): # bottom left
newHeight = delta.y() + self.initialRectHeight
if newHeight > self.minHeight:
self._rect.setHeight(newHeight)
posdelta = self.mapToScene(event.pos()) - self.origPos
posdelta2 = self.mapToScene(event.pos()) - self.initPos
newWidth = -posdelta2.x() + self.initialRectWidth
if newWidth > self.minWidth:
self.translate(posdelta.x(), 0)
self.origPos = self.pos()
self._rect.setWidth(newWidth)
self.updateNodeShape()
elif self.resizeDirection == (-1, -1): # top left
posdelta = self.mapToScene(event.pos()) - self.origPos
posdelta2 = self.mapToScene(event.pos()) - self.initPos
minHeight = -posdelta2.y() + self.initialRectHeight
if minHeight > self.minHeight:
self.translate(0, posdelta.y())
self.origPos = self.pos()
self._rect.setHeight(minHeight)
newWidth = -posdelta2.x() + self.initialRectWidth
if newWidth > self.minWidth:
self.translate(posdelta.x(), 0)
self.origPos = self.pos()
self._rect.setWidth(newWidth)
self.updateNodeShape()
elif self.resizeDirection == (1, -1): # top right
posdelta = self.mapToScene(event.pos()) - self.origPos
posdelta2 = self.mapToScene(event.pos()) - self.initPos
minHeight = -posdelta2.y() + self.initialRectHeight
if minHeight > self.minHeight:
self.translate(0, posdelta.y())
self.origPos = self.pos()
self._rect.setHeight(minHeight)
newWidth = delta.x() + self.initialRectWidth
if newWidth > self.minWidth:
self._rect.setWidth(newWidth)
self.w = newWidth
self.updateNodeShape()
self.update()
def mouseReleaseEvent(self, event):
self.bResize = False
self.resetResizeStrips()
self.update()
self.updateOwningCommentNode()
if self.owningCommentNode != self.pressedCommentNode:
if self.pressedCommentNode is not None:
if self in self.pressedCommentNode.owningNodes:
self.pressedCommentNode.owningNodes.remove(self)
super(UINodeBase, self).mouseReleaseEvent(event)
def hoverLeaveEvent(self, event):
self.resetResizeStrips()
self.update()
def hoverMoveEvent(self, event):
if self.resizable and not self.collapsed:
height = self.geometry().height()
width = self.geometry().width()
rf = NodeDefaults().CORNERS_ROUND_FACTOR
leftStrip = QtCore.QRectF(0, rf, self.resizeStripsSize, height - rf * 2)
topStrip = QtCore.QRectF(rf, 0, width - rf * 2, self.resizeStripsSize)
rightStrip = QtCore.QRectF(width - self.resizeStripsSize, rf, self.resizeStripsSize, height - rf * 2)
bottomStrip = QtCore.QRectF(rf, height - self.resizeStripsSize, width - rf * 2, self.resizeStripsSize)
bottomRightStrip = QtCore.QRectF(width - rf, height - rf, rf, rf)
bottomLeftStrip = QtCore.QRectF(0, height - rf, rf, rf)
topLeftStrip = QtCore.QRectF(0, 0, rf, rf)
topRightStrip = QtCore.QRectF(width - rf, 0, rf, rf)
# detect where on the node
self.resizeStrips[0] = 1 if leftStrip.contains(event.pos()) else 0
self.resizeStrips[1] = 1 if topStrip.contains(event.pos()) else 0
self.resizeStrips[2] = 1 if rightStrip.contains(event.pos()) else 0
self.resizeStrips[3] = 1 if bottomStrip.contains(event.pos()) else 0
self.resizeStrips[4] = 1 if bottomRightStrip.contains(event.pos()) else 0
self.resizeStrips[5] = 1 if bottomLeftStrip.contains(event.pos()) else 0
self.resizeStrips[6] = 1 if topLeftStrip.contains(event.pos()) else 0
self.resizeStrips[7] = 1 if topRightStrip.contains(event.pos()) else 0
self.update()
def contextMenuEvent(self, event):
self._menu.exec_(event.screenPos())
def clone(self):
templ = self.serialize()
templ['name'] = self.name
templ['uuid'] = str(uuid.uuid4())
for inp in templ['inputs']:
inp['uuid'] = str(uuid.uuid4())
for out in templ['outputs']:
out['uuid'] = str(uuid.uuid4())
new_node = self.canvasRef().createNode(templ)
return new_node
def createPropertiesWidget(self, propertiesWidget):
baseCategory = CollapsibleFormWidget(headName="Base")
le_name = QLineEdit(self.getName())
le_name.setReadOnly(True)
baseCategory.addWidget("Name", le_name)
leUid = QLineEdit(str(self._rawNode.graph().name))
leUid.setReadOnly(True)
baseCategory.addWidget("Owning graph", leUid)
text = "{0}".format(self.packageName)
if self._rawNode.lib:
text += " | {0}".format(self._rawNode.lib)
text += " | {0}".format(self._rawNode.__class__.__name__)
leType = QLineEdit(text)
leType.setReadOnly(True)
baseCategory.addWidget("Type", leType)
propertiesWidget.addWidget(baseCategory)
inputsCategory = CollapsibleFormWidget(headName="Inputs")
self.createInputWidgets(inputsCategory)
if inputsCategory.Layout.count() > 0:
propertiesWidget.addWidget(inputsCategory)
Info = CollapsibleFormWidget(headName="Info", collapsed=True, hideLabels=True)
doc = QTextBrowser()
doc.setOpenExternalLinks(True)
doc.setHtml(rst2html(self.description()))
Info.addWidget(widget=doc)
propertiesWidget.addWidget(Info)
def createInputWidgets(self, inputsCategory, inGroup=None, pins=True):
# inputs
if len([i for i in self.UIinputs.values()]) != 0:
sortedInputs = self.UIinputs.values()
for inp in sortedInputs:
if inp.isArray() or inp.isDict() or inp._rawPin.hidden:
continue
dataSetter = inp.call if inp.isExec() else inp.setData
w = createInputWidget(inp.dataType, dataSetter, inp.defaultValue(), inp.getInputWidgetVariant(), pinAnnotations=inp._rawPin.annotationDescriptionDict)
if w:
w.setToolTip(inp.description)
inp._rawPin.dataBeenSet.connect(w.setWidgetValueNoSignals)
w.blockWidgetSignals(True)
data = inp.currentData()
if isinstance(inp.currentData(), DictElement):
data = inp.currentData()[1]
w.setWidgetValue(data)
w.blockWidgetSignals(False)
w.setObjectName(inp.getFullName())
group = inGroup
if inGroup is None:
group = inp._rawPin.group
inputsCategory.addWidget(inp.name, w, group=group)
if inp.hasConnections():
w.setEnabled(False)
return inputsCategory
def createOutputWidgets(self, inputsCategory, headName="Outputs"):
sortedInputs = sorted(self.UIPins.values(), key=lambda x: x.name)
for inp in sortedInputs:
if inp.isArray() or inp.isDict() or inp._rawPin.hidden:
continue
dataSetter = inp.call if inp.isExec() else inp.setData
w = createInputWidget(inp.dataType, dataSetter, inp.defaultValue(), inp.getInputWidgetVariant(), pinAnnotations=inp._rawPin.annotationDescriptionDict)
if w:
w.setToolTip(inp.description)
inp._rawPin.dataBeenSet.connect(w.setWidgetValueNoSignals)
w.blockWidgetSignals(True)
data = inp.currentData()
if isinstance(inp.currentData(), DictElement):
data = inp.currentData()[1]
w.setWidgetValue(data)
w.blockWidgetSignals(False)
w.setObjectName(inp.getFullName())
inputsCategory.addWidget(inp.name, w)
return inputsCategory
def getChainedNodes(self):
nodes = []
for pin in self.UIinputs.values():
for connection in pin.connections:
node = connection.source().topLevelItem() # topLevelItem
nodes.append(node)
nodes += node.getChainedNodes()
return nodes
def getBetwenLoopNodes(self, orig, bVisibleOnly=True):
nodes = []
for pin in self.UIoutputs.values():
for connection in pin.connections:
node = connection.destination().topLevelItem() # topLevelItem
if node._rawNode.__class__.__name__ != "loopEnd":
if bVisibleOnly:
if node.isVisible():
nodes.append(node)
else:
nodes.append(node)
nodes += node.getBetwenLoopNodes(orig)
else:
if node._rawNode.loopBeginNode.getData() != orig.path():
nodes.append(node)
nodes += node.getBetwenLoopNodes(orig)
return nodes
def collidesWithCommentNode(self):
nodes = self.getCollidedNodes()
result = None
for n in nodes:
if n.isCommentNode:
result = n
break
return result
def resetResizeStrips(self):
for i in range(len(self.resizeStrips)):
self.resizeStrips[i] = 0
def kill(self, *args, **kwargs):
scene = self.scene()
if scene is not None:
self.scene().removeItem(self)
del(self)
def shoutDown(self):
pass
def heartBeat(self):
for pin in self.UIPins.values():
pin.heartBeat()
def Tick(self, delta, *args, **kwargs):
        # NOTE: Do not call the wrapped raw node's Tick method here!
        # This UI node's Tick is invoked from a signal emitted by the underlying raw node.
        # Only UI related work should happen here.
self.heartBeatTimeDelta += delta
if self.heartBeatTimeDelta >= self.heartBeatDelay:
self.heartBeat()
self.heartBeatTimeDelta = 0.0
def _createUIPinWrapper(self, rawPin, index=-1, group=None, linkedPin=None):
wrapper = rawPin.getWrapper()
if wrapper is not None:
return wrapper()
p = getUIPinInstance(self, rawPin)
p.call = rawPin.call
grpItem = None
if rawPin.group != "":
if rawPin.direction == PinDirection.Input:
if rawPin.group not in self.groups["input"]:
grpItem = PinGroup(self, rawPin.direction, rawPin.group)
self.inputsLayout.addItem(grpItem)
elif rawPin.group in self.groups["input"]:
grpItem = self.groups["input"][rawPin.group]
if rawPin.direction == PinDirection.Output:
if rawPin.group not in self.groups["output"]:
grpItem = PinGroup(self, rawPin.direction, rawPin.group)
self.outputsLayout.addItem(grpItem)
elif rawPin.group in self.groups["output"]:
grpItem = self.groups["output"][rawPin.group]
name = rawPin.name
lblName = name
if rawPin.direction == PinDirection.Input:
insertionIndex = -1
if grpItem is not None:
self.groups["input"][rawPin.group] = grpItem
insertionIndex = findItemIndex(self.inputsLayout, grpItem) + grpItem.numPins() + 1
self.inputsLayout.setAlignment(grpItem, QtCore.Qt.AlignLeft)
grpItem.addPin(p)
self.inputsLayout.insertItem(insertionIndex, p)
self.inputsLayout.setAlignment(p, QtCore.Qt.AlignLeft)
self.inputsLayout.invalidate()
elif rawPin.direction == PinDirection.Output:
insertionIndex = -1
if grpItem is not None:
self.groups["output"][rawPin.group] = grpItem
insertionIndex = findItemIndex(self.outputsLayout, grpItem) + grpItem.numPins() + 1
self.outputsLayout.setAlignment(grpItem, QtCore.Qt.AlignRight)
grpItem.addPin(p)
self.outputsLayout.insertItem(insertionIndex, p)
self.outputsLayout.setAlignment(p, QtCore.Qt.AlignRight)
self.outputsLayout.invalidate()
p.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.update()
self.updateNodeShape()
p.syncDynamic()
p.syncRenamable()
if self.collapsed:
p.hide()
return p
@staticmethod
def removePinByName(node, name):
pin = node.getPinSG(name)
if pin:
pin.kill()
@staticmethod
def recreate(node):
templ = node.serialize()
uid = node.uid
node.kill()
newNode = node.canvas.createNode(templ)
newNode.uid = uid
return newNode
def REGISTER_UI_NODE_FACTORY(packageName, factory):
if packageName not in UI_NODES_FACTORIES:
UI_NODES_FACTORIES[packageName] = factory
def getUINodeInstance(raw_instance):
    # All UI node wrappers are created through this factory function.
    packageName = raw_instance.packageName
if packageName in UI_NODES_FACTORIES:
return UI_NODES_FACTORIES[packageName](raw_instance)
else:
return UINodeBase(raw_instance)
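# Registration sketch: a package can plug in its own UI wrapper factory. The package
# name "MyPackage" and the names below are hypothetical, not part of this module:
#
#     class MyUINode(UINodeBase):
#         pass
#
#     def createUINode(raw_instance):
#         return MyUINode(raw_instance)
#
#     REGISTER_UI_NODE_FACTORY("MyPackage", createUINode)
#
# getUINodeInstance(rawNode) then returns MyUINode instances for nodes of that package
# and falls back to UINodeBase for everything else.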
| 39.160182
| 166
| 0.617057
|
b47712b4c603bfdd1d92e3a2e807eab94361be86
| 2,136
|
py
|
Python
|
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.front.mxnet.ssd_pattern_flatten_softmax_activation import SsdPatternFlattenSoftmaxActivation
from extensions.front.mxnet.ssd_pattern_remove_flatten import SsdPatternRemoveFlatten
from extensions.front.mxnet.ssd_pattern_remove_reshape import SsdPatternRemoveReshape
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.graph.graph import Graph
class SsdPatternRemoveTranspose(FrontReplacementSubgraph):
enabled = True
def run_before(self):
return [SsdPatternFlattenSoftmaxActivation, SsdPatternRemoveFlatten, SsdPatternRemoveReshape]
def pattern(self):
return dict(
nodes=[
('transpose', dict(op='Transpose')),
('softmax_activation', dict(op='SoftMax')),
('multi_box_detection', dict(op='_contrib_MultiBoxDetection'))
],
edges=[
('transpose', 'softmax_activation', {'in': 0}),
('softmax_activation', 'multi_box_detection', {'in': 1}),
]
)
def replace_sub_graph(self, graph: Graph, match: dict):
"""
Need to find each occurrence of pattern:
transpose -> SoftmaxActivation -> _contrib_MultiBoxDetection
remove transpose layer to secure the order of weights in SoftMax to be the same as IE expects
IE expects weights to be in following order: class-wise values for each priorbox.
priorboxes change the quickest
Parameters
----------
graph : Graph
Graph with loaded model.
match : dict
Patterns which were found in graph structure.
"""
transpose_node = match['transpose']
softmax_activation = match['softmax_activation']
transpose_in_node = transpose_node.in_node(0)
graph.remove_edge(transpose_in_node.id, transpose_node.id)
graph.remove_edge(transpose_node.id, softmax_activation.id)
graph.remove_node(transpose_node.id)
graph.create_edge(transpose_in_node, softmax_activation)
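# Effect sketch: before replace_sub_graph() the matched sub-graph looks like
#     ... -> Transpose -> SoftMax -> _contrib_MultiBoxDetection
# and afterwards the Transpose node is removed, with its input wired directly
# into the SoftMax node:
#     ... -> SoftMax -> _contrib_MultiBoxDetection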
| 40.301887
| 108
| 0.685393
|
3224544515d65192fba57bbfb73bb3200a0d500e
| 23,769
|
py
|
Python
|
test/functional/test_framework/test_framework.py
|
krewshul/Auroracoin
|
e0e3323e992b631606dd7bd8b75edb068dad76a9
|
[
"MIT"
] | 1
|
2021-04-05T20:07:32.000Z
|
2021-04-05T20:07:32.000Z
|
test/functional/test_framework/test_framework.py
|
krewshul/Auroracoin
|
e0e3323e992b631606dd7bd8b75edb068dad76a9
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_framework.py
|
krewshul/Auroracoin
|
e0e3323e992b631606dd7bd8b75edb068dad76a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "auroracoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class AuroracoinTestMetaClass(type):
"""Metaclass for AuroracoinTestFramework.
Ensures that any attempt to register a subclass of `AuroracoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'AuroracoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("AuroracoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("AuroracoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class AuroracoinTestFramework(metaclass=AuroracoinTestMetaClass):
"""Base class for a auroracoin test script.
Individual auroracoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
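    # Minimal subclass sketch (the test class name and the values below are
    # illustrative assumptions, not taken from an existing test):
    #
    #     class ExampleTest(AuroracoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 2
    #             self.setup_clean_chain = True
    #         def run_test(self):
    #             address = self.nodes[0].get_deterministic_priv_key().address
    #             self.nodes[0].generatetoaddress(1, address)
    #             self.sync_all()
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()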
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.mocktime = 0
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave digibyteds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop digibyteds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use digibyte-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.digibyted = os.getenv("DIGIBYTED", default=config["environment"]["BUILDDIR"] + '/src/digibyted' + config["environment"]["EXEEXT"])
self.options.digibytecli = os.getenv("DIGIBYTECLI", default=config["environment"]["BUILDDIR"] + '/src/digibyte-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: digibyteds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
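    # Override sketch (hypothetical subclass with four nodes): a test that wants the
    # network split into two halves from the start could override setup_network:
    #
    #     def setup_network(self):
    #         self.setup_nodes()
    #         connect_nodes_bi(self.nodes, 0, 1)
    #         connect_nodes_bi(self.nodes, 2, 3)
    #         self.sync_all([self.nodes[:2], self.nodes[2:]])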
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.digibyted] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
rpchost=rpchost,
timewait=self.rpc_timeout,
auroracoind=binary[i],
                auroracoin_cli=self.options.digibytecli,
mocktime=self.mocktime,
coverage_dir=self.options.coveragedir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
))
def start_node(self, i, *args, **kwargs):
"""Start a digibyted"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple digibyteds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop an auroracoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple auroracoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # The user can provide the log level as a number or as a string (e.g. DEBUG).
        # loglevel is parsed as a string, so try to convert it to an int first.
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as digibyted's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("DigiByteRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
        Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run auroracoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
                args = [self.options.digibyted, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.cachedir, i),
extra_conf=["bind=127.0.0.1"],
extra_args=[],
rpchost=None,
timewait=self.rpc_timeout,
                    auroracoind=self.options.digibyted,
                    auroracoin_cli=self.options.digibytecli,
mocktime=self.mocktime,
coverage_dir=None,
))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# For backward compatibility of the python scripts with previous
# versions of the cache, set mocktime to Jan 1,
            # 2014 + (201 * 10 * 60)
self.mocktime = 1388534400 + (201 * 10 * 60)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generatetoaddress(1, self.nodes[peer].get_deterministic_priv_key().address)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.mocktime = 0
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in digibyte.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_digibyted_zmq(self):
"""Skip the running test if digibyted has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("digibyted has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if digibyte-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("digibyte-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether digibyte-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
| 41.409408
| 312
| 0.62144
|
18343ff0759e4173734193d8fad780c280807cc1
| 1,894
|
py
|
Python
|
components/handlers/star_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-30T17:59:08.000Z
|
2017-04-30T17:59:08.000Z
|
components/handlers/star_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 87
|
2017-02-13T09:06:13.000Z
|
2017-04-14T09:23:08.000Z
|
components/handlers/star_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-11T05:26:00.000Z
|
2017-04-11T05:26:00.000Z
|
'''
This module handles starring of modules.
'''
import web
from app import RENDER
from components import model, session
class StarModule(object):
'''
Class handles starring and unstarring of modules.
'''
def GET(self):
'''
This function is called when /starModule is accessed.
'''
web.header('X-Frame-Options', 'SAMEORIGIN')
web.header('X-Content-Type-Options', 'nosniff')
web.header('X-XSS-Protection', '1')
if not session.validate_session():
raise web.seeother('/login')
else:
module_code = web.input().code
action = web.input().action
return_path = web.input().return_path
# modify return path if individual module info to include aySem
if return_path == '/individualModuleInfo':
target_ay = web.input().aysem
return_path = return_path + '?code=' + module_code + '&aysem=' + target_ay
if action == "star":
model.star_module(module_code, web.cookies().get('user'))
else:
model.unstar_module(module_code, web.cookies().get('user'))
raise web.seeother(return_path)
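# Request sketch: /starModule is driven entirely by query parameters. The module code,
# return paths and aysem value below are illustrative, not taken from the application:
#     /starModule?code=CS1010&action=star&return_path=/moduleOverview
#     /starModule?code=CS1010&action=unstar&return_path=/individualModuleInfo&aysem=AY+17%2F18+Sem+1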
class StarredModulesList(object):
'''
    Class handles the listing of starred modules.
'''
URL_THIS_PAGE = '/starredModules'
def GET(self):
'''
This function is called when /starredModules is accessed.
'''
web.header('X-Frame-Options', 'SAMEORIGIN')
web.header('X-Content-Type-Options', 'nosniff')
web.header('X-XSS-Protection', '1')
if not session.validate_session():
raise web.seeother('/login')
else:
starred_module_infos = model.get_starred_modules(web.cookies().get('user'))
return RENDER.starredModulesListing(starred_module_infos)
| 33.22807
| 90
| 0.594509
|
14264dde075d2904ecf683831238b50bac31c5b3
| 11,668
|
py
|
Python
|
gym_cooking/environment/cooking_zoo.py
|
Adelynrayne/gym-cooking
|
0219259b39419712d6616b6733eb98f789c8a5f6
|
[
"MIT"
] | null | null | null |
gym_cooking/environment/cooking_zoo.py
|
Adelynrayne/gym-cooking
|
0219259b39419712d6616b6733eb98f789c8a5f6
|
[
"MIT"
] | null | null | null |
gym_cooking/environment/cooking_zoo.py
|
Adelynrayne/gym-cooking
|
0219259b39419712d6616b6733eb98f789c8a5f6
|
[
"MIT"
] | null | null | null |
# Other core modules
import copy
from gym_cooking.cooking_world.cooking_world import CookingWorld
from gym_cooking.cooking_world.world_objects import *
from gym_cooking.cooking_book.recipe_drawer import RECIPES, NUM_GOALS
import numpy as np
from collections import namedtuple, defaultdict
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector
from pettingzoo.utils import wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
import gym
CollisionRepr = namedtuple("CollisionRepr", "time agent_names agent_locations")
COLORS = ['blue', 'magenta', 'yellow', 'green']
def env(level, num_agents, record, max_steps, recipes, obs_spaces):
"""
The env function wraps the environment in 3 wrappers by default. These
wrappers contain logic that is common to many pettingzoo environments.
We recommend you use at least the OrderEnforcingWrapper on your own environment
to provide sane error messages. You can find full documentation for these methods
elsewhere in the developer documentation.
"""
env_init = CookingEnvironment(level, num_agents, record, max_steps, recipes, obs_spaces)
env_init = wrappers.CaptureStdoutWrapper(env_init)
env_init = wrappers.AssertOutOfBoundsWrapper(env_init)
env_init = wrappers.OrderEnforcingWrapper(env_init)
return env_init
parallel_env = parallel_wrapper_fn(env)
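# Usage sketch: the level name, recipe names and step budget below are illustrative
# assumptions, not defaults shipped with this module:
#
#     zoo_env = parallel_env(level="open_room", num_agents=2, record=False,
#                            max_steps=100, recipes=["TomatoSalad", "TomatoSalad"],
#                            obs_spaces=["numeric"])
#     observations = zoo_env.reset()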
class CookingEnvironment(AECEnv):
"""Environment object for Overcooked."""
metadata = {'render.modes': ['human'], 'name': "cooking_zoo"}
def __init__(self, level, num_agents, record, max_steps, recipes, obs_spaces=["numeric"], allowed_objects=None):
super().__init__()
self.allowed_obs_spaces = ["symbolic", "numeric"]
assert len(set(obs_spaces + self.allowed_obs_spaces)) == 2, \
f"Selected invalid obs spaces. Allowed {self.allowed_obs_spaces}"
assert len(obs_spaces) != 0, f"Please select an observation space from: {self.allowed_obs_spaces}"
self.obs_spaces = obs_spaces
self.allowed_objects = allowed_objects or []
self.possible_agents = ["player_" + str(r) for r in range(num_agents)]
self.agents = self.possible_agents[:]
self.level = level
self.record = record
self.max_steps = max_steps
self.t = 0
self.filename = ""
self.set_filename()
self.world = CookingWorld()
self.recipes = recipes
self.game = None
self.recipe_graphs = [RECIPES[recipe]() for recipe in recipes]
self.termination_info = ""
self.world.load_level(level=self.level, num_agents=num_agents)
self.graph_representation_length = sum([tup[1] for tup in GAME_CLASSES_STATE_LENGTH])
numeric_obs_space = {'symbolic_observation': gym.spaces.Box(low=0, high=10,
shape=(self.world.width, self.world.height,
self.graph_representation_length), dtype=np.int32),
'agent_location': gym.spaces.Box(low=0, high=max(self.world.width, self.world.height),
shape=(2,)),
'goal_vector': gym.spaces.MultiBinary(NUM_GOALS)}
self.observation_spaces = {agent: gym.spaces.Dict(numeric_obs_space) for agent in self.possible_agents}
self.action_spaces = {agent: gym.spaces.Discrete(6) for agent in self.possible_agents}
self.has_reset = True
self.recipe_mapping = dict(zip(self.possible_agents, self.recipe_graphs))
self.agent_name_mapping = dict(zip(self.possible_agents, list(range(len(self.possible_agents)))))
self.world_agent_mapping = dict(zip(self.possible_agents, self.world.agents))
self.world_agent_to_env_agent_mapping = dict(zip(self.world.agents, self.possible_agents))
self.agent_selection = None
self._agent_selector = agent_selector(self.agents)
self.done = False
self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self._cumulative_rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self.dones = dict(zip(self.agents, [False for _ in self.agents]))
self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
self.accumulated_actions = []
self.current_tensor_observation = np.zeros((self.world.width, self.world.height,
self.graph_representation_length))
def set_filename(self):
self.filename = f"{self.level}_agents{self.num_agents}"
def state(self):
pass
def reset(self):
self.world = CookingWorld()
self.t = 0
# For tracking data during an episode.
self.termination_info = ""
# Load world & distances.
self.world.load_level(level=self.level, num_agents=self.num_agents)
for recipe in self.recipe_graphs:
recipe.update_recipe_state(self.world)
# if self.record:
# self.game = GameImage(
# filename=self.filename,
# world=self.world,
# record=self.record)
# self.game.on_init()
# self.game.save_image_obs(self.t)
# else:
# self.game = None
self.agents = self.possible_agents[:]
self._agent_selector.reinit(self.agents)
self.agent_selection = self._agent_selector.next()
# Get an image observation
# image_obs = self.game.get_image_obs()
self.recipe_mapping = dict(zip(self.possible_agents, self.recipe_graphs))
self.agent_name_mapping = dict(zip(self.possible_agents, list(range(len(self.possible_agents)))))
self.world_agent_mapping = dict(zip(self.possible_agents, self.world.agents))
self.world_agent_to_env_agent_mapping = dict(zip(self.world.agents, self.possible_agents))
self.current_tensor_observation = self.get_tensor_representation()
self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self._cumulative_rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self.dones = dict(zip(self.agents, [False for _ in self.agents]))
self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
self.accumulated_actions = []
def close(self):
return
    def step(self, action):
        agent = self.agent_selection
        self.accumulated_actions.append(action)
        # Clear per-step rewards; they are recomputed in accumulated_step()
        # once every agent has submitted its action.
        for player in self.agents:
            self.rewards[player] = 0
        if self._agent_selector.is_last():
            self.accumulated_step(self.accumulated_actions)
            self.accumulated_actions = []
        self.agent_selection = self._agent_selector.next()
        self._cumulative_rewards[agent] = 0
def accumulated_step(self, actions):
# Track internal environment info.
self.t += 1
# translated_actions = [action_translation_dict[actions[f"player_{idx}"]] for idx in range(len(actions))]
self.world.perform_agent_actions(self.world.agents, actions)
# Visualize.
if self.record:
self.game.on_render()
if self.record:
self.game.save_image_obs(self.t)
# Get an image observation
# image_obs = self.game.get_image_obs()
self.current_tensor_observation = self.get_tensor_representation()
info = {"t": self.t, "termination_info": self.termination_info}
done, rewards, goals = self.compute_rewards()
for idx, agent in enumerate(self.agents):
self.dones[agent] = done
self.rewards[agent] = rewards[idx]
self.infos[agent] = info
def observe(self, agent):
observation = []
if "numeric" in self.obs_spaces:
num_observation = {'symbolic_observation': self.current_tensor_observation,
'agent_location': np.asarray(self.world_agent_mapping[agent].location, np.int32),
'goal_vector': self.recipe_mapping[agent].goals_completed(NUM_GOALS)}
observation.append(num_observation)
if "symbolic" in self.obs_spaces:
objects = defaultdict(list)
objects.update(self.world.world_objects)
objects["Agent"] = self.world.agents
sym_observation = copy.deepcopy(objects)
observation.append(sym_observation)
        returned_observation = observation[0] if len(observation) == 1 else observation
        return returned_observation
def compute_rewards(self):
done = False
rewards = [0] * len(self.recipes)
open_goals = [[0]] * len(self.recipes)
# Done if the episode maxes out
        if self.max_steps and self.t >= self.max_steps:
self.termination_info = f"Terminating because passed {self.max_steps} timesteps"
done = True
for idx, recipe in enumerate(self.recipe_graphs):
goals_before = recipe.goals_completed(NUM_GOALS)
recipe.update_recipe_state(self.world)
open_goals[idx] = recipe.goals_completed(NUM_GOALS)
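            # Reward shaping: +10 per goal newly completed this step, plus a
            # flat +1 once the whole recipe is finished (the 0.1 bonus * 10).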
bonus = recipe.completed() * 0.1
rewards[idx] = (sum(goals_before) - sum(open_goals[idx]) + bonus) * 10
if rewards[idx] < 0:
print(f"Goals before: {goals_before}")
print(f"Goals after: {open_goals}")
if all((recipe.completed() for recipe in self.recipe_graphs)):
self.termination_info = "Terminating because all deliveries were completed"
done = True
return done, rewards, open_goals
def get_tensor_representation(self):
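        # Channel layout of the (width, height, C) tensor: one occupancy
        # channel per static game class, extra channels for stateful-object
        # state (e.g. chop state / blend progress), then a shared 5-channel
        # agent block (presence + 4 orientation one-hots).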
tensor = np.zeros((self.world.width, self.world.height, self.graph_representation_length))
objects = defaultdict(list)
objects.update(self.world.world_objects)
idx = 0
for game_class in GAME_CLASSES:
if game_class is Agent:
continue
for obj in objects[ClassToString[game_class]]:
x, y = obj.location
tensor[x, y, idx] += 1
idx += 1
for stateful_class in STATEFUL_GAME_CLASSES:
if issubclass(game_class, stateful_class):
n = 1
for obj in objects[ClassToString[game_class]]:
representation = self.handle_stateful_class_representation(obj, stateful_class)
n = len(representation)
x, y = obj.location
for i in range(n):
tensor[x, y, idx + i] += representation[i]
idx += n
for agent in self.world.agents:
x, y = agent.location
tensor[x, y, idx] = 1
tensor[x, y, idx + 1] = 1 if agent.orientation == 1 else 0
tensor[x, y, idx + 2] = 1 if agent.orientation == 2 else 0
tensor[x, y, idx + 3] = 1 if agent.orientation == 3 else 0
tensor[x, y, idx + 4] = 1 if agent.orientation == 4 else 0
return tensor
def get_agent_names(self):
return [agent.name for agent in self.world.agents]
def render(self, mode='human'):
pass
@staticmethod
def handle_stateful_class_representation(obj, stateful_class):
if stateful_class is ChopFood:
return [int(obj.chop_state == ChopFoodStates.CHOPPED)]
if stateful_class is BlenderFood:
return [obj.current_progress]
raise ValueError(f"Could not process stateful class {stateful_class}")
| 43.700375
| 118
| 0.634128
|
5f10a276c43fd747e05834367becdb437c106f81
| 475
|
py
|
Python
|
arachne/hdl/xilinx/ps8/resources/uart.py
|
shrine-maiden-heavy-industries/arachne
|
1d0320bf6e77653656f8ce1874900743452dbac4
|
[
"BSD-3-Clause"
] | 3
|
2021-09-13T20:23:42.000Z
|
2022-01-19T13:12:32.000Z
|
arachne/hdl/xilinx/ps8/resources/uart.py
|
shrine-maiden-heavy-industries/arachne
|
1d0320bf6e77653656f8ce1874900743452dbac4
|
[
"BSD-3-Clause"
] | null | null | null |
arachne/hdl/xilinx/ps8/resources/uart.py
|
shrine-maiden-heavy-industries/arachne
|
1d0320bf6e77653656f8ce1874900743452dbac4
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import *
from amaranth.build import *
from .common import PS8Resource, MIOSet
__all__ = (
'UARTResource',
)
class UARTResource(PS8Resource):
name = 'uart'
claimable_mio = [ ]
def __init__(self, num, mio_set):
super().__init__(num, 1, mio_set, True)
def used_mio(self, **kwargs):
raise NotImplementedError # :nocov:
def generate_mapping(self, **kwargs):
raise NotImplementedError # :nocov:
| 19.791667
| 46
| 0.703158
|
7bbe1493d10cd833b901d5e3efb852b3eaf807ea
| 7,437
|
py
|
Python
|
h/presenters/document_html.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
h/presenters/document_html.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
h/presenters/document_html.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
from urllib.parse import unquote, urlparse
import jinja2
class DocumentHTMLPresenter:
"""Wraps Document model objects and adds some HTML properties."""
def __init__(self, document):
self.document = document
@property
def filename(self):
"""
Return the filename of this document, or ''.
If the document's URI is a file:// URI then return the filename part
of it, otherwise return ''.
The filename is escaped and safe to be rendered.
If it contains escaped characters then the filename will be a
Markup object so it won't be double-escaped.
"""
if self.uri.lower().startswith("file:///"):
return jinja2.escape(self.uri.split("/")[-1])
return ""
@property
def href(self):
"""
Return an href for this document, or ''.
Returns a value suitable for use as the value of the href attribute in
an <a> element in an HTML document.
Returns an empty string if the document doesn't have an http(s):// URI.
The href is escaped and safe to be rendered.
If it contains escaped characters the returned value will be a
Markup object so that it doesn't get double-escaped.
"""
if self.document.web_uri:
return jinja2.escape(self.document.web_uri)
return ""
@property
def hostname_or_filename(self):
"""
Return the hostname or filename of this document.
Returns the hostname part of the document's URI, e.g.
'www.example.com' for 'http://www.example.com/example.html'.
If the URI is a file:// URI then return the filename part of it
instead.
The returned hostname or filename is escaped and safe to be rendered.
If it contains escaped characters the returned value will be a Markup
object so that it doesn't get double-escaped.
"""
if self.filename:
return jinja2.escape(unquote(self.filename))
hostname = urlparse(self.uri).hostname
# urlparse()'s .hostname is sometimes None.
hostname = hostname or ""
return jinja2.escape(hostname)
@property
def link(self):
"""
Return a link to this document.
Returns HTML strings like:
<a href="{href}" title="{title}">{link_text}</a> {hostname}
<em>Local file:</em> {title}<br>{hostname}
where:
- {href} is the uri of the document, if it has an http(s):// uri
- {title} is the title of the document.
If the document has no title then its uri will be used instead.
If it's a local file:// uri then only the filename part is used,
not the full path.
- {link_text} is the same as {title}, but truncated with … if
it's too long
- {hostname} is the hostname name of the document's uri without
the scheme (http(s)://) and www parts, e.g. 'example.com'.
If it's a local file:// uri then the filename is used as the
hostname.
If the hostname is too long it is truncated with ….
The {hostname} part will be missing if it wouldn't be any different
from the {link_text} part.
The href="{href}" will be missing if there's no http(s) uri to link to
for this annotation's document.
User-supplied values are escaped so the string is safe for raw
rendering (the returned string is actually a Markup object and
won't be escaped by Jinja2 when rendering).
"""
return _format_document_link(
self.href, self.title, self.link_text, self.hostname_or_filename
)
@property
def link_text(self):
"""
Return some link text for this document.
Return a text representation of this document suitable for use as the
link text in a link like <a ...>{link_text}</a>.
Returns the document's title if it has one, or failing that uses part
of the document's URI if it has one.
The link text is escaped and safe for rendering.
If it contains escaped characters the returned value will be a
Markup object so it doesn't get double-escaped.
"""
title = jinja2.escape(self.title)
# Sometimes self.title is the annotated document's URI (if the document
# has no title). In those cases we want to remove the http(s):// from
# the front and unquote it for link text.
lower = title.lower()
if lower.startswith("http://") or lower.startswith("https://"):
parts = urlparse(title)
return unquote(parts.netloc + parts.path)
return title
@property
def title(self):
"""
Return a title for this document.
Return the document's title or if the document has no title then return
its filename (if it's a file:// URI) or its URI for non-file URIs.
The title is escaped and safe to be rendered.
If it contains escaped characters then the title will be a
Markup object, so that it won't be double-escaped.
"""
title = self.document.title
if title:
# Convert non-string titles into strings.
# We're assuming that title cannot be a byte string.
title = str(title)
return jinja2.escape(title)
if self.filename:
return jinja2.escape(unquote(self.filename))
return jinja2.escape(unquote(self.uri))
@property
def uri(self):
if self.document.document_uris:
return jinja2.escape(self.document.document_uris[0].uri)
return ""
@property
def web_uri(self):
via_prefix = "https://via.hypothes.is/"
web_uri = self.document.web_uri
if web_uri and web_uri != via_prefix and web_uri.startswith(via_prefix):
web_uri = web_uri[len(via_prefix) :]
return web_uri
def _format_document_link(href, title, link_text, host_or_filename):
"""Return a document link for the given components.
    Helper function for the ``link`` property on DocumentHTMLPresenter above.
:returns: A document link as an HTML string, escaped and safe for
rendering. The returned string is a Markup object so that it won't be
double-escaped.
"""
if href and host_or_filename and host_or_filename in link_text:
host_or_filename = ""
elif not href and title == host_or_filename:
title = ""
def truncate(content, length=55):
"""Truncate the given string to at most length chars."""
if len(content) <= length:
return content
return content[:length] + jinja2.Markup("…")
host_or_filename = truncate(host_or_filename)
link_text = truncate(link_text)
if href and host_or_filename:
link = '<a href="{href}" title="{title}">{link_text}</a><br>{host_or_filename}'
elif href:
link = '<a href="{href}" title="{title}">{link_text}</a>'
else:
link = "<em>Local file:</em> {title}"
if host_or_filename:
link += "<br>{host_or_filename}"
link = link.format(
href=jinja2.escape(href),
title=jinja2.escape(title),
link_text=jinja2.escape(link_text),
host_or_filename=jinja2.escape(host_or_filename),
)
return jinja2.Markup(link)
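# Illustrative call (the values are made up, not taken from the test suite):
#
#   _format_document_link("http://example.com/a", "Example", "Example",
#                         "example.com")
#   -> Markup('<a href="http://example.com/a" title="Example">Example</a>'
#             '<br>example.com')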
| 32.056034
| 87
| 0.622159
|
58e40d3e26a6257895e3b3e365885a7d9a9d7b24
| 1,183
|
py
|
Python
|
utils/parsers/calendar_parser.py
|
Imperat/ADFS_managers
|
10bedae2be3c443d141941c710f05a8a60c3cdfd
|
[
"Apache-2.0"
] | 11
|
2017-07-05T20:03:00.000Z
|
2018-09-19T17:18:26.000Z
|
utils/parsers/calendar_parser.py
|
Imperat/ADFS_managers
|
10bedae2be3c443d141941c710f05a8a60c3cdfd
|
[
"Apache-2.0"
] | 50
|
2016-07-15T16:21:03.000Z
|
2018-04-17T11:18:06.000Z
|
utils/parsers/calendar_parser.py
|
Imperat/ADFS_managers
|
10bedae2be3c443d141941c710f05a8a60c3cdfd
|
[
"Apache-2.0"
] | 6
|
2017-06-23T14:53:38.000Z
|
2022-01-03T12:38:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def parse_calendar(filename):
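    # NOTE: the ``filename`` argument is currently ignored; the path below is
    # hard-coded to the 2014 primary calendar file.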
calendar = open('./utils/parsers/datas/primary_2014.txt', 'r')
tour,res = 0, []
for line in calendar:
if 'тур' in line:
tour = line.split(' ')[0]
teams = line.split(' - ')
if len(teams) > 1:
res.append([tour, teams[0], teams[1][0:-1]])
return res
def add_data():
a = parse_calendar('kefic')
from teamlogic.models import MatchInLeague, Team
for line in a:
home = Team.objects.filter(name=line[1].capitalize())
away = Team.objects.filter(name=line[2].capitalize())
if len(home) > 0:
home = home[0]
else:
            print('cannot find home team! <<%s>>' % line[1].capitalize())
continue
if len(away) > 0:
away = away[0]
else:
            print('cannot find away team! <<%s>>' % line[2].capitalize())
continue
print('add data: (%s, %s, %s)' %(home, away, line[0]))
print(home, away, 'RASIM', int(line[0]))
MatchInLeague.objects.create(
league_id=8, home=home, away=away, tour=int(line[0]))
| 28.166667
| 75
| 0.530008
|
cbf1adec42fb6c5c7162726c8b23e48063413a2d
| 647
|
py
|
Python
|
examples/pybullet/gym/pybullet_utils/examples/multipleScenes.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 9,136
|
2015-01-02T00:41:45.000Z
|
2022-03-31T15:30:02.000Z
|
examples/pybullet/gym/pybullet_utils/examples/multipleScenes.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,424
|
2015-01-05T08:55:58.000Z
|
2022-03-30T19:34:55.000Z
|
examples/pybullet/gym/pybullet_utils/examples/multipleScenes.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,921
|
2015-01-02T10:19:30.000Z
|
2022-03-31T02:48:42.000Z
|
from pybullet_utils import bullet_client as bc
import pybullet
import pybullet_data
p0 = bc.BulletClient(connection_mode=pybullet.DIRECT)
p0.setAdditionalSearchPath(pybullet_data.getDataPath())
p1 = bc.BulletClient(connection_mode=pybullet.DIRECT)
p1.setAdditionalSearchPath(pybullet_data.getDataPath())
#can also connect using different modes, GUI, SHARED_MEMORY, TCP, UDP, SHARED_MEMORY_SERVER, GUI_SERVER
#pgui = bc.BulletClient(connection_mode=pybullet.GUI)
p0.loadURDF("r2d2.urdf")
p1.loadSDF("stadium.sdf")
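# p0 and p1 are independent physics servers, so each of the prints below
# reports only the bodies loaded into that particular client.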
print(p0._client)
print(p1._client)
print("p0.getNumBodies()=", p0.getNumBodies())
print("p1.getNumBodies()=", p1.getNumBodies())
| 32.35
| 103
| 0.806801
|
6b13b8ed379ebc23a594ed9827ef41cc397cc923
| 137
|
py
|
Python
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsMultilineNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsMultilineNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsMultilineNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
def func():
value = "not-none"
# Is none
# If it's none
<caret>if value is None:
print("None")
print(value)
| 15.222222
| 28
| 0.518248
|
e9636e2ff528d536df6f49ea831e2345a0d8c8cc
| 133
|
py
|
Python
|
src/frog_of_the_week/__init__.py
|
rlipperts/frog-of-the-week
|
7794da564ad1ee5be57d386012ce1c1e4557a4f8
|
[
"MIT"
] | null | null | null |
src/frog_of_the_week/__init__.py
|
rlipperts/frog-of-the-week
|
7794da564ad1ee5be57d386012ce1c1e4557a4f8
|
[
"MIT"
] | null | null | null |
src/frog_of_the_week/__init__.py
|
rlipperts/frog-of-the-week
|
7794da564ad1ee5be57d386012ce1c1e4557a4f8
|
[
"MIT"
] | null | null | null |
"""
Import the module data that you want to have externally accessible here.
"""
from frog_of_the_week.bot import run # NOQA: F401
| 26.6
| 73
| 0.75188
|
18d5b8c37265880ab635a0b39023e2f836ac99a8
| 491
|
py
|
Python
|
ger/TTP/SV4/S3/3740.py
|
aLagoG/kygerand
|
0991cf5d5c3d49f4602b6992d4e3bdec8e27898e
|
[
"MIT"
] | 1
|
2017-09-16T04:05:31.000Z
|
2017-09-16T04:05:31.000Z
|
ger/TTP/SV4/S3/3740.py
|
aLagoG/kygerand
|
0991cf5d5c3d49f4602b6992d4e3bdec8e27898e
|
[
"MIT"
] | 9
|
2017-01-25T19:34:38.000Z
|
2020-07-27T17:02:09.000Z
|
ger/TTP/SV4/S3/3740.py
|
aLagoG/kygerand
|
0991cf5d5c3d49f4602b6992d4e3bdec8e27898e
|
[
"MIT"
] | null | null | null |
def nxt(i): return 0 if i == n-1 else i+1
def prev(i): return n-1 if i == 0 else i-1
for t in range(int(input())):
    flipped = 0
    line = [int(i) for i in input().split()]
    n = line[0]
    line = line[1:]
    one = 0
    for i in range(n):
        if line[i] == 1:
            if line[prev(i)] == 2:
                one = prev(i)
            elif line[nxt(i)] == n:
                one = nxt(i)
            else:
                one = i
    i = one
    true = 1
    while True:
        if line[i] != true: flipped += 1
        i = nxt(i)
        true += 1
        if i == one: break
    print(flipped)
| 16.931034
| 45
| 0.533605
|
d1ad9f70acfe6b2a128a5b393b4236a9461d0260
| 579
|
py
|
Python
|
padinfo/view_state/common.py
|
loopdeer/pad-cogs
|
a71e737dc3cc805885c5a99b7ee175711e614734
|
[
"MIT"
] | null | null | null |
padinfo/view_state/common.py
|
loopdeer/pad-cogs
|
a71e737dc3cc805885c5a99b7ee175711e614734
|
[
"MIT"
] | null | null | null |
padinfo/view_state/common.py
|
loopdeer/pad-cogs
|
a71e737dc3cc805885c5a99b7ee175711e614734
|
[
"MIT"
] | null | null | null |
from padinfo.common.config import UserConfig
from padinfo.core.find_monster import findMonsterCustom2
async def get_monster_from_ims(dgcog, user_config: UserConfig, ims: dict):
query = ims.get('query') or ims['raw_query']
resolved_monster_id_str = ims.get('resolved_monster_id')
resolved_monster_id = int(resolved_monster_id_str) if resolved_monster_id_str else None
if resolved_monster_id:
return dgcog.database.graph.get_monster(resolved_monster_id)
monster, _, _ = await findMonsterCustom2(dgcog, user_config.beta_id3, query)
return monster
| 41.357143
| 91
| 0.787565
|
95178d9626e2386c66ce527013c34d4ede9e8172
| 1,302
|
py
|
Python
|
tests/flatpages_tests/test_models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 16
|
2019-08-10T12:24:06.000Z
|
2020-05-21T09:11:14.000Z
|
tests/flatpages_tests/test_models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 12
|
2019-08-10T11:55:29.000Z
|
2020-05-21T04:46:30.000Z
|
tests/flatpages_tests/test_models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 3
|
2019-08-20T13:29:34.000Z
|
2020-01-30T22:05:10.000Z
|
from django.contrib.flatpages.models import FlatPage
from django.test import SimpleTestCase, override_settings
from django.test.utils import override_script_prefix
class FlatpageModelTests(SimpleTestCase):
def setUp(self):
self.page = FlatPage(title="Café!", url="/café/")
def test_get_absolute_url_urlencodes(self):
self.assertEqual(self.page.get_absolute_url(), "/caf%C3%A9/")
@override_script_prefix("/prefix/")
def test_get_absolute_url_honors_script_prefix(self):
self.assertEqual(self.page.get_absolute_url(), "/prefix/caf%C3%A9/")
def test_str(self):
self.assertEqual(str(self.page), "/café/ -- Café!")
@override_settings(ROOT_URLCONF="flatpages_tests.urls")
def test_get_absolute_url_include(self):
self.assertEqual(self.page.get_absolute_url(), "/flatpage_root/caf%C3%A9/")
@override_settings(ROOT_URLCONF="flatpages_tests.no_slash_urls")
def test_get_absolute_url_include_no_slash(self):
self.assertEqual(self.page.get_absolute_url(), "/flatpagecaf%C3%A9/")
@override_settings(ROOT_URLCONF="flatpages_tests.absolute_urls")
def test_get_absolute_url_with_hardcoded_url(self):
fp = FlatPage(title="Test", url="/hardcoded/")
self.assertEqual(fp.get_absolute_url(), "/flatpage/")
| 40.6875
| 83
| 0.739631
|
d2e738c37ae742394b21aa40912fca669c8044a1
| 4,830
|
py
|
Python
|
parser/AMRGraph.py
|
mrdrozdov/AMR-gs
|
9d76a3449ec91ec6bd2d47c43de61f6960010d94
|
[
"MIT"
] | 72
|
2020-04-14T04:03:27.000Z
|
2022-03-17T09:00:44.000Z
|
parser/AMRGraph.py
|
mrdrozdov/AMR-gs
|
9d76a3449ec91ec6bd2d47c43de61f6960010d94
|
[
"MIT"
] | 19
|
2020-04-23T03:17:15.000Z
|
2022-03-25T16:56:37.000Z
|
parser/AMRGraph.py
|
mrdrozdov/AMR-gs
|
9d76a3449ec91ec6bd2d47c43de61f6960010d94
|
[
"MIT"
] | 21
|
2020-04-22T18:10:07.000Z
|
2021-12-06T06:57:18.000Z
|
# encoding=utf8
import re
import random
number_regexp = re.compile(r'^-?(\d)+(\.\d+)?$')
abstract_regexp0 = re.compile(r'^([A-Z]+_)+\d+$')
abstract_regexp1 = re.compile(r'^\d0*$')
discard_regexp = re.compile(r'^n(\d+)?$')
attr_value_set = set(['-', '+', 'interrogative', 'imperative', 'expressive'])
def _is_attr_form(x):
return (x in attr_value_set or x.endswith('_') or number_regexp.match(x) is not None)
def _is_abs_form(x):
return (abstract_regexp0.match(x) is not None or abstract_regexp1.match(x) is not None)
def is_attr_or_abs_form(x):
return _is_attr_form(x) or _is_abs_form(x)
def need_an_instance(x):
return (not _is_attr_form(x) or (abstract_regexp0.match(x) is not None))
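# Illustrative matches (for orientation only):
#   _is_attr_form('-'), _is_attr_form('3.14'), _is_attr_form('imperative') -> True
#   _is_abs_form('DATE_ATTRS_1') -> True   (abstract_regexp0)
#   _is_abs_form('100')          -> True   (abstract_regexp1)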
class AMRGraph(object):
def __init__(self, smatch_amr):
# transform amr from original smatch format into our own data structure
instance_triple, attribute_triple, relation_triple = smatch_amr.get_triples()
self.root = smatch_amr.root
self.nodes = set()
self.edges = dict()
self.reversed_edges = dict()
self.undirected_edges = dict()
self.name2concept = dict()
# will do some adjustments
self.abstract_concepts = dict()
for _, name, concept in instance_triple:
if is_attr_or_abs_form(concept):
if _is_abs_form(concept):
self.abstract_concepts[name] = concept
else:
print ('bad concept', _, name, concept)
self.name2concept[name] = concept
self.nodes.add(name)
for rel, concept, value in attribute_triple:
if rel == 'TOP':
continue
# discard some empty names
if rel == 'name' and discard_regexp.match(value):
continue
# abstract concept can't have an attribute
if concept in self.abstract_concepts:
print (rel, self.abstract_concepts[concept], value, "abstract concept cannot have an attribute")
continue
name = "%s_attr_%d"%(value, len(self.name2concept))
if not _is_attr_form(value):
if _is_abs_form(value):
self.abstract_concepts[name] = value
else:
print ('bad attribute', rel, concept, value)
continue
self.name2concept[name] = value
self._add_edge(rel, concept, name)
for rel, head, tail in relation_triple:
self._add_edge(rel, head, tail)
# lower concept
for name in self.name2concept:
v = self.name2concept[name]
if not _is_abs_form(v):
v = v.lower()
self.name2concept[name] = v
def __len__(self):
return len(self.name2concept)
def _add_edge(self, rel, src, des):
self.nodes.add(src)
self.nodes.add(des)
self.edges[src] = self.edges.get(src, []) + [(rel, des)]
self.reversed_edges[des] = self.reversed_edges.get(des, []) + [(rel, src)]
self.undirected_edges[src] = self.undirected_edges.get(src, []) + [(rel, des)]
self.undirected_edges[des] = self.undirected_edges.get(des, []) + [(rel + '_reverse_', src)]
def root_centered_sort(self, rel_order=None):
queue = [self.root]
visited = set(queue)
step = 0
while len(queue) > step:
src = queue[step]
step += 1
if src not in self.undirected_edges:
continue
random.shuffle(self.undirected_edges[src])
if rel_order is not None:
                # Randomly pick one of two relation orderings (stochastic tie-breaking kept for performance reasons)
if random.random() < 0.5:
self.undirected_edges[src].sort(key=lambda x: -rel_order(x[0]) if (x[0].startswith('snt') or x[0].startswith('op') ) else -1)
else:
self.undirected_edges[src].sort(key=lambda x: -rel_order(x[0]))
for rel, des in self.undirected_edges[src]:
if des in visited:
continue
else:
queue.append(des)
visited.add(des)
not_connected = len(queue) != len(self.nodes)
assert (not not_connected)
name2pos = dict(zip(queue, range(len(queue))))
visited = set()
edge = []
for x in queue:
if x not in self.undirected_edges:
continue
for r, y in self.undirected_edges[x]:
if y in visited:
r = r[:-9] if r.endswith('_reverse_') else r+'_reverse_'
edge.append((name2pos[x], name2pos[y], r)) # x -> y: r
visited.add(x)
return [self.name2concept[x] for x in queue], edge, not_connected
| 39.590164
| 145
| 0.567909
|
61115dbb2abdf4a54f9fad8c6ad3c39b9e63c05b
| 2,956
|
py
|
Python
|
auth0/v3/management/resource_servers.py
|
meatbody/auth0-python
|
5f9373f520f7eb5cf8572dfc835f075d81681f55
|
[
"MIT"
] | null | null | null |
auth0/v3/management/resource_servers.py
|
meatbody/auth0-python
|
5f9373f520f7eb5cf8572dfc835f075d81681f55
|
[
"MIT"
] | null | null | null |
auth0/v3/management/resource_servers.py
|
meatbody/auth0-python
|
5f9373f520f7eb5cf8572dfc835f075d81681f55
|
[
"MIT"
] | null | null | null |
from .rest import RestClient
class ResourceServers(object):
"""Auth0 resource servers endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
"""
def __init__(self, domain, token, telemetry=True, timeout=5.0):
self.domain = domain
self.client = RestClient(jwt=token, telemetry=telemetry, timeout=timeout)
def _url(self, id=None):
url = 'https://{}/api/v2/resource-servers'.format(self.domain)
if id is not None:
return '{}/{}'.format(url, id)
return url
def create(self, body):
"""Create a new resource server.
Args:
body (dict): Attributes for the new resource Server
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/post_resource_servers
"""
return self.client.post(self._url(), data=body)
def get_all(self, page=None, per_page=None, include_totals=False):
"""Retrieves all resource servers
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
"""
params = {
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower()
}
return self.client.get(self._url(), params=params)
def get(self, id):
"""Retrieves a resource server by its id.
Args:
id (str): Id of the resource server to get.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers_by_id
"""
return self.client.get(self._url(id))
def delete(self, id):
"""Deletes a resource server.
Args:
id (str): Id of resource server to delete.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/delete_resource_servers_by_id
"""
return self.client.delete(self._url(id))
def update(self, id, body):
"""Modifies a resource server.
Args:
id (str): The id of the resource server to update.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/patch_resource_servers_by_id
"""
return self.client.patch(self._url(id), data=body)
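# Minimal usage sketch (the domain, token and field values are placeholders):
#
#   servers = ResourceServers('my-tenant.auth0.com', 'MGMT_API_V2_TOKEN')
#   servers.create({'identifier': 'https://api.example.com', 'name': 'My API'})
#   servers.get_all(page=0, per_page=25, include_totals=True)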
| 30.474227
| 107
| 0.609946
|
5abe9398a21e8a7202a4f5b199fa6702fbc4ca36
| 522
|
py
|
Python
|
tests/test_builtins/it/test_italy_spec.py
|
Shanmugapriya03/mimesis
|
649253fef05c6b5c362805000c1d7a99898aa0fe
|
[
"MIT"
] | 3
|
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
tests/test_builtins/it/test_italy_spec.py
|
Shanmugapriya03/mimesis
|
649253fef05c6b5c362805000c1d7a99898aa0fe
|
[
"MIT"
] | 1
|
2020-06-05T10:34:20.000Z
|
2020-08-08T08:45:18.000Z
|
tests/test_builtins/it/test_italy_spec.py
|
Shanmugapriya03/mimesis
|
649253fef05c6b5c362805000c1d7a99898aa0fe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
import pytest
from mimesis.builtins import ItalySpecProvider
from mimesis.enums import Gender
@pytest.fixture
def italy():
return ItalySpecProvider()
def test_noun(italy):
result = italy.fiscal_code(gender=Gender.MALE)
assert re.fullmatch(
r'^[A-Z]{6}\d{2}[A-EHLMPR-T][0123][0-9][A-MZ]\d{3}[A-Z]$', result)
result = italy.fiscal_code(gender=Gender.FEMALE)
assert re.fullmatch(
r'^[A-Z]{6}\d{2}[A-EHLMPR-T][4567][0-9][A-MZ]\d{3}[A-Z]$', result)
| 21.75
| 74
| 0.649425
|
fc589fdc5acd94f94f668ce5f6c78ed8ccee3076
| 3,085
|
py
|
Python
|
config/settings.py
|
faisalnazik/Django-React-Project-Rest-Framework
|
55b1d333c969804350235513fca811d00f3e52a6
|
[
"MIT"
] | 2
|
2021-03-01T10:04:30.000Z
|
2021-07-17T16:20:44.000Z
|
config/settings.py
|
faisalnazik/Django-React-Project-Rest-Framework
|
55b1d333c969804350235513fca811d00f3e52a6
|
[
"MIT"
] | null | null | null |
config/settings.py
|
faisalnazik/Django-React-Project-Rest-Framework
|
55b1d333c969804350235513fca811d00f3e52a6
|
[
"MIT"
] | null | null | null |
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k+#^q-@5-9xb41+*+)&inwxc68*dx2q!x343z84_f1a#u5b6#7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.286885
| 91
| 0.694652
|
126bdb971a80a05fd5451a61045a8857a65c5469
| 1,197
|
py
|
Python
|
aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DisableHostAvailabilityRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DisableHostAvailabilityRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DisableHostAvailabilityRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DisableHostAvailabilityRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'DisableHostAvailability','cms')
def get_Ids(self):
return self.get_query_params().get('Ids')
def set_Ids(self,Ids):
for i in range(len(Ids)):
if Ids[i] is not None:
self.add_query_param('Id.' + str(i + 1) , Ids[i]);
| 37.40625
| 82
| 0.744361
|
7897adada1ac9fe4bb674fcaa68ef76279dfdf72
| 3,429
|
py
|
Python
|
Utils/pgConnector.py
|
lionick/rciam-metrics-script
|
3b9019f8455cee92d7a0b1c7927d98e6f684830d
|
[
"Apache-2.0"
] | null | null | null |
Utils/pgConnector.py
|
lionick/rciam-metrics-script
|
3b9019f8455cee92d7a0b1c7927d98e6f684830d
|
[
"Apache-2.0"
] | null | null | null |
Utils/pgConnector.py
|
lionick/rciam-metrics-script
|
3b9019f8455cee92d7a0b1c7927d98e6f684830d
|
[
"Apache-2.0"
] | 1
|
2021-09-28T06:27:22.000Z
|
2021-09-28T06:27:22.000Z
|
from configparser import ConfigParser
import sys
import psycopg2
# import the error handling libraries for psycopg2
from psycopg2 import OperationalError, errorcodes, errors
from Logger import log
def singleton(theClass):
""" decorator for a class to make a singleton out of it """
classInstances = {}
def getInstance(*args, **kwargs):
""" creating or just return the one and only class instance.
The singleton depends on the parameters used in __init__ """
key = (theClass, args, str(kwargs))
if key not in classInstances:
classInstances[key] = theClass(*args, **kwargs)
return classInstances[key]
return getInstance
class pgConnector:
logger = log.get_logger("pgConnector")
conn = None
def __init__(self, filename = "config.py", section = "source_database"):
self.filename = filename
self.section = section
self.params = self.config(filename, section)
        if self.conn is None:
try:
self.logger.debug('Connecting to the PostgreSQL database...{0}'.format(section))
self.conn = psycopg2.connect(**self.params)
except psycopg2.OperationalError as err:
self.logger.error(str(err).strip())
sys.exit(1)
def config(self, filename='config.py', section='source_database'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to source_database
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
self.logger.error('Section {0} not found in the {1} file'.format(section, filename))
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
def execute_select(self, query):
# create a cursor
if not hasattr(self.conn, 'cursor'):
self.__init__()
cur = self.conn.cursor()
# execute a statement
cur.execute(query)
return cur.fetchall()
def close(self):
self.conn.close()
self.conn = None
self.logger.debug('Database connection "{0}" closed'.format(self.section))
# Subclass of pgConnector
@singleton
class comanagePgConnector(pgConnector):
def __init__(self, filename = "config.py", section = "comanage_database"):
super().__init__(filename, section)
# Subclass of pgConnector
@singleton
class proxystatisticsPgConnector(pgConnector):
def __init__(self, filename = "config.py", section = "proxystatistics_database"):
super().__init__(filename, section)
# Subclass of pgConnector
@singleton
class destinationPgConnector(pgConnector):
def __init__(self, filename = "config.py", section = "destination_database"):
super().__init__(filename, section)
def execute_insert(self, query, params):
try:
# create a cursor
if not hasattr(self.conn, 'cursor'):
self.__init__()
cur = self.conn.cursor()
cur.execute(query, params)
self.conn.commit()
except Exception as err:
self.logger.error(str(err).strip())
sys.exit(1)
def execute_and_commit(self, query):
try:
# create a cursor
if not hasattr(self.conn, 'cursor'):
self.__init__()
cur = self.conn.cursor()
cur.execute(query)
self.conn.commit()
except Exception as err:
self.logger.error(str(err).strip())
sys.exit(1)
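# Usage sketch (section names must exist in the given config file; the query is
# illustrative only):
#
#   source = comanagePgConnector("config.py", "comanage_database")
#   rows = source.execute_select("SELECT 1")
#   source.close()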
| 29.059322
| 90
| 0.666958
|
70e04b578af7faa27f44eaaa29abbe4b78d7d889
| 1,742
|
py
|
Python
|
setup.py
|
WarrenWeckesser/numpngw
|
f6b5f25bdab939ed75cac62c78f7d3a22bc3348b
|
[
"BSD-2-Clause"
] | 50
|
2015-10-28T12:46:20.000Z
|
2022-03-09T23:37:38.000Z
|
setup.py
|
WarrenWeckesser/numpngw
|
f6b5f25bdab939ed75cac62c78f7d3a22bc3348b
|
[
"BSD-2-Clause"
] | 7
|
2015-12-29T23:09:29.000Z
|
2019-12-26T18:41:09.000Z
|
setup.py
|
WarrenWeckesser/pngw
|
264fb53b12b2f7d5b60688d005fbbf81052a0c4c
|
[
"BSD-2-Clause"
] | 7
|
2015-10-28T13:33:05.000Z
|
2020-11-12T12:37:30.000Z
|
from setuptools import setup
from os import path
def get_numpngw_version():
"""
Find the value assigned to __version__ in numpngw.py.
This function assumes that there is a line of the form
__version__ = "version-string"
in numpngw.py. It returns the string version-string, or None if such a
line is not found.
"""
with open("numpngw.py", "r") as f:
for line in f:
s = [w.strip() for w in line.split("=", 1)]
if len(s) == 2 and s[0] == "__version__":
return s[1][1:-1]
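# For example, a line in numpngw.py such as
#     __version__ = "0.1.2"
# makes this function return "0.1.2".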
# Get the long description from README.rst.
_here = path.abspath(path.dirname(__file__))
with open(path.join(_here, 'README.rst')) as f:
_long_description = f.read()
setup(
name='numpngw',
version=get_numpngw_version(),
author='Warren Weckesser',
description="Write numpy array(s) to a PNG or animated PNG file.",
long_description=_long_description,
license="BSD",
url="https://github.com/WarrenWeckesser/numpngw",
classifiers=[
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
py_modules=["numpngw"],
install_requires=[
'numpy >= 1.6.0',
],
keywords="numpy png matplotlib animation",
)
| 31.107143
| 75
| 0.608496
|
6f472c656cd6d6cfa7c6f19afb8f5bd10735b719
| 141,011
|
py
|
Python
|
tests/unit/modules/test_virt.py
|
srg91/salt
|
9e5df06af0f2ff4810bb88db82fe59dc4f3e1a96
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_virt.py
|
srg91/salt
|
9e5df06af0f2ff4810bb88db82fe59dc4f3e1a96
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_virt.py
|
srg91/salt
|
9e5df06af0f2ff4810bb88db82fe59dc4f3e1a96
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
virt execution module unit tests
'''
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import datetime
import shutil
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import MagicMock, patch
# Import salt libs
import salt.utils.yaml
import salt.modules.virt as virt
import salt.modules.config as config
from salt._compat import ElementTree as ET
import salt.config
import salt.syspaths
import tempfile
from salt.exceptions import CommandExecutionError
# Import third party libs
from salt.ext import six
# pylint: disable=import-error
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
# pylint: disable=invalid-name,protected-access,attribute-defined-outside-init,too-many-public-methods,unused-argument
class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
'''
Libvirt library mock
'''
class virDomain(MagicMock):
'''
virDomain mock
'''
class libvirtError(Exception):
'''
libvirtError mock
'''
class VirtTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.module.virt
'''
def setup_loader_modules(self):
self.mock_libvirt = LibvirtMock()
self.mock_conn = MagicMock()
self.mock_libvirt.openAuth.return_value = self.mock_conn
self.mock_popen = MagicMock()
self.addCleanup(delattr, self, 'mock_libvirt')
self.addCleanup(delattr, self, 'mock_conn')
self.addCleanup(delattr, self, 'mock_popen')
self.mock_subprocess = MagicMock()
self.mock_subprocess.return_value = self.mock_subprocess # pylint: disable=no-member
self.mock_subprocess.Popen.return_value = self.mock_popen # pylint: disable=no-member
loader_globals = {
'__salt__': {
'config.get': config.get,
'config.option': config.option,
},
'libvirt': self.mock_libvirt,
'subprocess': self.mock_subprocess
}
return {virt: loader_globals, config: loader_globals}
def set_mock_vm(self, name, xml):
'''
Define VM to use in tests
'''
self.mock_conn.listDefinedDomains.return_value = [name] # pylint: disable=no-member
mock_domain = self.mock_libvirt.virDomain()
self.mock_conn.lookupByName.return_value = mock_domain # pylint: disable=no-member
mock_domain.XMLDesc.return_value = xml # pylint: disable=no-member
# Return state as shutdown
mock_domain.info.return_value = [4, 2048 * 1024, 1024 * 1024, 2, 1234] # pylint: disable=no-member
mock_domain.ID.return_value = 1
mock_domain.name.return_value = name
return mock_domain
def test_disk_profile_merge(self):
'''
Test virt._disk_profile() when merging with user-defined disks
'''
root_dir = os.path.join(salt.syspaths.ROOT_DIR, 'srv', 'salt-images')
userdisks = [{'name': 'data', 'size': 16384, 'format': 'raw'}]
disks = virt._disk_profile('default', 'kvm', userdisks, 'myvm', image='/path/to/image')
self.assertEqual(
[{'name': 'system',
'device': 'disk',
'size': 8192,
'format': 'qcow2',
'model': 'virtio',
'filename': 'myvm_system.qcow2',
'image': '/path/to/image',
'source_file': '{0}{1}myvm_system.qcow2'.format(root_dir, os.sep)},
{'name': 'data',
'device': 'disk',
'size': 16384,
'format': 'raw',
'model': 'virtio',
'filename': 'myvm_data.raw',
'source_file': '{0}{1}myvm_data.raw'.format(root_dir, os.sep)}],
disks
)
def test_boot_default_dev(self):
'''
Test virt._gen_xml() default boot device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64'
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('os/boot').attrib['dev'], 'hd')
self.assertEqual(root.find('os/type').attrib['arch'], 'x86_64')
self.assertEqual(root.find('os/type').text, 'hvm')
def test_boot_custom_dev(self):
'''
Test virt._gen_xml() custom boot device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
boot_dev='cdrom'
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('os/boot').attrib['dev'], 'cdrom')
def test_boot_multiple_devs(self):
'''
Test virt._gen_xml() multiple boot devices
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
boot_dev='cdrom network'
)
root = ET.fromstring(xml_data)
devs = root.findall('.//boot')
self.assertTrue(len(devs) == 2)
def test_gen_xml_no_nic(self):
'''
Test virt._gen_xml() serial console
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='pty',
console=True
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
self.assertEqual(root.find('devices/console').attrib['type'], 'pty')
def test_gen_xml_for_serial_console(self):
'''
Test virt._gen_xml() serial console
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='pty',
console=True
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
self.assertEqual(root.find('devices/console').attrib['type'], 'pty')
def test_gen_xml_for_telnet_console(self):
'''
Test virt._gen_xml() telnet console
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='tcp',
console=True,
telnet_port=22223
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
self.assertEqual(root.find('devices/console').attrib['type'], 'tcp')
self.assertEqual(root.find('devices/console/source').attrib['service'], '22223')
def test_gen_xml_for_telnet_console_unspecified_port(self):
'''
Test virt._gen_xml() telnet console without any specified port
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='tcp',
console=True
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
self.assertEqual(root.find('devices/console').attrib['type'], 'tcp')
self.assertIsInstance(int(root.find('devices/console/source').attrib['service']), int)
def test_gen_xml_for_serial_no_console(self):
'''
Test virt._gen_xml() with no serial console
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='pty',
console=False
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
self.assertEqual(root.find('devices/console'), None)
def test_gen_xml_for_telnet_no_console(self):
'''
Test virt._gen_xml() with no telnet console
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
serial_type='tcp',
console=False,
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
self.assertEqual(root.find('devices/console'), None)
def test_gen_xml_nographics_default(self):
'''
Test virt._gen_xml() with default no graphics device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64'
)
root = ET.fromstring(xml_data)
self.assertIsNone(root.find('devices/graphics'))
def test_gen_xml_vnc_default(self):
'''
Test virt._gen_xml() with default vnc graphics device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
graphics={'type': 'vnc', 'port': 1234, 'tlsPort': 5678,
'listen': {'type': 'address', 'address': 'myhost'}},
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/graphics').attrib['type'], 'vnc')
self.assertEqual(root.find('devices/graphics').attrib['autoport'], 'no')
self.assertEqual(root.find('devices/graphics').attrib['port'], '1234')
self.assertFalse('tlsPort' in root.find('devices/graphics').attrib)
self.assertEqual(root.find('devices/graphics').attrib['listen'], 'myhost')
self.assertEqual(root.find('devices/graphics/listen').attrib['type'], 'address')
self.assertEqual(root.find('devices/graphics/listen').attrib['address'], 'myhost')
def test_gen_xml_spice_default(self):
'''
Test virt._gen_xml() with default spice graphics device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
graphics={'type': 'spice'},
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/graphics').attrib['type'], 'spice')
self.assertEqual(root.find('devices/graphics').attrib['autoport'], 'yes')
self.assertEqual(root.find('devices/graphics').attrib['listen'], '0.0.0.0')
self.assertEqual(root.find('devices/graphics/listen').attrib['type'], 'address')
self.assertEqual(root.find('devices/graphics/listen').attrib['address'], '0.0.0.0')
def test_gen_xml_spice(self):
'''
Test virt._gen_xml() with spice graphics device
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
graphics={'type': 'spice', 'port': 1234, 'tls_port': 5678, 'listen': {'type': 'none'}},
)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('devices/graphics').attrib['type'], 'spice')
self.assertEqual(root.find('devices/graphics').attrib['autoport'], 'no')
self.assertEqual(root.find('devices/graphics').attrib['port'], '1234')
self.assertEqual(root.find('devices/graphics').attrib['tlsPort'], '5678')
self.assertFalse('listen' in root.find('devices/graphics').attrib)
self.assertEqual(root.find('devices/graphics/listen').attrib['type'], 'none')
self.assertFalse('address' in root.find('devices/graphics/listen').attrib)
def test_default_disk_profile_hypervisor_esxi(self):
'''
Test virt._disk_profile() default ESXi profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._disk_profile('nonexistent', 'vmware')
self.assertTrue(len(ret) == 1)
found = [disk for disk in ret if disk['name'] == 'system']
self.assertTrue(bool(found))
system = found[0]
self.assertEqual(system['format'], 'vmdk')
self.assertEqual(system['model'], 'scsi')
self.assertTrue(int(system['size']) >= 1)
def test_default_disk_profile_hypervisor_kvm(self):
'''
Test virt._disk_profile() default KVM profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._disk_profile('nonexistent', 'kvm')
self.assertTrue(len(ret) == 1)
found = [disk for disk in ret if disk['name'] == 'system']
self.assertTrue(bool(found))
system = found[0]
self.assertEqual(system['format'], 'qcow2')
self.assertEqual(system['model'], 'virtio')
self.assertTrue(int(system['size']) >= 1)
def test_default_disk_profile_hypervisor_xen(self):
'''
Test virt._disk_profile() default XEN profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._disk_profile('nonexistent', 'xen')
self.assertTrue(len(ret) == 1)
found = [disk for disk in ret if disk['name'] == 'system']
self.assertTrue(bool(found))
system = found[0]
self.assertEqual(system['format'], 'qcow2')
self.assertEqual(system['model'], 'xen')
self.assertTrue(int(system['size']) >= 1)
def test_default_nic_profile_hypervisor_esxi(self):
'''
Test virt._nic_profile() default ESXi profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._nic_profile('nonexistent', 'vmware')
self.assertTrue(len(ret) == 1)
eth0 = ret[0]
self.assertEqual(eth0['name'], 'eth0')
self.assertEqual(eth0['type'], 'bridge')
self.assertEqual(eth0['source'], 'DEFAULT')
self.assertEqual(eth0['model'], 'e1000')
def test_default_nic_profile_hypervisor_kvm(self):
'''
Test virt._nic_profile() default KVM profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._nic_profile('nonexistent', 'kvm')
self.assertTrue(len(ret) == 1)
eth0 = ret[0]
self.assertEqual(eth0['name'], 'eth0')
self.assertEqual(eth0['type'], 'bridge')
self.assertEqual(eth0['source'], 'br0')
self.assertEqual(eth0['model'], 'virtio')
def test_default_nic_profile_hypervisor_xen(self):
'''
Test virt._nic_profile() default XEN profile
'''
mock = MagicMock(return_value={})
with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member
ret = virt._nic_profile('nonexistent', 'xen')
self.assertTrue(len(ret) == 1)
eth0 = ret[0]
self.assertEqual(eth0['name'], 'eth0')
self.assertEqual(eth0['type'], 'bridge')
self.assertEqual(eth0['source'], 'br0')
self.assertFalse(eth0['model'])
def test_gen_vol_xml(self):
'''
Test virt._get_vol_xml()
'''
xml_data = virt._gen_vol_xml('vmname', 'system', 'qcow2', 8192, '/path/to/image/')
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'vmname/system.qcow2')
self.assertEqual(root.find('key').text, 'vmname/system')
self.assertEqual(root.find('capacity').attrib['unit'], 'KiB')
self.assertEqual(root.find('capacity').text, six.text_type(8192 * 1024))
def test_gen_xml_for_kvm_default_profile(self):
'''
Test virt._gen_xml(), KVM default profile case
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib['type'], 'kvm')
self.assertEqual(root.find('vcpu').text, '1')
self.assertEqual(root.find('memory').text, six.text_type(512 * 1024))
self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
disks = root.findall('.//disk')
self.assertEqual(len(disks), 1)
disk = disks[0]
root_dir = salt.config.DEFAULT_MINION_OPTS.get('root_dir')
self.assertTrue(disk.find('source').attrib['file'].startswith(root_dir))
self.assertTrue('hello_system' in disk.find('source').attrib['file'])
self.assertEqual(disk.find('target').attrib['dev'], 'vda')
self.assertEqual(disk.find('target').attrib['bus'], 'virtio')
self.assertEqual(disk.find('driver').attrib['name'], 'qemu')
self.assertEqual(disk.find('driver').attrib['type'], 'qcow2')
interfaces = root.findall('.//interface')
self.assertEqual(len(interfaces), 1)
iface = interfaces[0]
self.assertEqual(iface.attrib['type'], 'bridge')
self.assertEqual(iface.find('source').attrib['bridge'], 'br0')
self.assertEqual(iface.find('model').attrib['type'], 'virtio')
mac = iface.find('mac').attrib['address']
self.assertTrue(
re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def test_gen_xml_for_esxi_default_profile(self):
'''
Test virt._gen_xml(), ESXi/vmware default profile case
'''
diskp = virt._disk_profile('default', 'vmware', [], 'hello')
nicp = virt._nic_profile('default', 'vmware')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'vmware',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib['type'], 'vmware')
self.assertEqual(root.find('vcpu').text, '1')
self.assertEqual(root.find('memory').text, six.text_type(512 * 1024))
self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
disks = root.findall('.//disk')
self.assertEqual(len(disks), 1)
disk = disks[0]
self.assertTrue('[0]' in disk.find('source').attrib['file'])
self.assertTrue('hello_system' in disk.find('source').attrib['file'])
self.assertEqual(disk.find('target').attrib['dev'], 'sda')
self.assertEqual(disk.find('target').attrib['bus'], 'scsi')
self.assertEqual(disk.find('address').attrib['unit'], '0')
interfaces = root.findall('.//interface')
self.assertEqual(len(interfaces), 1)
iface = interfaces[0]
self.assertEqual(iface.attrib['type'], 'bridge')
self.assertEqual(iface.find('source').attrib['bridge'], 'DEFAULT')
self.assertEqual(iface.find('model').attrib['type'], 'e1000')
mac = iface.find('mac').attrib['address']
self.assertTrue(
re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def test_gen_xml_for_xen_default_profile(self):
'''
Test virt._gen_xml(), XEN PV default profile case
'''
diskp = virt._disk_profile('default', 'xen', [], 'hello')
nicp = virt._nic_profile('default', 'xen')
with patch.dict(virt.__grains__, {'os_family': 'Suse'}):
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'xen',
'xen',
'x86_64',
boot=None
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib['type'], 'xen')
self.assertEqual(root.find('vcpu').text, '1')
self.assertEqual(root.find('memory').text, six.text_type(512 * 1024))
self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
self.assertEqual(root.find('.//kernel').text, '/usr/lib/grub2/x86_64-xen/grub.xen')
disks = root.findall('.//disk')
self.assertEqual(len(disks), 1)
disk = disks[0]
root_dir = salt.config.DEFAULT_MINION_OPTS.get('root_dir')
self.assertTrue(disk.find('source').attrib['file'].startswith(root_dir))
self.assertTrue('hello_system' in disk.find('source').attrib['file'])
self.assertEqual(disk.find('target').attrib['dev'], 'xvda')
self.assertEqual(disk.find('target').attrib['bus'], 'xen')
self.assertEqual(disk.find('driver').attrib['name'], 'qemu')
self.assertEqual(disk.find('driver').attrib['type'], 'qcow2')
interfaces = root.findall('.//interface')
self.assertEqual(len(interfaces), 1)
iface = interfaces[0]
self.assertEqual(iface.attrib['type'], 'bridge')
self.assertEqual(iface.find('source').attrib['bridge'], 'br0')
self.assertIsNone(iface.find('model'))
def test_gen_xml_for_esxi_custom_profile(self):
'''
Test virt._gen_xml(), ESXi/vmware custom profile case
'''
disks = {
'noeffect': [
{'first': {'size': 8192, 'pool': 'datastore1'}},
{'second': {'size': 4096, 'pool': 'datastore2'}}
]
}
nics = {
'noeffect': [
{'name': 'eth1', 'source': 'ONENET'},
{'name': 'eth2', 'source': 'TWONET'}
]
}
with patch.dict(virt.__salt__, # pylint: disable=no-member
{'config.get': MagicMock(side_effect=[disks, nics])}):
diskp = virt._disk_profile('noeffect', 'vmware', [], 'hello')
nicp = virt._nic_profile('noeffect', 'vmware')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'vmware',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib['type'], 'vmware')
self.assertEqual(root.find('vcpu').text, '1')
self.assertEqual(root.find('memory').text, six.text_type(512 * 1024))
self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
self.assertTrue(len(root.findall('.//disk')) == 2)
self.assertTrue(len(root.findall('.//interface')) == 2)
def test_gen_xml_for_kvm_custom_profile(self):
'''
Test virt._gen_xml(), KVM custom profile case
'''
disks = {
'noeffect': [
{'first': {'size': 8192, 'pool': '/var/lib/images'}},
{'second': {'size': 4096, 'pool': '/var/lib/images'}}
]
}
nics = {
'noeffect': [
{'name': 'eth1', 'source': 'b2'},
{'name': 'eth2', 'source': 'b2'}
]
}
with patch.dict(virt.__salt__, {'config.get': MagicMock(side_effect=[ # pylint: disable=no-member
disks, nics])}):
diskp = virt._disk_profile('noeffect', 'kvm', [], 'hello')
nicp = virt._nic_profile('noeffect', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib['type'], 'kvm')
self.assertEqual(root.find('vcpu').text, '1')
self.assertEqual(root.find('memory').text, six.text_type(512 * 1024))
self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
self.assertTrue(len(root.findall('.//disk')) == 2)
self.assertTrue(len(root.findall('.//interface')) == 2)
@patch('salt.modules.virt.pool_info',
return_value={'mypool': {'target_path': os.path.join(salt.syspaths.ROOT_DIR, 'pools', 'mypool')}})
def test_disk_profile_kvm_disk_pool(self, mock_poolinfo):
'''
Test virt._disk_profile(), KVM case with a disk pool defined.
'''
disks = {
'noeffect': [
{'first': {'size': 8192, 'pool': 'mypool'}},
{'second': {'size': 4096}}
]
}
# pylint: disable=no-member
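# config.get side_effect feeds the disk profile first, then the default image directory used for disks without a pool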
with patch.dict(virt.__salt__, {'config.get': MagicMock(side_effect=[
disks,
os.path.join(salt.syspaths.ROOT_DIR, 'default', 'path')])}):
diskp = virt._disk_profile('noeffect', 'kvm', [], 'hello')
pools_path = os.path.join(salt.syspaths.ROOT_DIR, 'pools', 'mypool') + os.sep
default_path = os.path.join(salt.syspaths.ROOT_DIR, 'default', 'path') + os.sep
self.assertEqual(len(diskp), 2)
self.assertTrue(diskp[0]['source_file'].startswith(pools_path))
self.assertTrue(diskp[1]['source_file'].startswith(default_path))
# pylint: enable=no-member
def test_disk_profile_kvm_disk_external_image(self):
'''
Test virt._disk_profile(), KVM case with an external image.
'''
diskp = virt._disk_profile(None, 'kvm', [
{
'name': 'mydisk',
'source_file': '/path/to/my/image.qcow2'
}], 'hello')
self.assertEqual(len(diskp), 1)
self.assertEqual(diskp[0]['source_file'], ('/path/to/my/image.qcow2'))
@patch('salt.modules.virt.pool_info', return_value={})
def test_disk_profile_kvm_disk_pool_notfound(self, mock_poolinfo):
'''
Test virt._disk_profile(), KVM case with a disk pool that cannot be found.
'''
disks = {
'noeffect': [
{'first': {'size': 8192, 'pool': 'default'}},
]
}
with patch.dict(virt.__salt__, {'config.get': MagicMock(side_effect=[ # pylint: disable=no-member
disks, "/default/path/"])}):
with self.assertRaises(CommandExecutionError):
virt._disk_profile('noeffect', 'kvm', [], 'hello')
@patch('salt.modules.virt.pool_info', return_value={'target_path': '/dev/disk/by-path'})
def test_disk_profile_kvm_disk_pool_invalid(self, mock_poolinfo):
'''
Test virt._disk_profile(), KVM case with an invalid disk pool target path.
'''
disks = {
'noeffect': [
{'first': {'size': 8192, 'pool': 'default'}},
]
}
with patch.dict(virt.__salt__, {'config.get': MagicMock(side_effect=[ # pylint: disable=no-member
disks, "/default/path/"])}):
with self.assertRaises(CommandExecutionError):
virt._disk_profile('noeffect', 'kvm', [], 'hello')
def test_gen_xml_cdrom(self):
'''
Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
'''
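# A cdrom device with no source_file should produce a <disk device='cdrom'> element without a <source> child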
diskp = virt._disk_profile(None, 'kvm', [{
'name': 'tested',
'device': 'cdrom',
'source_file': None,
'model': 'ide'}], 'hello')
nicp = virt._nic_profile(None, 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
disk = root.findall('.//disk')[0]
self.assertEqual(disk.attrib['device'], 'cdrom')
self.assertIsNone(disk.find('source'))
def test_controller_for_esxi(self):
'''
Test virt._gen_xml() generated device controller for ESXi/vmware
'''
diskp = virt._disk_profile('default', 'vmware', [], 'hello')
nicp = virt._nic_profile('default', 'vmware')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'vmware',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
controllers = root.findall('.//devices/controller')
self.assertTrue(len(controllers) == 1)
controller = controllers[0]
self.assertEqual(controller.attrib['model'], 'lsilogic')
def test_controller_for_kvm(self):
'''
Test virt._gen_xml() generated device controller for KVM
'''
diskp = virt._disk_profile('default', 'kvm', [], 'hello')
nicp = virt._nic_profile('default', 'kvm')
xml_data = virt._gen_xml(
'hello',
1,
512,
diskp,
nicp,
'kvm',
'hvm',
'x86_64',
)
root = ET.fromstring(xml_data)
controllers = root.findall('.//devices/controller')
# There should be no controller
self.assertTrue(len(controllers) == 0)
# kvm mac address should start with 52:54:00
self.assertTrue("mac address='52:54:00" in xml_data)
def test_diff_disks(self):
'''
Test virt._diff_disk_lists()
'''
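# The old and new disk definitions below deliberately overlap so that the unchanged, new, sorted and deleted lists can all be checked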
old_disks = ET.fromstring('''
<devices>
<disk type='file' device='disk'>
<source file='/path/to/img0.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
<source file='/path/to/img1.qcow2'/>
<target dev='vdb' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
<source file='/path/to/img2.qcow2'/>
<target dev='hda' bus='ide'/>
</disk>
<disk type='file' device='disk'>
<source file='/path/to/img4.qcow2'/>
<target dev='hdb' bus='ide'/>
</disk>
<disk type='file' device='cdrom'>
<target dev='hdc' bus='ide'/>
</disk>
</devices>
''').findall('disk')
new_disks = ET.fromstring('''
<devices>
<disk type='file' device='disk'>
<source file='/path/to/img3.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='disk' cache='default'>
<source file='/path/to/img0.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
<source file='/path/to/img4.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<target dev='hda' bus='ide'/>
</disk>
</devices>
''').findall('disk')
ret = virt._diff_disk_lists(old_disks, new_disks)
self.assertEqual([disk.find('source').get('file') if disk.find('source') is not None else None
for disk in ret['unchanged']], [])
self.assertEqual([disk.find('source').get('file') if disk.find('source') is not None else None
for disk in ret['new']],
['/path/to/img3.qcow2', '/path/to/img0.qcow2', '/path/to/img4.qcow2', None])
self.assertEqual([disk.find('target').get('dev') for disk in ret['sorted']],
['vda', 'vdb', 'vdc', 'hda'])
self.assertEqual([disk.find('source').get('file') if disk.find('source') is not None else None
for disk in ret['sorted']],
['/path/to/img3.qcow2',
'/path/to/img0.qcow2',
'/path/to/img4.qcow2',
None])
self.assertEqual(ret['new'][1].find('target').get('bus'), 'virtio')
self.assertEqual([disk.find('source').get('file') if disk.find('source') is not None else None
for disk in ret['deleted']],
['/path/to/img0.qcow2',
'/path/to/img1.qcow2',
'/path/to/img2.qcow2',
'/path/to/img4.qcow2',
None])
def test_diff_nics(self):
'''
Test virt._diff_interface_lists()
'''
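# b1 keeps its network and stays unchanged, b2 moves to another network and is recreated, b3 is removed and b4 is added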
old_nics = ET.fromstring('''
<devices>
<interface type='network'>
<mac address='52:54:00:39:02:b1'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:39:02:b2'/>
<source network='admin'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:39:02:b3'/>
<source network='admin'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
</devices>
''').findall('interface')
new_nics = ET.fromstring('''
<devices>
<interface type='network'>
<mac address='52:54:00:39:02:b1'/>
<source network='default'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<mac address='52:54:00:39:02:b2'/>
<source network='default'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<mac address='52:54:00:39:02:b4'/>
<source network='admin'/>
<model type='virtio'/>
</interface>
</devices>
''').findall('interface')
ret = virt._diff_interface_lists(old_nics, new_nics)
self.assertEqual([nic.find('mac').get('address') for nic in ret['unchanged']],
['52:54:00:39:02:b1'])
self.assertEqual([nic.find('mac').get('address') for nic in ret['new']],
['52:54:00:39:02:b2', '52:54:00:39:02:b4'])
self.assertEqual([nic.find('mac').get('address') for nic in ret['deleted']],
['52:54:00:39:02:b2', '52:54:00:39:02:b3'])
def test_init(self):
'''
Test init() function
'''
xml = '''
<capabilities>
<host>
<uuid>44454c4c-3400-105a-8033-b3c04f4b344a</uuid>
<cpu>
<arch>x86_64</arch>
<model>Nehalem</model>
<vendor>Intel</vendor>
<microcode version='25'/>
<topology sockets='1' cores='4' threads='2'/>
<feature name='vme'/>
<feature name='ds'/>
<feature name='acpi'/>
<pages unit='KiB' size='4'/>
<pages unit='KiB' size='2048'/>
</cpu>
<power_management>
<suspend_mem/>
<suspend_disk/>
<suspend_hybrid/>
</power_management>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
<uri_transport>rdma</uri_transport>
</uri_transports>
</migration_features>
<topology>
<cells num='1'>
<cell id='0'>
<memory unit='KiB'>12367120</memory>
<pages unit='KiB' size='4'>3091780</pages>
<pages unit='KiB' size='2048'>0</pages>
<distances>
<sibling id='0' value='10'/>
</distances>
<cpus num='8'>
<cpu id='0' socket_id='0' core_id='0' siblings='0,4'/>
<cpu id='1' socket_id='0' core_id='1' siblings='1,5'/>
<cpu id='2' socket_id='0' core_id='2' siblings='2,6'/>
<cpu id='3' socket_id='0' core_id='3' siblings='3,7'/>
<cpu id='4' socket_id='0' core_id='0' siblings='0,4'/>
<cpu id='5' socket_id='0' core_id='1' siblings='1,5'/>
<cpu id='6' socket_id='0' core_id='2' siblings='2,6'/>
<cpu id='7' socket_id='0' core_id='3' siblings='3,7'/>
</cpus>
</cell>
</cells>
</topology>
<cache>
<bank id='0' level='3' type='both' size='8' unit='MiB' cpus='0-7'/>
</cache>
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
<secmodel>
<model>dac</model>
<doi>0</doi>
<baselabel type='kvm'>+487:+486</baselabel>
<baselabel type='qemu'>+487:+486</baselabel>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-i386</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
<pae/>
<nonpae/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
</capabilities>
'''
self.mock_conn.getCapabilities.return_value = xml # pylint: disable=no-member
root_dir = os.path.join(salt.syspaths.ROOT_DIR, 'srv', 'salt-images')
defineMock = MagicMock(return_value=1)
self.mock_conn.defineXML = defineMock
mock_chmod = MagicMock()
mock_run = MagicMock()
with patch.dict(os.__dict__, {'chmod': mock_chmod, 'makedirs': MagicMock()}): # pylint: disable=no-member
with patch.dict(virt.__salt__, {'cmd.run': mock_run}): # pylint: disable=no-member
# Ensure the init() function allows creating a VM without NIC and disk
virt.init('test vm',
2,
1234,
nic=None,
disk=None,
seed=False,
start=False)
definition = defineMock.call_args_list[0][0][0]
self.assertFalse('<interface' in definition)
self.assertFalse('<disk' in definition)
# Ensure the init() function allows creating a VM without NIC and
# disk but with boot parameters.
defineMock.reset_mock()
mock_run.reset_mock()
boot = {
'kernel': '/root/f8-i386-vmlinuz',
'initrd': '/root/f8-i386-initrd',
'cmdline':
'console=ttyS0 ks=http://example.com/f8-i386/os/'
}
retval = virt.init('test vm boot params',
2,
1234,
nic=None,
disk=None,
seed=False,
start=False,
boot=boot)
definition = defineMock.call_args_list[0][0][0]
self.assertEqual('<kernel' in definition, True)
self.assertEqual('<initrd' in definition, True)
self.assertEqual('<cmdline' in definition, True)
self.assertEqual(retval, True)
# Verify that remote paths are downloaded and the xml has been
# modified
mock_response = MagicMock()
mock_response.read = MagicMock(return_value='filecontent')
cache_dir = tempfile.mkdtemp()
with patch.dict(virt.__dict__, {'CACHE_DIR': cache_dir}):
with patch('salt.ext.six.moves.urllib.request.urlopen',
MagicMock(return_value=mock_response)):
with patch('salt.utils.files.fopen',
return_value=mock_response):
defineMock.reset_mock()
mock_run.reset_mock()
boot = {
'kernel':
'https://www.example.com/download/vmlinuz',
'initrd': '',
'cmdline':
'console=ttyS0 '
'ks=http://example.com/f8-i386/os/'
}
retval = virt.init('test remote vm boot params',
2,
1234,
nic=None,
disk=None,
seed=False,
start=False,
boot=boot)
definition = defineMock.call_args_list[0][0][0]
self.assertEqual(cache_dir in definition, True)
shutil.rmtree(cache_dir)
# Test case creating disks
defineMock.reset_mock()
mock_run.reset_mock()
virt.init('test vm',
2,
1234,
nic=None,
disk=None,
disks=[
{'name': 'system', 'size': 10240},
{'name': 'cddrive', 'device': 'cdrom', 'source_file': None, 'model': 'ide'}
],
seed=False,
start=False)
definition = ET.fromstring(defineMock.call_args_list[0][0][0])
disk_sources = [disk.find('source').get('file') if disk.find('source') is not None else None
for disk in definition.findall('./devices/disk')]
expected_disk_path = os.path.join(root_dir, 'test vm_system.qcow2')
self.assertEqual(disk_sources, [expected_disk_path, None])
self.assertEqual(mock_run.call_args[0][0],
'qemu-img create -f qcow2 "{0}" 10240M'.format(expected_disk_path))
self.assertEqual(mock_chmod.call_args[0][0], expected_disk_path)
def test_update(self):
'''
Test virt.update()
'''
root_dir = os.path.join(salt.syspaths.ROOT_DIR, 'srv', 'salt-images')
xml = '''
<domain type='kvm' id='7'>
<name>my vm</name>
<memory unit='KiB'>1048576</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<vcpu placement='auto'>1</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
</os>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='{0}{1}my vm_system.qcow2'/>
<backingStore/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='{0}{1}my vm_data.qcow2'/>
<backingStore/>
<target dev='vdb' bus='virtio'/>
<alias name='virtio-disk1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x1'/>
</disk>
<interface type='network'>
<mac address='52:54:00:39:02:b1'/>
<source network='default' bridge='virbr0'/>
<target dev='vnet0'/>
<model type='virtio'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:39:02:b2'/>
<source network='oldnet' bridge='virbr1'/>
<target dev='vnet1'/>
<model type='virtio'/>
<alias name='net1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x1'/>
</interface>
<graphics type='spice' port='5900' autoport='yes' listen='127.0.0.1'>
<listen type='address' address='127.0.0.1'/>
</graphics>
<video>
<model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1' primary='yes'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
</devices>
</domain>
'''.format(root_dir, os.sep)
domain_mock = self.set_mock_vm('my vm', xml)
domain_mock.OSType = MagicMock(return_value='hvm')
define_mock = MagicMock(return_value=True)
self.mock_conn.defineXML = define_mock
# Update vcpus case
setvcpus_mock = MagicMock(return_value=0)
domain_mock.setVcpusFlags = setvcpus_mock
self.assertEqual({
'definition': True,
'cpu': True,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', cpu=2))
setxml = ET.fromstring(define_mock.call_args[0][0])
self.assertEqual(setxml.find('vcpu').text, '2')
self.assertEqual(setvcpus_mock.call_args[0][0], 2)
boot = {
'kernel': '/root/f8-i386-vmlinuz',
'initrd': '/root/f8-i386-initrd',
'cmdline':
'console=ttyS0 ks=http://example.com/f8-i386/os/'
}
# Update with boot parameter case
self.assertEqual({
'definition': True,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', boot=boot))
# Update memory case
setmem_mock = MagicMock(return_value=0)
domain_mock.setMemoryFlags = setmem_mock
self.assertEqual({
'definition': True,
'mem': True,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', mem=2048))
setxml = ET.fromstring(define_mock.call_args[0][0])
self.assertEqual(setxml.find('memory').text, '2048')
self.assertEqual(setxml.find('memory').get('unit'), 'MiB')
self.assertEqual(setmem_mock.call_args[0][0], 2048 * 1024)
# Update disks case
devattach_mock = MagicMock(return_value=0)
devdetach_mock = MagicMock(return_value=0)
domain_mock.attachDevice = devattach_mock
domain_mock.detachDevice = devdetach_mock
mock_chmod = MagicMock()
mock_run = MagicMock()
with patch.dict(os.__dict__, {'chmod': mock_chmod, 'makedirs': MagicMock()}): # pylint: disable=no-member
with patch.dict(virt.__salt__, {'cmd.run': mock_run}): # pylint: disable=no-member
ret = virt.update('my vm', disk_profile='default', disks=[
{'name': 'cddrive', 'device': 'cdrom', 'source_file': None, 'model': 'ide'},
{'name': 'added', 'size': 2048}])
added_disk_path = os.path.join(
virt.__salt__['config.get']('virt:images'), 'my vm_added.qcow2') # pylint: disable=no-member
self.assertEqual(mock_run.call_args[0][0],
'qemu-img create -f qcow2 "{0}" 2048M'.format(added_disk_path))
self.assertEqual(mock_chmod.call_args[0][0], added_disk_path)
self.assertListEqual(
[None, os.path.join(root_dir, 'my vm_added.qcow2')],
[ET.fromstring(disk).find('source').get('file') if str(disk).find('<source') > -1 else None
for disk in ret['disk']['attached']])
self.assertListEqual(
[os.path.join(root_dir, 'my vm_data.qcow2')],
[ET.fromstring(disk).find('source').get('file') for disk in ret['disk']['detached']])
self.assertEqual(devattach_mock.call_count, 2)
devdetach_mock.assert_called_once()
# Update nics case
yaml_config = '''
virt:
nic:
myprofile:
- network: default
name: eth0
'''
mock_config = salt.utils.yaml.safe_load(yaml_config)
devattach_mock.reset_mock()
devdetach_mock.reset_mock()
with patch.dict(salt.modules.config.__opts__, mock_config): # pylint: disable=no-member
ret = virt.update('my vm', nic_profile='myprofile',
interfaces=[{'name': 'eth0', 'type': 'network', 'source': 'default',
'mac': '52:54:00:39:02:b1'},
{'name': 'eth1', 'type': 'network', 'source': 'newnet'}])
self.assertEqual(['newnet'],
[ET.fromstring(nic).find('source').get('network') for nic in ret['interface']['attached']])
self.assertEqual(['oldnet'],
[ET.fromstring(nic).find('source').get('network') for nic in ret['interface']['detached']])
devattach_mock.assert_called_once()
devdetach_mock.assert_called_once()
# Remove nics case
devattach_mock.reset_mock()
devdetach_mock.reset_mock()
ret = virt.update('my vm', nic_profile=None, interfaces=[])
self.assertEqual([], ret['interface']['attached'])
self.assertEqual(2, len(ret['interface']['detached']))
devattach_mock.assert_not_called()
devdetach_mock.assert_called()
# Remove disks case (yeah, it surely is silly)
devattach_mock.reset_mock()
devdetach_mock.reset_mock()
ret = virt.update('my vm', disk_profile=None, disks=[])
self.assertEqual([], ret['disk']['attached'])
self.assertEqual(2, len(ret['disk']['detached']))
devattach_mock.assert_not_called()
devdetach_mock.assert_called()
# Graphics change test case
self.assertEqual({
'definition': True,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', graphics={'type': 'vnc'}))
setxml = ET.fromstring(define_mock.call_args[0][0])
self.assertEqual('vnc', setxml.find('devices/graphics').get('type'))
# Update with no diff case
self.assertEqual({
'definition': False,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', cpu=1, mem=1024,
disk_profile='default', disks=[{'name': 'data', 'size': 2048}],
nic_profile='myprofile',
interfaces=[{'name': 'eth0', 'type': 'network', 'source': 'default',
'mac': '52:54:00:39:02:b1'},
{'name': 'eth1', 'type': 'network', 'source': 'oldnet',
'mac': '52:54:00:39:02:b2'}],
graphics={'type': 'spice',
'listen': {'type': 'address', 'address': '127.0.0.1'}}))
# Failed XML description update case
self.mock_conn.defineXML.side_effect = self.mock_libvirt.libvirtError("Test error")
setmem_mock.reset_mock()
with self.assertRaises(self.mock_libvirt.libvirtError):
virt.update('my vm', mem=2048)
# Failed single update failure case
self.mock_conn.defineXML = MagicMock(return_value=True)
setmem_mock.side_effect = self.mock_libvirt.libvirtError("Failed to live change memory")
self.assertEqual({
'definition': True,
'errors': ['Failed to live change memory'],
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', mem=2048))
# Failed multiple updates failure case
self.assertEqual({
'definition': True,
'errors': ['Failed to live change memory'],
'cpu': True,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}, virt.update('my vm', cpu=4, mem=2048))
def test_mixed_dict_and_list_as_profile_objects(self):
'''
Test virt._nic_profile with mixed dictionaries and lists as input.
'''
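# Each of the three profile syntaxes below should normalize to the same two fully populated interfaces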
yaml_config = '''
virt:
nic:
new-listonly-profile:
- bridge: br0
name: eth0
- model: virtio
name: eth1
source: test_network
type: network
new-list-with-legacy-names:
- eth0:
bridge: br0
- eth1:
bridge: br1
model: virtio
non-default-legacy-profile:
eth0:
bridge: br0
eth1:
bridge: br1
model: virtio
'''
mock_config = salt.utils.yaml.safe_load(yaml_config)
with patch.dict(salt.modules.config.__opts__, mock_config): # pylint: disable=no-member
for name in six.iterkeys(mock_config['virt']['nic']):
profile = salt.modules.virt._nic_profile(name, 'kvm')
self.assertEqual(len(profile), 2)
interface_attrs = profile[0]
self.assertIn('source', interface_attrs)
self.assertIn('type', interface_attrs)
self.assertIn('name', interface_attrs)
self.assertIn('model', interface_attrs)
self.assertEqual(interface_attrs['model'], 'virtio')
self.assertIn('mac', interface_attrs)
self.assertTrue(
re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$',
interface_attrs['mac'], re.I))
def test_get_xml(self):
'''
Test virt.get_xml()
'''
xml = '''<domain type='kvm' id='7'>
<name>test-vm</name>
<devices>
<graphics type='vnc' port='5900' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
</devices>
</domain>
'''
domain = self.set_mock_vm("test-vm", xml)
self.assertEqual(xml, virt.get_xml('test-vm'))
self.assertEqual(xml, virt.get_xml(domain))
def test_parse_qemu_img_info(self):
'''
Make sure that qemu-img info output is properly parsed
'''
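# Simulated JSON output of qemu-img info for an image with two snapshots and a two-level backing chain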
qemu_infos = '''[{
"snapshots": [
{
"vm-clock-nsec": 0,
"name": "first-snap",
"date-sec": 1528877587,
"date-nsec": 380589000,
"vm-clock-sec": 0,
"id": "1",
"vm-state-size": 1234
},
{
"vm-clock-nsec": 0,
"name": "second snap",
"date-sec": 1528877592,
"date-nsec": 933509000,
"vm-clock-sec": 0,
"id": "2",
"vm-state-size": 4567
}
],
"virtual-size": 25769803776,
"filename": "/disks/test.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 217088,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"full-backing-filename": "/disks/mybacking.qcow2",
"backing-filename": "mybacking.qcow2",
"dirty-flag": false
},
{
"virtual-size": 25769803776,
"filename": "/disks/mybacking.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 393744384,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"full-backing-filename": "/disks/root.qcow2",
"backing-filename": "root.qcow2",
"dirty-flag": false
},
{
"virtual-size": 25769803776,
"filename": "/disks/root.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 196872192,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"dirty-flag": false
}]'''
self.assertEqual(
{
'file': '/disks/test.qcow2',
'file format': 'qcow2',
'backing file': {
'file': '/disks/mybacking.qcow2',
'file format': 'qcow2',
'disk size': 393744384,
'virtual size': 25769803776,
'cluster size': 65536,
'backing file': {
'file': '/disks/root.qcow2',
'file format': 'qcow2',
'disk size': 196872192,
'virtual size': 25769803776,
'cluster size': 65536,
}
},
'disk size': 217088,
'virtual size': 25769803776,
'cluster size': 65536,
'snapshots': [
{
'id': '1',
'tag': 'first-snap',
'vmsize': 1234,
'date': datetime.datetime.fromtimestamp(
float("{}.{}".format(1528877587, 380589000))).isoformat(),
'vmclock': '00:00:00'
},
{
'id': '2',
'tag': 'second snap',
'vmsize': 4567,
'date': datetime.datetime.fromtimestamp(
float("{}.{}".format(1528877592, 933509000))).isoformat(),
'vmclock': '00:00:00'
}
],
}, virt._parse_qemu_img_info(qemu_infos))
@patch('salt.modules.virt.stop', return_value=True)
@patch('salt.modules.virt.undefine')
@patch('os.remove')
def test_purge_default(self, mock_remove, mock_undefine, mock_stop):
'''
Test virt.purge() with default parameters
'''
xml = '''<domain type='kvm' id='7'>
<name>test-vm</name>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/disks/test.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/disks/test-cdrom.iso'/>
<target dev='hda' bus='ide'/>
<readonly/>
</disk>
</devices>
</domain>
'''
self.set_mock_vm("test-vm", xml)
qemu_infos = '''[{
"virtual-size": 25769803776,
"filename": "/disks/test.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 217088,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"dirty-flag": false
}]'''
self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member
res = virt.purge('test-vm')
self.assertTrue(res)
mock_remove.assert_any_call('/disks/test.qcow2')
mock_remove.assert_any_call('/disks/test-cdrom.iso')
@patch('salt.modules.virt.stop', return_value=True)
@patch('salt.modules.virt.undefine')
@patch('os.remove')
def test_purge_noremovable(self, mock_remove, mock_undefine, mock_stop):
'''
Test virt.purge(removables=False)
'''
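# The cdrom and floppy images below must be left in place when removables=False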
xml = '''<domain type='kvm' id='7'>
<name>test-vm</name>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/disks/test.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/disks/test-cdrom.iso'/>
<target dev='hda' bus='ide'/>
<readonly/>
</disk>
<disk type='file' device='floppy'>
<driver name='qemu' type='raw'/>
<source file='/disks/test-floppy.iso'/>
<target dev='hdb' bus='ide'/>
<readonly/>
</disk>
</devices>
</domain>
'''
self.set_mock_vm("test-vm", xml)
qemu_infos = '''[{
"virtual-size": 25769803776,
"filename": "/disks/test.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 217088,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"dirty-flag": false
}]'''
self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member
res = virt.purge('test-vm', removables=False)
self.assertTrue(res)
mock_remove.assert_called_once()
mock_remove.assert_any_call('/disks/test.qcow2')
def test_capabilities(self):
'''
Test the virt.capabilities parsing
'''
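# Typical libvirt getCapabilities() XML for a KVM host exposing i686, x86_64 and Xen PV guests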
xml = '''
<capabilities>
<host>
<uuid>44454c4c-3400-105a-8033-b3c04f4b344a</uuid>
<cpu>
<arch>x86_64</arch>
<model>Nehalem</model>
<vendor>Intel</vendor>
<microcode version='25'/>
<topology sockets='1' cores='4' threads='2'/>
<feature name='vme'/>
<feature name='ds'/>
<feature name='acpi'/>
<pages unit='KiB' size='4'/>
<pages unit='KiB' size='2048'/>
</cpu>
<power_management>
<suspend_mem/>
<suspend_disk/>
<suspend_hybrid/>
</power_management>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
<uri_transport>rdma</uri_transport>
</uri_transports>
</migration_features>
<topology>
<cells num='1'>
<cell id='0'>
<memory unit='KiB'>12367120</memory>
<pages unit='KiB' size='4'>3091780</pages>
<pages unit='KiB' size='2048'>0</pages>
<distances>
<sibling id='0' value='10'/>
</distances>
<cpus num='8'>
<cpu id='0' socket_id='0' core_id='0' siblings='0,4'/>
<cpu id='1' socket_id='0' core_id='1' siblings='1,5'/>
<cpu id='2' socket_id='0' core_id='2' siblings='2,6'/>
<cpu id='3' socket_id='0' core_id='3' siblings='3,7'/>
<cpu id='4' socket_id='0' core_id='0' siblings='0,4'/>
<cpu id='5' socket_id='0' core_id='1' siblings='1,5'/>
<cpu id='6' socket_id='0' core_id='2' siblings='2,6'/>
<cpu id='7' socket_id='0' core_id='3' siblings='3,7'/>
</cpus>
</cell>
</cells>
</topology>
<cache>
<bank id='0' level='3' type='both' size='8' unit='MiB' cpus='0-7'/>
</cache>
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
<secmodel>
<model>dac</model>
<doi>0</doi>
<baselabel type='kvm'>+487:+486</baselabel>
<baselabel type='qemu'>+487:+486</baselabel>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-i386</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
<pae/>
<nonpae/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine canonical='pc-i440fx-2.6' maxCpus='255'>pc</machine>
<machine maxCpus='255'>pc-0.12</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>xen</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine>xenpv</machine>
<domain type='xen'/>
</arch>
</guest>
</capabilities>
'''
self.mock_conn.getCapabilities.return_value = xml # pylint: disable=no-member
caps = virt.capabilities()
expected = {
'host': {
'uuid': '44454c4c-3400-105a-8033-b3c04f4b344a',
'cpu': {
'arch': 'x86_64',
'model': 'Nehalem',
'vendor': 'Intel',
'microcode': '25',
'sockets': 1,
'cores': 4,
'threads': 2,
'features': ['vme', 'ds', 'acpi'],
'pages': [{'size': '4 KiB'}, {'size': '2048 KiB'}]
},
'power_management': ['suspend_mem', 'suspend_disk', 'suspend_hybrid'],
'migration': {
'live': True,
'transports': ['tcp', 'rdma']
},
'topology': {
'cells': [
{
'id': 0,
'memory': '12367120 KiB',
'pages': [
{'size': '4 KiB', 'available': 3091780},
{'size': '2048 KiB', 'available': 0}
],
'distances': {
0: 10,
},
'cpus': [
{'id': 0, 'socket_id': 0, 'core_id': 0, 'siblings': '0,4'},
{'id': 1, 'socket_id': 0, 'core_id': 1, 'siblings': '1,5'},
{'id': 2, 'socket_id': 0, 'core_id': 2, 'siblings': '2,6'},
{'id': 3, 'socket_id': 0, 'core_id': 3, 'siblings': '3,7'},
{'id': 4, 'socket_id': 0, 'core_id': 0, 'siblings': '0,4'},
{'id': 5, 'socket_id': 0, 'core_id': 1, 'siblings': '1,5'},
{'id': 6, 'socket_id': 0, 'core_id': 2, 'siblings': '2,6'},
{'id': 7, 'socket_id': 0, 'core_id': 3, 'siblings': '3,7'}
]
}
]
},
'cache': {
'banks': [
{'id': 0, 'level': 3, 'type': 'both', 'size': '8 MiB', 'cpus': '0-7'}
]
},
'security': [
{'model': 'apparmor', 'doi': '0', 'baselabels': []},
{'model': 'dac', 'doi': '0', 'baselabels': [
{'type': 'kvm', 'label': '+487:+486'},
{'type': 'qemu', 'label': '+487:+486'}
]}
]
},
'guests': [
{
'os_type': 'hvm',
'arch': {
'name': 'i686',
'wordsize': 32,
'emulator': '/usr/bin/qemu-system-i386',
'machines': {
'pc-i440fx-2.6': {'maxcpus': 255, 'alternate_names': ['pc']},
'pc-0.12': {'maxcpus': 255, 'alternate_names': []}
},
'domains': {
'qemu': {
'emulator': None,
'machines': {}
},
'kvm': {
'emulator': '/usr/bin/qemu-kvm',
'machines': {
'pc-i440fx-2.6': {'maxcpus': 255, 'alternate_names': ['pc']},
'pc-0.12': {'maxcpus': 255, 'alternate_names': []}
}
}
}
},
'features': {
'cpuselection': {'default': True, 'toggle': False},
'deviceboot': {'default': True, 'toggle': False},
'disksnapshot': {'default': True, 'toggle': False},
'acpi': {'default': True, 'toggle': True},
'apic': {'default': True, 'toggle': False},
'pae': {'default': True, 'toggle': False},
'nonpae': {'default': True, 'toggle': False}
}
},
{
'os_type': 'hvm',
'arch': {
'name': 'x86_64',
'wordsize': 64,
'emulator': '/usr/bin/qemu-system-x86_64',
'machines': {
'pc-i440fx-2.6': {'maxcpus': 255, 'alternate_names': ['pc']},
'pc-0.12': {'maxcpus': 255, 'alternate_names': []}
},
'domains': {
'qemu': {
'emulator': None,
'machines': {}
},
'kvm': {
'emulator': '/usr/bin/qemu-kvm',
'machines': {
'pc-i440fx-2.6': {'maxcpus': 255, 'alternate_names': ['pc']},
'pc-0.12': {'maxcpus': 255, 'alternate_names': []}
}
}
}
},
'features': {
'cpuselection': {'default': True, 'toggle': False},
'deviceboot': {'default': True, 'toggle': False},
'disksnapshot': {'default': True, 'toggle': False},
'acpi': {'default': True, 'toggle': True},
'apic': {'default': True, 'toggle': False}
}
},
{
'os_type': 'xen',
'arch': {
'name': 'x86_64',
'wordsize': 64,
'emulator': '/usr/bin/qemu-system-x86_64',
'machines': {
'xenpv': {'alternate_names': []}
},
'domains': {
'xen': {
'emulator': None,
'machines': {}
}
}
}
}
]
}
self.assertEqual(expected, caps)
def test_network(self):
'''
Test virt._get_net_xml()
'''
xml_data = virt._gen_net_xml('network', 'main', 'bridge', 'openvswitch')
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'network')
self.assertEqual(root.find('bridge').attrib['name'], 'main')
self.assertEqual(root.find('forward').attrib['mode'], 'bridge')
self.assertEqual(root.find('virtualport').attrib['type'], 'openvswitch')
def test_network_nat(self):
'''
Test virt._get_net_xml() in a nat setup
'''
xml_data = virt._gen_net_xml('network', 'main', 'nat', None, ip_configs=[
{
'cidr': '192.168.2.0/24',
'dhcp_ranges': [
{'start': '192.168.2.10', 'end': '192.168.2.25'},
{'start': '192.168.2.110', 'end': '192.168.2.125'},
]
}
])
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'network')
self.assertEqual(root.find('bridge').attrib['name'], 'main')
self.assertEqual(root.find('forward').attrib['mode'], 'nat')
self.assertEqual(root.find("./ip[@address='192.168.2.0']").attrib['prefix'], '24')
self.assertEqual(root.find("./ip[@address='192.168.2.0']").attrib['family'], 'ipv4')
self.assertEqual(
root.find("./ip[@address='192.168.2.0']/dhcp/range[@start='192.168.2.10']").attrib['end'],
'192.168.2.25')
self.assertEqual(
root.find("./ip[@address='192.168.2.0']/dhcp/range[@start='192.168.2.110']").attrib['end'],
'192.168.2.125')
def test_domain_capabilities(self):
'''
Test the virt.domain_capabilities parsing
'''
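# Sample getDomainCapabilities() XML for an aarch64 KVM domain, parsed by virt.domain_capabilities() below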
xml = '''
<domainCapabilities>
<path>/usr/bin/qemu-system-aarch64</path>
<domain>kvm</domain>
<machine>virt-2.12</machine>
<arch>aarch64</arch>
<vcpu max='255'/>
<iothreads supported='yes'/>
<os supported='yes'>
<loader supported='yes'>
<value>/usr/share/AAVMF/AAVMF_CODE.fd</value>
<value>/usr/share/AAVMF/AAVMF32_CODE.fd</value>
<value>/usr/share/OVMF/OVMF_CODE.fd</value>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='yes'/>
<mode name='host-model' supported='yes'>
<model fallback='forbid'>sample-cpu</model>
<vendor>ACME</vendor>
<feature policy='require' name='vme'/>
<feature policy='require' name='ss'/>
</mode>
<mode name='custom' supported='yes'>
<model usable='unknown'>pxa262</model>
<model usable='yes'>pxa270-a0</model>
<model usable='no'>arm1136</model>
</mode>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'>
<value>default</value>
<value>kvm</value>
<value>vfio</value>
</enum>
</hostdev>
</devices>
<features>
<gic supported='yes'>
<enum name='version'>
<value>3</value>
</enum>
</gic>
<vmcoreinfo supported='yes'/>
</features>
</domainCapabilities>
'''
self.mock_conn.getDomainCapabilities.return_value = xml # pylint: disable=no-member
caps = virt.domain_capabilities()
expected = {
'emulator': '/usr/bin/qemu-system-aarch64',
'domain': 'kvm',
'machine': 'virt-2.12',
'arch': 'aarch64',
'max_vcpus': 255,
'iothreads': True,
'os': {
'loader': {
'type': ['rom', 'pflash'],
'readonly': ['yes', 'no'],
'values': [
'/usr/share/AAVMF/AAVMF_CODE.fd',
'/usr/share/AAVMF/AAVMF32_CODE.fd',
'/usr/share/OVMF/OVMF_CODE.fd'
]
}
},
'cpu': {
'host-passthrough': True,
'host-model': {
'model': {
'name': 'sample-cpu',
'fallback': 'forbid'
},
'vendor': 'ACME',
'features': {
'vme': 'require',
'ss': 'require'
}
},
'custom': {
'models': {
'pxa262': 'unknown',
'pxa270-a0': 'yes',
'arm1136': 'no'
}
}
},
'devices': {
'disk': {
'diskDevice': ['disk', 'cdrom', 'floppy', 'lun'],
'bus': ['fdc', 'scsi', 'virtio', 'usb', 'sata'],
},
'graphics': {
'type': ['sdl', 'vnc']
},
'video': {
'modelType': ['vga', 'virtio']
},
'hostdev': {
'mode': ['subsystem'],
'startupPolicy': ['default', 'mandatory', 'requisite', 'optional'],
'subsysType': ['usb', 'pci', 'scsi'],
'capsType': [],
'pciBackend': ['default', 'kvm', 'vfio']
}
},
'features': {
'gic': {
'version': ['3']
},
'vmcoreinfo': {}
}
}
self.assertEqual(expected, caps)
def test_network_tag(self):
'''
Test virt._gen_net_xml() with a VLAN tag
'''
xml_data = virt._gen_net_xml('network', 'main', 'bridge', 'openvswitch', 1001)
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'network')
self.assertEqual(root.find('bridge').attrib['name'], 'main')
self.assertEqual(root.find('forward').attrib['mode'], 'bridge')
self.assertEqual(root.find('virtualport').attrib['type'], 'openvswitch')
self.assertEqual(root.find('vlan/tag').attrib['id'], '1001')
def test_list_networks(self):
'''
Test virt.list_networks()
'''
names = ['net1', 'default', 'net2']
net_mocks = [MagicMock(), MagicMock(), MagicMock()]
for i, value in enumerate(names):
net_mocks[i].name.return_value = value
self.mock_conn.listAllNetworks.return_value = net_mocks # pylint: disable=no-member
actual = virt.list_networks()
self.assertEqual(names, actual)
def test_network_info(self):
'''
Test virt.network_info()
'''
self.mock_libvirt.VIR_IP_ADDR_TYPE_IPV4 = 0
self.mock_libvirt.VIR_IP_ADDR_TYPE_IPV6 = 1
net_mock = MagicMock()
# pylint: disable=no-member
net_mock.name.return_value = 'foo'
net_mock.UUIDString.return_value = 'some-uuid'
net_mock.bridgeName.return_value = 'br0'
net_mock.autostart.return_value = True
net_mock.isActive.return_value = False
net_mock.isPersistent.return_value = True
net_mock.DHCPLeases.return_value = [
{
'iface': 'virbr0',
'expirytime': 1527757552,
'type': 0,
'mac': '52:54:00:01:71:bd',
'ipaddr': '192.168.122.45',
'prefix': 24,
'hostname': 'py3-test',
'clientid': '01:52:54:00:01:71:bd',
'iaid': None
}
]
self.mock_conn.listAllNetworks.return_value = [net_mock]
# pylint: enable=no-member
net = virt.network_info('foo')
self.assertEqual({'foo': {
'uuid': 'some-uuid',
'bridge': 'br0',
'autostart': True,
'active': False,
'persistent': True,
'leases': [
{
'iface': 'virbr0',
'expirytime': 1527757552,
'type': 'ipv4',
'mac': '52:54:00:01:71:bd',
'ipaddr': '192.168.122.45',
'prefix': 24,
'hostname': 'py3-test',
'clientid': '01:52:54:00:01:71:bd',
'iaid': None
}
]}}, net)
def test_network_info_all(self):
'''
Test virt.network_info() with all networks
'''
self.mock_libvirt.VIR_IP_ADDR_TYPE_IPV4 = 0
self.mock_libvirt.VIR_IP_ADDR_TYPE_IPV6 = 1
net_mocks = []
# pylint: disable=no-member
for i in range(2):
net_mock = MagicMock()
net_mock.name.return_value = 'net{0}'.format(i)
net_mock.UUIDString.return_value = 'some-uuid'
net_mock.bridgeName.return_value = 'br{0}'.format(i)
net_mock.autostart.return_value = True
net_mock.isActive.return_value = False
net_mock.isPersistent.return_value = True
net_mock.DHCPLeases.return_value = []
net_mocks.append(net_mock)
self.mock_conn.listAllNetworks.return_value = net_mocks
# pylint: enable=no-member
net = virt.network_info()
self.assertEqual({
'net0':
{
'uuid': 'some-uuid',
'bridge': 'br0',
'autostart': True,
'active': False,
'persistent': True,
'leases': []
}, 'net1':
{
'uuid': 'some-uuid',
'bridge': 'br1',
'autostart': True,
'active': False,
'persistent': True,
'leases': []
}
}, net)
def test_network_info_notfound(self):
'''
Test virt.network_info() when the network can't be found
'''
# pylint: disable=no-member
self.mock_conn.listAllNetworks.return_value = []
# pylint: enable=no-member
net = virt.network_info('foo')
self.assertEqual({}, net)
def test_pool(self):
'''
Test virt._gen_pool_xml()
'''
xml_data = virt._gen_pool_xml('pool', 'logical', '/dev/base')
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'logical')
self.assertEqual(root.find('target/path').text, '/dev/base')
def test_pool_with_source(self):
'''
Test virt._gen_pool_xml() with a source device
'''
xml_data = virt._gen_pool_xml('pool', 'logical', '/dev/base', source_devices=[{'path': '/dev/sda'}])
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'logical')
self.assertEqual(root.find('target/path').text, '/dev/base')
self.assertEqual(root.find('source/device').attrib['path'], '/dev/sda')
def test_pool_with_scsi(self):
'''
Test virt._gen_pool_xml() with a SCSI source
'''
xml_data = virt._gen_pool_xml('pool',
'scsi',
'/dev/disk/by-path',
source_devices=[{'path': '/dev/sda'}],
source_adapter={
'type': 'scsi_host',
'parent_address': {
'unique_id': 5,
'address': {
'domain': '0x0000',
'bus': '0x00',
'slot': '0x1f',
'function': '0x2'
}
}
},
source_name='srcname')
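# For scsi pools the source is described by the adapter only: no <device> or <name> element should be generated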
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'scsi')
self.assertEqual(root.find('target/path').text, '/dev/disk/by-path')
self.assertEqual(root.find('source/device'), None)
self.assertEqual(root.find('source/name'), None)
self.assertEqual(root.find('source/adapter').attrib['type'], 'scsi_host')
self.assertEqual(root.find('source/adapter/parentaddr').attrib['unique_id'], '5')
self.assertEqual(root.find('source/adapter/parentaddr/address').attrib['domain'], '0x0000')
self.assertEqual(root.find('source/adapter/parentaddr/address').attrib['bus'], '0x00')
self.assertEqual(root.find('source/adapter/parentaddr/address').attrib['slot'], '0x1f')
self.assertEqual(root.find('source/adapter/parentaddr/address').attrib['function'], '0x2')
def test_pool_with_rbd(self):
'''
Test virt._gen_pool_xml() with an RBD source
'''
xml_data = virt._gen_pool_xml('pool',
'rbd',
source_devices=[{'path': '/dev/sda'}],
source_hosts=['1.2.3.4', 'my.ceph.monitor:69'],
source_auth={
'type': 'ceph',
'username': 'admin',
'secret': {
'type': 'uuid',
'value': 'someuuid'
}
},
source_name='srcname',
source_adapter={'type': 'scsi_host', 'name': 'host0'},
source_dir='/some/dir',
source_format='fmt')
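# rbd pools have no target path, and the device, adapter, dir and format sources passed above must be ignored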
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'rbd')
self.assertEqual(root.find('target'), None)
self.assertEqual(root.find('source/device'), None)
self.assertEqual(root.find('source/name').text, 'srcname')
self.assertEqual(root.find('source/adapter'), None)
self.assertEqual(root.find('source/dir'), None)
self.assertEqual(root.find('source/format'), None)
self.assertEqual(root.findall('source/host')[0].attrib['name'], '1.2.3.4')
self.assertTrue('port' not in root.findall('source/host')[0].attrib)
self.assertEqual(root.findall('source/host')[1].attrib['name'], 'my.ceph.monitor')
self.assertEqual(root.findall('source/host')[1].attrib['port'], '69')
self.assertEqual(root.find('source/auth').attrib['type'], 'ceph')
self.assertEqual(root.find('source/auth').attrib['username'], 'admin')
self.assertEqual(root.find('source/auth/secret').attrib['uuid'], 'someuuid')
def test_pool_with_netfs(self):
'''
Test virt._gen_pool_xml() with a netfs source
'''
xml_data = virt._gen_pool_xml('pool',
'netfs',
target='/path/to/target',
permissions={
'mode': '0770',
'owner': 1000,
'group': 100,
'label': 'seclabel'
},
source_devices=[{'path': '/dev/sda'}],
source_hosts=['nfs.host'],
source_name='srcname',
source_adapter={'type': 'scsi_host', 'name': 'host0'},
source_dir='/some/dir',
source_format='nfs')
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'netfs')
self.assertEqual(root.find('target/path').text, '/path/to/target')
self.assertEqual(root.find('target/permissions/mode').text, '0770')
self.assertEqual(root.find('target/permissions/owner').text, '1000')
self.assertEqual(root.find('target/permissions/group').text, '100')
self.assertEqual(root.find('target/permissions/label').text, 'seclabel')
self.assertEqual(root.find('source/device'), None)
self.assertEqual(root.find('source/name'), None)
self.assertEqual(root.find('source/adapter'), None)
self.assertEqual(root.find('source/dir').attrib['path'], '/some/dir')
self.assertEqual(root.find('source/format').attrib['type'], 'nfs')
self.assertEqual(root.find('source/host').attrib['name'], 'nfs.host')
self.assertEqual(root.find('source/auth'), None)
def test_pool_with_iscsi_direct(self):
'''
Test virt._gen_pool_xml() with an iscsi-direct source
'''
xml_data = virt._gen_pool_xml('pool',
'iscsi-direct',
source_hosts=['iscsi.example.com'],
source_devices=[{'path': 'iqn.2013-06.com.example:iscsi-pool'}],
source_initiator='iqn.2013-06.com.example:iscsi-initiator')
root = ET.fromstring(xml_data)
self.assertEqual(root.find('name').text, 'pool')
self.assertEqual(root.attrib['type'], 'iscsi-direct')
self.assertEqual(root.find('target'), None)
self.assertEqual(root.find('source/device').attrib['path'], 'iqn.2013-06.com.example:iscsi-pool')
self.assertEqual(root.findall('source/host')[0].attrib['name'], 'iscsi.example.com')
self.assertEqual(root.find('source/initiator/iqn').attrib['name'], 'iqn.2013-06.com.example:iscsi-initiator')
def test_pool_define(self):
'''
Test virt.pool_define()
'''
mock_pool = MagicMock()
mock_secret = MagicMock()
mock_secret_define = MagicMock(return_value=mock_secret)
self.mock_conn.secretDefineXML = mock_secret_define
self.mock_conn.storagePoolCreateXML = MagicMock(return_value=mock_pool)
self.mock_conn.storagePoolDefineXML = MagicMock(return_value=mock_pool)
mocks = [mock_pool, mock_secret, mock_secret_define, self.mock_conn.storagePoolCreateXML,
self.mock_conn.secretDefineXML, self.mock_conn.storagePoolDefineXML]
# Test case with already defined secret and permanent pool
self.assertTrue(virt.pool_define('default',
'rbd',
source_hosts=['one.example.com', 'two.example.com'],
source_name='rbdvol',
source_auth={
'type': 'ceph',
'username': 'admin',
'secret': {
'type': 'uuid',
'value': 'someuuid'
}
}))
self.mock_conn.storagePoolDefineXML.assert_called_once()
self.mock_conn.storagePoolCreateXML.assert_not_called()
mock_pool.create.assert_called_once()
mock_secret_define.assert_not_called()
# Test case with Ceph secret to be defined and transient pool
for mock in mocks:
mock.reset_mock()
self.assertTrue(virt.pool_define('default',
'rbd',
transient=True,
source_hosts=['one.example.com', 'two.example.com'],
source_name='rbdvol',
source_auth={
'username': 'admin',
'password': 'c2VjcmV0'
}))
self.mock_conn.storagePoolDefineXML.assert_not_called()
pool_xml = self.mock_conn.storagePoolCreateXML.call_args[0][0]
root = ET.fromstring(pool_xml)
self.assertEqual(root.find('source/auth').attrib['type'], 'ceph')
self.assertEqual(root.find('source/auth').attrib['username'], 'admin')
self.assertEqual(root.find('source/auth/secret').attrib['usage'], 'pool_default')
mock_pool.create.assert_not_called()
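# 'c2VjcmV0' is base64 for 'secret', hence the decoded bytes passed to setValue()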
mock_secret.setValue.assert_called_once_with(b'secret')
secret_xml = mock_secret_define.call_args[0][0]
root = ET.fromstring(secret_xml)
self.assertEqual(root.find('usage/name').text, 'pool_default')
self.assertEqual(root.find('usage').attrib['type'], 'ceph')
self.assertEqual(root.attrib['private'], 'yes')
self.assertEqual(root.find('description').text, 'Passphrase for default pool created by Salt')
# Test case with an iscsi secret and start=False, so the pool is defined but not started
for mock in mocks:
mock.reset_mock()
self.assertTrue(virt.pool_define('default',
'iscsi',
target='/dev/disk/by-path',
source_hosts=['iscsi.example.com'],
source_devices=[{'path': 'iqn.2013-06.com.example:iscsi-pool'}],
source_auth={
'username': 'admin',
'password': 'secret'
},
start=False))
self.mock_conn.storagePoolCreateXML.assert_not_called()
pool_xml = self.mock_conn.storagePoolDefineXML.call_args[0][0]
root = ET.fromstring(pool_xml)
self.assertEqual(root.find('source/auth').attrib['type'], 'chap')
self.assertEqual(root.find('source/auth').attrib['username'], 'admin')
self.assertEqual(root.find('source/auth/secret').attrib['usage'], 'pool_default')
mock_pool.create.assert_not_called()
mock_secret.setValue.assert_called_once_with('secret')
secret_xml = mock_secret_define.call_args[0][0]
root = ET.fromstring(secret_xml)
self.assertEqual(root.find('usage/target').text, 'pool_default')
self.assertEqual(root.find('usage').attrib['type'], 'iscsi')
self.assertEqual(root.attrib['private'], 'yes')
self.assertEqual(root.find('description').text, 'Passphrase for default pool created by Salt')
def test_list_pools(self):
'''
Test virt.list_pools()
'''
names = ['pool1', 'default', 'pool2']
pool_mocks = [MagicMock(), MagicMock(), MagicMock()]
for i, value in enumerate(names):
pool_mocks[i].name.return_value = value
self.mock_conn.listAllStoragePools.return_value = pool_mocks # pylint: disable=no-member
actual = virt.list_pools()
self.assertEqual(names, actual)
def test_pool_info(self):
'''
Test virt.pool_info()
'''
# pylint: disable=no-member
pool_mock = MagicMock()
pool_mock.name.return_value = 'foo'
pool_mock.UUIDString.return_value = 'some-uuid'
pool_mock.info.return_value = [0, 1234, 5678, 123]
pool_mock.autostart.return_value = True
pool_mock.isPersistent.return_value = True
pool_mock.XMLDesc.return_value = '''<pool type='dir'>
<name>default</name>
<uuid>d92682d0-33cf-4e10-9837-a216c463e158</uuid>
<capacity unit='bytes'>854374301696</capacity>
<allocation unit='bytes'>596275986432</allocation>
<available unit='bytes'>258098315264</available>
<source>
</source>
<target>
<path>/srv/vms</path>
<permissions>
<mode>0755</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</pool>'''
self.mock_conn.listAllStoragePools.return_value = [pool_mock]
# pylint: enable=no-member
pool = virt.pool_info('foo')
self.assertEqual({'foo': {
'uuid': 'some-uuid',
'state': 'inactive',
'capacity': 1234,
'allocation': 5678,
'free': 123,
'autostart': True,
'persistent': True,
'type': 'dir',
'target_path': '/srv/vms'}}, pool)
def test_pool_info_notarget(self):
'''
Test virt.pool_info() with a pool that has no target path
'''
# pylint: disable=no-member
pool_mock = MagicMock()
pool_mock.name.return_value = 'ceph'
pool_mock.UUIDString.return_value = 'some-uuid'
pool_mock.info.return_value = [0, 0, 0, 0]
pool_mock.autostart.return_value = True
pool_mock.isPersistent.return_value = True
pool_mock.XMLDesc.return_value = '''<pool type='rbd'>
<name>ceph</name>
<uuid>some-uuid</uuid>
<capacity unit='bytes'>0</capacity>
<allocation unit='bytes'>0</allocation>
<available unit='bytes'>0</available>
<source>
<host name='localhost' port='6789'/>
<host name='localhost' port='6790'/>
<name>rbd</name>
<auth type='ceph' username='admin'>
<secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
</auth>
</source>
</pool>'''
self.mock_conn.listAllStoragePools.return_value = [pool_mock]
# pylint: enable=no-member
pool = virt.pool_info('ceph')
self.assertEqual({'ceph': {
'uuid': 'some-uuid',
'state': 'inactive',
'capacity': 0,
'allocation': 0,
'free': 0,
'autostart': True,
'persistent': True,
'type': 'rbd',
'target_path': None}}, pool)
def test_pool_info_notfound(self):
'''
Test virt.pool_info() when the pool can't be found
'''
# pylint: disable=no-member
self.mock_conn.listAllStoragePools.return_value = []
# pylint: enable=no-member
pool = virt.pool_info('foo')
self.assertEqual({}, pool)
def test_pool_info_all(self):
'''
Test virt.pool_info()
'''
# pylint: disable=no-member
pool_mocks = []
for i in range(2):
pool_mock = MagicMock()
pool_mock.name.return_value = 'pool{0}'.format(i)
pool_mock.UUIDString.return_value = 'some-uuid-{0}'.format(i)
pool_mock.info.return_value = [0, 1234, 5678, 123]
pool_mock.autostart.return_value = True
pool_mock.isPersistent.return_value = True
pool_mock.XMLDesc.return_value = '''<pool type='dir'>
<name>default</name>
<uuid>d92682d0-33cf-4e10-9837-a216c463e158</uuid>
<capacity unit='bytes'>854374301696</capacity>
<allocation unit='bytes'>596275986432</allocation>
<available unit='bytes'>258098315264</available>
<source>
</source>
<target>
<path>/srv/vms</path>
<permissions>
<mode>0755</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</pool>'''
pool_mocks.append(pool_mock)
self.mock_conn.listAllStoragePools.return_value = pool_mocks
# pylint: enable=no-member
pool = virt.pool_info()
self.assertEqual({
'pool0':
{
'uuid': 'some-uuid-0',
'state': 'inactive',
'capacity': 1234,
'allocation': 5678,
'free': 123,
'autostart': True,
'persistent': True,
'type': 'dir',
'target_path': '/srv/vms'
}, 'pool1': {
'uuid': 'some-uuid-1',
'state': 'inactive',
'capacity': 1234,
'allocation': 5678,
'free': 123,
'autostart': True,
'persistent': True,
'type': 'dir',
'target_path': '/srv/vms'
}
}, pool)
def test_pool_list_volumes(self):
'''
Test virt.pool_list_volumes
'''
names = ['volume1', 'volume2']
mock_pool = MagicMock()
# pylint: disable=no-member
mock_pool.listVolumes.return_value = names
self.mock_conn.storagePoolLookupByName.return_value = mock_pool
# pylint: enable=no-member
self.assertEqual(names, virt.pool_list_volumes('default'))
@patch('salt.modules.virt._is_kvm_hyper', return_value=True)
@patch('salt.modules.virt._is_xen_hyper', return_value=False)
def test_get_hypervisor(self, isxen_mock, iskvm_mock):
'''
test the virt.get_hypervisor() function
'''
self.assertEqual('kvm', virt.get_hypervisor())
iskvm_mock.return_value = False
self.assertIsNone(virt.get_hypervisor())
isxen_mock.return_value = True
self.assertEqual('xen', virt.get_hypervisor())
def test_pool_delete(self):
'''
Test virt.pool_delete function
'''
mock_pool = MagicMock()
mock_pool.delete = MagicMock(return_value=0)
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool)
res = virt.pool_delete('test-pool')
self.assertTrue(res)
self.mock_conn.storagePoolLookupByName.assert_called_once_with('test-pool')
        # Shouldn't be called with any other parameter so far, since those are not implemented
        # and would thus throw exceptions.
mock_pool.delete.assert_called_once_with(self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL)
def test_full_info(self):
'''
Test virt.full_info
'''
xml = '''<domain type='kvm' id='7'>
<uuid>28deee33-4859-4f23-891c-ee239cffec94</uuid>
<name>test-vm</name>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/disks/test.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/disks/test-cdrom.iso'/>
<target dev='hda' bus='ide'/>
<readonly/>
</disk>
<interface type='bridge'>
<mac address='ac:de:48:b6:8b:59'/>
<source bridge='br0'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<graphics type='vnc' port='5900' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
</devices>
</domain>
'''
self.set_mock_vm("test-vm", xml)
qemu_infos = '''[{
"virtual-size": 25769803776,
"filename": "/disks/test.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 217088,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"full-backing-filename": "/disks/mybacking.qcow2",
"backing-filename": "mybacking.qcow2",
"dirty-flag": false
},
{
"virtual-size": 25769803776,
"filename": "/disks/mybacking.qcow2",
"cluster-size": 65536,
"format": "qcow2",
"actual-size": 393744384,
"format-specific": {
"type": "qcow2",
"data": {
"compat": "1.1",
"lazy-refcounts": false,
"refcount-bits": 16,
"corrupt": false
}
},
"dirty-flag": false
}]'''
self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member
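        # libvirt getInfo() tuple order: [model, memory (MB), cpus, MHz, NUMA nodes, sockets, cores, threads],
        # which the node_info assertions below rely on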
self.mock_conn.getInfo = MagicMock(return_value=['x86_64', 4096, 8, 2712, 1, 2, 4, 2])
actual = virt.full_info()
# Check that qemu-img was called with the proper parameters
qemu_img_call = [call for call in self.mock_subprocess.Popen.call_args_list if 'qemu-img' in call[0][0]][0]
self.assertIn('info', qemu_img_call[0][0])
self.assertIn('-U', qemu_img_call[0][0])
# Test the hypervisor infos
self.assertEqual(2816, actual['freemem'])
self.assertEqual(6, actual['freecpu'])
self.assertEqual(4, actual['node_info']['cpucores'])
self.assertEqual(2712, actual['node_info']['cpumhz'])
self.assertEqual('x86_64', actual['node_info']['cpumodel'])
self.assertEqual(8, actual['node_info']['cpus'])
self.assertEqual(2, actual['node_info']['cputhreads'])
self.assertEqual(1, actual['node_info']['numanodes'])
self.assertEqual(4096, actual['node_info']['phymemory'])
self.assertEqual(2, actual['node_info']['sockets'])
# Test the vm_info output:
self.assertEqual(2, actual['vm_info']['test-vm']['cpu'])
self.assertEqual(1234, actual['vm_info']['test-vm']['cputime'])
self.assertEqual(1024 * 1024, actual['vm_info']['test-vm']['mem'])
self.assertEqual(2048 * 1024, actual['vm_info']['test-vm']['maxMem'])
self.assertEqual('shutdown', actual['vm_info']['test-vm']['state'])
self.assertEqual('28deee33-4859-4f23-891c-ee239cffec94', actual['vm_info']['test-vm']['uuid'])
self.assertEqual('destroy', actual['vm_info']['test-vm']['on_crash'])
self.assertEqual('restart', actual['vm_info']['test-vm']['on_reboot'])
self.assertEqual('destroy', actual['vm_info']['test-vm']['on_poweroff'])
# Test the nics
nic = actual['vm_info']['test-vm']['nics']['ac:de:48:b6:8b:59']
self.assertEqual('bridge', nic['type'])
self.assertEqual('ac:de:48:b6:8b:59', nic['mac'])
# Test the disks
disks = actual['vm_info']['test-vm']['disks']
disk = disks.get('vda')
self.assertEqual('/disks/test.qcow2', disk['file'])
self.assertEqual('disk', disk['type'])
self.assertEqual('/disks/mybacking.qcow2', disk['backing file']['file'])
cdrom = disks.get('hda')
self.assertEqual('/disks/test-cdrom.iso', cdrom['file'])
self.assertEqual('cdrom', cdrom['type'])
self.assertFalse('backing file' in cdrom.keys())
# Test the graphics
graphics = actual['vm_info']['test-vm']['graphics']
self.assertEqual('vnc', graphics['type'])
self.assertEqual('5900', graphics['port'])
self.assertEqual('0.0.0.0', graphics['listen'])
def test_pool_update(self):
'''
Test the pool_update function
'''
current_xml = '''<pool type='dir'>
<name>default</name>
<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>
<capacity unit='bytes'>1999421108224</capacity>
<allocation unit='bytes'>713207042048</allocation>
<available unit='bytes'>1286214066176</available>
<source>
</source>
<target>
<path>/path/to/pool</path>
<permissions>
<mode>0775</mode>
<owner>0</owner>
<group>100</group>
</permissions>
</target>
</pool>'''
expected_xml = '<pool type="netfs">' \
'<name>default</name>' \
'<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>' \
'<capacity unit="bytes">1999421108224</capacity>' \
'<allocation unit="bytes">713207042048</allocation>' \
'<available unit="bytes">1286214066176</available>' \
'<target>' \
'<path>/mnt/cifs</path>' \
'<permissions>' \
'<mode>0774</mode>' \
'<owner>1234</owner>' \
'<group>123</group>' \
'</permissions>' \
'</target>' \
'<source>' \
'<dir path="samba_share" />' \
'<host name="one.example.com" />' \
'<host name="two.example.com" />' \
'<format type="cifs" />' \
'</source>' \
'</pool>'
mocked_pool = MagicMock()
mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mocked_pool)
self.mock_conn.storagePoolDefineXML = MagicMock()
self.assertTrue(
virt.pool_update('default',
'netfs',
target='/mnt/cifs',
permissions={'mode': '0774', 'owner': '1234', 'group': '123'},
source_format='cifs',
source_dir='samba_share',
source_hosts=['one.example.com', 'two.example.com']))
self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
def test_pool_update_nochange(self):
'''
Test the pool_update function when no change is needed
'''
current_xml = '''<pool type='dir'>
<name>default</name>
<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>
<capacity unit='bytes'>1999421108224</capacity>
<allocation unit='bytes'>713207042048</allocation>
<available unit='bytes'>1286214066176</available>
<source>
</source>
<target>
<path>/path/to/pool</path>
<permissions>
<mode>0775</mode>
<owner>0</owner>
<group>100</group>
</permissions>
</target>
</pool>'''
mocked_pool = MagicMock()
mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mocked_pool)
self.mock_conn.storagePoolDefineXML = MagicMock()
self.assertFalse(
virt.pool_update('default',
'dir',
target='/path/to/pool',
permissions={'mode': '0775', 'owner': '0', 'group': '100'},
test=True))
self.mock_conn.storagePoolDefineXML.assert_not_called()
def test_pool_update_password(self):
'''
Test the pool_update function, where the password only is changed
'''
current_xml = '''<pool type='rbd'>
<name>default</name>
<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>
<capacity unit='bytes'>1999421108224</capacity>
<allocation unit='bytes'>713207042048</allocation>
<available unit='bytes'>1286214066176</available>
<source>
<name>iscsi-images</name>
<host name='ses4.tf.local'/>
<host name='ses5.tf.local'/>
<auth username='libvirt' type='ceph'>
<secret uuid='14e9a0f1-8fbf-4097-b816-5b094c182212'/>
</auth>
</source>
</pool>'''
expected_xml = '<pool type="rbd">' \
'<name>default</name>' \
'<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>' \
'<capacity unit="bytes">1999421108224</capacity>' \
'<allocation unit="bytes">713207042048</allocation>' \
'<available unit="bytes">1286214066176</available>' \
'<source>' \
'<host name="ses4.tf.local" />' \
'<host name="ses5.tf.local" />' \
'<auth type="ceph" username="libvirt">' \
'<secret uuid="14e9a0f1-8fbf-4097-b816-5b094c182212" />' \
'</auth>' \
'<name>iscsi-images</name>' \
'</source>' \
'</pool>'
mock_secret = MagicMock()
self.mock_conn.secretLookupByUUIDString = MagicMock(return_value=mock_secret)
mocked_pool = MagicMock()
mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mocked_pool)
self.mock_conn.storagePoolDefineXML = MagicMock()
self.assertTrue(
virt.pool_update('default',
'rbd',
source_name='iscsi-images',
source_hosts=['ses4.tf.local', 'ses5.tf.local'],
source_auth={'username': 'libvirt',
'password': 'c2VjcmV0'}))
self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
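        # 'c2VjcmV0' is base64 for 'secret'; the secret value is expected to be stored decoded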
mock_secret.setValue.assert_called_once_with(b'secret')
def test_pool_update_password_create(self):
'''
Test the pool_update function, where the password only is changed
'''
current_xml = '''<pool type='rbd'>
<name>default</name>
<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>
<capacity unit='bytes'>1999421108224</capacity>
<allocation unit='bytes'>713207042048</allocation>
<available unit='bytes'>1286214066176</available>
<source>
<name>iscsi-images</name>
<host name='ses4.tf.local'/>
<host name='ses5.tf.local'/>
</source>
</pool>'''
expected_xml = '<pool type="rbd">' \
'<name>default</name>' \
'<uuid>20fbe05c-ab40-418a-9afa-136d512f0ede</uuid>' \
'<capacity unit="bytes">1999421108224</capacity>' \
'<allocation unit="bytes">713207042048</allocation>' \
'<available unit="bytes">1286214066176</available>' \
'<source>' \
'<host name="ses4.tf.local" />' \
'<host name="ses5.tf.local" />' \
'<auth type="ceph" username="libvirt">' \
'<secret usage="pool_default" />' \
'</auth>' \
'<name>iscsi-images</name>' \
'</source>' \
'</pool>'
mock_secret = MagicMock()
self.mock_conn.secretDefineXML = MagicMock(return_value=mock_secret)
mocked_pool = MagicMock()
mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mocked_pool)
self.mock_conn.storagePoolDefineXML = MagicMock()
self.assertTrue(
virt.pool_update('default',
'rbd',
source_name='iscsi-images',
source_hosts=['ses4.tf.local', 'ses5.tf.local'],
source_auth={'username': 'libvirt',
'password': 'c2VjcmV0'}))
self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
mock_secret.setValue.assert_called_once_with(b'secret')
def test_volume_infos(self):
'''
Test virt.volume_infos
'''
vms_disks = [
'''
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/path/to/vol0.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
''',
'''
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/path/to/vol3.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
''',
'''
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/path/to/vol2.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
'''
]
mock_vms = []
for idx, disk in enumerate(vms_disks):
vm = MagicMock()
# pylint: disable=no-member
vm.name.return_value = 'vm{0}'.format(idx)
vm.XMLDesc.return_value = '''
<domain type='kvm' id='1'>
<name>vm{0}</name>
<devices>{1}</devices>
</domain>
'''.format(idx, disk)
# pylint: enable=no-member
mock_vms.append(vm)
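        # pool1 deliberately contains 'vol0bad', whose info() and XMLDesc() raise,
        # to exercise the error-handling path of volume_infos()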
mock_pool_data = [
{
'name': 'pool0',
'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
'volumes': [
{
'key': '/key/of/vol0',
'name': 'vol0',
'path': '/path/to/vol0.qcow2',
'info': [0, 123456789, 123456],
'backingStore': None
}
]
},
{
'name': 'pool1',
'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
'volumes': [
{
'key': '/key/of/vol0bad',
'name': 'vol0bad',
'path': '/path/to/vol0bad.qcow2',
'info': None,
'backingStore': None
},
{
'key': '/key/of/vol1',
'name': 'vol1',
'path': '/path/to/vol1.qcow2',
'info': [0, 12345, 1234],
'backingStore': None
},
{
'key': '/key/of/vol2',
'name': 'vol2',
'path': '/path/to/vol2.qcow2',
'info': [0, 12345, 1234],
'backingStore': '/path/to/vol0.qcow2'
},
],
}
]
mock_pools = []
for pool_data in mock_pool_data:
mock_pool = MagicMock()
mock_pool.name.return_value = pool_data['name'] # pylint: disable=no-member
mock_pool.info.return_value = [pool_data['state']]
mock_volumes = []
for vol_data in pool_data['volumes']:
mock_volume = MagicMock()
# pylint: disable=no-member
mock_volume.name.return_value = vol_data['name']
mock_volume.key.return_value = vol_data['key']
mock_volume.path.return_value = '/path/to/{0}.qcow2'.format(vol_data['name'])
if vol_data['info']:
mock_volume.info.return_value = vol_data['info']
backing_store = '''
<backingStore>
<format>qcow2</format>
<path>{0}</path>
</backingStore>
'''.format(vol_data['backingStore']) if vol_data['backingStore'] else '<backingStore/>'
mock_volume.XMLDesc.return_value = '''
<volume type='file'>
<name>{0}</name>
<target>
<format>qcow2</format>
<path>/path/to/{0}.qcow2</path>
</target>
{1}
</volume>
'''.format(vol_data['name'], backing_store)
else:
mock_volume.info.side_effect = self.mock_libvirt.libvirtError('No such volume')
mock_volume.XMLDesc.side_effect = self.mock_libvirt.libvirtError('No such volume')
mock_volumes.append(mock_volume)
# pylint: enable=no-member
mock_pool.listAllVolumes.return_value = mock_volumes # pylint: disable=no-member
mock_pools.append(mock_pool)
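        # an inactive pool whose listAllVolumes() raises should simply be skipped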
inactive_pool = MagicMock()
inactive_pool.name.return_value = 'pool2'
inactive_pool.info.return_value = [self.mock_libvirt.VIR_STORAGE_POOL_INACTIVE]
inactive_pool.listAllVolumes.side_effect = self.mock_libvirt.libvirtError('pool is inactive')
mock_pools.append(inactive_pool)
self.mock_conn.listAllStoragePools.return_value = mock_pools # pylint: disable=no-member
with patch('salt.modules.virt._get_domain', MagicMock(return_value=mock_vms)):
actual = virt.volume_infos('pool0', 'vol0')
self.assertEqual(1, len(actual.keys()))
self.assertEqual(1, len(actual['pool0'].keys()))
self.assertEqual(['vm0', 'vm2'], sorted(actual['pool0']['vol0']['used_by']))
self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
self.assertEqual('file', actual['pool0']['vol0']['type'])
self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
self.assertEqual(virt.volume_infos('pool1', None), {
'pool1': {
'vol1': {
'type': 'file',
'key': '/key/of/vol1',
'path': '/path/to/vol1.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
},
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': ['vm2'],
}
}
})
self.assertEqual(virt.volume_infos(None, 'vol2'), {
'pool1': {
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': ['vm2'],
}
}
})
# Single VM test
with patch('salt.modules.virt._get_domain', MagicMock(return_value=mock_vms[0])):
actual = virt.volume_infos('pool0', 'vol0')
self.assertEqual(1, len(actual.keys()))
self.assertEqual(1, len(actual['pool0'].keys()))
self.assertEqual(['vm0'], sorted(actual['pool0']['vol0']['used_by']))
self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
self.assertEqual('file', actual['pool0']['vol0']['type'])
self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
self.assertEqual(virt.volume_infos('pool1', None), {
'pool1': {
'vol1': {
'type': 'file',
'key': '/key/of/vol1',
'path': '/path/to/vol1.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
},
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
}
}
})
self.assertEqual(virt.volume_infos(None, 'vol2'), {
'pool1': {
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
}
}
})
# No VM test
with patch('salt.modules.virt._get_domain', MagicMock(side_effect=CommandExecutionError('no VM'))):
actual = virt.volume_infos('pool0', 'vol0')
self.assertEqual(1, len(actual.keys()))
self.assertEqual(1, len(actual['pool0'].keys()))
self.assertEqual([], sorted(actual['pool0']['vol0']['used_by']))
self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
self.assertEqual('file', actual['pool0']['vol0']['type'])
self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
self.assertEqual(virt.volume_infos('pool1', None), {
'pool1': {
'vol1': {
'type': 'file',
'key': '/key/of/vol1',
'path': '/path/to/vol1.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
},
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
}
}
})
self.assertEqual(virt.volume_infos(None, 'vol2'), {
'pool1': {
'vol2': {
'type': 'file',
'key': '/key/of/vol2',
'path': '/path/to/vol2.qcow2',
'capacity': 12345,
'allocation': 1234,
'used_by': [],
}
}
})
def test_volume_delete(self):
'''
Test virt.volume_delete
'''
mock_delete = MagicMock(side_effect=[0, 1])
mock_volume = MagicMock()
mock_volume.delete = mock_delete # pylint: disable=no-member
mock_pool = MagicMock()
# pylint: disable=no-member
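        # successive lookups map to the four calls below: successful delete,
        # failed delete, missing volume, missing pool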
mock_pool.storageVolLookupByName.side_effect = [
mock_volume,
mock_volume,
self.mock_libvirt.libvirtError("Missing volume"),
mock_volume,
]
self.mock_conn.storagePoolLookupByName.side_effect = [
mock_pool,
mock_pool,
mock_pool,
self.mock_libvirt.libvirtError("Missing pool"),
]
# pylint: enable=no-member
self.assertTrue(virt.volume_delete('default', 'test_volume'))
self.assertFalse(virt.volume_delete('default', 'test_volume'))
with self.assertRaises(self.mock_libvirt.libvirtError):
virt.volume_delete('default', 'missing')
virt.volume_delete('missing', 'test_volume')
self.assertEqual(mock_delete.call_count, 2)
def test_pool_capabilities(self):
'''
Test virt.pool_capabilities where libvirt has the pool-capabilities feature
'''
xml_caps = '''
<storagepoolCapabilities>
<pool type='disk' supported='yes'>
<poolOptions>
<defaultFormat type='unknown'/>
<enum name='sourceFormatType'>
<value>unknown</value>
<value>dos</value>
<value>dvh</value>
</enum>
</poolOptions>
<volOptions>
<defaultFormat type='none'/>
<enum name='targetFormatType'>
<value>none</value>
<value>linux</value>
</enum>
</volOptions>
</pool>
<pool type='iscsi' supported='yes'>
</pool>
<pool type='rbd' supported='yes'>
<volOptions>
<defaultFormat type='raw'/>
<enum name='targetFormatType'>
</enum>
</volOptions>
</pool>
<pool type='sheepdog' supported='no'>
</pool>
</storagepoolCapabilities>
'''
self.mock_conn.getStoragePoolCapabilities = MagicMock(return_value=xml_caps)
actual = virt.pool_capabilities()
self.assertEqual({
'computed': False,
'pool_types': [{
'name': 'disk',
'supported': True,
'options': {
'pool': {
'default_format': 'unknown',
'sourceFormatType': ['unknown', 'dos', 'dvh']
},
'volume': {
'default_format': 'none',
'targetFormatType': ['none', 'linux']
}
}
},
{
'name': 'iscsi',
'supported': True,
},
{
'name': 'rbd',
'supported': True,
'options': {
'volume': {
'default_format': 'raw',
'targetFormatType': []
}
}
},
{
'name': 'sheepdog',
'supported': False,
},
]}, actual)
@patch('salt.modules.virt.get_hypervisor', return_value='kvm')
def test_pool_capabilities_computed(self, mock_get_hypervisor):
'''
Test virt.pool_capabilities where libvirt doesn't have the pool-capabilities feature
'''
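        # 4006000 encodes libvirt 4.6.0 (major * 1,000,000 + minor * 1,000 + release)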
self.mock_conn.getLibVersion = MagicMock(return_value=4006000)
del self.mock_conn.getStoragePoolCapabilities
actual = virt.pool_capabilities()
self.assertTrue(actual['computed'])
backends = actual['pool_types']
# libvirt version matching check
self.assertFalse([backend for backend in backends if backend['name'] == 'iscsi-direct'][0]['supported'])
self.assertTrue([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
self.assertFalse([backend for backend in backends if backend['name'] == 'zfs'][0]['supported'])
# test case matching other hypervisors
mock_get_hypervisor.return_value = 'xen'
backends = virt.pool_capabilities()['pool_types']
self.assertFalse([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
mock_get_hypervisor.return_value = 'bhyve'
backends = virt.pool_capabilities()['pool_types']
self.assertFalse([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
self.assertTrue([backend for backend in backends if backend['name'] == 'zfs'][0]['supported'])
# Test options output
self.assertNotIn('options', [backend for backend in backends if backend['name'] == 'iscsi'][0])
self.assertNotIn('pool', [backend for backend in backends if backend['name'] == 'dir'][0]['options'])
self.assertNotIn('volume', [backend for backend in backends if backend['name'] == 'logical'][0]['options'])
self.assertEqual({
'pool': {
'default_format': 'auto',
'sourceFormatType': ['auto', 'nfs', 'glusterfs', 'cifs']
},
'volume': {
'default_format': 'raw',
'targetFormatType': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc', 'vdi',
'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed', 'vmdk']
}
},
[backend for backend in backends if backend['name'] == 'netfs'][0]['options'])
| 39.833616
| 120
| 0.486863
|
5a9dbbff520ae9071f4068c9f234663fd86a8619
| 2,264
|
py
|
Python
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveTaskForSubmittingDomainRealNameVerificationByRegistrantProfileIDRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveTaskForSubmittingDomainRealNameVerificationByRegistrantProfileIDRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveTaskForSubmittingDomainRealNameVerificationByRegistrantProfileIDRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveTaskForSubmittingDomainRealNameVerificationByRegistrantProfileIDRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveTaskForSubmittingDomainRealNameVerificationByRegistrantProfileID')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_RegistrantProfileId(self):
return self.get_query_params().get('RegistrantProfileId')
def set_RegistrantProfileId(self,RegistrantProfileId):
self.add_query_param('RegistrantProfileId',RegistrantProfileId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
| 36.516129
| 124
| 0.779594
|
7bfd4cc30e2b646f8613dd574634210f40fac44f
| 5,248
|
py
|
Python
|
keras_extra/generator.py
|
klauscc/keras-extra
|
a0536e1ec08c5a44f973da11ab7ce45d38193e8d
|
[
"Apache-2.0"
] | null | null | null |
keras_extra/generator.py
|
klauscc/keras-extra
|
a0536e1ec08c5a44f973da11ab7ce45d38193e8d
|
[
"Apache-2.0"
] | null | null | null |
keras_extra/generator.py
|
klauscc/keras-extra
|
a0536e1ec08c5a44f973da11ab7ce45d38193e8d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#================================================================
# God Bless You.
#
# file name: generator.py
# author: klaus
# email: klaus.cheng@qq.com
# created date: 2018/01/04
# description:
#
#================================================================
import os
import sys
import keras
import numpy as np
from keras.preprocessing.image import Iterator
from .utils.image import resize_image, load_image
class LabelFileIterator(Iterator):
"""iterate data from label file"""
def __init__(self,
label_file_path,
image_data_generator,
batch_size=32,
num_classes=2,
keep_aspect_ratio=True,
min_side=600,
max_side=1024,
shuffle=True,
seed=None,
target_size=None,
preprocess_function=None):
"""TODO: to be defined1.
Args:
label_file_path (TODO): TODO
image_data_generator (TODO): TODO
Kwargs:
batch_size (TODO): TODO
num_classes:
keep_aspect_ratio (TODO): TODO
min_side (TODO): TODO
max_side (TODO): TODO
shuffle (TODO): TODO
seed (TODO): TODO
target_size (TODO): TODO
preprocess_function:
"""
self._label_file_path = label_file_path
self._image_data_generator = image_data_generator
self._batch_size = batch_size
self._num_classes = num_classes
self._keep_aspect_ratio = keep_aspect_ratio
self._min_side = min_side
self._max_side = max_side
self._shuffle = shuffle
self._seed = seed
self._target_size = target_size
self._preprocess_function = preprocess_function
paths, labels = self._enumerate_files(self._label_file_path)
self.paths = paths
self.labels = labels
self.samples = len(labels)
super(LabelFileIterator, self).__init__(self.samples, self._batch_size,
self._shuffle, self._seed)
def _enumerate_files(self, label_file_path):
"""get file paths
Args:
label_file_path (TODO): TODO
Returns: TODO
"""
paths = []
labels = []
with open(label_file_path, 'r') as fld:
for l in fld:
x, y = l.split()
y = int(y)
paths.append(x)
labels.append(y)
return paths, labels
def _preprocess_image(self, img, label):
"""preprocess image
Args:
img: numpy array image
Returns:
img: an numpy array image
img_scale: the img resize factor (scale_h, scale_w). scale_h is the resize factor along the rows and scale_w along the cols
"""
        if self._preprocess_function is not None:
img, label = self._preprocess_function(img, label)
img = self._image_data_generator.random_transform(img)
img, img_scale = resize_image(
img,
min_side=self._min_side,
max_side=self._max_side,
target_size=self._target_size)
return img, label, img_scale
def _load_single_example(self, index):
"""load one example of index
Args:
index (TODO): TODO
Returns: TODO
"""
try:
path = self.paths[index]
label = self.labels[index]
img = load_image(path)
img, label, img_scale = self._preprocess_image(img, label)
        except Exception as e:
            print(e)
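            # loading failed: fall back to a randomly chosen sample instead of aborting the batch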
index = np.random.randint(self.samples)
img, label, img_scale = self._load_single_example(index)
return img, label, img_scale
def _get_batches_of_transformed_samples(self, index_array):
"""
Args:
index_array (TODO): TODO
Returns: TODO
"""
image_group = []
label_group = []
for i, j in enumerate(index_array):
img, label, img_scale = self._load_single_example(j)
image_group.append(img)
label_group.append(label)
# get the max image shape
max_shape = tuple(
max(image.shape[x] for image in image_group) for x in range(3))
# construct an image batch and label batch
image_batch = np.zeros(
(self._batch_size, ) + max_shape, dtype=keras.backend.floatx())
label_batch = keras.utils.to_categorical(
label_group, num_classes=self._num_classes)
# copy all images to the upper left part of the image batch object
for image_index, image in enumerate(image_group):
image_batch[image_index, :image.shape[0], :image.shape[1], :
image.shape[2]] = image
return image_batch, label_batch
def next(self):
"""for python 2.x
Returns: The next batch
"""
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
| 29.318436
| 135
| 0.556784
|