Dataset schema (column: dtype, value range; ⌀ marks nullable columns):
hexsha: string, length 40
size: int64, 3 to 1.03M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 to 972
max_stars_repo_name: string, length 6 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 3 to 972
max_issues_repo_name: string, length 6 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 116k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 3 to 972
max_forks_repo_name: string, length 6 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 3 to 1.03M
avg_line_length: float64, 1.13 to 941k
max_line_length: int64, 2 to 941k
alphanum_fraction: float64, 0 to 1

hexsha: 40b2eb86509a7b4bf8ce745acfda0a37083d5216 | size: 21,968 | ext: py | lang: Python
max_stars: path=squareconnect/models/v1_order.py | repo=xethorn/connect-python-sdk | head_hexsha=a0543b2f7ea498865c6d916de0b10370f89ebc77 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues: path=squareconnect/models/v1_order.py | repo=xethorn/connect-python-sdk | head_hexsha=a0543b2f7ea498865c6d916de0b10370f89ebc77 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks: path=squareconnect/models/v1_order.py | repo=xethorn/connect-python-sdk | head_hexsha=a0543b2f7ea498865c6d916de0b10370f89ebc77 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Order(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, errors=None, id=None, buyer_email=None, recipient_name=None, recipient_phone_number=None, state=None, shipping_address=None, subtotal_money=None, total_shipping_money=None, total_tax_money=None, total_price_money=None, total_discount_money=None, created_at=None, updated_at=None, expires_at=None, payment_id=None, buyer_note=None, completed_note=None, refunded_note=None, canceled_note=None, tender=None, order_history=None, promo_code=None, btc_receive_address=None, btc_price_satoshi=None):
"""
V1Order - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'errors': 'list[Error]',
'id': 'str',
'buyer_email': 'str',
'recipient_name': 'str',
'recipient_phone_number': 'str',
'state': 'str',
'shipping_address': 'Address',
'subtotal_money': 'V1Money',
'total_shipping_money': 'V1Money',
'total_tax_money': 'V1Money',
'total_price_money': 'V1Money',
'total_discount_money': 'V1Money',
'created_at': 'str',
'updated_at': 'str',
'expires_at': 'str',
'payment_id': 'str',
'buyer_note': 'str',
'completed_note': 'str',
'refunded_note': 'str',
'canceled_note': 'str',
'tender': 'V1Tender',
'order_history': 'list[V1OrderHistoryEntry]',
'promo_code': 'str',
'btc_receive_address': 'str',
'btc_price_satoshi': 'float'
}
self.attribute_map = {
'errors': 'errors',
'id': 'id',
'buyer_email': 'buyer_email',
'recipient_name': 'recipient_name',
'recipient_phone_number': 'recipient_phone_number',
'state': 'state',
'shipping_address': 'shipping_address',
'subtotal_money': 'subtotal_money',
'total_shipping_money': 'total_shipping_money',
'total_tax_money': 'total_tax_money',
'total_price_money': 'total_price_money',
'total_discount_money': 'total_discount_money',
'created_at': 'created_at',
'updated_at': 'updated_at',
'expires_at': 'expires_at',
'payment_id': 'payment_id',
'buyer_note': 'buyer_note',
'completed_note': 'completed_note',
'refunded_note': 'refunded_note',
'canceled_note': 'canceled_note',
'tender': 'tender',
'order_history': 'order_history',
'promo_code': 'promo_code',
'btc_receive_address': 'btc_receive_address',
'btc_price_satoshi': 'btc_price_satoshi'
}
self._errors = errors
self._id = id
self._buyer_email = buyer_email
self._recipient_name = recipient_name
self._recipient_phone_number = recipient_phone_number
self._state = state
self._shipping_address = shipping_address
self._subtotal_money = subtotal_money
self._total_shipping_money = total_shipping_money
self._total_tax_money = total_tax_money
self._total_price_money = total_price_money
self._total_discount_money = total_discount_money
self._created_at = created_at
self._updated_at = updated_at
self._expires_at = expires_at
self._payment_id = payment_id
self._buyer_note = buyer_note
self._completed_note = completed_note
self._refunded_note = refunded_note
self._canceled_note = canceled_note
self._tender = tender
self._order_history = order_history
self._promo_code = promo_code
self._btc_receive_address = btc_receive_address
self._btc_price_satoshi = btc_price_satoshi
@property
def errors(self):
"""
Gets the errors of this V1Order.
Any errors that occurred during the request.
:return: The errors of this V1Order.
:rtype: list[Error]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""
Sets the errors of this V1Order.
Any errors that occurred during the request.
:param errors: The errors of this V1Order.
:type: list[Error]
"""
self._errors = errors
@property
def id(self):
"""
Gets the id of this V1Order.
The order's unique identifier.
:return: The id of this V1Order.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this V1Order.
The order's unique identifier.
:param id: The id of this V1Order.
:type: str
"""
self._id = id
@property
def buyer_email(self):
"""
Gets the buyer_email of this V1Order.
The email address of the order's buyer.
:return: The buyer_email of this V1Order.
:rtype: str
"""
return self._buyer_email
@buyer_email.setter
def buyer_email(self, buyer_email):
"""
Sets the buyer_email of this V1Order.
The email address of the order's buyer.
:param buyer_email: The buyer_email of this V1Order.
:type: str
"""
self._buyer_email = buyer_email
@property
def recipient_name(self):
"""
Gets the recipient_name of this V1Order.
The name of the order's buyer.
:return: The recipient_name of this V1Order.
:rtype: str
"""
return self._recipient_name
@recipient_name.setter
def recipient_name(self, recipient_name):
"""
Sets the recipient_name of this V1Order.
The name of the order's buyer.
:param recipient_name: The recipient_name of this V1Order.
:type: str
"""
self._recipient_name = recipient_name
@property
def recipient_phone_number(self):
"""
Gets the recipient_phone_number of this V1Order.
The phone number to use for the order's delivery.
:return: The recipient_phone_number of this V1Order.
:rtype: str
"""
return self._recipient_phone_number
@recipient_phone_number.setter
def recipient_phone_number(self, recipient_phone_number):
"""
Sets the recipient_phone_number of this V1Order.
The phone number to use for the order's delivery.
:param recipient_phone_number: The recipient_phone_number of this V1Order.
:type: str
"""
self._recipient_phone_number = recipient_phone_number
@property
def state(self):
"""
Gets the state of this V1Order.
        The order's current state, such as OPEN, COMPLETED, CANCELED, or REFUNDED.
:return: The state of this V1Order.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this V1Order.
        The order's current state, such as OPEN, COMPLETED, CANCELED, or REFUNDED.
:param state: The state of this V1Order.
:type: str
"""
self._state = state
@property
def shipping_address(self):
"""
Gets the shipping_address of this V1Order.
The address to ship the order to.
:return: The shipping_address of this V1Order.
:rtype: Address
"""
return self._shipping_address
@shipping_address.setter
def shipping_address(self, shipping_address):
"""
Sets the shipping_address of this V1Order.
The address to ship the order to.
:param shipping_address: The shipping_address of this V1Order.
:type: Address
"""
self._shipping_address = shipping_address
@property
def subtotal_money(self):
"""
Gets the subtotal_money of this V1Order.
The amount of all items purchased in the order, before taxes and shipping.
:return: The subtotal_money of this V1Order.
:rtype: V1Money
"""
return self._subtotal_money
@subtotal_money.setter
def subtotal_money(self, subtotal_money):
"""
Sets the subtotal_money of this V1Order.
The amount of all items purchased in the order, before taxes and shipping.
:param subtotal_money: The subtotal_money of this V1Order.
:type: V1Money
"""
self._subtotal_money = subtotal_money
@property
def total_shipping_money(self):
"""
Gets the total_shipping_money of this V1Order.
The shipping cost for the order.
:return: The total_shipping_money of this V1Order.
:rtype: V1Money
"""
return self._total_shipping_money
@total_shipping_money.setter
def total_shipping_money(self, total_shipping_money):
"""
Sets the total_shipping_money of this V1Order.
The shipping cost for the order.
:param total_shipping_money: The total_shipping_money of this V1Order.
:type: V1Money
"""
self._total_shipping_money = total_shipping_money
@property
def total_tax_money(self):
"""
Gets the total_tax_money of this V1Order.
The total of all taxes applied to the order.
:return: The total_tax_money of this V1Order.
:rtype: V1Money
"""
return self._total_tax_money
@total_tax_money.setter
def total_tax_money(self, total_tax_money):
"""
Sets the total_tax_money of this V1Order.
The total of all taxes applied to the order.
:param total_tax_money: The total_tax_money of this V1Order.
:type: V1Money
"""
self._total_tax_money = total_tax_money
@property
def total_price_money(self):
"""
Gets the total_price_money of this V1Order.
The total cost of the order.
:return: The total_price_money of this V1Order.
:rtype: V1Money
"""
return self._total_price_money
@total_price_money.setter
def total_price_money(self, total_price_money):
"""
Sets the total_price_money of this V1Order.
The total cost of the order.
:param total_price_money: The total_price_money of this V1Order.
:type: V1Money
"""
self._total_price_money = total_price_money
@property
def total_discount_money(self):
"""
Gets the total_discount_money of this V1Order.
The total of all discounts applied to the order.
:return: The total_discount_money of this V1Order.
:rtype: V1Money
"""
return self._total_discount_money
@total_discount_money.setter
def total_discount_money(self, total_discount_money):
"""
Sets the total_discount_money of this V1Order.
The total of all discounts applied to the order.
:param total_discount_money: The total_discount_money of this V1Order.
:type: V1Money
"""
self._total_discount_money = total_discount_money
@property
def created_at(self):
"""
Gets the created_at of this V1Order.
The time when the order was created, in ISO 8601 format.
:return: The created_at of this V1Order.
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this V1Order.
The time when the order was created, in ISO 8601 format.
:param created_at: The created_at of this V1Order.
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""
Gets the updated_at of this V1Order.
The time when the order was last modified, in ISO 8601 format.
:return: The updated_at of this V1Order.
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""
Sets the updated_at of this V1Order.
The time when the order was last modified, in ISO 8601 format.
:param updated_at: The updated_at of this V1Order.
:type: str
"""
self._updated_at = updated_at
@property
def expires_at(self):
"""
Gets the expires_at of this V1Order.
The time when the order expires if no action is taken, in ISO 8601 format.
:return: The expires_at of this V1Order.
:rtype: str
"""
return self._expires_at
@expires_at.setter
def expires_at(self, expires_at):
"""
Sets the expires_at of this V1Order.
The time when the order expires if no action is taken, in ISO 8601 format.
:param expires_at: The expires_at of this V1Order.
:type: str
"""
self._expires_at = expires_at
@property
def payment_id(self):
"""
Gets the payment_id of this V1Order.
The unique identifier of the payment associated with the order.
:return: The payment_id of this V1Order.
:rtype: str
"""
return self._payment_id
@payment_id.setter
def payment_id(self, payment_id):
"""
Sets the payment_id of this V1Order.
The unique identifier of the payment associated with the order.
:param payment_id: The payment_id of this V1Order.
:type: str
"""
self._payment_id = payment_id
@property
def buyer_note(self):
"""
Gets the buyer_note of this V1Order.
A note provided by the buyer when the order was created, if any.
:return: The buyer_note of this V1Order.
:rtype: str
"""
return self._buyer_note
@buyer_note.setter
def buyer_note(self, buyer_note):
"""
Sets the buyer_note of this V1Order.
A note provided by the buyer when the order was created, if any.
:param buyer_note: The buyer_note of this V1Order.
:type: str
"""
self._buyer_note = buyer_note
@property
def completed_note(self):
"""
Gets the completed_note of this V1Order.
A note provided by the merchant when the order's state was set to COMPLETED, if any
:return: The completed_note of this V1Order.
:rtype: str
"""
return self._completed_note
@completed_note.setter
def completed_note(self, completed_note):
"""
Sets the completed_note of this V1Order.
A note provided by the merchant when the order's state was set to COMPLETED, if any
:param completed_note: The completed_note of this V1Order.
:type: str
"""
self._completed_note = completed_note
@property
def refunded_note(self):
"""
Gets the refunded_note of this V1Order.
A note provided by the merchant when the order's state was set to REFUNDED, if any.
:return: The refunded_note of this V1Order.
:rtype: str
"""
return self._refunded_note
@refunded_note.setter
def refunded_note(self, refunded_note):
"""
Sets the refunded_note of this V1Order.
A note provided by the merchant when the order's state was set to REFUNDED, if any.
:param refunded_note: The refunded_note of this V1Order.
:type: str
"""
self._refunded_note = refunded_note
@property
def canceled_note(self):
"""
Gets the canceled_note of this V1Order.
A note provided by the merchant when the order's state was set to CANCELED, if any.
:return: The canceled_note of this V1Order.
:rtype: str
"""
return self._canceled_note
@canceled_note.setter
def canceled_note(self, canceled_note):
"""
Sets the canceled_note of this V1Order.
A note provided by the merchant when the order's state was set to CANCELED, if any.
:param canceled_note: The canceled_note of this V1Order.
:type: str
"""
self._canceled_note = canceled_note
@property
def tender(self):
"""
Gets the tender of this V1Order.
The tender used to pay for the order.
:return: The tender of this V1Order.
:rtype: V1Tender
"""
return self._tender
@tender.setter
def tender(self, tender):
"""
Sets the tender of this V1Order.
The tender used to pay for the order.
:param tender: The tender of this V1Order.
:type: V1Tender
"""
self._tender = tender
@property
def order_history(self):
"""
Gets the order_history of this V1Order.
The history of actions associated with the order.
:return: The order_history of this V1Order.
:rtype: list[V1OrderHistoryEntry]
"""
return self._order_history
@order_history.setter
def order_history(self, order_history):
"""
Sets the order_history of this V1Order.
The history of actions associated with the order.
:param order_history: The order_history of this V1Order.
:type: list[V1OrderHistoryEntry]
"""
self._order_history = order_history
@property
def promo_code(self):
"""
Gets the promo_code of this V1Order.
The promo code provided by the buyer, if any.
:return: The promo_code of this V1Order.
:rtype: str
"""
return self._promo_code
@promo_code.setter
def promo_code(self, promo_code):
"""
Sets the promo_code of this V1Order.
The promo code provided by the buyer, if any.
:param promo_code: The promo_code of this V1Order.
:type: str
"""
self._promo_code = promo_code
@property
def btc_receive_address(self):
"""
Gets the btc_receive_address of this V1Order.
For Bitcoin transactions, the address that the buyer sent Bitcoin to.
:return: The btc_receive_address of this V1Order.
:rtype: str
"""
return self._btc_receive_address
@btc_receive_address.setter
def btc_receive_address(self, btc_receive_address):
"""
Sets the btc_receive_address of this V1Order.
For Bitcoin transactions, the address that the buyer sent Bitcoin to.
:param btc_receive_address: The btc_receive_address of this V1Order.
:type: str
"""
self._btc_receive_address = btc_receive_address
@property
def btc_price_satoshi(self):
"""
Gets the btc_price_satoshi of this V1Order.
For Bitcoin transactions, the price of the buyer's order in satoshi (100 million satoshi equals 1 BTC).
:return: The btc_price_satoshi of this V1Order.
:rtype: float
"""
return self._btc_price_satoshi
@btc_price_satoshi.setter
def btc_price_satoshi(self, btc_price_satoshi):
"""
Sets the btc_price_satoshi of this V1Order.
For Bitcoin transactions, the price of the buyer's order in satoshi (100 million satoshi equals 1 BTC).
:param btc_price_satoshi: The btc_price_satoshi of this V1Order.
:type: float
"""
self._btc_price_satoshi = btc_price_satoshi
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
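# A minimal usage sketch of the generated model above; the field values are purely
# illustrative and the __main__ guard is only here so the sketch does not run on import.
if __name__ == '__main__':
    example_order = V1Order(id='ORDER-123', buyer_email='buyer@example.com', state='OPEN')
    example_order.promo_code = 'SPRING10'  # property setters mirror the getters defined above
    as_dict = example_order.to_dict()      # nested models (V1Money, Address, ...) would be expanded recursively
    print(as_dict['buyer_email'], as_dict['state'], as_dict['promo_code'])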
avg_line_length: 29.487248 | max_line_length: 515 | alphanum_fraction: 0.609978

hexsha: 87b87d926cd880f4f05e539e95e224d7160a6e98 | size: 3,784 | ext: py | lang: Python
max_stars: path=virtual/lib/python3.6/site-packages/pylint/test/functional/name_styles.py | repo=drewheathens/The-Moringa-Tribune | head_hexsha=98ee4d63c9df6f1f7497fc6876960a822d914500 | licenses=["MIT"] | count=69 | event_min=2019-02-18T12:07:35.000Z | event_max=2022-03-12T10:38:32.000Z
max_issues: path=virtual/lib/python3.6/site-packages/pylint/test/functional/name_styles.py | repo=drewheathens/The-Moringa-Tribune | head_hexsha=98ee4d63c9df6f1f7497fc6876960a822d914500 | licenses=["MIT"] | count=16 | event_min=2020-02-12T00:28:11.000Z | event_max=2022-03-11T23:48:19.000Z
max_forks: path=virtual/lib/python3.6/site-packages/pylint/test/functional/name_styles.py | repo=drewheathens/The-Moringa-Tribune | head_hexsha=98ee4d63c9df6f1f7497fc6876960a822d914500 | licenses=["MIT"] | count=28 | event_min=2019-03-22T01:07:13.000Z | event_max=2022-02-21T16:38:27.000Z
content:
"""Test for the invalid-name warning."""
# pylint: disable=no-absolute-import, useless-object-inheritance, unnecessary-pass
from __future__ import print_function
import abc
import collections
GOOD_CONST_NAME = ''
bad_const_name = 0 # [invalid-name]
def BADFUNCTION_name(): # [invalid-name]
"""Bad function name."""
BAD_LOCAL_VAR = 1 # [invalid-name]
print(BAD_LOCAL_VAR)
def func_bad_argname(NOT_GOOD): # [invalid-name]
"""Function with a badly named argument."""
return NOT_GOOD
def no_nested_args(arg1, arg21, arg22):
"""Well-formed function."""
print(arg1, arg21, arg22)
class bad_class_name(object): # [invalid-name]
"""Class with a bad name."""
class CorrectClassName(object):
"""Class with a good name."""
def __init__(self):
self._good_private_name = 10
self.__good_real_private_name = 11
self.good_attribute_name = 12
self._Bad_AtTR_name = None # [invalid-name]
self.Bad_PUBLIC_name = None # [invalid-name]
zz = 'Why Was It Bad Class Attribute?'
GOOD_CLASS_ATTR = 'Good Class Attribute'
def BadMethodName(self): # [invalid-name]
"""A Method with a bad name."""
def good_method_name(self):
"""A method with a good name."""
def __DunDER_IS_not_free_for_all__(self): # [invalid-name]
"""Another badly named method."""
class DerivedFromCorrect(CorrectClassName):
"""A derived class with an invalid inherited members.
Derived attributes and methods with invalid names do not trigger warnings.
"""
zz = 'Now a good class attribute'
def __init__(self):
super(DerivedFromCorrect, self).__init__()
self._Bad_AtTR_name = None # Ignored
def BadMethodName(self):
"""Ignored since the method is in the interface."""
V = [WHAT_Ever_inListComp for WHAT_Ever_inListComp in GOOD_CONST_NAME]
def class_builder():
"""Function returning a class object."""
class EmbeddedClass(object):
"""Useless class."""
return EmbeddedClass
# +1:[invalid-name]
BAD_NAME_FOR_CLASS = collections.namedtuple('Named', ['tuple'])
NEXT_BAD_NAME_FOR_CLASS = class_builder() # [invalid-name]
GoodName = collections.namedtuple('Named', ['tuple'])
ToplevelClass = class_builder()
# Aliases for classes have the same name constraints.
AlsoCorrect = CorrectClassName
NOT_CORRECT = CorrectClassName # [invalid-name]
def test_globals():
"""Names in global statements are also checked."""
global NOT_CORRECT
global AlsoCorrect # [invalid-name]
NOT_CORRECT = 1
AlsoCorrect = 2
class FooClass(object):
"""A test case for property names.
Since by default, the regex for attributes is the same as the one
for method names, we check the warning messages to contain the
string 'attribute'.
"""
@property
def PROPERTY_NAME(self): # [invalid-name]
"""Ignored."""
pass
@abc.abstractproperty
def ABSTRACT_PROPERTY_NAME(self): # [invalid-name]
"""Ignored."""
pass
@PROPERTY_NAME.setter
def PROPERTY_NAME_SETTER(self): # [invalid-name]
"""Ignored."""
pass
def _nice_and_long_descriptive_private_method_name(self):
"""private method with long name"""
pass
def good_public_function_name(good_arg_name):
"""This is a perfect public function"""
good_variable_name = 1
return good_variable_name + good_arg_name
def _private_scope_function_with_long_descriptive_name():
"""Private scope function are cool with long descriptive names"""
return 12
LONG_CONSTANT_NAME_IN_PUBLIC_SCOPE_ARE_OKAY = True
class _AnExceptionalExceptionThatOccursVeryVeryRarely(Exception):
"""A very exceptional exception with a nice descriptive name"""
pass
avg_line_length: 26.836879 | max_line_length: 82 | alphanum_fraction: 0.691332

hexsha: fa157f1a04736ecf925e50cd19d3e85ec8f2cb78 | size: 16,159 | ext: py | lang: Python
max_stars: path=venv/Lib/site-packages/jet/utils.py | repo=majestylink/majestyAccencis | head_hexsha=41bdde6f9982980609f93a8b44bcaf06cc5f6ea6 | licenses=["MIT"] | count=2 | event_min=2022-01-24T23:30:18.000Z | event_max=2022-01-26T00:21:22.000Z
max_issues: path=venv/Lib/site-packages/jet/utils.py | repo=majestylink/majestyAccencis | head_hexsha=41bdde6f9982980609f93a8b44bcaf06cc5f6ea6 | licenses=["MIT"] | count=21 | event_min=2021-02-04T01:37:44.000Z | event_max=2022-03-12T01:00:55.000Z
max_forks: path=venv/Lib/site-packages/jet/utils.py | repo=majestylink/majestyAccencis | head_hexsha=41bdde6f9982980609f93a8b44bcaf06cc5f6ea6 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import datetime
import json
from django.template import Context
from django.utils import translation
from jet import settings
from jet.models import PinnedApplication
try:
from django.apps.registry import apps
except ImportError:
try:
from django.apps import apps # Fix Django 1.7 import issue
except ImportError:
pass
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
try:
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
except ImportError: # Django 1.11
from django.urls import reverse, resolve, NoReverseMatch
from django.contrib.admin import AdminSite
from django.utils.encoding import smart_text
from django.utils.text import capfirst
from django.contrib import messages
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # Python 2.6
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
    :param encoder: Should be a json encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError('In order to allow non-dict objects to be '
'serialized set the safe parameter to False')
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder)
super(JsonResponse, self).__init__(content=data, **kwargs)
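# Usage sketch for JsonResponse; the view function below is hypothetical and only
# illustrates the `safe` behaviour documented in the docstring above.
def _example_json_view(request, as_list=False):
    if as_list:
        # Non-dict payloads must opt out of the dict-only guard with safe=False.
        return JsonResponse([1, 2, 3], safe=False)
    return JsonResponse({'status': 'ok'})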
def get_app_list(context, order=True):
admin_site = get_admin_site(context)
request = context['request']
app_dict = {}
for model, model_admin in admin_site._registry.items():
app_label = model._meta.app_label
try:
has_module_perms = model_admin.has_module_permission(request)
except AttributeError:
has_module_perms = request.user.has_module_perms(app_label) # Fix Django < 1.8 issue
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
'model_name': model._meta.model_name
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=admin_site.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=admin_site.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
try:
name = apps.get_app_config(app_label).verbose_name
except NameError:
name = app_label.title()
app_dict[app_label] = {
'name': name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=admin_site.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(app_dict.values())
if order:
app_list.sort(key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
def get_admin_site(context):
try:
current_resolver = resolve(context.get('request').path)
index_resolver = resolve(reverse('%s:index' % current_resolver.namespaces[0]))
if hasattr(index_resolver.func, 'admin_site'):
return index_resolver.func.admin_site
for func_closure in index_resolver.func.__closure__:
if isinstance(func_closure.cell_contents, AdminSite):
return func_closure.cell_contents
except:
pass
return admin.site
def get_admin_site_name(context):
return get_admin_site(context).name
class LazyDateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, Promise):
return force_text(obj)
return self.encode(obj)
def get_model_instance_label(instance):
if getattr(instance, "related_label", None):
return instance.related_label()
return smart_text(instance)
class SuccessMessageMixin(object):
"""
Adds a success message on successful form submission.
"""
success_message = ''
def form_valid(self, form):
response = super(SuccessMessageMixin, self).form_valid(form)
success_message = self.get_success_message(form.cleaned_data)
if success_message:
messages.success(self.request, success_message)
return response
def get_success_message(self, cleaned_data):
return self.success_message % cleaned_data
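# Usage sketch (hypothetical view, mirroring django.contrib.messages.views.SuccessMessageMixin):
#
#     class ArticleCreateView(SuccessMessageMixin, CreateView):
#         model = Article
#         fields = ['title']
#         success_message = "%(title)s was created successfully"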
def get_model_queryset(admin_site, model, request, preserved_filters=None):
model_admin = admin_site._registry.get(model)
if model_admin is None:
return
try:
changelist_url = reverse('%s:%s_%s_changelist' % (
admin_site.name,
model._meta.app_label,
model._meta.model_name
))
except NoReverseMatch:
return
changelist_filters = None
if preserved_filters:
changelist_filters = preserved_filters.get('_changelist_filters')
if changelist_filters:
changelist_url += '?' + changelist_filters
if model_admin:
queryset = model_admin.get_queryset(request)
else:
queryset = model.objects
list_display = model_admin.get_list_display(request)
list_display_links = model_admin.get_list_display_links(request, list_display)
list_filter = model_admin.get_list_filter(request)
search_fields = model_admin.get_search_fields(request) \
if hasattr(model_admin, 'get_search_fields') else model_admin.search_fields
list_select_related = model_admin.get_list_select_related(request) \
if hasattr(model_admin, 'get_list_select_related') else model_admin.list_select_related
actions = model_admin.get_actions(request)
if actions:
list_display = ['action_checkbox'] + list(list_display)
ChangeList = model_admin.get_changelist(request)
change_list_args = [
request, model, list_display, list_display_links, list_filter,
model_admin.date_hierarchy, search_fields, list_select_related,
model_admin.list_per_page, model_admin.list_max_show_all,
model_admin.list_editable, model_admin]
try:
sortable_by = model_admin.get_sortable_by(request)
change_list_args.append(sortable_by)
except AttributeError:
# django version < 2.1
pass
try:
cl = ChangeList(*change_list_args)
queryset = cl.get_queryset(request)
except IncorrectLookupParameters:
pass
return queryset
def get_possible_language_codes():
language_code = translation.get_language()
language_code = language_code.replace('_', '-').lower()
language_codes = []
# making dialect part uppercase
split = language_code.split('-', 2)
if len(split) == 2:
language_code = '%s-%s' % (split[0].lower(), split[1].upper()) if split[0] != split[1] else split[0]
language_codes.append(language_code)
# adding language code without dialect part
if len(split) == 2:
language_codes.append(split[0].lower())
return language_codes
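# Worked example of the normalisation above: if the active language is 'pt-br' the
# function returns ['pt-BR', 'pt']; for 'de-de' (prefix equals dialect) it returns
# ['de', 'de']; a plain code such as 'en' yields an empty list.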
def get_original_menu_items(context):
if context.get('user') and user_is_authenticated(context['user']):
pinned_apps = PinnedApplication.objects.filter(user=context['user'].pk).values_list('app_label', flat=True)
else:
pinned_apps = []
original_app_list = get_app_list(context)
return map(lambda app: {
'app_label': app['app_label'],
'url': app['app_url'],
'url_blank': False,
'label': app.get('name', capfirst(_(app['app_label']))),
'has_perms': app.get('has_module_perms', False),
'models': list(map(lambda model: {
'url': model.get('admin_url'),
'url_blank': False,
'name': model['model_name'],
'object_name': model['object_name'],
'label': model.get('name', model['object_name']),
'has_perms': any(model.get('perms', {}).values()),
}, app['models'])),
'pinned': app['app_label'] in pinned_apps,
'custom': False
}, original_app_list)
def get_menu_item_url(url, original_app_list):
if isinstance(url, dict):
url_type = url.get('type')
if url_type == 'app':
return original_app_list[url['app_label']]['url']
elif url_type == 'model':
models = dict(map(
lambda x: (x['name'], x['url']),
original_app_list[url['app_label']]['models']
))
return models[url['model']]
elif url_type == 'reverse':
return reverse(url['name'], args=url.get('args'), kwargs=url.get('kwargs'))
elif isinstance(url, str):
return url
def get_menu_items(context):
pinned_apps = PinnedApplication.objects.filter(user=context['user'].pk).values_list('app_label', flat=True)
original_app_list = OrderedDict(map(lambda app: (app['app_label'], app), get_original_menu_items(context)))
custom_app_list = settings.JET_SIDE_MENU_ITEMS
custom_app_list_deprecated = settings.JET_SIDE_MENU_CUSTOM_APPS
if custom_app_list not in (None, False):
if isinstance(custom_app_list, dict):
admin_site = get_admin_site(context)
custom_app_list = custom_app_list.get(admin_site.name, [])
app_list = []
def get_menu_item_app_model(app_label, data):
item = {'has_perms': True}
if 'name' in data:
parts = data['name'].split('.', 2)
if len(parts) > 1:
app_label, name = parts
else:
name = data['name']
if app_label in original_app_list:
models = dict(map(
lambda x: (x['name'], x),
original_app_list[app_label]['models']
))
if name in models:
item = models[name].copy()
if 'label' in data:
item['label'] = data['label']
if 'url' in data:
item['url'] = get_menu_item_url(data['url'], original_app_list)
if 'url_blank' in data:
item['url_blank'] = data['url_blank']
if 'permissions' in data:
item['has_perms'] = item.get('has_perms', True) and context['user'].has_perms(data['permissions'])
return item
def get_menu_item_app(data):
app_label = data.get('app_label')
if not app_label:
if 'label' not in data:
raise Exception('Custom menu items should at least have \'label\' or \'app_label\' key')
app_label = 'custom_%s' % slugify(data['label'], allow_unicode=True)
if app_label in original_app_list:
item = original_app_list[app_label].copy()
else:
item = {'app_label': app_label, 'has_perms': True}
if 'label' in data:
item['label'] = data['label']
if 'items' in data:
item['items'] = list(map(lambda x: get_menu_item_app_model(app_label, x), data['items']))
if 'url' in data:
item['url'] = get_menu_item_url(data['url'], original_app_list)
if 'url_blank' in data:
item['url_blank'] = data['url_blank']
if 'permissions' in data:
item['has_perms'] = item.get('has_perms', True) and context['user'].has_perms(data['permissions'])
item['pinned'] = item['app_label'] in pinned_apps
return item
for data in custom_app_list:
item = get_menu_item_app(data)
app_list.append(item)
elif custom_app_list_deprecated not in (None, False):
app_dict = {}
models_dict = {}
for app in original_app_list.values():
app_label = app['app_label']
app_dict[app_label] = app
for model in app['models']:
if app_label not in models_dict:
models_dict[app_label] = {}
models_dict[app_label][model['object_name']] = model
app['items'] = []
app_list = []
if isinstance(custom_app_list_deprecated, dict):
admin_site = get_admin_site(context)
custom_app_list_deprecated = custom_app_list_deprecated.get(admin_site.name, [])
for item in custom_app_list_deprecated:
app_label, models = item
if app_label in app_dict:
app = app_dict[app_label]
for model_label in models:
if model_label == '__all__':
app['items'] = models_dict[app_label].values()
break
elif model_label in models_dict[app_label]:
model = models_dict[app_label][model_label]
app['items'].append(model)
app_list.append(app)
else:
def map_item(item):
item['items'] = item['models']
return item
app_list = list(map(map_item, original_app_list.values()))
current_found = False
for app in app_list:
if not current_found:
for model in app['items']:
if not current_found and model.get('url') and context['request'].path.startswith(model['url']):
model['current'] = True
current_found = True
else:
model['current'] = False
if not current_found and app.get('url') and context['request'].path.startswith(app['url']):
app['current'] = True
current_found = True
else:
app['current'] = False
return app_list
def context_to_dict(context):
if isinstance(context, Context):
flat = {}
for d in context.dicts:
flat.update(d)
context = flat
return context
def user_is_authenticated(user):
if not hasattr(user.is_authenticated, '__call__'):
return user.is_authenticated
else:
return user.is_authenticated()
avg_line_length: 34.454158 | max_line_length: 119 | alphanum_fraction: 0.605545

hexsha: fe55cf7adcc9d0c1a1d9a540df15ea1a2748d6ec | size: 885 | ext: py | lang: Python
max_stars: path=python/django_14_app/django_14_app/urls.py | repo=go/trace-examples | head_hexsha=813e262f2182f7f90d370887de88fd9609526252 | licenses=["BSD-3-Clause"] | count=1 | event_min=2020-04-01T16:29:48.000Z | event_max=2020-04-01T16:29:48.000Z
max_issues: path=python/django_14_app/django_14_app/urls.py | repo=go/trace-examples | head_hexsha=813e262f2182f7f90d370887de88fd9609526252 | licenses=["BSD-3-Clause"] | count=4 | event_min=2020-06-06T00:36:58.000Z | event_max=2021-06-10T22:35:19.000Z
max_forks: path=python/django_14_app/django_14_app/urls.py | repo=go/trace-examples | head_hexsha=813e262f2182f7f90d370887de88fd9609526252 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
content:
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
(r'^polls/simple', 'polls.views.simple_view'),
(r'^polls/templated', 'polls.views.templated_view'),
(r'^polls/cached', 'polls.views.cached_view'),
(r'^polls/db', 'polls.views.db_view'),
(r'^polls/all', 'polls.views.all_the_things'),
# Examples:
# url(r'^$', 'django_14_app.views.home', name='home'),
# url(r'^django_14_app/', include('django_14_app.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
from ddtrace import tracer
tracer.debug_logging = True
avg_line_length: 35.4 | max_line_length: 71 | alphanum_fraction: 0.684746

hexsha: f42377dab9448b694dafb0de48c5dafac07fb982 | size: 1,450 | ext: py | lang: Python
max_stars: path=spider/BingPictures/main.py | repo=hiyongz/pythontools | head_hexsha=b26b417a69c4ce295322dbf4f2eb4cbfa4606184 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=spider/BingPictures/main.py | repo=hiyongz/pythontools | head_hexsha=b26b417a69c4ce295322dbf4f2eb4cbfa4606184 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=spider/BingPictures/main.py | repo=hiyongz/pythontools | head_hexsha=b26b417a69c4ce295322dbf4f2eb4cbfa4606184 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
""" 一个爬取图片的例子,用的是很基础的方法,下载速度不快,供学习 """
import os
import requests # 先导入爬虫的库
from lxml import html
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36"
} # 设置头部信息,伪装浏览器
host = "https://bing.ioliu.cn" # 爬取这个网站上的图片
p = 1 # 记录当前页码
while True:
response = requests.get(host, headers=headers, params={p: p})
response.encoding = "utf-8"
htree = html.fromstring(response.content)
downloadHref = htree.xpath(
'//*[contains(concat( " ", @class, " " ), concat( " ", "download", " " ))]/@href') # 获取下载链接(链接不完整)
page = htree.xpath(
'//*[contains(concat( " ", @class, " " ), concat( " ", "page", " " ))]//span/text()')[0].split(' / ') # 获取页码信息
hasNext = page[0] != page[1] # 是否还有下一页
dir = os.getcwd() + "\\第" + str(p) + "页\\" # 文件夹
os.makedirs(dir) # 创建文件夹
for href in downloadHref: # 遍历下载链接
href = host + href
pictrueResponse = requests.get(
url=href, headers=headers) # get方法的到图片响应
fileName = href.split("/")[4].split("?")[0] + ".png"
with open(dir + fileName, "wb+") as file: # 打开一个文件,wb表示以二进制格式打开一个文件只用于写入
file.write(pictrueResponse.content) # 写入文件
print(fileName)
if not hasNext:
print('已无下一页,程序结束。')
os._exit(0)
if input('第 ' + str(p) + ' 页下载完毕,是否继续?(y/n)') == "n":
break
p = p + 1 #更新页码
avg_line_length: 35.365854 | max_line_length: 135 | alphanum_fraction: 0.568276

hexsha: 72ae1493a5fb176f0bb4067fb6a868bbb6c8ffc5 | size: 4,448 | ext: py | lang: Python
max_stars: path=TargetEncoder.py | repo=mohsinkhn/ltfs-av | head_hexsha=43077049b817e6eb0a8a8a2bf0177c9a1d9940d0 | licenses=["MIT"] | count=5 | event_min=2019-04-26T20:07:30.000Z | event_max=2021-02-04T20:24:34.000Z
max_issues: path=TargetEncoder.py | repo=mohsinkhn/ltfs-av | head_hexsha=43077049b817e6eb0a8a8a2bf0177c9a1d9940d0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=TargetEncoder.py | repo=mohsinkhn/ltfs-av | head_hexsha=43077049b817e6eb0a8a8a2bf0177c9a1d9940d0 | licenses=["MIT"] | count=1 | event_min=2019-04-29T09:45:12.000Z | event_max=2019-04-29T09:45:12.000Z
content:
import pandas as pd
import numpy as np
from collections import Counter
from sklearn.base import BaseEstimator, TransformerMixin
#@author: Mohsin
class TargetEncoderWithThresh(BaseEstimator, TransformerMixin):
"""
    A utility class to help encode categorical variables using different methods.
Ideas taken from this excellent kernel
https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study?utm_medium=email&utm_source=mailchimp&utm_campaign=datanotes-20181004
Inputs:
cols: (List or str) Can be either a string or list of strings with column names
targetcol: (str) Target column to encode column/group of columns with
thresh: (int) Minimum count of grouping to encode (Acts as smoothing). Currently not implemented TODO
func: (str or callable) Function to be applied on column/ group of columns to encode.
              If a str is provided, it should be an attribute of pandas Series
        cname: (str) Column name for the new feature
func_kwargs: (dict) Additional arguments to be passed to function
add_to_orig: (bool) Whether to return dataframe with added feature or just the feature as series
use_prior: (bool) Use smoothing as suggested in kernel
alpha: (float) Smoothing factor
Output:
pandas DataFrame/Series
"""
def __init__(self, cols=None, targetcol=None, cname=None, thresh=0, func=np.mean, add_to_orig=False,
func_kwargs={}, use_prior=False, alpha=0.5):
self.cols = cols # Can be either a string or list of strings with column names
self.targetcol = targetcol # Target column to encode column/group of columns with
self.thresh = thresh # Minimum count of grouping to encode (Acts as smoothing)
self.func = func # Function to be applied on column/ group of columns to encode
self.add_to_orig = add_to_orig # Whether return a dataframe with added feature or just a series of feature
self.cname = cname # Column to new feature generated
self.func_kwargs = func_kwargs # Additional key word arguments to be applied to func
self.alpha = alpha # smoothing factor
self.use_prior = use_prior
# @numba.jit
def fit(self, X, y=None):
if isinstance(self.func, str):
if hasattr(pd.Series, self.func):
# print("here")
vals = getattr(X.groupby(self.cols)[self.targetcol], self.func)
self.dictmap = vals(**self.func_kwargs)
prior = getattr(X[self.targetcol], self.func)(**self.func_kwargs)
else:
self.dictmap = X.groupby(self.cols)[self.targetcol].apply(lambda x: self.func(x, **self.func_kwargs))
prior = X[[self.targetcol]].apply(lambda x: self.func(x, **self.func_kwargs)).values[0]
self.counts = Counter(zip(*[X[col].tolist() for col in self.cols]))
if len(self.cols) == 1:
counts_greater_than_thresh = [k[0] for k, v in self.counts.items() if v >= self.thresh]
else:
counts_greater_than_thresh = [k for k, v in self.counts.items() if v >= self.thresh]
# print(self.dictmap.head())
# print(self.counts.most_common(10))
self.dictmap = self.dictmap.loc[self.dictmap.index.isin(counts_greater_than_thresh)]
if self.use_prior:
self.dictmap = {k: ((self.counts[k] * v + prior * self.alpha) / (self.counts[k] + self.alpha))
for k, v in self.dictmap.items()}
self.dictmap = pd.Series(self.dictmap)
self.dictmap.index.names = self.cols
# print(self.dictmap.head())
if self.cname:
self.dictmap.name = self.cname
else:
cname = ''
cname = [cname + '_' + str(col) for col in self.cols]
self.cname = '_'.join(cname) + "_" + str(self.func)
self.dictmap.name = self.cname
# print(self.cname)
# self.dictmap = self.dictmap
return self
def transform(self, X, y=None):
if isinstance(X, pd.DataFrame):
X_transformed = X[self.cols]
X_transformed = X_transformed.join(self.dictmap, on=self.cols, how='left')[self.cname]
if self.add_to_orig:
return pd.concat([X, X_transformed], axis=1, copy=False)
else:
return X_transformed.values
else:
raise TypeError("Input should be a pandas DataFrame")
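# A minimal usage sketch; the DataFrame below is made-up toy data and the column
# names are illustrative only.
if __name__ == '__main__':
    toy = pd.DataFrame({'city': ['a', 'a', 'b', 'b', 'b'],
                        'target': [1, 0, 1, 1, 0]})
    enc = TargetEncoderWithThresh(cols=['city'], targetcol='target', func='mean')
    enc.fit(toy)
    print(enc.transform(toy))  # per-row mean of `target` for each `city` value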
avg_line_length: 45.85567 | max_line_length: 154 | alphanum_fraction: 0.645683

hexsha: 9a07d0a3e4d8f03e06b1c63786253e070e396355 | size: 5,328 | ext: py | lang: Python
max_stars: path=supervisor/compat.py | repo=alex/supervisor | head_hexsha=7ffe1cbd53e540ca4e242e0dd3c431766cf186fc | licenses=["ZPL-2.1"] | count=1 | event_min=2015-11-08T13:01:35.000Z | event_max=2015-11-08T13:01:35.000Z
max_issues: path=supervisor/compat.py | repo=alex/supervisor | head_hexsha=7ffe1cbd53e540ca4e242e0dd3c431766cf186fc | licenses=["ZPL-2.1"] | count=null | event_min=null | event_max=null
max_forks: path=supervisor/compat.py | repo=alex/supervisor | head_hexsha=7ffe1cbd53e540ca4e242e0dd3c431766cf186fc | licenses=["ZPL-2.1"] | count=null | event_min=null | event_max=null
content:
from __future__ import absolute_import
import sys
PY3 = sys.version>'3'
if PY3: # pragma: no cover
long = int
basestring = str
unichr = chr
raw_input = input
class unicode(str):
def __init__(self, string, encoding, errors):
str.__init__(self, string)
def as_bytes(s): return s if isinstance(s,bytes) else s.encode('utf8')
def as_string(s): return s if isinstance(s,str) else s.decode('utf8')
from functools import reduce
else: # pragma: no cover
long = long
raw_input = raw_input
unicode = unicode
basestring = basestring
def as_bytes(s): return s if isinstance(s, str) else s.encode('utf-8')
def as_string(s): return s if isinstance(s, unicode) else s.decode('utf-8')
reduce = reduce
def print_function(*args,**kwargs): # pragma: no cover
kwargs.get('file', sys.stdout).write(' '.join(i for i in args)+kwargs.get('end','\n'))
def total_ordering(cls): # pragma: no cover
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
try:
op = getattr(int, opname)
except AttributeError: # py25 int has no __gt__
pass
else:
opfunc.__doc__ = op.__doc__
setattr(cls, opname, opfunc)
return cls
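# Usage sketch (illustrative class): on Python 2, where plain classes do not already
# define the rich comparison methods, the decorator derives the missing operators from
# the one you define plus __eq__; on Python 3 functools.total_ordering is used instead.
#
#     @total_ordering
#     class Version(object):
#         def __init__(self, n): self.n = n
#         def __eq__(self, other): return self.n == other.n
#         def __lt__(self, other): return self.n < other.n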
try: # pragma: no cover
import xmlrpc.client as xmlrpclib
except ImportError: # pragma: no cover
import xmlrpclib
try: # pragma: no cover
import urllib.parse as urlparse
import urllib.parse as urllib
except ImportError: # pragma: no cover
import urlparse
import urllib
if PY3: # pragma: no cover
from base64 import encodebytes as encodestring
else: # pragma: no cover
from base64 import encodestring
try: # pragma: no cover
from hashlib import sha1
except ImportError: # pragma: no cover
from sha import new as sha1
try: # pragma: no cover
import syslog
except ImportError: # pragma: no cover
syslog = None
try: # pragma: no cover
import configparser as ConfigParser
except ImportError: # pragma: no cover
import ConfigParser
try: # pragma: no cover
from StringIO import StringIO
except ImportError: # pragma: no cover
from io import StringIO
try: # pragma: no cover
from sys import maxint
except ImportError: # pragma: no cover
from sys import maxsize as maxint
try: # pragma: no cover
from urllib.parse import parse_qs, parse_qsl
except ImportError: # pragma: no cover
from cgi import parse_qs, parse_qsl
try: # pragma: no cover
import http.client as httplib
except ImportError: # pragma: no cover
import httplib
try: # pragma: no cover
from base64 import decodebytes as decodestring, encodebytes as encodestring
except ImportError: # pragma: no cover
from base64 import decodestring, encodestring
if PY3: # pragma: no cover
func_attribute = '__func__'
else: # pragma: no cover
func_attribute = 'im_func'
try: # pragma: no cover
# Python 2.6 contains a version of cElementTree inside it.
from xml.etree.ElementTree import iterparse
except ImportError: # pragma: no cover
try:
# Failing that, try cElementTree instead.
from cElementTree import iterparse
except ImportError:
iterparse = None
try: # pragma: no cover
from unittest.mock import Mock, patch, sentinel
except ImportError: # pragma: no cover
from mock import Mock, patch, sentinel
try: # pragma: no cover
import unittest.mock as mock
except ImportError: # pragma: no cover
import mock
try: # pragma: no cover
from xmlrpc.client import Fault
except ImportError: # pragma: no cover
from xmlrpclib import Fault
try: # pragma: no cover
from string import ascii_letters as letters
except ImportError: # pragma: no cover
from string import letters
try: # pragma: no cover
from hashlib import md5
except ImportError: # pragma: no cover
from md5 import md5
try: # pragma: no cover
import thread
except ImportError: # pragma: no cover
import _thread as thread
avg_line_length: 32.290909 | max_line_length: 90 | alphanum_fraction: 0.65503

hexsha: a1a5a8a23235f968dd06024e3d211dff1983775a | size: 424 | ext: py | lang: Python
max_stars: path=Exercices/Secao05/exercicio06.py | repo=Guilt-tech/PythonExercices | head_hexsha=e59bffae997a1974d3e3cdcfff7700afbed65e6e | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=Exercices/Secao05/exercicio06.py | repo=Guilt-tech/PythonExercices | head_hexsha=e59bffae997a1974d3e3cdcfff7700afbed65e6e | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=Exercices/Secao05/exercicio06.py | repo=Guilt-tech/PythonExercices | head_hexsha=e59bffae997a1974d3e3cdcfff7700afbed65e6e | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
print('Enter two numbers to see which one is larger and what their difference is')
n1 = int(input('Number 1: '))
n2 = int(input('Number 2: '))
if n1 > n2:
    diferenca = n1 - n2
    print(f'The first number {n1} is larger than the second number {n2} and their difference is: {diferenca}')
else:
    diferenca1 = n2 - n1
    print(f'The second number {n2} is larger than the first number {n1} and their difference is: {diferenca1}')
avg_line_length: 47.111111 | max_line_length: 107 | alphanum_fraction: 0.667453

hexsha: 77d9bcdef8235c1176bd5b537e8d8ed26fe85b22 | size: 16,101 | ext: py | lang: Python
max_stars: path=03_SweynTooth/libs/scapy/layers/dhcp.py | repo=Charmve/BLE-Security-Att-Def | head_hexsha=3652d84bf4ac0c694bb3c4c0f611098da9122af0 | licenses=["BSD-2-Clause"] | count=149 | event_min=2020-10-23T23:31:51.000Z | event_max=2022-03-15T00:25:35.000Z
max_issues: path=03_SweynTooth/libs/scapy/layers/dhcp.py | repo=Charmve/BLE-Security-Att-Def | head_hexsha=3652d84bf4ac0c694bb3c4c0f611098da9122af0 | licenses=["BSD-2-Clause"] | count=1 | event_min=2021-04-12T19:24:00.000Z | event_max=2021-04-27T03:11:07.000Z
max_forks: path=03_SweynTooth/libs/scapy/layers/dhcp.py | repo=Charmve/BLE-Security-Att-Def | head_hexsha=3652d84bf4ac0c694bb3c4c0f611098da9122af0 | licenses=["BSD-2-Clause"] | count=22 | event_min=2020-11-17T02:52:40.000Z | event_max=2022-03-15T00:26:38.000Z
content:
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
DHCP (Dynamic Host Configuration Protocol) and BOOTP
"""
from __future__ import absolute_import
from __future__ import print_function
try:
from collections.abc import Iterable
except ImportError:
# For backwards compatibility. This was removed in Python 3.8
from collections import Iterable
import random
import struct
from scapy.ansmachine import AnsweringMachine
from scapy.base_classes import Net
from scapy.compat import chb, orb, bytes_encode
from scapy.fields import ByteEnumField, ByteField, Field, FieldListField, \
FlagsField, IntField, IPField, ShortField, StrField
from scapy.layers.inet import UDP, IP
from scapy.layers.l2 import Ether
from scapy.packet import bind_layers, bind_bottom_up, Packet
from scapy.utils import atol, itom, ltoa, sane
from scapy.volatile import RandBin, RandField, RandNum, RandNumExpo
from scapy.arch import get_if_raw_hwaddr
from scapy.sendrecv import srp1, sendp
from scapy.error import warning
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.config import conf
dhcpmagic = b"c\x82Sc"
class BOOTP(Packet):
name = "BOOTP"
fields_desc = [ByteEnumField("op", 1, {1: "BOOTREQUEST", 2: "BOOTREPLY"}),
ByteField("htype", 1),
ByteField("hlen", 6),
ByteField("hops", 0),
IntField("xid", 0),
ShortField("secs", 0),
FlagsField("flags", 0, 16, "???????????????B"),
IPField("ciaddr", "0.0.0.0"),
IPField("yiaddr", "0.0.0.0"),
IPField("siaddr", "0.0.0.0"),
IPField("giaddr", "0.0.0.0"),
Field("chaddr", b"", "16s"),
Field("sname", b"", "64s"),
Field("file", b"", "128s"),
StrField("options", b"")]
def guess_payload_class(self, payload):
if self.options[:len(dhcpmagic)] == dhcpmagic:
return DHCP
else:
return Packet.guess_payload_class(self, payload)
def extract_padding(self, s):
if self.options[:len(dhcpmagic)] == dhcpmagic:
# set BOOTP options to DHCP magic cookie and make rest a payload of DHCP options # noqa: E501
payload = self.options[len(dhcpmagic):]
self.options = self.options[:len(dhcpmagic)]
return payload, None
else:
return b"", None
def hashret(self):
return struct.pack("!I", self.xid)
def answers(self, other):
if not isinstance(other, BOOTP):
return 0
return self.xid == other.xid
class _DHCPParamReqFieldListField(FieldListField):
def getfield(self, pkt, s):
ret = []
while s:
s, val = FieldListField.getfield(self, pkt, s)
ret.append(val)
return b"", [x[0] for x in ret]
# DHCP_UNKNOWN, DHCP_IP, DHCP_IPLIST, DHCP_TYPE \
# = range(4)
#
DHCPTypes = {
1: "discover",
2: "offer",
3: "request",
4: "decline",
5: "ack",
6: "nak",
7: "release",
8: "inform",
9: "force_renew",
10: "lease_query",
11: "lease_unassigned",
12: "lease_unknown",
13: "lease_active",
}
DHCPOptions = {
0: "pad",
1: IPField("subnet_mask", "0.0.0.0"),
2: IntField("time_zone", 500),
3: IPField("router", "0.0.0.0"),
4: IPField("time_server", "0.0.0.0"),
5: IPField("IEN_name_server", "0.0.0.0"),
6: IPField("name_server", "0.0.0.0"),
7: IPField("log_server", "0.0.0.0"),
8: IPField("cookie_server", "0.0.0.0"),
9: IPField("lpr_server", "0.0.0.0"),
10: IPField("impress-servers", "0.0.0.0"),
11: IPField("resource-location-servers", "0.0.0.0"),
12: "hostname",
13: ShortField("boot-size", 1000),
14: "dump_path",
15: "domain",
16: IPField("swap-server", "0.0.0.0"),
17: "root_disk_path",
18: "extensions-path",
19: ByteField("ip-forwarding", 0),
20: ByteField("non-local-source-routing", 0),
21: IPField("policy-filter", "0.0.0.0"),
22: ShortField("max_dgram_reass_size", 300),
23: ByteField("default_ttl", 50),
24: IntField("pmtu_timeout", 1000),
25: ShortField("path-mtu-plateau-table", 1000),
26: ShortField("interface-mtu", 50),
27: ByteField("all-subnets-local", 0),
28: IPField("broadcast_address", "0.0.0.0"),
29: ByteField("perform-mask-discovery", 0),
30: ByteField("mask-supplier", 0),
31: ByteField("router-discovery", 0),
32: IPField("router-solicitation-address", "0.0.0.0"),
33: IPField("static-routes", "0.0.0.0"),
34: ByteField("trailer-encapsulation", 0),
35: IntField("arp_cache_timeout", 1000),
36: ByteField("ieee802-3-encapsulation", 0),
37: ByteField("tcp_ttl", 100),
38: IntField("tcp_keepalive_interval", 1000),
39: ByteField("tcp_keepalive_garbage", 0),
40: StrField("NIS_domain", "www.example.com"),
41: IPField("NIS_server", "0.0.0.0"),
42: IPField("NTP_server", "0.0.0.0"),
43: "vendor_specific",
44: IPField("NetBIOS_server", "0.0.0.0"),
45: IPField("NetBIOS_dist_server", "0.0.0.0"),
46: ByteField("static-routes", 100),
47: "netbios-scope",
48: IPField("font-servers", "0.0.0.0"),
49: IPField("x-display-manager", "0.0.0.0"),
50: IPField("requested_addr", "0.0.0.0"),
51: IntField("lease_time", 43200),
52: ByteField("dhcp-option-overload", 100),
53: ByteEnumField("message-type", 1, DHCPTypes),
54: IPField("server_id", "0.0.0.0"),
55: _DHCPParamReqFieldListField("param_req_list", [], ByteField("opcode", 0), length_from=lambda x: 1), # noqa: E501
56: "error_message",
57: ShortField("max_dhcp_size", 1500),
58: IntField("renewal_time", 21600),
59: IntField("rebinding_time", 37800),
60: StrField("vendor_class_id", "id"),
61: StrField("client_id", ""),
62: "nwip-domain-name",
64: "NISplus_domain",
65: IPField("NISplus_server", "0.0.0.0"),
67: StrField("boot-file-name", ""),
68: IPField("mobile-ip-home-agent", "0.0.0.0"),
69: IPField("SMTP_server", "0.0.0.0"),
70: IPField("POP3_server", "0.0.0.0"),
71: IPField("NNTP_server", "0.0.0.0"),
72: IPField("WWW_server", "0.0.0.0"),
73: IPField("Finger_server", "0.0.0.0"),
74: IPField("IRC_server", "0.0.0.0"),
75: IPField("StreetTalk_server", "0.0.0.0"),
76: IPField("StreetTalk_Dir_Assistance", "0.0.0.0"),
78: "slp_service_agent",
79: "slp_service_scope",
81: "client_FQDN",
82: "relay_agent_information",
85: IPField("nds-server", "0.0.0.0"),
86: StrField("nds-tree-name", ""),
87: StrField("nds-context", ""),
88: "bcms-controller-namesi",
89: IPField("bcms-controller-address", "0.0.0.0"),
91: IntField("client-last-transaction-time", 1000),
92: IPField("associated-ip", "0.0.0.0"),
93: "pxe_client_architecture",
94: "pxe_client_network_interface",
97: "pxe_client_machine_identifier",
98: StrField("uap-servers", ""),
100: StrField("pcode", ""),
101: StrField("tcode", ""),
112: IPField("netinfo-server-address", "0.0.0.0"),
113: StrField("netinfo-server-tag", ""),
114: StrField("default-url", ""),
116: ByteField("auto-config", 0),
    117: ShortField("name-service-search", 0),
118: IPField("subnet-selection", "0.0.0.0"),
124: "vendor_class",
125: "vendor_specific_information",
136: IPField("pana-agent", "0.0.0.0"),
137: "v4-lost",
138: IPField("capwap-ac-v4", "0.0.0.0"),
141: "sip_ua_service_domains",
146: "rdnss-selection",
159: "v4-portparams",
160: StrField("v4-captive-portal", ""),
208: "pxelinux_magic",
209: "pxelinux_configuration_file",
210: "pxelinux_path_prefix",
211: "pxelinux_reboot_time",
212: "option-6rd",
213: "v4-access-domain",
255: "end"
}
DHCPRevOptions = {}
for k, v in six.iteritems(DHCPOptions):
if isinstance(v, str):
n = v
v = None
else:
n = v.name
DHCPRevOptions[n] = (k, v)
del(n)
del(v)
del(k)
class RandDHCPOptions(RandField):
def __init__(self, size=None, rndstr=None):
if size is None:
size = RandNumExpo(0.05)
self.size = size
if rndstr is None:
rndstr = RandBin(RandNum(0, 255))
self.rndstr = rndstr
self._opts = list(six.itervalues(DHCPOptions))
self._opts.remove("pad")
self._opts.remove("end")
def _fix(self):
op = []
for k in range(self.size):
o = random.choice(self._opts)
if isinstance(o, str):
op.append((o, self.rndstr * 1))
else:
op.append((o.name, o.randval()._fix()))
return op
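# --- Illustrative sketch (added by the editor, not part of the original module) ---
# RandDHCPOptions yields a random option list suitable for fuzzing: each entry is
# either a bare option name or a (name, value) tuple whose value type matches the
# corresponding field declared in DHCPOptions above.  The helper below is
# hypothetical and never called; it only shows the shape of the generated values.
def _rand_dhcp_options_example():
    return RandDHCPOptions(size=5)._fix()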
class DHCPOptionsField(StrField):
islist = 1
def i2repr(self, pkt, x):
s = []
for v in x:
if isinstance(v, tuple) and len(v) >= 2:
if v[0] in DHCPRevOptions and isinstance(DHCPRevOptions[v[0]][1], Field): # noqa: E501
f = DHCPRevOptions[v[0]][1]
vv = ",".join(f.i2repr(pkt, val) for val in v[1:])
else:
vv = ",".join(repr(val) for val in v[1:])
r = "%s=%s" % (v[0], vv)
s.append(r)
else:
s.append(sane(v))
return "[%s]" % (" ".join(s))
def getfield(self, pkt, s):
return b"", self.m2i(pkt, s)
def m2i(self, pkt, x):
opt = []
while x:
o = orb(x[0])
if o == 255:
opt.append("end")
x = x[1:]
continue
if o == 0:
opt.append("pad")
x = x[1:]
continue
if len(x) < 2 or len(x) < orb(x[1]) + 2:
opt.append(x)
break
elif o in DHCPOptions:
f = DHCPOptions[o]
if isinstance(f, str):
olen = orb(x[1])
opt.append((f, x[2:olen + 2]))
x = x[olen + 2:]
else:
olen = orb(x[1])
lval = [f.name]
try:
left = x[2:olen + 2]
while left:
left, val = f.getfield(pkt, left)
lval.append(val)
except Exception:
opt.append(x)
break
else:
otuple = tuple(lval)
opt.append(otuple)
x = x[olen + 2:]
else:
olen = orb(x[1])
opt.append((o, x[2:olen + 2]))
x = x[olen + 2:]
return opt
def i2m(self, pkt, x):
if isinstance(x, str):
return x
s = b""
for o in x:
if isinstance(o, tuple) and len(o) >= 2:
name = o[0]
lval = o[1:]
if isinstance(name, int):
onum, oval = name, b"".join(lval)
elif name in DHCPRevOptions:
onum, f = DHCPRevOptions[name]
if f is not None:
lval = (f.addfield(pkt, b"", f.any2i(pkt, val)) for val in lval) # noqa: E501
else:
lval = (bytes_encode(x) for x in lval)
oval = b"".join(lval)
else:
warning("Unknown field option %s", name)
continue
s += chb(onum)
s += chb(len(oval))
s += oval
elif (isinstance(o, str) and o in DHCPRevOptions and
DHCPRevOptions[o][1] is None):
s += chb(DHCPRevOptions[o][0])
elif isinstance(o, int):
s += chb(o) + b"\0"
elif isinstance(o, (str, bytes)):
s += bytes_encode(o)
else:
warning("Malformed option %s", o)
return s
class DHCP(Packet):
name = "DHCP options"
fields_desc = [DHCPOptionsField("options", b"")]
bind_layers(UDP, BOOTP, dport=67, sport=68)
bind_layers(UDP, BOOTP, dport=68, sport=67)
bind_bottom_up(UDP, BOOTP, dport=67, sport=67)
bind_layers(BOOTP, DHCP, options=b'c\x82Sc')
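# --- Illustrative sketch (added by the editor, not part of the original module) ---
# DHCPOptionsField round-trips between (name, value) tuples and on-wire TLV bytes:
# i2m() serializes when building, m2i() parses when dissecting.  The helper below
# is hypothetical and never called; the option values are arbitrary examples.
def _dhcp_options_roundtrip_example():
    pkt = DHCP(options=[("message-type", "discover"),
                        ("requested_addr", "192.0.2.10"),
                        ("hostname", b"client-1"),
                        "end"])
    wire = bytes(pkt)            # i2m(): tuples -> TLV byte string
    return DHCP(wire).options    # m2i(): TLV byte string -> tuples again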
@conf.commands.register
def dhcp_request(iface=None, **kargs):
"""Send a DHCP discover request and return the answer"""
if conf.checkIPaddr:
warning(
"conf.checkIPaddr is enabled, may not be able to match the answer"
)
if iface is None:
iface = conf.iface
fam, hw = get_if_raw_hwaddr(iface)
return srp1(Ether(dst="ff:ff:ff:ff:ff:ff") / IP(src="0.0.0.0", dst="255.255.255.255") / UDP(sport=68, dport=67) / # noqa: E501
BOOTP(chaddr=hw) / DHCP(options=[("message-type", "discover"), "end"]), iface=iface, **kargs) # noqa: E501
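# Illustrative usage (editor's note, not part of the original module): with
# sufficient privileges, a discover/offer exchange can be exercised roughly like
# this; "eth0" and the timeout are placeholder values.
#
#   offer = dhcp_request(iface="eth0", timeout=3)
#   if offer is not None:
#       offer[BOOTP].yiaddr      # address proposed by the server
#       offer[DHCP].options      # parsed option list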
class BOOTP_am(AnsweringMachine):
function_name = "bootpd"
filter = "udp and port 68 and port 67"
send_function = staticmethod(sendp)
def parse_options(self, pool=Net("192.168.1.128/25"), network="192.168.1.0/24", gw="192.168.1.1", # noqa: E501
domain="localnet", renewal_time=60, lease_time=1800):
self.domain = domain
netw, msk = (network.split("/") + ["32"])[:2]
msk = itom(int(msk))
self.netmask = ltoa(msk)
self.network = ltoa(atol(netw) & msk)
self.broadcast = ltoa(atol(self.network) | (0xffffffff & ~msk))
self.gw = gw
if isinstance(pool, six.string_types):
pool = Net(pool)
if isinstance(pool, Iterable):
pool = [k for k in pool if k not in [gw, self.network, self.broadcast]] # noqa: E501
pool.reverse()
if len(pool) == 1:
pool, = pool
self.pool = pool
self.lease_time = lease_time
self.renewal_time = renewal_time
self.leases = {}
def is_request(self, req):
if not req.haslayer(BOOTP):
return 0
reqb = req.getlayer(BOOTP)
if reqb.op != 1:
return 0
return 1
def print_reply(self, req, reply):
print("Reply %s to %s" % (reply.getlayer(IP).dst, reply.dst))
def make_reply(self, req):
mac = req[Ether].src
if isinstance(self.pool, list):
if mac not in self.leases:
self.leases[mac] = self.pool.pop()
ip = self.leases[mac]
else:
ip = self.pool
repb = req.getlayer(BOOTP).copy()
repb.op = "BOOTREPLY"
repb.yiaddr = ip
repb.siaddr = self.gw
repb.ciaddr = self.gw
repb.giaddr = self.gw
del(repb.payload)
rep = Ether(dst=mac) / IP(dst=ip) / UDP(sport=req.dport, dport=req.sport) / repb # noqa: E501
return rep
class DHCP_am(BOOTP_am):
function_name = "dhcpd"
def make_reply(self, req):
resp = BOOTP_am.make_reply(self, req)
if DHCP in req:
dhcp_options = [(op[0], {1: 2, 3: 5}.get(op[1], op[1]))
for op in req[DHCP].options
if isinstance(op, tuple) and op[0] == "message-type"] # noqa: E501
dhcp_options += [("server_id", self.gw),
("domain", self.domain),
("router", self.gw),
("name_server", self.gw),
("broadcast_address", self.broadcast),
("subnet_mask", self.netmask),
("renewal_time", self.renewal_time),
("lease_time", self.lease_time),
"end"
]
resp /= DHCP(options=dhcp_options)
return resp
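# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The answering machines above are meant to be run interactively with root
# privileges; the interface name and address pool below are placeholders.
#
#   dhcp_server = DHCP_am(iface="eth0",
#                         pool=Net("192.168.1.128/25"),
#                         network="192.168.1.0/24",
#                         gw="192.168.1.1")
#   dhcp_server()   # answers sniffed DISCOVER/REQUEST frames with OFFER/ACK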
| 34.184713 | 131 | 0.537358 |
7d333c9b3eb5ec39028dab3002d5b90e3709883b | 58,157 | py | Python | tests/unit/gapic/dialogflow_v2/test_fulfillments.py | rogers140/python-dialogflow | d9ce91f8590947736560727624fbc0846601ce1c | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.fulfillments import FulfillmentsAsyncClient
from google.cloud.dialogflow_v2.services.fulfillments import FulfillmentsClient
from google.cloud.dialogflow_v2.services.fulfillments import transports
from google.cloud.dialogflow_v2.services.fulfillments.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.dialogflow_v2.types import fulfillment
from google.cloud.dialogflow_v2.types import fulfillment as gcd_fulfillment
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FulfillmentsClient._get_default_mtls_endpoint(None) is None
assert (
FulfillmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert FulfillmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [FulfillmentsClient, FulfillmentsAsyncClient,])
def test_fulfillments_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.FulfillmentsGrpcTransport, "grpc"),
(transports.FulfillmentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_fulfillments_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [FulfillmentsClient, FulfillmentsAsyncClient,])
def test_fulfillments_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_fulfillments_client_get_transport_class():
transport = FulfillmentsClient.get_transport_class()
available_transports = [
transports.FulfillmentsGrpcTransport,
]
assert transport in available_transports
transport = FulfillmentsClient.get_transport_class("grpc")
assert transport == transports.FulfillmentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
FulfillmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FulfillmentsClient)
)
@mock.patch.object(
FulfillmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FulfillmentsAsyncClient),
)
def test_fulfillments_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FulfillmentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FulfillmentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
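# Editor's note (illustrative, not from the generated suite): in application code
# the same ClientOptions plumbing looks roughly like this; the endpoint value is
# a placeholder.
#
#   options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
#   client = FulfillmentsClient(client_options=options)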
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc", "true"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc", "false"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
FulfillmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FulfillmentsClient)
)
@mock.patch.object(
FulfillmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FulfillmentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_fulfillments_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_fulfillments_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_fulfillments_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_fulfillments_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = FulfillmentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_get_fulfillment(
transport: str = "grpc", request_type=fulfillment.GetFulfillmentRequest
):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment(
name="name_value",
display_name="display_name_value",
enabled=True,
generic_web_service=fulfillment.Fulfillment.GenericWebService(
uri="uri_value"
),
)
response = client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
def test_get_fulfillment_from_dict():
test_get_fulfillment(request_type=dict)
def test_get_fulfillment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
client.get_fulfillment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
@pytest.mark.asyncio
async def test_get_fulfillment_async(
transport: str = "grpc_asyncio", request_type=fulfillment.GetFulfillmentRequest
):
client = FulfillmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment(
name="name_value", display_name="display_name_value", enabled=True,
)
)
response = await client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
@pytest.mark.asyncio
async def test_get_fulfillment_async_from_dict():
await test_get_fulfillment_async(request_type=dict)
def test_get_fulfillment_field_headers():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = fulfillment.GetFulfillmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
call.return_value = fulfillment.Fulfillment()
client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_fulfillment_field_headers_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = fulfillment.GetFulfillmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment()
)
await client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_fulfillment_flattened():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_fulfillment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_fulfillment_flattened_error():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_fulfillment(
fulfillment.GetFulfillmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_fulfillment_flattened_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_fulfillment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_fulfillment_flattened_error_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_fulfillment(
fulfillment.GetFulfillmentRequest(), name="name_value",
)
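# Editor's note (illustrative, not part of the generated test suite): against the
# live API, the same get_fulfillment surface is used roughly like this; the
# project id below is a placeholder and Application Default Credentials are
# assumed to be configured.
#
#   from google.cloud.dialogflow_v2 import FulfillmentsClient
#   client = FulfillmentsClient()
#   name = FulfillmentsClient.fulfillment_path("my-gcp-project")
#   fulfillment = client.get_fulfillment(name=name)
#   print(fulfillment.display_name, fulfillment.enabled)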
def test_update_fulfillment(
transport: str = "grpc", request_type=gcd_fulfillment.UpdateFulfillmentRequest
):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment(
name="name_value",
display_name="display_name_value",
enabled=True,
generic_web_service=gcd_fulfillment.Fulfillment.GenericWebService(
uri="uri_value"
),
)
response = client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
def test_update_fulfillment_from_dict():
test_update_fulfillment(request_type=dict)
def test_update_fulfillment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
client.update_fulfillment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
@pytest.mark.asyncio
async def test_update_fulfillment_async(
transport: str = "grpc_asyncio",
request_type=gcd_fulfillment.UpdateFulfillmentRequest,
):
client = FulfillmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment(
name="name_value", display_name="display_name_value", enabled=True,
)
)
response = await client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
@pytest.mark.asyncio
async def test_update_fulfillment_async_from_dict():
await test_update_fulfillment_async(request_type=dict)
def test_update_fulfillment_field_headers():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_fulfillment.UpdateFulfillmentRequest()
request.fulfillment.name = "fulfillment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
call.return_value = gcd_fulfillment.Fulfillment()
client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "fulfillment.name=fulfillment.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_fulfillment_field_headers_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_fulfillment.UpdateFulfillmentRequest()
request.fulfillment.name = "fulfillment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment()
)
await client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "fulfillment.name=fulfillment.name/value",) in kw[
"metadata"
]
def test_update_fulfillment_flattened():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_fulfillment(
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].fulfillment == gcd_fulfillment.Fulfillment(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_fulfillment_flattened_error():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_fulfillment(
gcd_fulfillment.UpdateFulfillmentRequest(),
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_fulfillment_flattened_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_fulfillment(
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].fulfillment == gcd_fulfillment.Fulfillment(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_fulfillment_flattened_error_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_fulfillment(
gcd_fulfillment.UpdateFulfillmentRequest(),
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FulfillmentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FulfillmentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.FulfillmentsGrpcTransport,)
def test_fulfillments_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FulfillmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_fulfillments_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FulfillmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"get_fulfillment",
"update_fulfillment",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_fulfillments_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_fulfillments_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_fulfillments_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FulfillmentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FulfillmentsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_fulfillments_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FulfillmentsGrpcTransport, grpc_helpers),
(transports.FulfillmentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_fulfillments_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_fulfillments_host_no_port():
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_fulfillments_host_with_port():
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_fulfillments_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FulfillmentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_fulfillments_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FulfillmentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_fulfillment_path():
project = "squid"
expected = "projects/{project}/agent/fulfillment".format(project=project,)
actual = FulfillmentsClient.fulfillment_path(project)
assert expected == actual
def test_parse_fulfillment_path():
expected = {
"project": "clam",
}
path = FulfillmentsClient.fulfillment_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_fulfillment_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FulfillmentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = FulfillmentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = FulfillmentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = FulfillmentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = FulfillmentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = FulfillmentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = FulfillmentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = FulfillmentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FulfillmentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = FulfillmentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FulfillmentsTransport, "_prep_wrapped_messages"
) as prep:
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FulfillmentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FulfillmentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
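# Editor's note (illustrative, not from the generated suite): these tests are
# typically run with pytest from the repository root, e.g.
#   pytest tests/unit/gapic/dialogflow_v2/test_fulfillments.py -q
# No network access or real credentials are needed; every RPC is mocked.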
| 38.160761 | 114 | 0.689685 |
a11a884cffac6ba104ee1d938e98fa4bb4b2693e | 38,504 | py | Python | checks.d/vsphere.py | takus/dd-agent | 3029873135f0f55c1bcdf3f825691aafca5abf97 | ["BSD-3-Clause"] | 2 | 2018-01-31T03:50:55.000Z | 2018-01-31T03:51:04.000Z |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from datetime import datetime, timedelta
from hashlib import md5
from Queue import Empty, Queue
import re
import ssl
import time
import traceback
# 3p
from pyVim import connect
from pyVmomi import vim # pylint: disable=E0611
# project
from config import _is_affirmative
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from checks.libs.vmware.all_metrics import ALL_METRICS
from util import Timer
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20 # Default vCenter sampling interval
# Metrics are only collected on vSphere VMs marked by custom field value
VM_MONITORING_FLAG = 'DatadogMonitored'
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50
REALTIME_RESOURCES = {'vm', 'host'}
RESOURCE_TYPE_MAP = {
'vm': vim.VirtualMachine,
'datacenter': vim.Datacenter,
'host': vim.HostSystem,
'datastore': vim.Datastore
}
# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10
EXCLUDE_FILTERS = {
'AlarmStatusChangedEvent': [r'Gray'],
'TaskEvent': [
r'Initialize powering On',
r'Power Off virtual machine',
r'Power On virtual machine',
r'Reconfigure virtual machine',
r'Relocate virtual machine',
r'Suspend virtual machine',
r'Migrate virtual machine',
],
'VmBeingHotMigratedEvent': [],
'VmMessageEvent': [],
'VmMigratedEvent': [],
'VmPoweredOnEvent': [],
'VmPoweredOffEvent': [],
'VmReconfiguredEvent': [],
'VmResumedEvent': [],
'VmSuspendedEvent': [],
}
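# Note on how EXCLUDE_FILTERS is consumed (see VSphereEvent._is_filtered() below):
# only event types listed as keys are kept at all; any other event type is dropped.
# For a kept type, each regex in the list is matched against the event's
# fullFormattedMessage, and a match causes that particular event to be dropped
# (e.g. gray AlarmStatusChangedEvents, or routine power on/off TaskEvents).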
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
UNKNOWN = 'unknown'
def __init__(self, raw_event, event_config=None):
self.raw_event = raw_event
if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
self.event_type = self.raw_event.__class__.__name__[10:]
else:
self.event_type = VSphereEvent.UNKNOWN
self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
self.payload = {
"timestamp": self.timestamp,
"event_type": SOURCE_TYPE,
"source_type_name": SOURCE_TYPE,
}
if event_config is None:
self.event_config = {}
else:
self.event_config = event_config
def _is_filtered(self):
# Filter the unwanted types
if self.event_type not in EXCLUDE_FILTERS:
return True
filters = EXCLUDE_FILTERS[self.event_type]
for f in filters:
if re.search(f, self.raw_event.fullFormattedMessage):
return True
return False
def get_datadog_payload(self):
if self._is_filtered():
return None
transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
if callable(transform_method):
return transform_method()
# Default event transformation
self.payload["msg_title"] = u"{0}".format(self.event_type)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
return self.payload
def transform_vmbeinghotmigratedevent(self):
self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
changes = []
pre_host = self.raw_event.host.name
new_host = self.raw_event.destHost.name
pre_dc = self.raw_event.datacenter.name
new_dc = self.raw_event.destDatacenter.name
pre_ds = self.raw_event.ds.name
new_ds = self.raw_event.destDatastore.name
if pre_host == new_host:
changes.append(u"- No host migration: still {0}".format(new_host))
else:
# Insert in front if it's a change
changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
if pre_dc == new_dc:
changes.append(u"- No datacenter migration: still {0}".format(new_dc))
else:
# Insert in front if it's a change
changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
if pre_ds == new_ds:
changes.append(u"- No datastore migration: still {0}".format(new_ds))
else:
# Insert in front if it's a change
changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes
self.payload["msg_text"] += "\n".join(changes)
self.payload['host'] = self.raw_event.vm.name
self.payload['tags'] = [
'vsphere_host:%s' % pre_host,
'vsphere_host:%s' % new_host,
'vsphere_datacenter:%s' % pre_dc,
'vsphere_datacenter:%s' % new_dc,
]
return self.payload
def transform_alarmstatuschangedevent(self):
if self.event_config.get('collect_vcenter_alarms') is None:
return None
def get_transition(before, after):
vals = {
'gray': -1,
'green': 0,
'yellow': 1,
'red': 2
}
before = before.lower()
after = after.lower()
if before not in vals or after not in vals:
return None
if vals[before] < vals[after]:
return 'Triggered'
else:
return 'Recovered'
TO_ALERT_TYPE = {
'green': 'success',
'yellow': 'warning',
'red': 'error'
}
def get_agg_key(alarm_event):
return 'h:{0}|dc:{1}|a:{2}'.format(
md5(alarm_event.entity.name).hexdigest()[:10],
md5(alarm_event.datacenter.name).hexdigest()[:10],
md5(alarm_event.alarm.name).hexdigest()[:10]
)
# Get the entity type/name
if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
host_type = 'VM'
elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
host_type = 'host'
else:
return None
host_name = self.raw_event.entity.name
# Need a getattr because from is a reserved keyword...
trans_before = getattr(self.raw_event, 'from')
trans_after = self.raw_event.to
transition = get_transition(trans_before, trans_after)
# Bad transition, we shouldn't have got this transition
if transition is None:
return None
self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
transition=transition,
monitor=self.raw_event.alarm.name,
host_type=host_type,
host_name=host_name,
status=trans_after
)
self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
self.payload['event_object'] = get_agg_key(self.raw_event)
self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
before=trans_before,
after=trans_after
)
self.payload['host'] = host_name
return self.payload
def transform_vmmessageevent(self):
self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmmigratedevent(self):
self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredoffevent(self):
self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredonevent(self):
self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmresumingevent(self):
self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
user=self.raw_event.userName,
vm=self.raw_event.vm.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmsuspendedevent(self):
self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmreconfiguredevent(self):
self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
# Add the configuration-change lines, but hide 'unset' values (hacky, but keeps the event readable)
config_change_lines = [line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line]
self.payload["msg_text"] += u"\n".join(config_change_lines)
self.payload["msg_text"] += u"\n@@@"
self.payload['host'] = self.raw_event.vm.name
return self.payload
def atomic_method(method):
""" Decorator to catch the exceptions that happen in detached thread atomic tasks
and display them in the logs.
"""
def wrapper(*args, **kwargs):
try:
method(*args, **kwargs)
except Exception:
args[0].exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
return wrapper
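# Illustrative sketch (not part of the original check; class and method names are
# made up for demonstration): the only contract @atomic_method relies on is that
# the first positional argument (usually `self`) exposes an `exceptionq` Queue,
# because the wrapper pushes the formatted traceback there instead of letting the
# detached worker thread die silently.
class _AtomicMethodExample(object):
    """Minimal example of a worker object compatible with @atomic_method."""
    def __init__(self):
        self.exceptionq = Queue()

    @atomic_method
    def flaky_task(self):
        # Any exception raised here ends up in self.exceptionq, not in the thread.
        raise ValueError("boom")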
class VSphereCheck(AgentCheck):
""" Get performance metrics from a vCenter server and upload them to Datadog
References:
http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
*_atomic jobs perform one single task asynchronously in the ThreadPool, we
don't know exactly when they will finish, but we reap them if they're stuck.
The other calls are performed synchronously.
"""
SERVICE_CHECK_NAME = 'vcenter.can_connect'
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.time_started = time.time()
self.pool_started = False
self.exceptionq = Queue()
# Connections open to vCenter instances
self.server_instances = {}
# Event configuration
self.event_config = {}
# Caching resources, timeouts
self.cache_times = {}
for instance in self.instances:
i_key = self._instance_key(instance)
self.cache_times[i_key] = {
MORLIST: {
LAST: 0,
INTERVAL: init_config.get('refresh_morlist_interval',
REFRESH_MORLIST_INTERVAL)
},
METRICS_METADATA: {
LAST: 0,
INTERVAL: init_config.get('refresh_metrics_metadata_interval',
REFRESH_METRICS_METADATA_INTERVAL)
}
}
self.event_config[i_key] = instance.get('event_config')
# managed entity raw view
self.registry = {}
# First layer of cache (get entities from the tree)
self.morlist_raw = {}
# Second layer, processed from the first one
self.morlist = {}
# Metrics metadata, basically perfCounterId -> {name, group, description}
self.metrics_metadata = {}
self.latest_event_query = {}
def stop(self):
self.stop_pool()
def start_pool(self):
self.log.info("Starting Thread Pool")
self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
self.pool = Pool(self.pool_size)
self.pool_started = True
self.jobs_status = {}
def stop_pool(self):
self.log.info("Stopping Thread Pool")
if self.pool_started:
self.pool.terminate()
self.pool.join()
self.jobs_status.clear()
assert self.pool.get_nworkers() == 0
self.pool_started = False
def restart_pool(self):
self.stop_pool()
self.start_pool()
def _clean(self):
now = time.time()
# TODO: use that
for name in self.jobs_status.keys():
start_time = self.jobs_status[name]
if now - start_time > JOB_TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck.")
self.restart_pool()
break
def _query_event(self, instance):
i_key = self._instance_key(instance)
last_time = self.latest_event_query.get(i_key)
server_instance = self._get_server_instance(instance)
event_manager = server_instance.content.eventManager
# Be sure we don't duplicate any event, never query the "past"
if not last_time:
last_time = self.latest_event_query[i_key] = \
event_manager.latestEvent.createdTime + timedelta(seconds=1)
query_filter = vim.event.EventFilterSpec()
time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
query_filter.time = time_filter
try:
new_events = event_manager.QueryEvents(query_filter)
self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
for event in new_events:
normalized_event = VSphereEvent(event, self.event_config[i_key])
# Can return None if the event is filtered out
event_payload = normalized_event.get_datadog_payload()
if event_payload is not None:
self.event(event_payload)
last_time = event.createdTime + timedelta(seconds=1)
except Exception as e:
# Don't get stuck on a failure to fetch an event
# Ignore them for next pass
self.log.warning("Unable to fetch Events %s", e)
last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
self.latest_event_query[i_key] = last_time
def _instance_key(self, instance):
i_key = instance.get('name')
if i_key is None:
raise Exception("Must define a unique 'name' per vCenter instance")
return i_key
def _should_cache(self, instance, entity):
i_key = self._instance_key(instance)
now = time.time()
return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
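# With the default intervals, this means the morlist is refreshed at most every
# REFRESH_MORLIST_INTERVAL (3 min) and the metrics metadata at most every
# REFRESH_METRICS_METADATA_INTERVAL (10 min), per vCenter instance.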
def _get_server_instance(self, instance):
i_key = self._instance_key(instance)
service_check_tags = [
'vcenter_server:{0}'.format(instance.get('name')),
'vcenter_host:{0}'.format(instance.get('host')),
]
# Check for ssl configs and generate an appropriate ssl context object
ssl_verify = instance.get('ssl_verify', True)
ssl_capath = instance.get('ssl_capath', None)
if not ssl_verify:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
elif ssl_capath:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(capath=ssl_capath)
# If both configs are used, log a message explaining the default
if not ssl_verify and ssl_capath:
self.log.debug("Your configuration is incorrectly attempting to "
"specify both a CA path, and to disable SSL "
"verification. You cannot do both. Proceeding with "
"disabling ssl verification.")
if i_key not in self.server_instances:
try:
# Object returned by SmartConnect is a ServerInstance
# https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.ServiceInstance.html
server_instance = connect.SmartConnect(
host = instance.get('host'),
user = instance.get('username'),
pwd = instance.get('password'),
sslContext = context if not ssl_verify or ssl_capath else None
)
except Exception as e:
err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
self.server_instances[i_key] = server_instance
# Test if the connection is working
try:
self.server_instances[i_key].RetrieveContent()
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
except Exception as e:
err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
if instance.get('all_metrics', False):
return available_metrics
i_key = self._instance_key(instance)
wanted_metrics = []
# Get only the basic metrics
for metric in available_metrics:
# No cache yet, skip it for now
if (i_key not in self.metrics_metadata
or metric.counterId not in self.metrics_metadata[i_key]):
continue
if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
wanted_metrics.append(metric)
return wanted_metrics
def get_external_host_tags(self):
""" Returns a list of tags for every host that is detected by the vSphere
integration.
List of pairs (hostname, list_of_tags)
"""
self.log.debug(u"Sending external_host_tags now")
external_host_tags = []
for instance in self.instances:
i_key = self._instance_key(instance)
mor_by_mor_name = self.morlist.get(i_key)
if not mor_by_mor_name:
self.log.warning(
u"Unable to extract hosts' tags for `%s` vSphere instance."
u"Is the check failing on this instance?", instance
)
continue
for mor in mor_by_mor_name.itervalues():
if mor['hostname']: # some mor's have a None hostname
external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
return external_host_tags
def _discover_mor(self, instance, tags, regexes=None, include_only_marked=False):
"""
Explore vCenter infrastructure to discover hosts, virtual machines
and compute their associated tags.
Start with the vCenter `rootFolder` and proceed recursively,
queueing other such jobs for children nodes.
Example topology:
```
rootFolder
- datacenter1
- compute_resource1 == cluster
- host1
- host2
- host3
- compute_resource2
- host5
- vm1
- vm2
```
If it's a node we want to query metric for, queue it in `self.morlist_raw` that
will be processed by another job.
"""
def _get_parent_tags(mor):
tags = []
if mor.parent:
tag = []
if isinstance(mor.parent, vim.HostSystem):
tag.append(u'vsphere_host:{}'.format(mor.parent.name))
elif isinstance(mor.parent, vim.Folder):
tag.append(u'vsphere_folder:{}'.format(mor.parent.name))
elif isinstance(mor.parent, vim.ComputeResource):
if isinstance(mor.parent, vim.ClusterComputeResource):
tag.append(u'vsphere_cluster:{}'.format(mor.parent.name))
tag.append(u'vsphere_compute:{}'.format(mor.parent.name))
elif isinstance(mor.parent, vim.Datacenter):
tag.append(u'vsphere_datacenter:{}'.format(mor.parent.name))
tags = _get_parent_tags(mor.parent)
if tag:
tags.extend(tag)
return tags
def _get_all_objs(content, vimtype, regexes=None, include_only_marked=False, tags=[]):
"""
Get all the vsphere objects associated with a given type
"""
obj_list = []
container = content.viewManager.CreateContainerView(
content.rootFolder,
[RESOURCE_TYPE_MAP[vimtype]],
True)
for c in container.view:
instance_tags = []
if not self._is_excluded(c, regexes, include_only_marked):
hostname = c.name
if c.parent:
instance_tags += _get_parent_tags(c)
vsphere_type = None
if isinstance(c, vim.VirtualMachine):
vsphere_type = u'vsphere_type:vm'
if c.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
continue
host = c.runtime.host.name
instance_tags.append(u'vsphere_host:{}'.format(host))
elif isinstance(c, vim.HostSystem):
vsphere_type = u'vsphere_type:host'
elif isinstance(c, vim.Datastore):
vsphere_type = u'vsphere_type:datastore'
instance_tags.append(u'vsphere_datastore:{}'.format(c.name))
hostname = None
elif isinstance(c, vim.Datacenter):
vsphere_type = u'vsphere_type:datacenter'
hostname = None
if vsphere_type:
instance_tags.append(vsphere_type)
obj_list.append(dict(mor_type=vimtype, mor=c, hostname=hostname, tags=tags+instance_tags))
return obj_list
# @atomic_method
def build_resource_registry(instance, tags, regexes=None, include_only_marked=False):
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
if i_key not in self.morlist_raw:
self.morlist_raw[i_key] = {}
for resource in sorted(RESOURCE_TYPE_MAP):
self.morlist_raw[i_key][resource] = _get_all_objs(
server_instance.RetrieveContent(),
resource,
regexes,
include_only_marked,
tags
)
# collect...
self.pool.apply_async(
build_resource_registry,
args=(instance, tags, regexes, include_only_marked)
)
@staticmethod
def _is_excluded(obj, regexes, include_only_marked):
"""
Return `True` if the given host or virtual machine is excluded by the user configuration,
i.e. violates any of the following rules:
* Do not match the corresponding `*_include_only` regular expressions
* Is "non-labeled" while `include_only_marked` is enabled (virtual machine only)
"""
# Host
if isinstance(obj, vim.HostSystem):
# Based on `host_include_only_regex`
if regexes and regexes.get('host_include') is not None:
match = re.search(regexes['host_include'], obj.name)
if not match:
return True
# VirtualMachine
elif isinstance(obj, vim.VirtualMachine):
# Based on `vm_include_only_regex`
if regexes and regexes.get('vm_include') is not None:
match = re.search(regexes['vm_include'], obj.name)
if not match:
return True
# Based on `include_only_marked`
if include_only_marked:
monitored = False
for field in obj.customValue:
if field.value == VM_MONITORING_FLAG:
monitored = True
break # we shall monitor
if not monitored:
return True
return False
def _cache_morlist_raw(self, instance):
"""
Initiate the first layer to refresh the list of MORs (`self.morlist`).
Resolve the vCenter `rootFolder` and initiate hosts and virtual machines discovery.
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
for resource_type in RESOURCE_TYPE_MAP:
if i_key in self.morlist_raw and len(self.morlist_raw[i_key].get(resource_type, [])) > 0:
self.log.debug(
"Skipping morlist collection now, RAW results "
"processing not over (latest refresh was {0}s ago)".format(
time.time() - self.cache_times[i_key][MORLIST][LAST])
)
return
self.morlist_raw[i_key] = {}
instance_tag = "vcenter_server:%s" % instance.get('name')
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex')
}
include_only_marked = _is_affirmative(instance.get('include_only_marked', False))
# Discover hosts and virtual machines
self._discover_mor(instance, [instance_tag], regexes, include_only_marked)
self.cache_times[i_key][MORLIST][LAST] = time.time()
@atomic_method
def _cache_morlist_process_atomic(self, instance, mor):
""" Process one item of the self.morlist_raw list by querying the available
metrics for this MOR and then putting it in self.morlist
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
self.log.debug(
"job_atomic: Querying available metrics"
" for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
)
mor['interval'] = REAL_TIME_INTERVAL if mor['mor_type'] in REALTIME_RESOURCES else None
available_metrics = perfManager.QueryAvailablePerfMetric(
mor['mor'], intervalId=mor['interval'])
mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
mor_name = str(mor['mor'])
if mor_name in self.morlist[i_key]:
# Was already here last iteration
self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
else:
self.morlist[i_key][mor_name] = mor
self.morlist[i_key][mor_name]['last_seen'] = time.time()
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_process(self, instance):
""" Empties the self.morlist_raw by popping items and running asynchronously
the _cache_morlist_process_atomic operation that will get the available
metrics for this MOR and put it in self.morlist
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.morlist[i_key] = {}
batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)
processed = 0
for resource_type in RESOURCE_TYPE_MAP:
for i in xrange(batch_size):
try:
mor = self.morlist_raw[i_key][resource_type].pop()
self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))
processed += 1
if processed == batch_size:
break
except (IndexError, KeyError):
self.log.debug("No more work to process in morlist_raw")
break
if processed == batch_size:
break
return
def _vacuum_morlist(self, instance):
""" Check if self.morlist doesn't have some old MORs that are gone, ie
we cannot get any metrics from them anyway (or =0)
"""
i_key = self._instance_key(instance)
morlist = self.morlist[i_key].items()
for mor_name, mor in morlist:
last_seen = mor['last_seen']
if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
del self.morlist[i_key][mor_name]
def _cache_metrics_metadata(self, instance):
""" Get from the server instance, all the performance counters metadata
meaning name/group/description... attached with the corresponding ID
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
new_metadata = {}
for counter in perfManager.perfCounter:
d = dict(
name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
unit = counter.unitInfo.key,
instance_tag = 'instance' # FIXME: replace by what we want to tag!
)
new_metadata[counter.key] = d
self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()
self.log.info("Finished metadata collection for instance {0}".format(i_key))
# Reset metadata
self.metrics_metadata[i_key] = new_metadata
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
### </TEST-INSTRUMENTATION>
def _transform_value(self, instance, counter_id, value):
""" Given the counter_id, look up for the metrics metadata to check the vsphere
type of the counter and apply pre-reporting transformation if needed.
"""
i_key = self._instance_key(instance)
if counter_id in self.metrics_metadata[i_key]:
unit = self.metrics_metadata[i_key][counter_id]['unit']
if unit == 'percent':
return float(value) / 100
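# e.g. a counter whose metadata unit is 'percent' and whose raw value is 87.5
# is submitted as 0.875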
# Defaults to return the value without transformation
return value
@atomic_method
def _collect_metrics_atomic(self, instance, mor):
""" Task that collects the metrics listed in the morlist for one MOR
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
query = vim.PerformanceManager.QuerySpec(maxSample=1,
entity=mor['mor'],
metricId=mor['metrics'],
intervalId=mor['interval'],
format='normal')
results = perfManager.QueryPerf(querySpec=[query])
if results:
for result in results[0].value:
if result.id.counterId not in self.metrics_metadata[i_key]:
self.log.debug("Skipping this metric value, because there is no metadata about it")
continue
instance_name = result.id.instance or "none"
value = self._transform_value(instance, result.id.counterId, result.value[0])
# Metric types are absolute, delta, and rate
metric_name = self.metrics_metadata[i_key][result.id.counterId]['name']
if metric_name not in ALL_METRICS:
self.log.debug(u"Skipping unknown `%s` metric.", metric_name)
continue
tags = ['instance:%s' % instance_name]
if not mor['hostname']: # no host tags available
tags.extend(mor['tags'])
# vsphere "rates" should be submitted as gauges (rate is
# precomputed).
self.gauge(
"vsphere.%s" % metric_name,
value,
hostname=mor['hostname'],
tags=tags
)
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
### </TEST-INSTRUMENTATION>
def collect_metrics(self, instance):
""" Calls asynchronously _collect_metrics_atomic on all MORs, as the
job queue is processed the Aggregator will receive the metrics.
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
return
mors = self.morlist[i_key].items()
self.log.debug("Collecting metrics of %d mors" % len(mors))
vm_count = 0
for mor_name, mor in mors:
if mor['mor_type'] == 'vm':
vm_count += 1
if 'metrics' not in mor or not mor['metrics']:
# self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
continue
self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
def check(self, instance):
if not self.pool_started:
self.start_pool()
### <TEST-INSTRUMENTATION>
self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
### </TEST-INSTRUMENTATION>
# First part: make sure our object repository is neat & clean
if self._should_cache(instance, METRICS_METADATA):
self._cache_metrics_metadata(instance)
if self._should_cache(instance, MORLIST):
self._cache_morlist_raw(instance)
self._cache_morlist_process(instance)
self._vacuum_morlist(instance)
# Second part: do the job
self.collect_metrics(instance)
self._query_event(instance)
# For our own sanity
self._clean()
thread_crashed = False
try:
while True:
self.log.critical(self.exceptionq.get_nowait())
thread_crashed = True
except Empty:
pass
if thread_crashed:
self.stop_pool()
raise Exception("One thread in the pool crashed, check the logs")
### <TEST-INSTRUMENTATION>
self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:final'])
### </TEST-INSTRUMENTATION>
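# Illustrative minimal conf.d/vsphere.yaml for the standalone run below (values are
# assumptions for demonstration; only keys actually read by this check are shown):
#
#     init_config:
#         threads_count: 4
#         refresh_morlist_interval: 180
#         refresh_metrics_metadata_interval: 600
#
#     instances:
#         - name: main-vcenter          # must be unique per instance
#           host: vcenter.example.com
#           username: datadog-readonly
#           password: hunter2
#           ssl_verify: true
#           all_metrics: false          # restrict to BASIC_METRICS
#           host_include_only_regex: prod-.*
#           vm_include_only_regex: prod-.*
#           include_only_marked: true   # only VMs with the DatadogMonitored custom field
#           event_config:
#               collect_vcenter_alarms: true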
if __name__ == '__main__':
check, _instances = VSphereCheck.from_yaml('conf.d/vsphere.yaml')
try:
for i in xrange(200):
print "Loop %d" % i
for instance in check.instances:
check.check(instance)
if check.has_events():
print 'Events: %s' % (check.get_events())
print 'Metrics: %d' % (len(check.get_metrics()))
time.sleep(10)
except Exception as e:
print "Whoops something happened {0}".format(traceback.format_exc())
finally:
check.stop()
| 39.491282 | 137 | 0.597237 |
23decabb8c0f6029c8e7767bb3d56c4fafa65c5b | 925 | py | Python | DQMOffline/CalibCalo/python/MonitorAlCaHcalPhisym_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | DQMOffline/CalibCalo/python/MonitorAlCaHcalPhisym_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | DQMOffline/CalibCalo/python/MonitorAlCaHcalPhisym_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z |
# The following comments couldn't be translated into the new config version:
# prescale
import FWCore.ParameterSet.Config as cms
#
#
# \author Stefano Argiro
#
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
HcalPhiSymMon = DQMEDAnalyzer('DQMHcalPhiSymAlCaReco',
# product to monitor
hbheInputMB = cms.InputTag("hbherecoMB"),
hoInputMB = cms.InputTag("horecoMB"),
hfInputMB = cms.InputTag("hfrecoMBspecial"),
hbheInputNoise = cms.InputTag("hbherecoNoise"),
hoInputNoise = cms.InputTag("horecoNoise"),
hfInputNoise = cms.InputTag("hfrecoNoise"),
rawInputLabel=cms.InputTag("rawDataCollector"),
period = cms.uint32(4096),
# File to save
SaveToFile = cms.untracked.bool(False),
FileName = cms.untracked.string('MonitorAlCaHcalPhiSym.root'),
# DQM folder to write to
FolderName = cms.untracked.string('AlCaReco/HcalPhiSym')
)
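# Illustrative usage sketch (not part of this cfi; the process attribute and path
# names are assumptions): a top-level configuration would typically load this
# fragment and schedule the analyzer, e.g.
#
#     process.load("DQMOffline.CalibCalo.MonitorAlCaHcalPhisym_cfi")
#     process.hcalPhiSymMonPath = cms.Path(process.HcalPhiSymMon)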
| 29.83871 | 76 | 0.707027 |
de059c65018fc6990af1648e4cec32c88f06319b | 3,875 | py | Python | upsea_Ver_0.1/Ea_02_money/moneySecond.py | UpSea/PyAlgoTradeMid | c8edcbc089d92dbfbb8bb25af92a039146f6c6da | ["MIT"] | null | null | null | upsea_Ver_0.1/Ea_02_money/moneySecond.py | UpSea/PyAlgoTradeMid | c8edcbc089d92dbfbb8bb25af92a039146f6c6da | ["MIT"] | null | null | null | upsea_Ver_0.1/Ea_02_money/moneySecond.py | UpSea/PyAlgoTradeMid | c8edcbc089d92dbfbb8bb25af92a039146f6c6da | ["MIT"] | 1 | 2021-04-10T06:04:04.000Z | 2021-04-10T06:04:04.000Z |
# -*- coding: utf-8 -*-
import baseMoney
class moneySecond(baseMoney.baseMoney):
def __init__(self):
'''mid
During a backtest, only two figures about the account can be obtained:
1. the account equity value
2. the account cash value
equity value = cash value + non-cash asset value
Non-cash assets are the value of all positions held in the account.
On a buy:
cash decreases
the purchased asset increases, and its value is recorded as a positive number
1000 cash, buy 400 XAUUSD
1000 = 600 cash + 400 XAUUSD
On a sell:
cash increases
the sold asset decreases, and its value is recorded as a negative number
1000 cash, sell 400 XAUUSD
1000 = 1400 cash - 400 XAUUSD
portfolio_value = strat.getBroker().getEquity()
cash = strat.getBroker().getCash()
feed = self.getFeed()
bars = feed.getCurrentBars()
bar = bars.getBar(self.__instrument)
openPrice = bar.getOpen()
closePrice = self.getLastPrice(self.__instrument) #mid lastPrice == closePrice
share = self.getBroker().getShares(self.__instrument)
self.position_cost = openPrice*share
The asset-balance formula is the same for both buys and sells:
'''
self.portfolioIndex = 0 #mid running count of investment rounds
self.initRisk = 0.60 #mid risk factor
'''
Set the initial investment amount
'''
def getShares(self,strat = None):
curClosePrice = strat.getLastPrice(strat.getInstrument())
strat.info(('moneyFirst.getShare().price:%.3f'%(curClosePrice)))
if(self.portfolioIndex == 0):
'''mid
The money object is created in the expert, but it is called from the strategy,
which is created in the expert at the same level. Because of that it cannot be
used in money.__init__ (at least for now; this may change later), so the
initialization has to happen here, based on the strat argument.
'''
#mid initial total assets = initial cash amount + position value; these values are stored as constants (initial values)
self.initPortfolio ,self.initCash,positions_closeValue = strat.getAssetStructure()
#mid determine the initial position value
self.initSubPortfolio = self.initPortfolio
#mid initial position of the current sub-investment; the current position is always used for sub-investment accounting
self.initSubPositionCost = self.initSubPortfolio * self.initRisk
self.curPositionCost = self.initSubPositionCost
#mid sub-investment index
self.subPortfolioIndex = 0
shares = (self.curPositionCost/curClosePrice)
else:
curPortfolio ,curCash,curPositions_closeValue = strat.getAssetStructure()
#mid if the current equity exceeds the initial value of the current sub-investment, start a new investment round
if(curPortfolio > self.initSubPortfolio):
#mid restart the round counter
self.subPortfolioIndex = 0
#mid initial equity value of the new round
self.initSubPortfolio = curPortfolio
#mid initial position value of the new round
self.initSubPositionCost = self.initSubPortfolio * self.initRisk
#mid current position value of the new round
self.curPositionCost = self.initSubPositionCost
else:
self.subPortfolioIndex = self.subPortfolioIndex + 1
if(self.subPortfolioIndex<=20):
self.curPositionCost = self.initSubPositionCost * 1
elif(self.subPortfolioIndex<=40):
self.curPositionCost = self.initSubPositionCost * 1
elif(self.subPortfolioIndex<=60):
self.curPositionCost = self.initSubPositionCost * 1.3
else:
self.curPositionCost = self.initSubPositionCost * 1.3
shares = (self.curPositionCost/curClosePrice)
self.portfolioIndex = self.portfolioIndex + 1
print "portfolioIndex:%d,subPortfolioIndex:%d,curPositionCost:%.2f,shares to open:%.2f" % (self.portfolioIndex,
self.subPortfolioIndex,
self.curPositionCost,
shares)
return shares
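# Worked example (illustrative figures, not from the original source): with
# initRisk = 0.60, an initial portfolio value of 10000 and a close price of 50,
# the first call computes curPositionCost = 10000 * 0.60 = 6000 and therefore
# returns 6000 / 50 = 120 shares.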
| 39.540816 | 127 | 0.536258 |
56cb5a6e5265983fb9a628a01044ce1e8ed7d9fe | 29,088 | py | Python | google/ads/google_ads/v2/services/language_constant_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/services/language_constant_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | null | null | null | google/ads/google_ads/v2/services/language_constant_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v2.services LanguageConstantService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.ads.google_ads.v2.services import enums
from google.ads.google_ads.v2.services import language_constant_service_client_config
from google.ads.google_ads.v2.services.transports import language_constant_service_grpc_transport
from google.ads.google_ads.v2.proto.resources import account_budget_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_proposal_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_feed_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_parameter_pb2
from google.ads.google_ads.v2.proto.resources import ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_schedule_view_pb2
from google.ads.google_ads.v2.proto.resources import age_range_view_pb2
from google.ads.google_ads.v2.proto.resources import asset_pb2
from google.ads.google_ads.v2.proto.resources import bidding_strategy_pb2
from google.ads.google_ads.v2.proto.resources import billing_setup_pb2
from google.ads.google_ads.v2.proto.resources import campaign_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import campaign_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import campaign_budget_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import campaign_draft_pb2
from google.ads.google_ads.v2.proto.resources import campaign_experiment_pb2
from google.ads.google_ads.v2.proto.resources import campaign_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import campaign_feed_pb2
from google.ads.google_ads.v2.proto.resources import campaign_label_pb2
from google.ads.google_ads.v2.proto.resources import campaign_pb2
from google.ads.google_ads.v2.proto.resources import campaign_shared_set_pb2
from google.ads.google_ads.v2.proto.resources import carrier_constant_pb2
from google.ads.google_ads.v2.proto.resources import change_status_pb2
from google.ads.google_ads.v2.proto.resources import click_view_pb2
from google.ads.google_ads.v2.proto.resources import conversion_action_pb2
from google.ads.google_ads.v2.proto.resources import custom_interest_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_pb2
from google.ads.google_ads.v2.proto.resources import customer_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import customer_feed_pb2
from google.ads.google_ads.v2.proto.resources import customer_label_pb2
from google.ads.google_ads.v2.proto.resources import customer_manager_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_negative_criterion_pb2
from google.ads.google_ads.v2.proto.resources import customer_pb2
from google.ads.google_ads.v2.proto.resources import detail_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import display_keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import distance_view_pb2
from google.ads.google_ads.v2.proto.resources import domain_category_pb2
from google.ads.google_ads.v2.proto.resources import dynamic_search_ads_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import expanded_landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import extension_feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_target_pb2
from google.ads.google_ads.v2.proto.resources import feed_mapping_pb2
from google.ads.google_ads.v2.proto.resources import feed_pb2
from google.ads.google_ads.v2.proto.resources import feed_placeholder_view_pb2
from google.ads.google_ads.v2.proto.resources import gender_view_pb2
from google.ads.google_ads.v2.proto.resources import geo_target_constant_pb2
from google.ads.google_ads.v2.proto.resources import geographic_view_pb2
from google.ads.google_ads.v2.proto.resources import google_ads_field_pb2
from google.ads.google_ads.v2.proto.resources import group_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_group_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_performance_view_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_ad_group_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_campaign_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_negative_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_pb2
from google.ads.google_ads.v2.proto.resources import keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import label_pb2
from google.ads.google_ads.v2.proto.resources import landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import language_constant_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import asset_service_pb2
from google.ads.google_ads.v2.proto.services import asset_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import change_status_service_pb2
from google.ads.google_ads.v2.proto.services import change_status_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import click_view_service_pb2
from google.ads.google_ads.v2.proto.services import click_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_service_pb2
from google.ads.google_ads.v2.proto.services import customer_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import extension_feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import extension_feed_item_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_mapping_service_pb2
from google.ads.google_ads.v2.proto.services import feed_mapping_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_placeholder_view_service_pb2
from google.ads.google_ads.v2.proto.services import feed_placeholder_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_service_pb2
from google.ads.google_ads.v2.proto.services import feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import gender_view_service_pb2
from google.ads.google_ads.v2.proto.services import gender_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import geo_target_constant_service_pb2
from google.ads.google_ads.v2.proto.services import geo_target_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import geographic_view_service_pb2
from google.ads.google_ads.v2.proto.services import geographic_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import google_ads_field_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_field_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import google_ads_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import group_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import group_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import hotel_group_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_group_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import hotel_performance_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_performance_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import invoice_service_pb2
from google.ads.google_ads.v2.proto.services import invoice_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_campaign_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_campaign_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_idea_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_idea_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_keyword_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_negative_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_negative_keyword_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import label_service_pb2
from google.ads.google_ads.v2.proto.services import label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import language_constant_service_pb2
from google.ads.google_ads.v2.proto.services import language_constant_service_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import wrappers_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class LanguageConstantServiceClient(object):
"""Service to fetch language constants."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v2.services.LanguageConstantService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
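# Example (illustrative path, not a real key file):
#     client = LanguageConstantServiceClient.from_service_account_file(
#         '/path/to/service_account.json')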
@classmethod
def language_constant_path(cls, language_constant):
"""Return a fully-qualified language_constant string."""
return google.api_core.path_template.expand(
'languageConstants/{language_constant}',
language_constant=language_constant,
)
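# Example: language_constant_path('1000') returns 'languageConstants/1000'.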
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.LanguageConstantServiceGrpcTransport,
Callable[[~.Credentials, type], ~.LanguageConstantServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = language_constant_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=language_constant_service_grpc_transport.LanguageConstantServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = language_constant_service_grpc_transport.LanguageConstantServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_language_constant(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested language constant.
Args:
resource_name (str): Resource name of the language constant to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.LanguageConstant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_language_constant' not in self._inner_api_calls:
self._inner_api_calls['get_language_constant'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_language_constant,
default_retry=self._method_configs['GetLanguageConstant'].retry,
default_timeout=self._method_configs['GetLanguageConstant'].timeout,
client_info=self._client_info,
)
request = language_constant_service_pb2.GetLanguageConstantRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_language_constant'](request, retry=retry, timeout=timeout, metadata=metadata)
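# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated client).
# A minimal, hedged example of calling the client defined above. It assumes
# that Google Ads API credentials can be discovered from the environment (or
# loaded from a service account key file) and that criterion ID 1000 is the
# language constant you want; both are assumptions, not guarantees made by
# this module.
if __name__ == '__main__':
    # Construct the client from ambient credentials...
    client = LanguageConstantServiceClient()
    # ...or, alternatively, from an explicit service account key file:
    # client = LanguageConstantServiceClient.from_service_account_file('key.json')

    # Build the fully-qualified resource name with the helper defined above.
    resource_name = LanguageConstantServiceClient.language_constant_path(1000)

    # Fetch and print the LanguageConstant proto.
    language_constant = client.get_language_constant(resource_name)
    print(language_constant)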
| 62.554839 | 119 | 0.810472 |
c6c42b7cbfe3c98fa501e805191be2fe65f9573d | 3,618 | py | Python
app/main/views.py | kamransumar/blog-ip | 4e55e641effb79ad96c3d16149c7aea179b627f1 | ["Unlicense"] | null | null | null |
app/main/views.py | kamransumar/blog-ip | 4e55e641effb79ad96c3d16149c7aea179b627f1 | ["Unlicense"] | null | null | null |
app/main/views.py | kamransumar/blog-ip | 4e55e641effb79ad96c3d16149c7aea179b627f1 | ["Unlicense"] | null | null | null |
from flask import render_template, request, redirect, url_for, abort
from flask_login import login_required, current_user
from . import main
from ..models import User, Blog, Comment
from .forms import *
from .. import db, photos
from datetime import datetime
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to The best Movie Review Website Online'
blogs = Blog.query.all()
return render_template('index.html', title=title, blogs=blogs)
@main.route('/user/<name>')
def profile(name):
user = User.query.filter_by(username=name).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user=user)
@main.route('/user/<name>/update', methods=['GET', 'POST'])
@login_required
def update_profile(name):
user = User.query.filter_by(username=name).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.username = form.username.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', name=user.username))
return render_template('profile/update.html', form=form)
@main.route('/user/<name>/update/pic', methods=['POST'])
@login_required
def update_pic(name):
user = User.query.filter_by(username=name).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.image = path
db.session.commit()
return redirect(url_for('main.profile', name=name))
@main.route('/blog/', methods=['GET', 'POST'])
@login_required
def new_blog():
form = BlogForm()
if form.validate_on_submit():
content = form.content.data
title = form.title.data
# Updated review instance
new_blog = Blog(
content=content, title=title)
db.session.add(new_blog)
db.session.commit()
return render_template('blog.html', blog_form=form)
@main.route('/blog/comment/new/<int:id>', methods=['GET', 'POST'])
@login_required
def new_comment(id):
'''
view category that returns a form to create a new comment
'''
form = CommentForm()
blog = Blog.query.filter_by(id=id).first()
if form.validate_on_submit():
title = form.title.data
comment = form.comment.data
# comment instance
new_comment = Comment(
blog_id=blog.id, post_comment=comment, title=title, user=current_user)
# save comment
new_comment.save_comment()
        return redirect(url_for('.one_blog', id=blog.id))
title = f'{blog.title} comment'
return render_template('newcomment.html', title=title, comment_form=form, blog=blog, )
@main.route('/allblogs')
def blog_list():
blogs = Blog.query.all()
return render_template('blog.html', blogs=blogs)
@main.route('/oneblog/<int:id>', methods=['GET', 'POST'])
def one_blog(id):
blog = Blog.query.get(id)
form = CommentForm()
blog = Blog.query.filter_by(id=id).first()
if form.validate_on_submit():
# comment instance
new_comment = Comment(
ratings=0,
like=0,
dislike=0,
content=form.content.data,
time=datetime.utcnow(),
blog=blog,
author=current_user)
# save comment
db.session.add(new_comment)
db.session.commit()
comments = blog.comment_id
return render_template('viewblog.html', blog=blog, comment_form=form, comments=comments)
| 25.659574 | 92 | 0.644831 |
f80376cc7b7cab8bb8a2ab44e925315dd59b4934 | 5,300 | py | Python
app.py | avirupsinha10/Face-Mask-Detection | 80581febe9cd21054e4c972be5238fe975cb1ef6 | ["MIT"] | null | null | null |
app.py | avirupsinha10/Face-Mask-Detection | 80581febe9cd21054e4c972be5238fe975cb1ef6 | ["MIT"] | null | null | null |
app.py | avirupsinha10/Face-Mask-Detection | 80581febe9cd21054e4c972be5238fe975cb1ef6 | ["MIT"] | null | null | null |
import streamlit as st
from PIL import Image, ImageEnhance
import numpy as np
import cv2
import os
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import detect_mask_image
# Setting custom Page Title and Icon with changed layout and sidebar state
st.set_page_config(page_title='Face Mask Detector', page_icon='😷', layout='centered', initial_sidebar_state='expanded')
def local_css(file_name):
""" Method for reading styles.css and applying necessary changes to HTML"""
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
def mask_image():
global RGB_img
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
model = load_model("mask_detector.model")
# load the input image from disk and grab the image spatial
# dimensions
image = cv2.imread("./images/out.jpg")
(h, w) = image.shape[:2]
# construct a blob from the image
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
print("[INFO] computing face detections...")
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
# pass the face through the model to determine if the face
# has a mask or not
(mask, withoutMask) = model.predict(face)[0]
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(image, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask_image()
def mask_detection():
local_css("css/styles.css")
st.markdown('<h1 align="center">😷 Face Mask Detection</h1>', unsafe_allow_html=True)
activities = ["Image", "Webcam"]
st.set_option('deprecation.showfileUploaderEncoding', False)
st.sidebar.markdown("# Mask Detection on?")
choice = st.sidebar.selectbox("Choose among the given options:", activities)
if choice == 'Image':
st.markdown('<h2 align="center">Detection on Image</h2>', unsafe_allow_html=True)
st.markdown("### Upload your image here ⬇")
image_file = st.file_uploader("", type=['jpg']) # upload image
if image_file is not None:
our_image = Image.open(image_file) # making compatible to PIL
            our_image.save('./images/out.jpg')
saved_image = st.image(image_file, caption='', use_column_width=True)
st.markdown('<h3 align="center">Image uploaded successfully!</h3>', unsafe_allow_html=True)
if st.button('Process'):
st.image(RGB_img, use_column_width=True)
if choice == 'Webcam':
st.markdown('<h2 align="center">Detection on Webcam</h2>', unsafe_allow_html=True)
st.markdown('<h3 align="center">This feature will be available soon!</h3>', unsafe_allow_html=True)
mask_detection()
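# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original app).
# The detection pipeline above can also be exercised without the Streamlit UI:
# copy any test photo to ./images/out.jpg (the hard-coded path that
# mask_image() reads) and call the function directly; 'my_photo.jpg' below is
# a placeholder file name, not something shipped with the project.
#
#   import shutil
#   shutil.copy('my_photo.jpg', './images/out.jpg')
#   mask_image()                                       # populates the global RGB_img
#   cv2.imwrite('result.jpg', cv2.cvtColor(RGB_img, cv2.COLOR_RGB2BGR))
#
# To run the full app, launch it through Streamlit:  streamlit run app.py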
| 44.166667 | 120 | 0.619057 |
92c908da80dbb180123f1ac420516a6050741814 | 7,988 | py | Python
docs/conf.py | GMakarenko/ultimatepyxl | 1bf1d1864d37a5935a4fcebcb7a564493b7c57f9 | ["MIT"] | null | null | null |
docs/conf.py | GMakarenko/ultimatepyxl | 1bf1d1864d37a5935a4fcebcb7a564493b7c57f9 | ["MIT"] | null | null | null |
docs/conf.py | GMakarenko/ultimatepyxl | 1bf1d1864d37a5935a4fcebcb7a564493b7c57f9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# editpyxl documentation build configuration file, created by
# sphinx-quickstart on Wed May 7 10:56:37 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'editpyxl'
copyright = u'2014, Adam Morris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'editpyxldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'editpyxl.tex', u'editpyxl Documentation',
u'Adam Morris', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'editpyxl', u'editpyxl Documentation',
[u'Adam Morris'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'editpyxl', u'editpyxl Documentation',
u'Adam Morris', 'editpyxl', 'editpyxl is a Python library to non-destructively edit Excel xlsx/xlsm files.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.080321 | 113 | 0.714572 |
b2b11e99032968bf39c2611d402de07ef9204747 | 4,435 | py | Python
Unet/UNet.py | hong2223/traffic4cast2020 | 767af3714dcfef529f514bf253ef7aa8b3a00203 | ["Apache-2.0"] | null | null | null |
Unet/UNet.py | hong2223/traffic4cast2020 | 767af3714dcfef529f514bf253ef7aa8b3a00203 | ["Apache-2.0"] | null | null | null |
Unet/UNet.py | hong2223/traffic4cast2020 | 767af3714dcfef529f514bf253ef7aa8b3a00203 | ["Apache-2.0"] | 1 | 2021-11-01T12:08:48.000Z | 2021-11-01T12:08:48.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class conv_block(nn.Module):
def __init__(self, ch_in, ch_out, k_size=3):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=k_size, padding=k_size // 2, bias=False),
nn.GroupNorm(ch_out // 16, ch_out),
nn.ReLU(inplace=True),
nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(ch_out // 16, ch_out),
)
self.ident = nn.Sequential(nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, padding=0, bias=False))
self.out = nn.Sequential(nn.ReLU(inplace=True))
def forward(self, x):
res = self.conv(x)
ident = self.ident(x)
return self.out(res + ident)
class up_conv(nn.Module):
def __init__(self, ch_in, ch_out):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(ch_out // 16, ch_out),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.up(x)
return x
class UNet(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(UNet, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
ch_num = [128, 128, 128, 128, 128, 128, 256]
self.Conv1 = conv_block(ch_in=img_ch, ch_out=ch_num[0], k_size=7)
self.Conv2 = conv_block(ch_in=ch_num[0], ch_out=ch_num[1], k_size=5)
self.Conv3 = conv_block(ch_in=ch_num[1], ch_out=ch_num[2])
self.Conv4 = conv_block(ch_in=ch_num[2], ch_out=ch_num[3])
self.Conv5 = conv_block(ch_in=ch_num[3], ch_out=ch_num[4])
self.Conv6 = conv_block(ch_in=ch_num[4], ch_out=ch_num[5])
self.Conv7 = conv_block(ch_in=ch_num[5], ch_out=ch_num[6])
self.Up7 = up_conv(ch_in=ch_num[6], ch_out=ch_num[5])
self.Up_conv7 = conv_block(ch_in=ch_num[5] + ch_num[5], ch_out=ch_num[5])
self.Up6 = up_conv(ch_in=ch_num[5], ch_out=ch_num[4])
self.Up_conv6 = conv_block(ch_in=ch_num[4] + ch_num[4], ch_out=ch_num[4])
self.Up5 = up_conv(ch_in=ch_num[4], ch_out=ch_num[3])
self.Up_conv5 = conv_block(ch_in=ch_num[3] + ch_num[3], ch_out=ch_num[3])
self.Up4 = up_conv(ch_in=ch_num[3], ch_out=ch_num[2])
self.Up_conv4 = conv_block(ch_in=ch_num[2] + ch_num[2], ch_out=ch_num[2])
self.Up3 = up_conv(ch_in=ch_num[2], ch_out=ch_num[1])
self.Up_conv3 = conv_block(ch_in=ch_num[1] + ch_num[1], ch_out=ch_num[1])
self.Up2 = up_conv(ch_in=ch_num[1], ch_out=ch_num[0])
self.Up_conv2 = conv_block(ch_in=ch_num[0] + ch_num[0], ch_out=ch_num[0])
self.out = nn.Conv2d(ch_num[0], output_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
# encoding path
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
x5 = self.Maxpool(x4)
x5 = self.Conv5(x5)
x6 = self.Maxpool(x5)
x6 = self.Conv6(x6)
x7 = self.Maxpool(x6)
x7 = self.Conv7(x7)
# decoding + concat path
d7 = self.Up7(x7)
d7 = torch.cat((x6, d7), dim=1)
d7 = self.Up_conv7(d7)
d6 = self.Up6(d7)
d6 = torch.cat((x5, d6), dim=1)
d6 = self.Up_conv6(d6)
d5 = self.Up5(d6)
d5 = torch.cat((x4, d5), dim=1)
d5 = self.Up_conv5(d5)
d4 = self.Up4(d5)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.out(d2)
return d1
if __name__ == "__main__":
import numpy as np
model = UNet(img_ch=36, output_ch=12)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("# of parameters: ", params)
input_x = torch.rand((2, 36, 496, 448))
out = model(input_x)
print(out.shape)
| 30.586207 | 108 | 0.58805 |
be31f6d65f3c6101b61de2c948d7a1360162e178 | 19,292 | py | Python
dadmatools/models/flair/trainers/.ipynb_checkpoints/language_model_trainer-checkpoint.py | njzr/DadmaTools | 64ff407d5d818d5a9216340cccf0d1cc909d3b1b | ["Apache-2.0"] | 25 | 2021-12-01T15:19:36.000Z | 2022-03-12T12:50:28.000Z |
dadmatools/models/flair/trainers/.ipynb_checkpoints/language_model_trainer-checkpoint.py | ebad84/DadmaTools | b26ad8aa834f642d49bd120bd7cf1fdf40741be1 | ["Apache-2.0"] | 3 | 2021-12-14T06:34:52.000Z | 2022-02-17T08:23:20.000Z |
dadmatools/models/flair/trainers/.ipynb_checkpoints/language_model_trainer-checkpoint.py | ebad84/DadmaTools | b26ad8aa834f642d49bd120bd7cf1fdf40741be1 | ["Apache-2.0"] | 6 | 2021-10-12T13:44:17.000Z | 2022-03-07T13:54:17.000Z |
import time, datetime
import random
import sys
import logging
from pathlib import Path
from typing import Union
from torch import cuda
from torch.utils.data import Dataset, DataLoader
from torch.optim.sgd import SGD
try:
from apex import amp
except ImportError:
amp = None
import models.flair as flair
from models.flair.data import Dictionary
from models.flair.models import LanguageModel
from models.flair.optim import *
from models.flair.training_utils import add_file_handler
log = logging.getLogger("flair")
class TextDataset(Dataset):
def __init__(
self,
path: Path,
dictionary: Dictionary,
expand_vocab: bool = False,
forward: bool = True,
split_on_char: bool = True,
random_case_flip: bool = True,
shuffle_lines: bool = True,
):
assert path.exists()
self.files = None
self.path = path
self.dictionary = dictionary
self.split_on_char = split_on_char
self.forward = forward
self.random_case_flip = random_case_flip
self.expand_vocab = expand_vocab
self.shuffle_lines = shuffle_lines
if path.is_dir():
self.files = sorted([f for f in path.iterdir() if f.exists()])
else:
self.files = [path]
def __len__(self):
return len(self.files)
def __getitem__(self, index=0) -> torch.tensor:
return self.charsplit(
self.files[index],
self.expand_vocab,
self.forward,
self.split_on_char,
self.random_case_flip,
)
def charsplit(
self,
path: Path,
expand_vocab=False,
forward=True,
split_on_char=True,
random_case_flip=True,
) -> torch.tensor:
"""Tokenizes a text file on character basis."""
assert path.exists()
lines = open(path, "r", encoding="utf-8").readlines()
log.info(f"read text file with {len(lines)} lines")
if self.shuffle_lines:
random.shuffle(lines)
log.info(f"shuffled")
tokens = 0
for line in lines:
if split_on_char:
chars = list(line)
else:
chars = line.split()
tokens += len(chars)
# Add chars to the dictionary
if expand_vocab:
for char in chars:
self.dictionary.add_item(char)
ids = torch.zeros(tokens, dtype=torch.long)
if forward:
# charsplit file content
token = 0
for line in lines:
if random_case_flip:
line = self.random_casechange(line)
if split_on_char:
chars = list(line)
else:
chars = line.split()
for char in chars:
if token >= tokens:
break
ids[token] = self.dictionary.get_idx_for_item(char)
token += 1
else:
# charsplit file content
token = tokens - 1
for line in lines:
if random_case_flip:
line = self.random_casechange(line)
if split_on_char:
chars = list(line)
else:
chars = line.split()
for char in chars:
if token >= tokens:
break
ids[token] = self.dictionary.get_idx_for_item(char)
token -= 1
return ids
@staticmethod
def random_casechange(line: str) -> str:
no = random.randint(0, 99)
        if no == 0:
            line = line.lower()
        if no == 1:
            line = line.upper()
return line
def tokenize(self, path: Path):
"""Tokenizes a text file."""
assert path.exists()
# Add words to the dictionary
with open(path, "r") as f:
tokens = 0
for line in f:
words = line.split() + ["<eos>"]
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, "r") as f:
ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)
token = 0
for line in f:
words = line.split() + ["<eos>"]
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
class TextCorpus(object):
def __init__(
self,
path: Union[Path, str],
dictionary: Dictionary,
forward: bool = True,
character_level: bool = True,
random_case_flip: bool = True,
shuffle_lines: bool = True,
):
self.dictionary: Dictionary = dictionary
self.forward = forward
self.split_on_char = character_level
self.random_case_flip = random_case_flip
self.shuffle_lines = shuffle_lines
if type(path) == str:
path = Path(path)
self.train = TextDataset(
path / "train",
dictionary,
False,
self.forward,
self.split_on_char,
self.random_case_flip,
shuffle_lines=self.shuffle_lines,
)
# TextDataset returns a list. valid and test are only one file, so return the first element
self.valid = TextDataset(
path / "valid.txt",
dictionary,
False,
self.forward,
self.split_on_char,
self.random_case_flip,
shuffle_lines=False,
)[0]
self.test = TextDataset(
path / "test.txt",
dictionary,
False,
self.forward,
self.split_on_char,
self.random_case_flip,
shuffle_lines=False,
)[0]
class LanguageModelTrainer:
def __init__(
self,
model: LanguageModel,
corpus: TextCorpus,
optimizer: Optimizer = SGD,
test_mode: bool = False,
epoch: int = 0,
split: int = 0,
loss: float = 10000,
optimizer_state: dict = None,
):
self.model: LanguageModel = model
self.optimizer: Optimizer = optimizer
self.corpus: TextCorpus = corpus
self.test_mode: bool = test_mode
self.loss_function = torch.nn.CrossEntropyLoss()
self.log_interval = 100
self.epoch = epoch
self.split = split
self.loss = loss
self.optimizer_state = optimizer_state
def train(
self,
base_path: Union[Path, str],
sequence_length: int,
learning_rate: float = 20,
mini_batch_size: int = 100,
anneal_factor: float = 0.25,
patience: int = 10,
clip=0.25,
max_epochs: int = 1000,
checkpoint: bool = False,
grow_to_sequence_length: int = 0,
num_workers: int = 2,
use_amp: bool = False,
amp_opt_level: str = "O1",
**kwargs,
):
if use_amp:
if sys.version_info < (3, 0):
raise RuntimeError("Apex currently only supports Python 3. Aborting.")
if amp is None:
raise RuntimeError(
"Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
"to enable mixed-precision training."
)
# cast string to Path
if type(base_path) is str:
base_path = Path(base_path)
add_file_handler(log, base_path / "training.log")
number_of_splits: int = len(self.corpus.train)
val_data = self._batchify(self.corpus.valid, mini_batch_size)
base_path.mkdir(parents=True, exist_ok=True)
loss_txt = base_path / "loss.txt"
savefile = base_path / "best-lm.pt"
try:
epoch = self.epoch
best_val_loss = self.loss
optimizer = self.optimizer(
self.model.parameters(), lr=learning_rate, **kwargs
)
if self.optimizer_state is not None:
optimizer.load_state_dict(self.optimizer_state)
if isinstance(optimizer, (AdamW, SGDW)):
scheduler: ReduceLRWDOnPlateau = ReduceLRWDOnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
else:
scheduler: ReduceLROnPlateau = ReduceLROnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
if use_amp:
self.model, optimizer = amp.initialize(
self.model, optimizer, opt_level=amp_opt_level
)
training_generator = DataLoader(
self.corpus.train, shuffle=False, num_workers=num_workers
)
for epoch in range(self.epoch, max_epochs):
epoch_start_time = time.time()
# Shuffle training files randomly after serially iterating through corpus one
if epoch > 0:
training_generator = DataLoader(
self.corpus.train, shuffle=True, num_workers=num_workers
)
self.model.save_checkpoint(
base_path / f"epoch_{epoch}.pt",
optimizer,
epoch,
0,
best_val_loss,
)
# iterate through training data, starting at self.split (for checkpointing)
for curr_split, train_slice in enumerate(
training_generator, self.split
):
if sequence_length < grow_to_sequence_length:
sequence_length += 1
log.info(f"Sequence length is {sequence_length}")
split_start_time = time.time()
# off by one for printing
curr_split += 1
train_data = self._batchify(train_slice.flatten(), mini_batch_size)
log.info(
"Split %d" % curr_split
+ "\t - ({:%H:%M:%S})".format(datetime.datetime.now())
)
for group in optimizer.param_groups:
learning_rate = group["lr"]
# go into train mode
self.model.train()
# reset variables
hidden = self.model.init_hidden(mini_batch_size)
                    # vocabulary size, used to reshape the model output for the loss
ntokens = len(self.corpus.dictionary)
total_loss = 0
start_time = time.time()
for batch, i in enumerate(
range(0, train_data.size(0) - 1, sequence_length)
):
data, targets = self._get_batch(train_data, i, sequence_length)
if not data.is_cuda and cuda.is_available():
log.info(
"Batch %d is not on CUDA, training will be very slow"
% (batch)
)
raise Exception("data isnt on cuda")
self.model.zero_grad()
optimizer.zero_grad()
# do the forward pass in the model
output, rnn_output, hidden = self.model.forward(data, hidden)
# try to predict the targets
loss = self.loss_function(output.view(-1, ntokens), targets)
# Backward
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
optimizer.step()
total_loss += loss.data
# We detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = self._repackage_hidden(hidden)
# explicitly remove loss to clear up memory
del loss, output, rnn_output
if batch % self.log_interval == 0 and batch > 0:
cur_loss = total_loss.item() / self.log_interval
elapsed = time.time() - start_time
log.info(
"| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | "
"loss {:5.2f} | ppl {:8.2f}".format(
curr_split,
number_of_splits,
batch,
len(train_data) // sequence_length,
elapsed * 1000 / self.log_interval,
cur_loss,
math.exp(cur_loss),
)
)
total_loss = 0
start_time = time.time()
log.info(
"%d seconds for train split %d"
% (time.time() - split_start_time, curr_split)
)
###############################################################################
self.model.eval()
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
scheduler.step(val_loss)
log.info("best loss so far {:5.2f}".format(best_val_loss))
log.info(self.model.generate_text())
if checkpoint:
self.model.save_checkpoint(
base_path / "checkpoint.pt",
optimizer,
epoch,
curr_split,
best_val_loss,
)
# Save the model if the validation loss is the best we've seen so far.
if val_loss < best_val_loss:
self.model.best_score = best_val_loss
self.model.save(savefile)
best_val_loss = val_loss
###############################################################################
# print info
###############################################################################
log.info("-" * 89)
summary = (
"| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
"valid ppl {:8.2f} | learning rate {:3.4f}".format(
curr_split,
number_of_splits,
epoch + 1,
(time.time() - split_start_time),
val_loss,
math.exp(val_loss),
learning_rate,
)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))
except KeyboardInterrupt:
log.info("-" * 89)
log.info("Exiting from training early")
###############################################################################
# final testing
###############################################################################
test_data = self._batchify(self.corpus.test, mini_batch_size)
test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
summary = "TEST: valid loss {:5.2f} | valid ppl {:8.2f}".format(
test_loss, math.exp(test_loss)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
def evaluate(self, data_source, eval_batch_size, sequence_length):
# Turn on evaluation mode which disables dropout.
self.model.eval()
with torch.no_grad():
total_loss = 0
ntokens = len(self.corpus.dictionary)
hidden = self.model.init_hidden(eval_batch_size)
for i in range(0, data_source.size(0) - 1, sequence_length):
data, targets = self._get_batch(data_source, i, sequence_length)
prediction, rnn_output, hidden = self.model.forward(data, hidden)
output_flat = prediction.view(-1, ntokens)
total_loss += len(data) * self.loss_function(output_flat, targets).data
hidden = self._repackage_hidden(hidden)
return total_loss.item() / len(data_source)
@staticmethod
def _batchify(data, batch_size):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * batch_size)
# Evenly divide the data across the bsz batches.
data = data.view(batch_size, -1).t().contiguous()
return data
@staticmethod
def _get_batch(source, i, sequence_length):
seq_len = min(sequence_length, len(source) - 1 - i)
data = source[i : i + seq_len].clone().detach()
target = source[i + 1 : i + 1 + seq_len].view(-1).clone().detach()
data = data.to(flair.device)
target = target.to(flair.device)
return data, target
@staticmethod
def _repackage_hidden(h):
"""Wraps hidden states in new tensors, to detach them from their history."""
return tuple(v.clone().detach() for v in h)
@staticmethod
def load_from_checkpoint(
checkpoint_file: Path, corpus: TextCorpus, optimizer: Optimizer = SGD
):
checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
return LanguageModelTrainer(
checkpoint["model"],
corpus,
optimizer,
epoch=checkpoint["epoch"],
split=checkpoint["split"],
loss=checkpoint["loss"],
optimizer_state=checkpoint["optimizer_state_dict"],
)
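# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A minimal, hedged example of training a character-level language model with
# the classes above. The corpus layout (<corpus_dir>/train/*, valid.txt and
# test.txt), the pre-built character dictionary named 'chars' and the
# LanguageModel hyper-parameters are assumptions; adjust them to your data and
# to the LanguageModel signature in your flair version.
if __name__ == '__main__':
    dictionary = Dictionary.load('chars')            # assumed pre-built character dictionary
    corpus = TextCorpus('path/to/corpus_dir',        # placeholder corpus directory
                        dictionary,
                        forward=True,
                        character_level=True)

    language_model = LanguageModel(dictionary,
                                   is_forward_lm=True,
                                   hidden_size=128,
                                   nlayers=1)

    trainer = LanguageModelTrainer(language_model, corpus)
    trainer.train('resources/language_model',        # output directory for checkpoints
                  sequence_length=250,
                  mini_batch_size=32,
                  max_epochs=10)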
| 34.823105 | 112 | 0.488596 |
1f0ec10b9f37ada40955d263e08b46d386e70ea7 | 2,469 | py | Python
nideep/nets/net_surgery_fcn_cont.py | kashefy/caffe_sandbox | 31afc409df14fece0ac21707185e586dd2d625a9 | ["BSD-2-Clause"] | 15 | 2015-08-26T21:15:15.000Z | 2016-03-10T06:25:08.000Z |
nideep/nets/net_surgery_fcn_cont.py | nigroup/nideep | 31afc409df14fece0ac21707185e586dd2d625a9 | ["BSD-2-Clause"] | 35 | 2016-05-24T13:57:01.000Z | 2018-03-07T18:43:07.000Z |
nideep/nets/net_surgery_fcn_cont.py | nigroup/nideep | 31afc409df14fece0ac21707185e586dd2d625a9 | ["BSD-2-Clause"] | 11 | 2016-05-24T13:42:55.000Z | 2019-10-04T16:20:54.000Z |
'''
Created on Jul 21, 2015
@author: kashefy
'''
import os
import caffe
def make_fully_conv(path_model_src,
path_weights_src,
path_model_full_conv,
param_pairs,
path_weights_dst
):
# PART A: Load the original network and extract the fully connected layers' parameters.
net_full_cnnct = caffe.Net(path_model_src, path_weights_src, caffe.TEST)
params = [src for src, _ in param_pairs]
# fc_params = {name: (weights, biases)}
fc_params = {pr: (net_full_cnnct.params[pr][0].data, net_full_cnnct.params[pr][1].data) for pr in params}
print "Original dimensions:"
for fc in params:
print '{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape)
# PART B: Load the fully convolutional network to transplant the parameters.
net_full_conv = caffe.Net(path_model_full_conv, path_weights_src, caffe.TEST)
params_full_conv = [dst for _, dst in param_pairs]
# conv_params = {name: (weights, biases)}
conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}
for conv in params_full_conv:
        print('{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape))
# Let's transplant!
for pr, pr_conv in zip(params, params_full_conv):
conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays
conv_params[pr_conv][1][...] = fc_params[pr][1]
# save new weights
net_full_conv.save(path_weights_dst)
for pr, pr_conv in zip(params, params_full_conv):
        print(pr_conv)
        print(net_full_conv.params[pr_conv][0].data)
return 0
if __name__ == '__main__':
caffe.set_mode_cpu()
param_pairs = [('fc6', 'fc6-conv'),
('fc7', 'fc7-conv'),
('fc8', 'fc8-conv')]
make_fully_conv(os.path.expanduser('~/models/vgg-16/VGG_ILSVRC_16_layers_deploy.prototxt'),
os.path.expanduser('~/models/vgg-16/VGG_ILSVRC_16_layers.caffemodel'),
os.path.expanduser('~/models/vgg-16/VGG_ILSVRC_16_layers_fcn_deploy_151208.prototxt'),
param_pairs,
os.path.expanduser('~/models/vgg-16/VGG_ILSVRC_16_layers_fcn_151208.caffemodel'),
)
| 37.409091 | 144 | 0.64439 |
c7679c24c63a7447494149e53adce080df266681 | 5,153 | py | Python
tensorflow_probability/python/experimental/inference_gym/targets/banana.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | ["Apache-2.0"] | 2 | 2020-12-17T20:43:24.000Z | 2021-06-11T22:09:16.000Z |
tensorflow_probability/python/experimental/inference_gym/targets/banana.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | ["Apache-2.0"] | null | null | null |
tensorflow_probability/python/experimental/inference_gym/targets/banana.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | ["Apache-2.0"] | 1 | 2020-10-22T21:09:22.000Z | 2020-10-22T21:09:22.000Z |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Banana model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp # Avoid rewriting this to JAX.
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.experimental.inference_gym.targets import model
__all__ = [
'Banana',
]
class Banana(model.Model):
"""Creates a banana-shaped distribution.
This distribution was first described in [1]. The distribution is constructed
by transforming a 2-D normal distribution with scale [10, 1] by shifting the
second dimension by `curvature * (x0**2 - 100)` where `x0` is the value of
  the first dimension. If N > 2 dimensions are requested, the remaining
dimensions are distributed as a standard normal.
This distribution is notable for having relatively narrow tails, while being
derived from a simple, volume-preserving transformation of a normal
distribution. Despite this simplicity, some inference algorithms have trouble
sampling from this distribution.
#### References
1. Haario, H., Saksman, E., & Tamminen, J. (1999). Adaptive proposal
distribution for random walk Metropolis algorithm. Computational
Statistics, 14(3), 375-396.
"""
def __init__(
self,
ndims=2,
curvature=0.03,
name='banana',
pretty_name='Banana',
):
"""Construct the banana model.
Args:
ndims: Python integer. Dimensionality of the distribution. Must be at
least 2.
curvature: Python float. Controls the strength of the curvature of
the distribution.
name: Python `str` name prefixed to Ops created by this class.
pretty_name: A Python `str`. The pretty name of this model.
Raises:
ValueError: If ndims < 2.
"""
if ndims < 2:
raise ValueError('ndims must be at least 2, saw: {}'.format(ndims))
with tf.name_scope(name):
def bijector_fn(x):
"""Banana transform."""
batch_shape = tf.shape(x)[:-1]
shift = tf.concat(
[
tf.zeros(tf.concat([batch_shape, [1]], axis=0)),
curvature * (tf.square(x[..., :1]) - 100),
tf.zeros(tf.concat([batch_shape, [ndims - 2]], axis=0)),
],
axis=-1,
)
return tfb.Shift(shift)
mg = tfd.MultivariateNormalDiag(
loc=tf.zeros(ndims), scale_diag=[10.] + [1.] * (ndims - 1))
banana = tfd.TransformedDistribution(
mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn))
sample_transformations = {
'identity':
model.Model.SampleTransformation(
fn=lambda params: params,
pretty_name='Identity',
# The second dimension is a sum of scaled Chi2 and normal
# distribution.
# Mean of Chi2 with one degree of freedom is 1, but since the
# first element has variance of 100, it cancels with the shift
# (which is why the shift is there).
ground_truth_mean=onp.zeros(ndims),
# Variance of Chi2 with one degree of freedom is 2.
ground_truth_standard_deviation=onp.array(
[10.] + [onp.sqrt(1. + 2 * curvature**2 * 10.**4)] +
[1.] * (ndims - 2)),
)
}
self._banana = banana
super(Banana, self).__init__(
default_event_space_bijector=tfb.Identity(),
event_shape=banana.event_shape,
dtype=banana.dtype,
name=name,
pretty_name=pretty_name,
sample_transformations=sample_transformations,
)
def _unnormalized_log_prob(self, value):
return self._banana.log_prob(value)
def sample(self, sample_shape=(), seed=None, name='sample'):
"""Generate samples of the specified shape from the target distribution.
The returned samples are exact (and independent) samples from the target
distribution of this model.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer or `tfp.util.SeedStream` instance, for seeding PRNG.
name: Name to give to the prefix the generated ops.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._banana.sample(sample_shape, seed=seed, name=name)
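# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A small, hedged sanity check of the model defined above: draw exact samples
# and compare their empirical moments against the documented ground truth.
# The sample size and seed are arbitrary, and it assumes the base Model class
# exposes the `sample_transformations` dict passed to its constructor and that
# TF2 eager execution is enabled.
if __name__ == '__main__':
    banana = Banana(ndims=2, curvature=0.03)
    samples = banana.sample(10000, seed=42)
    print('empirical mean:', tf.reduce_mean(samples, axis=0))
    print('empirical std: ', tf.math.reduce_std(samples, axis=0))
    print('documented std:',
          banana.sample_transformations['identity'].ground_truth_standard_deviation)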
| 35.784722 | 82 | 0.658451 |
9dfa1e025d04047916fe7e6022a1106b243f4b54 | 16,037 | py | Python
sunspec2/modbus/client.py | sunspec/pysunspec2 | e946f86d7167b14039dd5fe4e442cd899072401b | ["Apache-2.0"] | 26 | 2020-09-05T22:23:53.000Z | 2022-03-17T21:44:00.000Z |
sunspec2/modbus/client.py | sunspec/pysunspec2 | e946f86d7167b14039dd5fe4e442cd899072401b | ["Apache-2.0"] | 45 | 2020-09-09T21:34:54.000Z | 2022-02-16T12:28:41.000Z |
sunspec2/modbus/client.py | sunspec/pysunspec2 | e946f86d7167b14039dd5fe4e442cd899072401b | ["Apache-2.0"] | 10 | 2020-09-28T17:23:12.000Z | 2021-09-16T15:50:30.000Z |
"""
Copyright (C) 2020 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import time
import uuid
import sunspec2.mdef as mdef
import sunspec2.device as device
import sunspec2.mb as mb
import sunspec2.modbus.modbus as modbus_client
TEST_NAME = 'test_name'
modbus_rtu_clients = {}
class SunSpecModbusClientError(Exception):
pass
class SunSpecModbusClientTimeout(SunSpecModbusClientError):
pass
class SunSpecModbusClientException(SunSpecModbusClientError):
pass
class SunSpecModbusClientPoint(device.Point):
def read(self):
data = self.model.device.read(self.model.model_addr + self.offset, self.len)
self.set_mb(data=data, dirty=False)
def write(self):
"""Write the point to the physical device"""
data = self.info.to_data(self.value, int(self.len) * 2)
model_addr = self.model.model_addr
point_offset = self.offset
addr = model_addr + point_offset
self.model.device.write(addr, data)
self.dirty = False
class SunSpecModbusClientGroup(device.Group):
def __init__(self, gdef=None, model=None, model_offset=0, group_len=0, data=None, data_offset=0, group_class=None,
point_class=None, index=None):
device.Group.__init__(self, gdef=gdef, model=model, model_offset=model_offset, group_len=group_len,
data=data, data_offset=data_offset, group_class=group_class, point_class=point_class,
index=index)
def read(self, len=None):
if len is None:
len = self.len
# check if currently connected
connected = self.model.device.is_connected()
if not connected:
self.model.device.connect()
if self.access_regions:
data = bytearray()
for region in self.access_regions:
data += self.model.device.read(self.model.model_addr + self.offset + region[0], region[1])
data = bytes(data)
else:
data = self.model.device.read(self.model.model_addr + self.offset, len)
self.set_mb(data=data, dirty=False)
# disconnect if was not connected
if not connected:
self.model.device.disconnect()
def write(self):
start_addr = next_addr = self.model.model_addr + self.offset
data = b''
start_addr, next_addr, data = self.write_points(start_addr, next_addr, data)
if data:
self.model.device.write(start_addr, data)
def write_points(self, start_addr=None, next_addr=None, data=None):
"""
Write all points that have been modified since the last write operation to the physical device
"""
for name, point in self.points.items():
model_addr = self.model.model_addr
point_offset = point.offset
point_addr = model_addr + point_offset
if data and (not point.dirty or point_addr != next_addr):
self.model.device.write(start_addr, data)
data = b''
if point.dirty:
point_len = point.len
point_data = point.info.to_data(point.value, int(point_len) * 2)
if not data:
start_addr = point_addr
next_addr = point_addr + point_len
data += point_data
point.dirty = False
for name, group in self.groups.items():
if isinstance(group, list):
for g in group:
start_addr, next_addr, data = g.write_points(start_addr, next_addr, data)
else:
start_addr, next_addr, data = group.write_points(start_addr, next_addr, data)
return start_addr, next_addr, data
class SunSpecModbusClientModel(SunSpecModbusClientGroup):
def __init__(self, model_id=None, model_addr=0, model_len=0, model_def=None, data=None, mb_device=None,
group_class=SunSpecModbusClientGroup, point_class=SunSpecModbusClientPoint):
self.model_id = model_id
self.model_addr = model_addr
self.model_len = model_len
self.model_def = model_def
self.error_info = ''
self.mid = None
self.device = mb_device
self.model = self
gdef = None
try:
if self.model_def is None:
self.model_def = device.get_model_def(model_id)
if self.model_def is not None:
gdef = self.model_def.get(mdef.GROUP)
except Exception as e:
self.add_error(str(e))
# determine largest point index that contains a group len
group_len_points_index = mdef.get_group_len_points_index(gdef)
# if data len < largest point index that contains a group len, read the rest of the point data
        data_regs = len(data) // 2
remaining = group_len_points_index - data_regs
if remaining > 0:
points_data = self.device.read(self.model_addr + data_regs, remaining)
data += points_data
SunSpecModbusClientGroup.__init__(self, gdef=gdef, model=self.model, model_offset=0, group_len=self.model_len,
data=data, data_offset=0, group_class=group_class, point_class=point_class)
if self.model_len is not None:
self.len = self.model_len
if self.model_len and self.len:
if self.model_len != self.len:
self.add_error('Model error: Discovered length %s does not match computed length %s' %
(self.model_len, self.len))
def add_error(self, error_info):
self.error_info = '%s%s\n' % (self.error_info, error_info)
def read(self, len=None):
SunSpecModbusClientGroup.read(self, len=self.len + 2)
class SunSpecModbusClientDevice(device.Device):
def __init__(self, model_class=SunSpecModbusClientModel):
device.Device.__init__(self, model_class=model_class)
self.did = str(uuid.uuid4())
self.retry_count = 2
self.base_addr_list = [0, 40000, 50000]
self.base_addr = None
def connect(self):
pass
def disconnect(self):
pass
def is_connected(self):
return True
def close(self):
pass
# must be overridden by Modbus protocol implementation
def read(self, addr, count):
return ''
# must be overridden by Modbus protocol implementation
def write(self, addr, data):
return
def scan(self, progress=None, delay=None, connect=True, full_model_read=True):
"""Scan all the models of the physical device and create the
corresponding model objects within the device object based on the
SunSpec model definitions.
"""
self.base_addr = None
self.delete_models()
data = error = ''
connected = False
if connect:
self.connect()
connected = True
if delay is not None:
time.sleep(delay)
if self.base_addr is None:
for addr in self.base_addr_list:
try:
data = self.read(addr, 3)
if data[:4] == b'SunS':
self.base_addr = addr
break
else:
error = 'Device responded - not SunSpec register map'
except SunSpecModbusClientError as e:
if not error:
error = str(e)
except modbus_client.ModbusClientTimeout as e:
if not error:
error = str(e)
except modbus_client.ModbusClientException:
pass
if delay is not None:
time.sleep(delay)
if self.base_addr is not None:
model_id_data = data[4:6]
model_id = mb.data_to_u16(model_id_data)
addr = self.base_addr + 2
mid = 0
while model_id != mb.SUNS_END_MODEL_ID:
# read model and model len separately due to some devices not supplying
# count for the end model id
model_len_data = self.read(addr + 1, 1)
if model_len_data and len(model_len_data) == 2:
if progress is not None:
cont = progress('Scanning model %s' % (model_id))
if not cont:
raise SunSpecModbusClientError('Device scan terminated')
model_len = mb.data_to_u16(model_len_data)
# read model data
### model_data = self.read(addr, model_len + 2)
model_data = model_id_data + model_len_data
model = self.model_class(model_id=model_id, model_addr=addr, model_len=model_len, data=model_data,
mb_device=self)
if full_model_read:
model.read()
model.mid = '%s_%s' % (self.did, mid)
mid += 1
self.add_model(model)
addr += model_len + 2
model_id_data = self.read(addr, 1)
if model_id_data and len(model_id_data) == 2:
model_id = mb.data_to_u16(model_id_data)
else:
break
else:
break
if delay is not None:
time.sleep(delay)
else:
if not error:
error = 'Unknown error'
raise SunSpecModbusClientError(error)
if connected:
self.disconnect()
class SunSpecModbusClientDeviceTCP(SunSpecModbusClientDevice):
def __init__(self, slave_id=1, ipaddr='127.0.0.1', ipport=502, timeout=None, ctx=None, trace_func=None,
max_count=modbus_client.REQ_COUNT_MAX, max_write_count=modbus_client.REQ_WRITE_COUNT_MAX, test=False,
model_class=SunSpecModbusClientModel):
SunSpecModbusClientDevice.__init__(self, model_class=model_class)
self.slave_id = slave_id
self.ipaddr = ipaddr
self.ipport = ipport
self.timeout = timeout
self.ctx = ctx
self.socket = None
self.trace_func = trace_func
self.max_count = max_count
self.max_write_count = max_write_count
self.client = modbus_client.ModbusClientTCP(slave_id=slave_id, ipaddr=ipaddr, ipport=ipport, timeout=timeout,
ctx=ctx, trace_func=trace_func,
max_count=modbus_client.REQ_COUNT_MAX,
max_write_count=modbus_client.REQ_WRITE_COUNT_MAX, test=test)
if self.client is None:
raise SunSpecModbusClientError('No modbus tcp client set for device')
def connect(self):
self.client.connect()
def disconnect(self):
self.client.disconnect()
def is_connected(self):
return self.client.is_connected()
def read(self, addr, count, op=modbus_client.FUNC_READ_HOLDING):
return self.client.read(addr, count, op)
def write(self, addr, data):
return self.client.write(addr, data)
class SunSpecModbusClientDeviceRTU(SunSpecModbusClientDevice):
"""Provides access to a Modbus RTU device.
Parameters:
slave_id :
Modbus slave id.
name :
Name of the serial port such as 'com4' or '/dev/ttyUSB0'.
baudrate :
Baud rate such as 9600 or 19200. Default is 9600 if not specified.
parity :
Parity. Possible values:
:const:`sunspec.core.modbus.client.PARITY_NONE`,
            :const:`sunspec.core.modbus.client.PARITY_EVEN`. Defaults to
            :const:`PARITY_NONE`.
timeout :
Modbus request timeout in seconds. Fractional seconds are permitted
such as .5.
ctx :
Context variable to be used by the object creator. Not used by the
modbus module.
trace_func :
            Trace function to use for detailed logging. No detailed logging is
            performed if a trace function is not supplied.
max_count :
Maximum register count for a single Modbus request.
Raises:
SunSpecModbusClientError: Raised for any general modbus client error.
SunSpecModbusClientTimeoutError: Raised for a modbus client request timeout.
SunSpecModbusClientException: Raised for an exception response to a modbus
client request.
"""
def __init__(self, slave_id, name, baudrate=None, parity=None, timeout=None, ctx=None, trace_func=None,
max_count=modbus_client.REQ_COUNT_MAX, max_write_count=modbus_client.REQ_WRITE_COUNT_MAX,
model_class=SunSpecModbusClientModel):
# test if this super class init is needed
SunSpecModbusClientDevice.__init__(self, model_class=model_class)
self.slave_id = slave_id
self.name = name
self.client = None
self.ctx = ctx
self.trace_func = trace_func
self.max_count = max_count
self.max_write_count = max_write_count
self.client = modbus_client.modbus_rtu_client(name, baudrate, parity)
if self.client is None:
raise SunSpecModbusClientError('No modbus rtu client set for device')
self.client.add_device(self.slave_id, self)
if timeout is not None and self.client.serial is not None:
self.client.serial.timeout = timeout
self.client.serial.writeTimeout = timeout
def open(self):
self.client.open()
def close(self):
"""Close the device. Called when device is not longer in use.
"""
if self.client:
self.client.remove_device(self.slave_id)
def read(self, addr, count, op=modbus_client.FUNC_READ_HOLDING):
"""Read Modbus device registers.
Parameters:
addr :
Starting Modbus address.
count :
Read length in Modbus registers.
op :
Modbus function code for request.
Returns:
Byte string containing register contents.
"""
return self.client.read(self.slave_id, addr, count, op=op, trace_func=self.trace_func, max_count=self.max_count)
def write(self, addr, data):
"""Write Modbus device registers.
Parameters:
addr :
Starting Modbus address.
            data :
Byte string containing register contents.
"""
return self.client.write(self.slave_id, addr, data, trace_func=self.trace_func,
max_write_count=self.max_write_count)
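# --- Illustrative usage sketch (added for documentation; not part of the original module).
# A hedged, minimal example of driving a scan over Modbus TCP. The IP address, port and
# slave id below are assumptions chosen purely for illustration.
if __name__ == "__main__":
    d = SunSpecModbusClientDeviceTCP(slave_id=1, ipaddr='192.168.1.10', ipport=502)
    # scan() connects, probes the well-known base addresses (0, 40000, 50000) for the
    # 'SunS' marker, walks the model chain until the end model id, and disconnects.
    d.scan()
    print('SunSpec base address: %s' % d.base_addr)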
| 37.823113
| 120
| 0.60454
|
28ab413f25ba4a6ef11df0a1408951ed8e2254a1
| 5,475
|
py
|
Python
|
doc/generate_config_rst.py
|
heidihoward/CCF
|
2a048e2ef9da7a5b09e759f232b7abe0fe497e93
|
[
"Apache-2.0"
] | null | null | null |
doc/generate_config_rst.py
|
heidihoward/CCF
|
2a048e2ef9da7a5b09e759f232b7abe0fe497e93
|
[
"Apache-2.0"
] | 22
|
2021-11-09T00:42:40.000Z
|
2022-01-13T11:54:37.000Z
|
doc/generate_config_rst.py
|
heidihoward/CCF
|
2a048e2ef9da7a5b09e759f232b7abe0fe497e93
|
[
"Apache-2.0"
] | 1
|
2021-12-04T22:44:27.000Z
|
2021-12-04T22:44:27.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import sys
import json
import tempfile
import filecmp
# Generated document is included in an existing page, so
# start at heading of depth 1 (equivalent to markdown h2).
START_DEPTH = 1
class MinimalRstGenerator:
def __init__(self):
self._lines = [".."]
self._lines.append(" This is an auto-generated file. DO NOT EDIT.\n")
def _add_lines(self, lines):
self._lines.extend(lines)
self._lines.append("\n")
def add_heading(self, text, depth):
depth_to_char = {0: "=", 1: "-", 2: "~", 3: "+"}
self._add_lines([text, depth_to_char[depth] * len(text)])
def add_line(self, text):
self._add_lines([text])
def render(self):
return "\n".join(self._lines)
def print_attributes(entry):
def stringify_output(s):
return f"``{json.dumps(s)}``"
desc = ""
if "description" in entry:
desc += entry["description"]
if "enum" in entry:
desc += f' (values: {", ".join(stringify_output(s) for s in entry["enum"])})'
if "default" in entry:
desc += f'. Default: {stringify_output(entry["default"])}'
if "minimum" in entry:
desc += f'. Minimum: {stringify_output(entry["minimum"])}'
return desc
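# Hedged example for print_attributes (added comment; the entry below is invented):
#   {"description": "Log level", "enum": ["Info", "Debug"], "default": "Info"}
# returns roughly:
#   Log level (values: ``"Info"``, ``"Debug"``). Default: ``"Info"``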
def print_entry(output, entry, name, required=False, depth=0):
desc = ""
if depth == START_DEPTH:
output.add_heading(f"``{name}``", START_DEPTH)
else:
desc += f"- ``{name}``: "
desc += print_attributes(entry)
if required:
desc += ". Required"
output.add_line(f"{desc}.")
def has_subobjs(obj):
if not isinstance(obj, dict):
return False
return any(
k in ["properties", "additionalProperties", "items"] for k in obj.keys()
) and ("items" not in obj or obj["items"]["type"] == "object")
def print_object(output, obj, depth=0, required_entries=None, additional_desc=None):
required_entries = required_entries or []
for k, v in obj.items():
if has_subobjs(v):
output.add_heading(f"``{k}``", depth)
if "description" in v:
output.add_line(
f'{"**Required.** " if k in required_entries else ""}{v["description"]}.'
)
if additional_desc is not None:
output.add_line(f"Note: {additional_desc}.")
reqs = v.get("required", [])
if "properties" in v:
print_object(
output, v["properties"], depth=depth + 1, required_entries=reqs
)
# Strict schema with no extra fields allowed https://github.com/microsoft/CCF/issues/3813
assert (
"allOf" in v or v.get("additionalProperties") == False
), f"AdditionalProperties not set to false in {k}:{v}"
if "additionalProperties" in v:
if isinstance(v["additionalProperties"], dict):
print_object(
output,
v["additionalProperties"]["properties"],
depth=depth + 1,
required_entries=v["additionalProperties"].get("required", []),
)
if "items" in v and v["items"]["type"] == "object":
print_object(
output,
v["items"]["properties"],
depth=depth + 1,
required_entries=reqs,
)
if "allOf" in v:
for e in v["allOf"]:
((k_, cond_),) = e["if"]["properties"].items()
print_object(
output,
e["then"]["properties"],
depth=depth + 1,
required_entries=reqs,
additional_desc=f'Only if ``{k_}`` is ``"{cond_["const"]}"``',
)
elif k == "additionalProperties" and isinstance(v, bool):
# Skip display of additionalProperties if bool as it is used
# to make the schema stricter
pass
else:
print_entry(output, v, name=k, required=k in required_entries, depth=depth)
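# Hedged illustration of the recursion above (added comment; the schema fragment is invented):
# a property such as
#   "ledger": {"description": "Ledger settings", "additionalProperties": false,
#              "properties": {"directory": {"type": "string", "description": "Ledger directory"}},
#              "required": ["directory"]}
# yields a "``ledger``" heading followed by a bullet for ``directory`` marked as Required.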
def generate_configuration_docs(input_file_path, output_file_path):
with open(input_file_path, "r") as in_:
j = json.load(in_)
output = MinimalRstGenerator()
output.add_heading("Configuration Options", START_DEPTH)
print_object(
output, j["properties"], required_entries=j["required"], depth=START_DEPTH
)
assert (
j.get("additionalProperties") == False
), f"AdditionalProperties not set to false in top level schema"
out = output.render()
# Only update output file if the file will be modified
with tempfile.NamedTemporaryFile("w") as temp:
temp.write(out)
temp.flush()
if not os.path.exists(output_file_path) or not filecmp.cmp(
temp.name, output_file_path
):
with open(output_file_path, "w") as out_:
out_.write(output.render())
print(f"Configuration file successfully generated at {output_file_path}")
if __name__ == "__main__":
if len(sys.argv) <= 2:
print(f"Usage: {sys.argv[0]} <input_path> <output_path>")
sys.exit(1)
generate_configuration_docs(sys.argv[1], sys.argv[2])
| 34.651899
| 105
| 0.557078
|
c143b21a5224cdf8f652246bed038635a8cd1c3f
| 370
|
py
|
Python
|
aula07/ex006.py
|
avacorreia/Exercicio-Curso-em-Video-Pyton-Modulo1
|
f04a3b0bc360318b1420b23df3325908220773aa
|
[
"MIT"
] | null | null | null |
aula07/ex006.py
|
avacorreia/Exercicio-Curso-em-Video-Pyton-Modulo1
|
f04a3b0bc360318b1420b23df3325908220773aa
|
[
"MIT"
] | null | null | null |
aula07/ex006.py
|
avacorreia/Exercicio-Curso-em-Video-Pyton-Modulo1
|
f04a3b0bc360318b1420b23df3325908220773aa
|
[
"MIT"
] | null | null | null |
n1 = int(input('insira o 1 numero'))
n2 = int(input('insira o 2 numero'))
soma = n1 + n2
sub = n1 - n2
div = n1/n2
mul = n1 * n2
pot = n1 ** n2
divint = n1 // n2
res = n1 % n2
print('soma = ', soma)
print('Sub = ', sub)
print('Divisão = ', div)
print('multiplicação', mul)
print('potencia = ', pot)
print('Divisão inteira = ', divint)
print('resto da divisão = ', res)
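# Worked example (added comment): with n1 = 5 and n2 = 2 the program prints
# soma = 7, sub = 3, div = 2.5, mul = 10, pot = 25, divint = 2, res = 1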
| 20.555556
| 36
| 0.605405
|
be62832ec0b93736200fa73f8a1a91e17d22a3ee
| 34,258
|
py
|
Python
|
raiden/tests/integration/long_running/test_settlement.py
|
marcosmartinez7/lumino
|
2a5a74589aaf26172cee6ec23fde5f4fc1938a43
|
[
"MIT"
] | 8
|
2019-06-12T14:50:06.000Z
|
2022-02-15T16:20:07.000Z
|
raiden/tests/integration/long_running/test_settlement.py
|
marcosmartinez7/lumino
|
2a5a74589aaf26172cee6ec23fde5f4fc1938a43
|
[
"MIT"
] | 141
|
2019-06-18T13:04:08.000Z
|
2021-11-23T22:00:32.000Z
|
raiden/tests/integration/long_running/test_settlement.py
|
marcosmartinez7/lumino
|
2a5a74589aaf26172cee6ec23fde5f4fc1938a43
|
[
"MIT"
] | 17
|
2019-05-21T18:09:05.000Z
|
2020-10-29T13:01:01.000Z
|
import random
import gevent
import pytest
from raiden import waiting
from raiden.api.python import RaidenAPI
from raiden.constants import UINT64_MAX, EMPTY_PAYMENT_HASH_INVOICE
from raiden.exceptions import RaidenUnrecoverableError
from raiden.messages import LockedTransfer, LockExpired, RevealSecret
from raiden.storage.restore import channel_state_until_state_change
from raiden.tests.utils import factories
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import raiden_state_changes_search_for_item, search_for_item
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.protocol import WaitForMessage
from raiden.tests.utils.transfer import assert_synced_channel_state, get_channelstate, transfer
from raiden.transfer import channel, views
from raiden.transfer.state_change import (
ContractReceiveChannelBatchUnlock,
ContractReceiveChannelClosed,
ContractReceiveChannelSettled,
)
from raiden.utils import sha3
from raiden.utils.timeout import BlockTimeout
def wait_for_batch_unlock(app, token_network_id, participant, partner):
unlock_event = None
while not unlock_event:
gevent.sleep(1)
state_changes = app.raiden.wal.storage.get_statechanges_by_identifier(
from_identifier=0, to_identifier="latest"
)
unlock_event = search_for_item(
state_changes,
ContractReceiveChannelBatchUnlock,
{
"token_network_identifier": token_network_id,
"participant": participant,
"partner": partner,
},
)
@pytest.mark.parametrize("number_of_nodes", [2])
def test_settle_is_automatically_called(raiden_network, token_addresses):
raise_on_failure(
raiden_network,
run_test_settle_is_automatically_called,
raiden_network=raiden_network,
token_addresses=token_addresses,
)
def run_test_settle_is_automatically_called(raiden_network, token_addresses):
"""Settle is automatically called by one of the nodes."""
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
token_network = views.get_token_network_by_identifier(
views.state_from_app(app0), token_network_identifier
)
channel_identifier = get_channelstate(app0, app1, token_network_identifier).identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[app1.raiden.address]
)
# A ChannelClose event will be generated, this will be polled by both apps
# and each must start a task for calling settle
RaidenAPI(app1.raiden).channel_close(registry_address, token_address, app0.raiden.address)
waiting.wait_for_close(
app0.raiden,
registry_address,
token_address,
[channel_identifier],
app0.raiden.alarm.sleep_time,
)
channel_state = views.get_channelstate_for(
views.state_from_raiden(app0.raiden), registry_address, token_address, app1.raiden.address
)
assert channel_state.close_transaction.finished_block_number
waiting.wait_for_settle(
app0.raiden,
registry_address,
token_address,
[channel_identifier],
app0.raiden.alarm.sleep_time,
)
token_network = views.get_token_network_by_identifier(
views.state_from_app(app0), token_network_identifier
)
assert (
channel_identifier
not in token_network.partneraddresses_to_channelidentifiers[app1.raiden.address]
)
state_changes = app0.raiden.wal.storage.get_statechanges_by_identifier(
from_identifier=0, to_identifier="latest"
)
assert search_for_item(
state_changes,
ContractReceiveChannelClosed,
{
"token_network_identifier": token_network_identifier,
"channel_identifier": channel_identifier,
"transaction_from": app1.raiden.address,
"block_number": channel_state.close_transaction.finished_block_number,
},
)
assert search_for_item(
state_changes,
ContractReceiveChannelSettled,
{
"token_network_identifier": token_network_identifier,
"channel_identifier": channel_identifier,
},
)
@pytest.mark.parametrize("number_of_nodes", [2])
def test_lock_expiry(raiden_network, token_addresses, deposit):
"""Test lock expiry and removal."""
raise_on_failure(
raiden_network,
run_test_lock_expiry,
raiden_network=raiden_network,
token_addresses=token_addresses,
deposit=deposit,
)
def run_test_lock_expiry(raiden_network, token_addresses, deposit):
alice_app, bob_app = raiden_network
token_address = token_addresses[0]
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
)
hold_event_handler = bob_app.raiden.raiden_event_handler
wait_message_handler = bob_app.raiden.message_handler
token_network = views.get_token_network_by_identifier(
views.state_from_app(alice_app), token_network_identifier
)
channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
channel_identifier = channel_state.identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
)
alice_to_bob_amount = 10
identifier = 1
target = bob_app.raiden.address
transfer_1_secret = factories.make_secret(0)
transfer_1_secrethash = sha3(transfer_1_secret)
transfer_2_secret = factories.make_secret(1)
transfer_2_secrethash = sha3(transfer_2_secret)
hold_event_handler.hold_secretrequest_for(secrethash=transfer_1_secrethash)
transfer1_received = wait_message_handler.wait_for_message(
LockedTransfer, {"lock": {"secrethash": transfer_1_secrethash}}
)
transfer2_received = wait_message_handler.wait_for_message(
LockedTransfer, {"lock": {"secrethash": transfer_2_secrethash}}
)
remove_expired_lock_received = wait_message_handler.wait_for_message(
LockExpired, {"secrethash": transfer_1_secrethash}
)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=alice_to_bob_amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=transfer_1_secret,
)
transfer1_received.wait()
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
lock = channel.get_lock(alice_bob_channel_state.our_state, transfer_1_secrethash)
# This is the current state of the protocol:
#
# A -> B LockedTransfer
# B -> A SecretRequest
# - protocol didn't continue
assert_synced_channel_state(
token_network_identifier, alice_app, deposit, [lock], bob_app, deposit, []
)
# Verify lock is registered in both channel states
alice_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
assert transfer_1_secrethash in alice_channel_state.our_state.secrethashes_to_lockedlocks
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_identifier)
assert transfer_1_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_1_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task
remove_expired_lock_received.wait()
alice_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
assert transfer_1_secrethash not in alice_channel_state.our_state.secrethashes_to_lockedlocks
# Verify Bob received the message and processed the LockExpired message
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_identifier)
assert transfer_1_secrethash not in bob_channel_state.partner_state.secrethashes_to_lockedlocks
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_1_secrethash not in alice_chain_state.payment_mapping.secrethashes_to_task
# Make another transfer
alice_to_bob_amount = 10
identifier = 2
hold_event_handler.hold_secretrequest_for(secrethash=transfer_2_secrethash)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=alice_to_bob_amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=transfer_2_secret,
)
transfer2_received.wait()
# Make sure the other transfer still exists
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_2_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_identifier)
assert transfer_2_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks
@pytest.mark.parametrize("number_of_nodes", [2])
def test_batch_unlock(
raiden_network, token_addresses, secret_registry_address, deposit, blockchain_type
):
raise_on_failure(
raiden_network,
run_test_batch_unlock,
raiden_network=raiden_network,
token_addresses=token_addresses,
secret_registry_address=secret_registry_address,
deposit=deposit,
blockchain_type=blockchain_type,
)
def run_test_batch_unlock(
raiden_network, token_addresses, secret_registry_address, deposit, blockchain_type
):
"""Batch unlock can be called after the channel is settled."""
alice_app, bob_app = raiden_network
registry_address = alice_app.raiden.default_registry.address
token_address = token_addresses[0]
token_proxy = alice_app.raiden.chain.token(token_address)
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
)
hold_event_handler = bob_app.raiden.raiden_event_handler
# Take a snapshot early on
alice_app.raiden.wal.snapshot()
token_network = views.get_token_network_by_identifier(
views.state_from_app(alice_app), token_network_identifier
)
channel_identifier = get_channelstate(alice_app, bob_app, token_network_identifier).identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
)
alice_initial_balance = token_proxy.balance_of(alice_app.raiden.address)
bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)
# Take snapshot before transfer
alice_app.raiden.wal.snapshot()
alice_to_bob_amount = 10
identifier = 1
target = bob_app.raiden.address
secret = sha3(target)
secrethash = sha3(secret)
secret_request_event = hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=alice_to_bob_amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=secret,
)
secret_request_event.get() # wait for the messages to be exchanged
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
lock = channel.get_lock(alice_bob_channel_state.our_state, secrethash)
# This is the current state of the protocol:
#
# A -> B LockedTransfer
# B -> A SecretRequest
# - protocol didn't continue
assert_synced_channel_state(
token_network_identifier, alice_app, deposit, [lock], bob_app, deposit, []
)
# Take a snapshot early on
alice_app.raiden.wal.snapshot()
our_balance_proof = alice_bob_channel_state.our_state.balance_proof
# Test WAL restore to return the latest channel state
restored_channel_state = channel_state_until_state_change(
raiden=alice_app.raiden,
canonical_identifier=alice_bob_channel_state.canonical_identifier,
state_change_identifier="latest",
)
our_restored_balance_proof = restored_channel_state.our_state.balance_proof
assert our_balance_proof == our_restored_balance_proof
# A ChannelClose event will be generated, this will be polled by both apps
# and each must start a task for calling settle
RaidenAPI(bob_app.raiden).channel_close(
registry_address, token_address, alice_app.raiden.address
)
secret_registry_proxy = alice_app.raiden.chain.secret_registry(secret_registry_address)
secret_registry_proxy.register_secret(secret=secret)
assert lock, "the lock must still be part of the node state"
msg = "the secret must be registered before the lock expires"
assert lock.expiration > alice_app.raiden.get_block_number(), msg
assert lock.secrethash == sha3(secret)
waiting.wait_for_settle(
alice_app.raiden,
registry_address,
token_address,
[alice_bob_channel_state.identifier],
alice_app.raiden.alarm.sleep_time,
)
token_network = views.get_token_network_by_identifier(
views.state_from_app(bob_app), token_network_identifier
)
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[alice_app.raiden.address]
)
# Wait for both nodes to call batch unlock
timeout = 30 if blockchain_type == "parity" else 10
with gevent.Timeout(timeout):
wait_for_batch_unlock(
app=bob_app,
token_network_id=token_network_identifier,
participant=alice_bob_channel_state.partner_state.address,
partner=alice_bob_channel_state.our_state.address,
)
token_network = views.get_token_network_by_identifier(
views.state_from_app(bob_app), token_network_identifier
)
assert (
channel_identifier
not in token_network.partneraddresses_to_channelidentifiers[alice_app.raiden.address]
)
alice_new_balance = alice_initial_balance + deposit - alice_to_bob_amount
bob_new_balance = bob_initial_balance + deposit + alice_to_bob_amount
assert token_proxy.balance_of(alice_app.raiden.address) == alice_new_balance
assert token_proxy.balance_of(bob_app.raiden.address) == bob_new_balance
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
def test_settled_lock(token_addresses, raiden_network, deposit):
raise_on_failure(
raiden_network,
run_test_settled_lock,
token_addresses=token_addresses,
raiden_network=raiden_network,
deposit=deposit,
)
def run_test_settled_lock(token_addresses, raiden_network, deposit):
""" Any transfer following a secret reveal must update the locksroot, so
that an attacker cannot reuse a secret to double claim a lock.
"""
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
amount = 30
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
hold_event_handler = app1.raiden.raiden_event_handler
address0 = app0.raiden.address
address1 = app1.raiden.address
deposit0 = deposit
deposit1 = deposit
token_proxy = app0.raiden.chain.token(token_address)
initial_balance0 = token_proxy.balance_of(address0)
initial_balance1 = token_proxy.balance_of(address1)
identifier = 1
target = app1.raiden.address
secret = sha3(target)
secrethash = sha3(secret)
secret_available = hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
app0.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=secret,
)
secret_available.wait() # wait for the messages to be exchanged
# Save the merkle tree leaves from the pending transfer, used to test the unlock
channelstate_0_1 = get_channelstate(app0, app1, token_network_identifier)
batch_unlock = channel.get_batch_unlock(channelstate_0_1.our_state)
assert batch_unlock
hold_event_handler.release_secretrequest_for(app1.raiden, secrethash)
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount,
identifier=2,
)
RaidenAPI(app1.raiden).channel_close(registry_address, token_address, app0.raiden.address)
waiting.wait_for_settle(
app1.raiden,
app1.raiden.default_registry.address,
token_address,
[channelstate_0_1.identifier],
app1.raiden.alarm.sleep_time,
)
netting_channel = app1.raiden.chain.payment_channel(
canonical_identifier=channelstate_0_1.canonical_identifier
)
# The transfer locksroot must not contain the unlocked lock, the
# unlock must fail.
with pytest.raises(RaidenUnrecoverableError):
netting_channel.unlock(
merkle_tree_leaves=batch_unlock,
participant=channelstate_0_1.our_state.address,
partner=channelstate_0_1.partner_state.address,
)
expected_balance0 = initial_balance0 + deposit0 - amount * 2
expected_balance1 = initial_balance1 + deposit1 + amount * 2
assert token_proxy.balance_of(address0) == expected_balance0
assert token_proxy.balance_of(address1) == expected_balance1
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [1])
def test_automatic_secret_registration(raiden_chain, token_addresses):
raise_on_failure(
raiden_chain,
run_test_automatic_secret_registration,
raiden_chain=raiden_chain,
token_addresses=token_addresses,
)
def run_test_automatic_secret_registration(raiden_chain, token_addresses):
app0, app1 = raiden_chain
token_address = token_addresses[0]
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
hold_event_handler = app1.raiden.raiden_event_handler
amount = 100
identifier = 1
message_handler = WaitForMessage()
app1.raiden.message_handler = message_handler
target = app1.raiden.address
secret = sha3(target)
secrethash = sha3(secret)
hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
locked_transfer_received = message_handler.wait_for_message(LockedTransfer, {})
app0.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=secret,
)
# Wait for app1 to receive the locked transfer.
locked_transfer_received.wait()
# Stop app0 to avoid sending the unlock, this must be done after the locked
# transfer is sent.
app0.raiden.transport.stop()
reveal_secret = RevealSecret(message_identifier=random.randint(0, UINT64_MAX), secret=secret)
app0.raiden.sign(reveal_secret)
message_handler.on_message(app1.raiden, reveal_secret)
chain_state = views.state_from_app(app1)
secrethash = sha3(secret)
target_task = chain_state.payment_mapping.secrethashes_to_task[secrethash]
lock_expiration = target_task.target_state.transfer.lock.expiration
app1.raiden.chain.wait_until_block(target_block_number=lock_expiration)
assert app1.raiden.default_secret_registry.is_secret_registered(
secrethash=secrethash, block_identifier="latest"
)
@pytest.mark.xfail(reason="test incomplete")
@pytest.mark.parametrize("number_of_nodes", [3])
def test_start_end_attack(token_addresses, raiden_chain, deposit):
raise_on_failure(
raiden_chain,
run_test_start_end_attack,
token_addresses=token_addresses,
raiden_chain=raiden_chain,
deposit=deposit,
)
def run_test_start_end_attack(token_addresses, raiden_chain, deposit):
""" An attacker can try to steal tokens from a hub or the last node in a
path.
The attacker needs to use two addresses (A1 and A2) and connect both to the
hub H. Once connected a mediated transfer is initialized from A1 to A2
through H. Once the node A2 receives the mediated transfer the attacker
uses the known secret and reveal to close and settle the channel H-A2,
without revealing the secret to H's raiden node.
    The intention is to make the hub transfer the token while being unable to
    claim the corresponding token from A1."""
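    # Hedged sketch of the flow described above (added comment, illustrative only):
    #   A1 --LockedTransfer--> H --LockedTransfer--> A2
    #   A2 already knows the secret (the attacker controls both A1 and A2),
    #   so A2 closes and settles the H-A2 channel on-chain without revealing the
    #   secret to H, leaving H unable to claim the locked amount on A1-H.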
amount = 30
token = token_addresses[0]
app0, app1, app2 = raiden_chain # pylint: disable=unbalanced-tuple-unpacking
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token
)
hold_event_handler = app2.raiden.raiden_event_handler
# the attacker owns app0 and app2 and creates a transfer through app1
identifier = 1
target = app2.raiden.address
secret = sha3(target)
secrethash = sha3(secret)
hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
app0.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=amount,
fee=0,
target=target,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=secret,
)
gevent.sleep(1) # wait for the messages to be exchanged
attack_channel = get_channelstate(app2, app1, token_network_identifier)
attack_transfer = None # TODO
attack_contract = attack_channel.external_state.netting_channel.address
hub_contract = get_channelstate(
app1, app0, token_network_identifier
).external_state.netting_channel.address
# the attacker can create a merkle proof of the locked transfer
# <the commented code below is left for documentation purposes>
# lock = attack_channel.partner_state.get_lock_by_secrethash(secrethash)
# unlock_proof = attack_channel.partner_state.compute_proof_for_lock(secret, lock)
# start the settle counter
attack_balance_proof = attack_transfer.to_balanceproof()
attack_channel.netting_channel.channel_close(attack_balance_proof)
# wait until the last block to reveal the secret, hopefully we are not
# missing a block during the test
app2.raiden.chain.wait_until_block(target_block_number=attack_transfer.lock.expiration - 1)
# since the attacker knows the secret he can net the lock
# <the commented code below is left for documentation purposes>
# attack_channel.netting_channel.unlock(
# UnlockProofState(unlock_proof, attack_transfer.lock, secret)
# )
# XXX: verify that the secret was publicized
# at this point the hub might not know the secret yet, and won't be able to
# claim the token from the channel A1 - H
# the attacker settles the contract
app2.raiden.chain.next_block()
attack_channel.netting_channel.settle(token, attack_contract)
# at this point the attacker has the "stolen" funds
attack_contract = app2.raiden.chain.token_hashchannel[token][attack_contract]
assert attack_contract.participants[app2.raiden.address]["netted"] == deposit + amount
assert attack_contract.participants[app1.raiden.address]["netted"] == deposit - amount
# and the hub's channel A1-H doesn't
hub_contract = app1.raiden.chain.token_hashchannel[token][hub_contract]
assert hub_contract.participants[app0.raiden.address]["netted"] == deposit
assert hub_contract.participants[app1.raiden.address]["netted"] == deposit
# to mitigate the attack the Hub _needs_ to use a lower expiration for the
# locked transfer between H-A2 than A1-H. For A2 to acquire the token
# it needs to make the secret public in the blockchain so it publishes the
    # secret through an event and the Hub is able to recover its funds
app1.raiden.chain.next_block()
# XXX: verify that the Hub has found the secret, close and settle the channel
# the hub has acquired its token
hub_contract = app1.raiden.chain.token_hashchannel[token][hub_contract]
assert hub_contract.participants[app0.raiden.address]["netted"] == deposit + amount
assert hub_contract.participants[app1.raiden.address]["netted"] == deposit - amount
@pytest.mark.parametrize("number_of_nodes", [2])
def test_automatic_dispute(raiden_network, deposit, token_addresses):
raise_on_failure(
raiden_network,
run_test_automatic_dispute,
raiden_network=raiden_network,
deposit=deposit,
token_addresses=token_addresses,
)
def run_test_automatic_dispute(raiden_network, deposit, token_addresses):
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
channel0 = get_channelstate(app0, app1, token_network_identifier)
token_proxy = app0.raiden.chain.token(channel0.token_address)
initial_balance0 = token_proxy.balance_of(app0.raiden.address)
initial_balance1 = token_proxy.balance_of(app1.raiden.address)
amount0_1 = 10
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount0_1,
identifier=1,
)
amount1_1 = 50
transfer(
initiator_app=app1,
target_app=app0,
token_address=token_address,
amount=amount1_1,
identifier=2,
)
amount0_2 = 60
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount0_2,
identifier=3,
)
    # Alice can only provide one of Bob's transfers, so she is incentivized to
# use the one with the largest transferred_amount.
RaidenAPI(app0.raiden).channel_close(registry_address, token_address, app1.raiden.address)
# Bob needs to provide a transfer otherwise its netted balance will be
# wrong, so he is incentivised to use Alice's transfer with the largest
# transferred_amount.
#
# This is done automatically
# channel1.external_state.update_transfer(
# alice_second_transfer,
# )
waiting.wait_for_settle(
app0.raiden,
registry_address,
token_address,
[channel0.identifier],
app0.raiden.alarm.sleep_time,
)
# check that the channel is properly settled and that Bob's client
# automatically called updateTransfer() to reflect the actual transactions
assert token_proxy.balance_of(token_network_identifier) == 0
total0 = amount0_1 + amount0_2
total1 = amount1_1
expected_balance0 = initial_balance0 + deposit - total0 + total1
expected_balance1 = initial_balance1 + deposit + total0 - total1
assert token_proxy.balance_of(app0.raiden.address) == expected_balance0
assert token_proxy.balance_of(app1.raiden.address) == expected_balance1
@pytest.mark.parametrize("number_of_nodes", [2])
def test_batch_unlock_after_restart(raiden_network, token_addresses, deposit):
raise_on_failure(
raiden_network,
run_test_batch_unlock_after_restart,
raiden_network=raiden_network,
token_addresses=token_addresses,
deposit=deposit,
)
def run_test_batch_unlock_after_restart(raiden_network, token_addresses, deposit):
"""Simulate the case where:
- A sends B a transfer
- B sends A a transfer
- Secrets were never revealed
- B closes channel
- A crashes
- Wait for settle
- Wait for unlock from B
- Restart A
    At this point, the current unlock logic will only try to unlock if the node
    gains from unlocking, which means that the node will try to unlock either
    side. In the above scenario, each node will unlock its side.
This test makes sure that we do NOT invalidate A's unlock transaction based
on the ContractReceiveChannelBatchUnlock caused by B's unlock.
"""
alice_app, bob_app = raiden_network
registry_address = alice_app.raiden.default_registry.address
token_address = token_addresses[0]
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_app(alice_app),
payment_network_id=alice_app.raiden.default_registry.address,
token_address=token_address,
)
timeout = 10
token_network = views.get_token_network_by_identifier(
chain_state=views.state_from_app(alice_app), token_network_id=token_network_identifier
)
channel_identifier = get_channelstate(alice_app, bob_app, token_network_identifier).identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
)
alice_to_bob_amount = 10
identifier = 1
alice_transfer_secret = sha3(alice_app.raiden.address)
alice_transfer_secrethash = sha3(alice_transfer_secret)
bob_transfer_secret = sha3(bob_app.raiden.address)
bob_transfer_secrethash = sha3(bob_transfer_secret)
alice_transfer_hold = bob_app.raiden.raiden_event_handler.hold_secretrequest_for(
secrethash=alice_transfer_secrethash
)
bob_transfer_hold = alice_app.raiden.raiden_event_handler.hold_secretrequest_for(
secrethash=bob_transfer_secrethash
)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=alice_to_bob_amount,
fee=0,
target=bob_app.raiden.address,
identifier=identifier,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=alice_transfer_secret,
)
bob_app.raiden.start_mediated_transfer_with_secret(
token_network_identifier=token_network_identifier,
amount=alice_to_bob_amount,
fee=0,
target=alice_app.raiden.address,
identifier=identifier + 1,
payment_hash_invoice=EMPTY_PAYMENT_HASH_INVOICE,
secret=bob_transfer_secret,
)
alice_transfer_hold.wait(timeout=timeout)
bob_transfer_hold.wait(timeout=timeout)
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
alice_lock = channel.get_lock(alice_bob_channel_state.our_state, alice_transfer_secrethash)
bob_lock = channel.get_lock(alice_bob_channel_state.partner_state, bob_transfer_secrethash)
    # This is the current state of the protocol:
#
# A -> B LockedTransfer
# - protocol didn't continue
assert_synced_channel_state(
token_network_identifier=token_network_identifier,
app0=alice_app,
balance0=deposit,
pending_locks0=[alice_lock],
app1=bob_app,
balance1=deposit,
pending_locks1=[bob_lock],
)
# A ChannelClose event will be generated, this will be polled by both apps
# and each must start a task for calling settle
RaidenAPI(bob_app.raiden).channel_close(
registry_address=registry_address,
token_address=token_address,
partner_address=alice_app.raiden.address,
)
# wait for the close transaction to be mined, this is necessary to compute
# the timeout for the settle
with gevent.Timeout(timeout):
waiting.wait_for_close(
raiden=alice_app.raiden,
payment_network_id=registry_address,
token_address=token_address,
channel_ids=[alice_bob_channel_state.identifier],
retry_timeout=alice_app.raiden.alarm.sleep_time,
)
channel_closed = raiden_state_changes_search_for_item(
bob_app.raiden,
ContractReceiveChannelClosed,
{
"canonical_identifier": {
"token_network_address": token_network_identifier,
"channel_identifier": alice_bob_channel_state.identifier,
}
},
)
settle_max_wait_block = (
channel_closed.block_number + alice_bob_channel_state.settle_timeout * 2
)
settle_timeout = BlockTimeout(
RuntimeError("settle did not happen"),
bob_app.raiden,
settle_max_wait_block,
alice_app.raiden.alarm.sleep_time,
)
with settle_timeout:
waiting.wait_for_settle(
raiden=alice_app.raiden,
payment_network_id=registry_address,
token_address=token_address,
channel_ids=[alice_bob_channel_state.identifier],
retry_timeout=alice_app.raiden.alarm.sleep_time,
)
with gevent.Timeout(timeout):
wait_for_batch_unlock(
app=bob_app,
token_network_id=token_network_identifier,
participant=alice_bob_channel_state.partner_state.address,
partner=alice_bob_channel_state.our_state.address,
)
alice_app.start()
with gevent.Timeout(timeout):
wait_for_batch_unlock(
app=alice_app,
token_network_id=token_network_identifier,
participant=alice_bob_channel_state.partner_state.address,
partner=alice_bob_channel_state.our_state.address,
)
| 36.367304
| 99
| 0.740177
|
1df747a0e99e6e5eba676deaf0da2f70b26e4816
| 806
|
py
|
Python
|
ex32.py
|
EthanPen/Learn-Python-The-Hard-Way
|
42c679dcac3d971b72f6e92714146ff8e1488c75
|
[
"CNRI-Python"
] | null | null | null |
ex32.py
|
EthanPen/Learn-Python-The-Hard-Way
|
42c679dcac3d971b72f6e92714146ff8e1488c75
|
[
"CNRI-Python"
] | null | null | null |
ex32.py
|
EthanPen/Learn-Python-The-Hard-Way
|
42c679dcac3d971b72f6e92714146ff8e1488c75
|
[
"CNRI-Python"
] | null | null | null |
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list
for number in the_count:
print "This is count %d" % number
# same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
print "Adding %d to the list." % i
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print "Element was: %d" % i
| 27.793103
| 59
| 0.657568
|
7b13b24032ffc40007e64b7e65ed6a7fa5815b5b
| 2,750
|
py
|
Python
|
script/python/python-ad-0.9/lib/ad/util/parser.py
|
nkysg/Asenal
|
12444c7e50fae2be82d3c4737715a52e3693a3cd
|
[
"Apache-2.0"
] | 9
|
2015-03-28T16:05:01.000Z
|
2021-05-15T02:53:44.000Z
|
lib/ad/util/parser.py
|
geertj/python-ad
|
3089eae072bd2e871c11251961ec35a09b83dd38
|
[
"MIT"
] | null | null | null |
lib/ad/util/parser.py
|
geertj/python-ad
|
3089eae072bd2e871c11251961ec35a09b83dd38
|
[
"MIT"
] | 7
|
2015-04-14T13:04:52.000Z
|
2021-02-24T12:50:05.000Z
|
#
# This file is part of Python-AD. Python-AD is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# Python-AD is copyright (c) 2007 by the Python-AD authors. See the file
# "AUTHORS" for a complete overview.
import sys
import os.path
from ply import lex, yacc
class Parser(object):
"""Wrapper object for PLY lexer/parser."""
exception = ValueError
def _parsetab_name(cls, fullname=True):
"""Return a name for PLY's parsetab file."""
ptname = sys.modules[cls.__module__].__name__ + '_tab'
if not fullname:
ptname = ptname.split('.')[-1]
return ptname
_parsetab_name = classmethod(_parsetab_name)
def _write_parsetab(cls):
"""Write parser table (distribution purposes)."""
parser = cls()
tabname = cls._parsetab_name(False)
yacc.yacc(module=parser, debug=0, tabmodule=tabname)
_write_parsetab = classmethod(_write_parsetab)
def parse(self, input, fname=None):
lexer = lex.lex(object=self)
if hasattr(input, 'read'):
input = input.read()
lexer.input(input)
self.m_input = input
self.m_fname = fname
parser = yacc.yacc(module=self, debug=0,
tabmodule=self._parsetab_name())
parsed = parser.parse(lexer=lexer, tracking=True)
return parsed
def _position(self, o):
if hasattr(o, 'lineno') and hasattr(o, 'lexpos'):
lineno = o.lineno
lexpos = o.lexpos
pos = self.m_input.rfind('\n', 0, lexpos)
column = lexpos - pos
else:
lineno = None
column = None
return lineno, column
def t_ANY_error(self, t):
err = self.exception()
msg = 'illegal token'
if self.m_fname:
err.fname = self.m_fname
msg += ' in file %s' % self.m_fname
lineno, column = self._position(t)
if lineno is not None and column is not None:
msg += ' at %d:%d' % (lineno, column)
err.lineno = lineno
err.column = column
err.message = msg
raise err
def p_error(self, p):
err = self.exception()
msg = 'syntax error'
if self.m_fname:
err.fname = self.m_fname
msg += ' in file %s' % self.m_fname
lineno, column = self._position(p)
if lineno is not None and column is not None:
msg += ' at %d:%d' % (lineno, column)
err.lineno = lineno
err.column = column
err.message = msg
raise err
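# Hedged illustration (not part of the original module): a minimal Parser subclass in the
# usual PLY style. The token names and grammar below are invented for this example; real
# subclasses supply their own t_* lexer rules and p_* grammar rules.
# e.g. _ExampleListParser().parse('1, 2, 3') is expected to return [1, 2, 3]
class _ExampleListParser(Parser):
    tokens = ('NUMBER', 'COMMA')
    t_COMMA = r','
    t_ignore = ' \t\n'
    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t
    def p_list(self, p):
        """list : list COMMA NUMBER
                | NUMBER"""
        if len(p) == 2:
            p[0] = [p[1]]
        else:
            p[0] = p[1] + [p[3]]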
| 31.609195
| 73
| 0.572364
|
74df37b1bf91a8ee28820c319af2f4288426bca4
| 3,033
|
py
|
Python
|
pydepend/project.py
|
herczy/pydepend
|
ee64ba30efc3e19d643da2ed22e078ef4a06795d
|
[
"BSD-3-Clause"
] | 2
|
2019-04-21T06:10:09.000Z
|
2020-04-24T23:12:02.000Z
|
pydepend/project.py
|
herczy/pydepend
|
ee64ba30efc3e19d643da2ed22e078ef4a06795d
|
[
"BSD-3-Clause"
] | null | null | null |
pydepend/project.py
|
herczy/pydepend
|
ee64ba30efc3e19d643da2ed22e078ef4a06795d
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import ast
import os.path
import imp
from .collector import Collector
from .report import Result, ResultCollection
class Project(object):
def __scan_module(self, fq_name, path=None):
modpath = self.__resolve_module_path(fq_name, path=path)
return self.__scan_path(modpath)
def __resolve_module_path(self, name, path=None):
if not isinstance(name, tuple):
name = tuple(name.split('.'))
assert isinstance(name, tuple)
assert len(name) > 0
path = path or self.__path
modfile, pathname, description = imp.find_module(name[0], path)
if modfile:
modfile.close()
submodule = name[1:]
if not submodule:
return pathname
return self.__resolve_module_path(submodule, path=[pathname])
def __scan_path(self, path):
module_name = self.__get_module_name(path)
if module_name is not None:
if os.path.isfile(path):
yield module_name, path
elif os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py')):
yield module_name, path
for name in os.listdir(path):
if name.startswith('__init__.'):
continue
fullpath = os.path.join(path, name)
for res in self.__scan_path(fullpath):
yield res
def __get_module_name(self, path):
if os.path.isdir(path):
if not os.path.isfile(os.path.join(path, '__init__.py')):
return ()
dirname, basename = os.path.split(path)
return self.__get_module_name(dirname) + (basename,)
if path.endswith('.py'):
dirname, basename = os.path.split(path)
return self.__get_module_name(dirname) + (basename[:-3],)
def __init__(self, path=None):
self.__path = list(path or sys.path)
self.__modules = {}
self.__metrics = []
self.__report = None
def add_package(self, fq_name):
self.__modules.update(('.'.join(module), path) for module, path in self.__scan_module(fq_name))
@property
def modules(self):
return frozenset(self.__modules)
@property
def path(self):
return tuple(self.__path)
def get_module_node(self, name):
return Collector.collect_from_file(self.__modules[name])
def add_metric(self, metric):
self.__metrics.append(metric)
@property
def metrics(self):
return tuple(self.__metrics)
def set_report(self, report):
self.__report = report
def report(self, stream=sys.stdout):
results = ResultCollection()
for name in self.__modules:
metrics = {}
for metric in self.__metrics:
metrics[metric.get_metric_name()] = metric.calculate(self.get_module_node(name))
results.add(Result(name, metrics))
stream.write(self.__report.report(results))
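# Hedged usage sketch (not part of the original module). The metric and report classes
# below are invented stand-ins that only follow the interface Project.report() relies on
# (get_metric_name()/calculate() for metrics, report() for the report object).
class _ConstantMetric(object):
    def get_metric_name(self):
        return 'constant'
    def calculate(self, node):
        return 1
class _PlainReport(object):
    def report(self, results):
        return 'report generated\n'
def _example_usage(package='pydepend'):
    project = Project()
    project.add_package(package)
    project.add_metric(_ConstantMetric())
    project.set_report(_PlainReport())
    project.report()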
| 29.446602
| 103
| 0.602704
|
1789c1a00a995b1c83b2de352f77c248412aece0
| 34,212
|
py
|
Python
|
Tests/MDXUtils.py
|
harveyca307/tm1py-1
|
d12a9bc02e0c543d61fcda9e367490c5388bd1fb
|
[
"MIT"
] | null | null | null |
Tests/MDXUtils.py
|
harveyca307/tm1py-1
|
d12a9bc02e0c543d61fcda9e367490c5388bd1fb
|
[
"MIT"
] | null | null | null |
Tests/MDXUtils.py
|
harveyca307/tm1py-1
|
d12a9bc02e0c543d61fcda9e367490c5388bd1fb
|
[
"MIT"
] | null | null | null |
import configparser
import json
import random
import unittest
from pathlib import Path
from TM1py import Subset
from TM1py.Objects import Cube, Dimension, Hierarchy, MDXView
from TM1py.Services import TM1Service
from TM1py.Utils import MDXUtils, Utils, format_url
from TM1py.Utils.MDXUtils import (
DimensionSelection,
_find_case_and_space_insensitive_first_occurrence,
read_dimension_composition_from_mdx,
read_dimension_composition_from_mdx_set,
read_dimension_composition_from_mdx_set_or_tuple,
read_dimension_composition_from_mdx_tuple,
split_mdx,
)
from TM1py.Utils.Utils import dimension_hierarchy_element_tuple_from_unique_name
from .TestUtils import skip_if_no_pandas
try:
import pandas as pd
except ImportError:
pass
PREFIX = "TM1py_Tests_Utils_"
MDX_TEMPLATE = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
WHERE {where}
"""
MDX_TEMPLATE_SHORT = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
"""
class TestMDXUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Establishes a connection to TM1 and creates TM1 objects to use across all tests
"""
# Connection to TM1
cls.config = configparser.ConfigParser()
cls.config.read(Path(__file__).parent.joinpath("config.ini"))
cls.tm1 = TM1Service(**cls.config["tm1srv01"])
# Build 4 Dimensions
cls.dim1_name = PREFIX + "Dimension1"
cls.dim1_element_names = ["A " + str(i) for i in range(10)]
cls.dim1 = Dimension(cls.dim1_name)
h = Hierarchy(cls.dim1_name, cls.dim1_name)
for element_name in cls.dim1_element_names:
h.add_element(element_name, "Numeric")
cls.dim1.add_hierarchy(h)
cls.dim2_name = PREFIX + "Dimension2"
cls.dim2_element_names = ["B " + str(i) for i in range(10)]
cls.dim2 = Dimension(cls.dim2_name)
h = Hierarchy(cls.dim2_name, cls.dim2_name)
for element_name in cls.dim2_element_names:
h.add_element(element_name, "Numeric")
cls.dim2.add_hierarchy(h)
cls.dim3_name = PREFIX + "Dimension3"
cls.dim3_element_names = ["C " + str(i) for i in range(10)]
cls.dim3 = Dimension(cls.dim3_name)
h = Hierarchy(cls.dim3_name, cls.dim3_name)
for element_name in cls.dim3_element_names:
h.add_element(element_name, "Numeric")
cls.dim3.add_hierarchy(h)
cls.dim4_name = PREFIX + "Dimension4"
cls.dim4_element_names = ["D " + str(i) for i in range(10)]
cls.dim4 = Dimension(cls.dim4_name)
h = Hierarchy(cls.dim4_name, cls.dim4_name)
for element_name in cls.dim4_element_names:
h.add_element(element_name, "Numeric")
cls.dim4.add_hierarchy(h)
# Define cube with 4 dimensions
cls.cube_name = PREFIX + "Cube"
cls.cube = Cube(
name=cls.cube_name,
dimensions=[cls.dim1_name, cls.dim2_name, cls.dim3_name, cls.dim4_name],
)
def setUp(self):
if self.tm1.cubes.exists(self.cube_name):
self.tm1.cubes.delete(self.cube_name)
for dimension in (self.dim1, self.dim2, self.dim3, self.dim4):
if self.tm1.dimensions.exists(dimension.name):
self.tm1.dimensions.delete(dimension.name)
self.tm1.dimensions.create(dimension)
self.tm1.cubes.create(self.cube)
# Build Subset
self.dim4_subset_Name = PREFIX + "Subset"
self.tm1.dimensions.subsets.create(
Subset(
subset_name=self.dim4_subset_Name,
dimension_name=self.dim4_name,
hierarchy_name=self.dim4_name,
expression="HEAD([{}].Members, 1)".format(self.dim4_name),
)
)
def tearDown(self):
self.tm1.cubes.delete(self.cube_name)
self.tm1.dimensions.delete(self.dim1_name)
self.tm1.dimensions.delete(self.dim2_name)
self.tm1.dimensions.delete(self.dim3_name)
self.tm1.dimensions.delete(self.dim4_name)
def test_construct_mdx(self):
rows = [
DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(
dimension_name=self.dim2_name, elements=self.dim2_element_names
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name),
)
]
contexts = {self.dim4_name: self.dim4_element_names[0]}
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name,
rows=rows,
columns=columns,
contexts=contexts,
suppress=None,
)
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertEqual(number_cells, 1000)
def test_construct_mdx_no_titles(self):
rows = [
DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(
dimension_name=self.dim2_name, elements=self.dim2_element_names
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name),
),
DimensionSelection(
dimension_name=self.dim4_name, subset=self.dim4_subset_Name
),
]
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name, rows=rows, columns=columns, suppress=None
)
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertEqual(number_cells, 1000)
def test_construct_mdx_suppress_zeroes(self):
rows = [
DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(
dimension_name=self.dim2_name, elements=self.dim2_element_names
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name),
),
DimensionSelection(
dimension_name=self.dim4_name, subset=self.dim4_subset_Name
),
]
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name, rows=rows, columns=columns, suppress="BOTH"
)
content = self.tm1.cubes.cells.execute_mdx(mdx)
number_cells = len(content.keys())
self.assertLess(number_cells, 1000)
def test_determine_selection_type(self):
self.assertEqual(
DimensionSelection.determine_selection_type(
elements=["e1", "e2"], subset=None, expression=None
),
DimensionSelection.ITERABLE,
)
self.assertEqual(
DimensionSelection.determine_selection_type(["e1", "e2"]),
DimensionSelection.ITERABLE,
)
self.assertEqual(
DimensionSelection.determine_selection_type(
elements=None, subset="something", expression=None
),
DimensionSelection.SUBSET,
)
self.assertEqual(
DimensionSelection.determine_selection_type(None, "something", None),
DimensionSelection.SUBSET,
)
self.assertEqual(
DimensionSelection.determine_selection_type(
elements=None, subset=None, expression="{[d1].[e1]}"
),
DimensionSelection.EXPRESSION,
)
self.assertEqual(
DimensionSelection.determine_selection_type(None, None, "{[d1].[e1]}"),
DimensionSelection.EXPRESSION,
)
self.assertEqual(
DimensionSelection.determine_selection_type(
elements=None, subset=None, expression=None
),
None,
)
self.assertEqual(
DimensionSelection.determine_selection_type(None, None, None), None
)
self.assertEqual(DimensionSelection.determine_selection_type(), None)
self.assertRaises(
ValueError,
DimensionSelection.determine_selection_type,
["e2"],
"subset1",
"{[d1].[e1]}",
)
self.assertRaises(
ValueError, DimensionSelection.determine_selection_type, ["e2"], "subset1"
)
self.assertRaises(
ValueError,
DimensionSelection.determine_selection_type,
["e2"],
None,
"subset1",
)
def test_curly_braces(self):
self.assertEqual(MDXUtils.curly_braces("something"), "{something}")
self.assertEqual(MDXUtils.curly_braces("something}"), "{something}")
self.assertEqual(MDXUtils.curly_braces("{something"), "{something}")
self.assertEqual(MDXUtils.curly_braces("{something}"), "{something}")
def test_build_element_unique_names_without_hierarchies(self):
dimension_names = ["dim1", "dim1"]
element_names = ["elem1", "elem2"]
gen = Utils.build_element_unique_names(
dimension_names=dimension_names, element_names=element_names
)
element_unique_names = list(gen)
self.assertEqual(len(element_unique_names), 2)
self.assertTrue("[dim1].[elem1]" in element_unique_names)
self.assertTrue("[dim1].[elem2]" in element_unique_names)
def test_build_element_unique_names_with_hierarchies(self):
dimension_names = ["dim1", "dim1", "dim1"]
hierarchy_names = ["hier1", "hier2", "hier3"]
element_names = ["elem1", "elem2", "elem3"]
gen = Utils.build_element_unique_names(
dimension_names=dimension_names,
hierarchy_names=hierarchy_names,
element_names=element_names,
)
element_unique_names = list(gen)
self.assertEqual(len(element_unique_names), 3)
self.assertTrue("[dim1].[hier1].[elem1]" in element_unique_names)
self.assertTrue("[dim1].[hier2].[elem2]" in element_unique_names)
self.assertTrue("[dim1].[hier3].[elem3]" in element_unique_names)
@skip_if_no_pandas
def test_build_pandas_multiindex_dataframe_from_cellset(self):
rows = [
DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(
dimension_name=self.dim2_name, elements=self.dim2_element_names
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name),
),
DimensionSelection(
dimension_name=self.dim4_name, subset=self.dim4_subset_Name
),
]
suppress = None
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name, rows=rows, columns=columns, suppress=suppress
)
cellset = self.tm1.cubes.cells.execute_mdx(mdx)
df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertTrue(df.shape[0] == 1000)
self.assertTrue(df.shape[1] == 1)
cellset = Utils.build_cellset_from_pandas_dataframe(df)
self.assertTrue(len(cellset.keys()) == 1000)
self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
@skip_if_no_pandas
def test_build_pandas_dataframe_from_cellset(self):
rows = [
DimensionSelection(dimension_name=self.dim1_name),
DimensionSelection(
dimension_name=self.dim2_name, elements=self.dim2_element_names
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name,
expression="TM1SubsetAll([{}])".format(self.dim3_name),
),
DimensionSelection(
dimension_name=self.dim4_name, subset=self.dim4_subset_Name
),
]
suppress = None
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name, rows=rows, columns=columns, suppress=suppress
)
cellset = self.tm1.cubes.cells.execute_mdx(mdx)
df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=False)
self.assertTrue(df.shape[0] == 1000)
# cater for potential Sandboxes dimension on first position
if df.columns[0] == "Sandboxes":
self.assertTrue(df.shape[1] == 6)
else:
self.assertTrue(df.shape[1] == 5)
self.assertIsInstance(df, pd.DataFrame)
cellset = Utils.build_cellset_from_pandas_dataframe(df)
self.assertTrue(len(cellset.keys()) == 1000)
self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
@skip_if_no_pandas
def test_build_pandas_dataframe_empty_cellset(self):
self.tm1.cubes.cells.write_value(
value=0,
cube_name=self.cube_name,
element_tuple=(
self.dim1_element_names[0],
self.dim2_element_names[0],
self.dim3_element_names[0],
self.dim4_element_names[0],
),
dimensions=(self.dim1_name, self.dim2_name, self.dim3_name, self.dim4_name),
)
rows = [
DimensionSelection(
dimension_name=self.dim1_name, elements=(self.dim1_element_names[0],)
),
DimensionSelection(
dimension_name=self.dim2_name, elements=(self.dim2_element_names[0],)
),
]
columns = [
DimensionSelection(
dimension_name=self.dim3_name, elements=(self.dim3_element_names[0],)
),
DimensionSelection(
dimension_name=self.dim4_name, elements=(self.dim4_element_names[0],)
),
]
suppress = "Both"
mdx = MDXUtils.construct_mdx(
cube_name=self.cube_name, rows=rows, columns=columns, suppress=suppress
)
empty_cellset = self.tm1.cubes.cells.execute_mdx(mdx)
self.assertRaises(
ValueError, Utils.build_pandas_dataframe_from_cellset, empty_cellset, True
)
self.assertRaises(
ValueError, Utils.build_pandas_dataframe_from_cellset, empty_cellset, False
)
@unittest.skip("Not deterministic. Needs improvement.")
def test_read_cube_name_from_mdx(self):
all_cube_names = self.tm1.cubes.get_all_names()
for cube_name in all_cube_names:
private_views, public_views = self.tm1.cubes.views.get_all(cube_name)
for view in private_views + public_views:
mdx = view.MDX
self.assertEqual(
cube_name.upper().replace(" ", ""),
MDXUtils.read_cube_name_from_mdx(mdx),
)
def test_dimension_hierarchy_element_tuple_from_unique_name(self):
unique_element_name = "[d1].[e1]"
(
dimension,
hierarchy,
element,
) = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "d1")
self.assertEqual(element, "e1")
unique_element_name = "[d1].[d1].[e1]"
(
dimension,
hierarchy,
element,
) = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "d1")
self.assertEqual(element, "e1")
unique_element_name = "[d1].[leaves].[e1]"
(
dimension,
hierarchy,
element,
) = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
self.assertEqual(dimension, "d1")
self.assertEqual(hierarchy, "leaves")
self.assertEqual(element, "e1")
def test_read_dimension_composition_from_mdx_simple1(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(
self.dim1_name, self.dim2_name
),
columns="{{ [{}].MEMBERS }}".format(self.dim3_name),
cube="[{}]".format(self.cube_name),
where="([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0]),
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple2(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(
self.dim1_name, self.dim2_name
),
cube="[{}]".format(self.cube_name),
where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0]),
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim3_name])
self.assertEqual(columns, [self.dim1_name, self.dim2_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple3(self):
mdx = MDX_TEMPLATE.format(
rows="{[" + self.dim3_name + "].MEMBERS}",
columns="{[" + self.dim1_name + "].MEMBERS}",
cube="[{}]".format(self.cube_name),
where="([{}].[{}], [{}].[{}])".format(
self.dim4_name,
self.dim4_element_names[0],
self.dim2_name,
self.dim2_element_names[0],
),
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim3_name])
self.assertEqual(columns, [self.dim1_name])
self.assertEqual(titles, [self.dim4_name, self.dim2_name])
def test_read_dimension_composition_from_mdx_without_titles(self):
mdx = MDX_TEMPLATE_SHORT.format(
rows="{["
+ self.dim1_name
+ "].MEMBERS} * {["
+ self.dim2_name
+ "].MEMBERS}",
columns="{["
+ self.dim3_name
+ "].MEMBERS} * {["
+ self.dim4_name
+ "].MEMBERS}",
cube="[{}]".format(self.cube_name),
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name, self.dim4_name])
def test_read_dimension_composition_from_mdx_asynchronous_single(self):
mdx = MDX_TEMPLATE.format(
rows="{(["
+ self.dim1_name
+ "].["
+ self.dim1_element_names[0]
+ "], ["
+ self.dim2_name
+ "].["
+ self.dim2_element_names[0]
+ "])}",
columns="{[" + self.dim3_name + "].MEMBERS}",
cube="[{}]".format(self.cube_name),
where="([" + self.dim4_name + "].[" + self.dim4_element_names[0] + "])",
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name])
self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_asynchronous_multi(self):
mdx = MDX_TEMPLATE_SHORT.format(
rows="{(["
+ self.dim1_name
+ "].["
+ self.dim1_element_names[0]
+ "], ["
+ self.dim2_name
+ "].["
+ self.dim2_element_names[0]
+ "]),(["
+ self.dim1_name
+ "].["
+ self.dim1_element_names[1]
+ "], ["
+ self.dim2_name
+ "].["
+ self.dim2_element_names[1]
+ "]) }",
columns="{(["
+ self.dim3_name
+ "].["
+ self.dim3_element_names[0]
+ "], ["
+ self.dim4_name
+ "].["
+ self.dim4_element_names[0]
+ "]),(["
+ self.dim3_name
+ "].["
+ self.dim3_element_names[1]
+ "], ["
+ self.dim4_name
+ "].["
+ self.dim4_element_names[1]
+ "]) }",
cube="[{}]".format(self.cube_name),
)
cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
self.assertEqual(cube, self.cube_name)
self.assertEqual(rows, [self.dim1_name, self.dim2_name])
self.assertEqual(columns, [self.dim3_name, self.dim4_name])
self.assertEqual(titles, [])
def test_read_dimension_composition_from_mdx_set_or_tuple(self):
mdx_set = "{[dim1].[element1]} * {[dim2].[element2]}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_set = "{[dim1].[element1], [dim1].[element2]}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{[dim1].Members}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{Tm1SubsetAll([dim1])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_tuple = "{([dim1].[element1])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}"
dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
def test_read_dimension_composition_from_mdx_set(self):
mdx_set = "{[dim1].[element1]} * {[dim2].[element2]}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_set = "{[dim1].[element1], [dim1].[element2]}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{[dim1].Members}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
mdx_set = "{Tm1SubsetAll([dim1])}"
dimensions = read_dimension_composition_from_mdx_set(mdx_set)
self.assertEqual(dimensions, ["dim1"])
def test_read_dimension_composition_from_mdx_tuple(self):
mdx_tuple = "{([dim1].[element1], [dim2].[element2])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
mdx_tuple = "{([dim1].[element1])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1"])
mdx_tuple = "{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}"
dimensions = read_dimension_composition_from_mdx_tuple(mdx_tuple)
self.assertEqual(dimensions, ["dim1", "dim2"])
def test_split_mdx_sets(self):
rows = "{{ [{dim1}].[elem1] , [{dim2}].[{elem2}] }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
)
columns = "{{ [{}].MEMBERS }}".format(self.dim3_name)
cube = "[{}]".format(self.cube_name)
where = "([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0])
mdx = MDX_TEMPLATE.format(rows=rows, columns=columns, cube=cube, where=where)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_tuples_without_where(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1],
)
columns = "{{([{dim3}].[{elem1}], [{dim4}].[{elem2}])}}".format(
dim3=self.dim3_name,
elem1=self.dim3_element_names[0],
dim4=self.dim4_name,
elem2=self.dim4_element_names[0],
)
cube = "[{}]".format(self.cube_name)
mdx = MDX_TEMPLATE_SHORT.format(rows=rows, columns=columns, cube=cube)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
def test_split_mdx_tuples_with_where(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1],
)
columns = "{{ ( [{dim3}].[{elem1}] ) }}".format(
dim3=self.dim3_name, elem1=self.dim3_element_names[0]
)
cube = "[{}]".format(self.cube_name)
where = "( [{dim4}].[{elem1}] )".format(
dim4=self.dim4_name, elem1=self.dim4_element_names[0]
)
mdx = MDX_TEMPLATE.format(rows=rows, columns=columns, cube=cube, where=where)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_sets_and_tuples(self):
rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
dim1=self.dim1_name,
elem1=self.dim1_element_names[0],
dim2=self.dim2_name,
elem2=self.dim2_element_names[0],
elem3=self.dim2_element_names[1],
elem4=self.dim2_element_names[1],
)
columns = "{{ Tm1SubsetAll ( [{dim3}] ) }}".format(
dim3=self.dim3_name, elem1=self.dim3_element_names[0]
)
cube = "[{}]".format(self.cube_name)
where = "( [{dim4}].[{elem2}] )".format(
dim4=self.dim4_name, elem2=self.dim4_element_names[0]
)
mdx = MDX_TEMPLATE.format(rows=rows, columns=columns, cube=cube, where=where)
mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
self.assertEqual(rows.replace(" ", ""), mdx_rows)
self.assertEqual(columns.replace(" ", ""), mdx_columns)
self.assertEqual(cube.replace(" ", ""), mdx_from)
self.assertEqual(where.replace(" ", ""), mdx_where)
def test_find_case_and_space_insensitive_first_occurrence(self):
mdx = MDX_TEMPLATE.format(
rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(
self.dim1_name, self.dim2_name
),
cube="[{}]".format(self.cube_name),
where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0]),
)
selection, rest = _find_case_and_space_insensitive_first_occurrence(
text=mdx, pattern_start="ROWS,", pattern_end="}ON COLUMNS"
)
self.assertEqual(
"ROWS,{[TM1py_Tests_Utils_Dimension1].MEMBERS}*{[TM1py_Tests_Utils_Dimension2].MEMBERS}",
selection,
)
self.assertEqual(
"FROM[TM1py_Tests_Utils_Cube]WHERE([TM1py_Tests_Utils_Dimension4].[D0])",
rest,
)
def test_extract_unique_name_from_members(self):
members = [
{
"UniqueName": "[Dimension3].[Dimension3].[Element 592]",
"Element": {"UniqueName": "[Dimension3].[Dimension3].[Element 592]"},
}
]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
["[Dimension3].[Dimension3].[Element 592]"],
)
members = [
{
"UniqueName": "[Dimension1].[Dimension1].[Element 790]",
"Element": {"UniqueName": "[Dimension1].[Dimension1].[Element 790]"},
},
{
"UniqueName": "[Dimension2].[Dimension2].[Element 541]",
"Element": {"UniqueName": "[Dimension2].[Dimension2].[Element 541]"},
},
]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
[
"[Dimension1].[Dimension1].[Element 790]",
"[Dimension2].[Dimension2].[Element 541]",
],
)
members = [
{
"UniqueName": "",
"Element": {"UniqueName": "[Dimension1].[Dimension1].[Element 790]"},
},
{
"UniqueName": "",
"Element": {"UniqueName": "[Dimension2].[Dimension2].[Element 541]"},
},
]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
[
"[Dimension1].[Dimension1].[Element 790]",
"[Dimension2].[Dimension2].[Element 541]",
],
)
members = [
{"UniqueName": "[Dimension1].[Dimension1].[Element 790]", "Element": None},
{"UniqueName": "[Dimension2].[Dimension2].[Element 541]", "Element": None},
]
self.assertEqual(
Utils.extract_unique_names_from_members(members),
[
"[Dimension1].[Dimension1].[Element 790]",
"[Dimension2].[Dimension2].[Element 541]",
],
)
def test_extract_axes_from_cellset(self):
with open(
Path(__file__).parent.joinpath("resources", "raw_cellset.json")
) as file:
raw_cellset_as_dict = json.load(file)
row_axis, column_axis, title_axis = Utils.extract_axes_from_cellset(
raw_cellset_as_dict=raw_cellset_as_dict
)
self.assertIn("[City].[City].[NYC]", json.dumps(row_axis))
self.assertIn("[City].[City].[Chicago]", json.dumps(row_axis))
self.assertIn("[Date].[Date].[2017-11-26]", json.dumps(column_axis))
self.assertIn("[Date].[Date].[2017-11-27]", json.dumps(column_axis))
self.assertIn("[Version].[Version].[Actual]", json.dumps(title_axis))
@unittest.skip("Not deterministic. Needs improvement.")
def test_mdx_from_cubeview(self):
cube_names = self.tm1.cubes.get_all_names()
cube_name = cube_names[random.randrange(0, len(cube_names))]
_, public_views = self.tm1.cubes.views.get_all(cube_name=cube_name)
        # if there are no views on this cube, recurse and try another cube
if len(public_views) == 0:
self.test_mdx_from_cubeview()
else:
# random public view on random cube
view = public_views[random.randrange(0, len(public_views))]
            # if the randomly chosen view is an MDXView, recurse and try again
if isinstance(view, MDXView):
self.test_mdx_from_cubeview()
else:
                # if the native view has no dimensions on the columns, recurse and try again
if len(view._columns) == 0:
self.test_mdx_from_cubeview()
else:
# sum up all numeric cells in Native View
data_native_view = self.tm1.cubes.cells.get_view_content(
cube_name, view.name, private=False
)
sum_native_view = sum(
[
float(cell["Value"])
for cell in data_native_view.values()
if str(cell["Value"]).isdigit()
]
)
# get mdx from native view
mdx = view.as_MDX
# sum up all numeric cells in the response of the mdx query
data_mdx = self.tm1.cubes.cells.execute_mdx(mdx)
sum_mdx = sum(
[
float(cell["Value"])
for cell in data_mdx.values()
if str(cell["Value"]).isdigit()
]
)
# test it !
self.assertEqual(sum_mdx, sum_native_view)
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
if __name__ == "__main__":
unittest.main()
| 38.965831
| 118
| 0.584853
|
1e2367c0855442606768ad594b4ecd7bee790509
| 2,433
|
py
|
Python
|
storyboard/db/migration/alembic_migrations/versions/057_allow_stories_and_tasks_to_be_made_.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/db/migration/alembic_migrations/versions/057_allow_stories_and_tasks_to_be_made_.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/db/migration/alembic_migrations/versions/057_allow_stories_and_tasks_to_be_made_.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Codethink Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Allow stories and tasks to be made private
This also allows worklist_items.list_id to be NULL, to facilitate
filtering of the relationship between worklists and their items
using dynamic loading.
This is needed because moving items temporarily causes them to be in
no list.
Revision ID: 057
Revises: 056
Create Date: 2016-04-27 15:45:51.646556
"""
# revision identifiers, used by Alembic.
revision = '057'
down_revision = '056'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(active_plugins=None, options=None):
op.create_table(
'story_permissions',
sa.Column('story_id', sa.Integer(), nullable=True),
sa.Column('permission_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),
sa.ForeignKeyConstraint(['story_id'], ['stories.id'], )
)
dialect = op.get_bind().engine.dialect
if dialect.name == 'sqlite':
col = sa.Column('private', sa.Boolean(), default=False)
else:
col = sa.Column('private', sa.Boolean(), default=False, nullable=False)
op.add_column(u'stories', col)
if dialect.supports_alter:
op.alter_column('worklist_items', 'list_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True)
def downgrade(active_plugins=None, options=None):
op.drop_constraint(
u'worklist_items_ibfk_1', 'worklist_items', type_='foreignkey')
op.alter_column('worklist_items', 'list_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False)
op.create_foreign_key(u'worklist_items_ibfk_1', 'worklist_items',
'worklists', ['list_id'], ['id'])
op.drop_column(u'stories', 'private')
op.drop_table('story_permissions')
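# Rough illustration (an assumption for documentation, not emitted verbatim by
# Alembic): on a MySQL backend the alter_column call in upgrade() corresponds to
# something like
#   ALTER TABLE worklist_items MODIFY list_id INTEGER(11) NULL;
# Dialects without ALTER support skip that step, as guarded by
# dialect.supports_alter above.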
| 34.267606
| 79
| 0.695849
|
2c92819f76bddaca52c85ab177e1e234291fd93c
| 7,061
|
py
|
Python
|
fink_broker/slackUtils.py
|
astrolabsoftware/fink-source
|
9003e933e1d8c3f03b70c2b277638de97be102ec
|
[
"Apache-2.0"
] | 17
|
2019-03-08T12:37:06.000Z
|
2022-02-01T18:02:07.000Z
|
fink_broker/slackUtils.py
|
astrolabsoftware/fink-source
|
9003e933e1d8c3f03b70c2b277638de97be102ec
|
[
"Apache-2.0"
] | 559
|
2019-03-07T14:55:27.000Z
|
2022-03-11T20:13:12.000Z
|
fink_broker/slackUtils.py
|
tallamjr/fink-broker
|
97753ff695b78ea52d084cac787dec6c52c4e4cc
|
[
"Apache-2.0"
] | 12
|
2019-03-08T13:04:38.000Z
|
2022-01-23T22:22:50.000Z
|
#!/usr/bin/env python3
# Copyright 2019 AstroLab Software
# Author: Abhishek Chauhan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import slack
from fink_broker.tester import spark_unit_tests
from pyspark.sql import DataFrame
from fink_broker.loggingUtils import get_fink_logger
logger = get_fink_logger(__name__, "INFO")
class FinkSlackClient:
def __init__(self, api_token):
self._client = slack.WebClient(token=api_token)
try:
self._client.auth_test()
except Exception:
logger.error("Authentication Error: Invalid Token")
# create a dict of {channelName: ID}
channels = self._client.channels_list()['channels']
self._channel_ids = {
x['name']: x['id'] for x in channels if 'name' in x.keys()}
# create a dict of {userName: ID}
members = self._client.users_list()['members']
self._user_ids = {
x['real_name']: x['id'] for x in members if 'real_name' in x.keys()}
def send_message(self, recipient, msg):
"""sends a message to a given channel/user on the slack workspace
Parameters
----------
recipient: str
name of recipient e.g. a channel: '#general'
or a user: 'Abhishek Chauhan'
msg: str
message payload to send
"""
# if recipient is a channel e.g. #general
if recipient[0] == '#':
name = recipient[1:]
if name not in self._channel_ids:
logger.warn("Private or Invalid Channel Name")
else: # user
name = recipient
if recipient not in self._user_ids:
logger.error("User is not member of your slack workspace")
return
self._client.chat_postMessage(
channel=name, text=msg, as_user="false",
username="fink-alert", icon_emoji="strend:")
def get_api_token():
"""returns slack api token
Returns
----------
api_token: str
value of the env variable SLACK_API_TOKEN if set, or None
"""
api_token = None
try:
api_token = os.environ["SLACK_API_TOKEN"]
except KeyError:
logger.error("SLACK_API_TOKEN is not set")
return api_token
def get_slack_client():
""" returns an object of class FinkSlackClient
Returns
----------
FinkSlackClient:
an object of class FinkSlackClient initialized with OAuth token
"""
api_token = get_api_token()
if api_token:
return FinkSlackClient(api_token)
else:
logger.error("please set the env variable: SLACK_API_TOKEN")
def get_show_string(
df: DataFrame, n: int = 20,
truncate: int = 0, vertical: bool = False) -> str:
"""returns the string printed by df.show()
Parameters
----------
df: DataFrame
a spark dataframe
n: int
number of rows to print
truncate: int
truncate level for columns, default: 0 means no truncation
vertical: bool
set true to get output in vertical format (not tabular)
Returns
----------
showString: str
string printed by DataFrame.show()
Examples
----------
>>> df = spark.sparkContext.parallelize(zip(
... ["ZTF18aceatkx", "ZTF18acsbjvw"],
... ["Star", "Unknown"])).toDF([
... "objectId", "cross_match_alerts_per_batch"])
>>> msg_string = get_show_string(df)
>>> print(msg_string)
+------------+----------------------------+
|objectId |cross_match_alerts_per_batch|
+------------+----------------------------+
|ZTF18aceatkx|Star |
|ZTF18acsbjvw|Unknown |
+------------+----------------------------+
<BLANKLINE>
"""
    return df._jdf.showString(n, truncate, vertical)
def send_slack_alerts(df: DataFrame, channels: str):
"""Send alerts to slack channel
Parameters
----------
df: DataFrame
spark dataframe to send slack alerts
channels: str
path to file with list of channels to which alerts
must be sent
Examples
----------
>>> df = spark.sparkContext.parallelize(zip(
... ["ZTF18aceatkx", "ZTF18acsbjvw"],
... [697251923115015002, 697251921215010004],
... [20.393772, 20.4233877],
... [-25.4669463, -27.0588511],
... ["slacktest", "Unknown"])).toDF([
... "objectId", "candid", "candidate_ra",
... "candidate_dec", "cross_match_alerts_per_batch"])
>>> df.show()
+------------+------------------+------------+-------------+----------------------------+
| objectId| candid|candidate_ra|candidate_dec|cross_match_alerts_per_batch|
+------------+------------------+------------+-------------+----------------------------+
|ZTF18aceatkx|697251923115015002| 20.393772| -25.4669463| slacktest|
|ZTF18acsbjvw|697251921215010004| 20.4233877| -27.0588511| Unknown|
+------------+------------------+------------+-------------+----------------------------+
<BLANKLINE>
>>> channels = "slacktest_channel.txt"
>>> with open(channels, 'wt') as f:
... f.write("slacktest")
9
>>> send_slack_alerts(df, channels)
>>> os.remove(channels)
"""
channels_list = []
with open(channels, 'rt') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#'):
channels_list.append('#' + line)
finkSlack = get_slack_client()
# filter out unknown object types
df = df.filter("cross_match_alerts_per_batch!='Unknown'")
object_types = df \
.select("cross_match_alerts_per_batch")\
.distinct()\
.collect()
object_types = [x[0] for x in object_types]
# Send alerts to the respective channels
for obj in object_types:
channel_name = '#' + ''.join(e.lower() for e in obj if e.isalpha())
if channel_name in channels_list:
alert_text = get_show_string(
df.filter(df.cross_match_alerts_per_batch == obj))
slack_alert = "```\n" + alert_text + "```"
finkSlack.send_message(channel_name, slack_alert)
if __name__ == "__main__":
""" Execute the test suite with SparkSession initialised """
# Run the Spark test suite if SLACK_API_TOKEN exist
api_token = get_api_token()
if api_token:
spark_unit_tests(globals())
else:
logger.info("Skipping Unit Tests")
| 32.995327
| 93
| 0.574281
|
207d05e610cec336a68361bd58372d653fcda1eb
| 5,036
|
py
|
Python
|
src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
|
amabowilli/cfn-python-lint
|
21e37e096fdcd94507d0a520aff0f8c8230ee438
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
|
amabowilli/cfn-python-lint
|
21e37e096fdcd94507d0a520aff0f8c8230ee438
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py
|
amabowilli/cfn-python-lint
|
21e37e096fdcd94507d0a520aff0f8c8230ee438
|
[
"MIT-0"
] | null | null | null |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import hashlib
import json
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
from cfnlint.helpers import RESOURCE_SPECS
class ListDuplicatesAllowed(CloudFormationLintRule):
"""Check if duplicates exist in a List"""
id = 'I3037'
shortdesc = 'Check if a list that allows duplicates has any duplicates'
    description = 'Certain lists support duplicate items. ' \
                  'Provide an alert when lists of strings or numbers have repeats.'
source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/rules.md#rules-1'
tags = ['resources', 'property', 'list']
def initialize(self, cfn):
"""Initialize the rule"""
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
def _check_duplicates(self, values, path, scenario=None):
""" Check for Duplicates """
matches = []
list_items = []
if isinstance(values, list):
for index, value in enumerate(values):
value_hash = hashlib.sha1(json.dumps(value, sort_keys=True).encode('utf-8')).hexdigest()
if value_hash in list_items:
if not scenario:
message = 'List has a duplicate value at {0}'
matches.append(RuleMatch(path + [index], message.format('/'.join(map(str, path + [index])))))
else:
scenario_text = ' and '.join(['condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
message = 'List has a duplicate value at {0} when {1}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)), scenario_text)))
list_items.append(value_hash)
return matches
def check_duplicates(self, values, path, cfn):
""" Check for duplicates """
matches = []
if isinstance(values, list):
matches.extend(self._check_duplicates(values, path))
elif isinstance(values, dict):
props = cfn.get_object_without_conditions(values)
for prop in props:
matches.extend(self._check_duplicates(prop.get('Object'), path, prop.get('Scenario')))
return matches
def check(self, cfn, properties, value_specs, path):
"""Check itself"""
matches = list()
for p_value, p_path in properties.items_safe(path[:]):
for prop in p_value:
if prop in value_specs:
property_type = value_specs.get(prop).get('Type')
primitive_type = value_specs.get(prop).get('PrimitiveItemType')
duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', False)
if property_type == 'List' and duplicates_allowed and primitive_type in ['String', 'Integer']:
matches.extend(
self.check_duplicates(
p_value[prop], p_path + [prop], cfn
)
)
return matches
def match_resource_sub_properties(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
matches.extend(self.check(cfn, properties, specs, path))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
matches.extend(self.check(cfn, properties, specs, path))
return matches
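# Illustrative walk-through (documentation aid, not part of the rule): for a
# list-typed property whose value is, say,
#   ["tag-a", "tag-b", "tag-a"]
# _check_duplicates hashes each item with sha1(json.dumps(value, sort_keys=True))
# and reports a duplicate at index 2, because that hash was already recorded for
# index 0. Dict values are first expanded per condition scenario through
# cfn.get_object_without_conditions() in check_duplicates.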
| 45.781818
| 118
| 0.638006
|
2cb276d45b7b504501942403d5bfc45041d67a65
| 17,172
|
py
|
Python
|
cea/analysis/lca/embodied.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | null | null | null |
cea/analysis/lca/embodied.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | null | null | null |
cea/analysis/lca/embodied.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | null | null | null |
"""
Embodied energy and related grey emissions model algorithm
"""
from __future__ import division
import os
import numpy as np
import pandas as pd
from cea.datamanagement.data_helper import calc_mainuse
from cea.datamanagement.data_helper import calc_category
from cea.utilities.dbf import dbf_to_dataframe
from geopandas import GeoDataFrame as Gdf
import cea.globalvar
import cea.inputlocator
import cea.config
from cea.constants import SERVICE_LIFE_OF_BUILDINGS, SERVICE_LIFE_OF_TECHNICAL_SYSTEMS, CONVERSION_AREA_TO_FLOOR_AREA_RATIO
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Martin Mosteiro"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def lca_embodied(year_to_calculate, locator, config):
"""
Algorithm to calculate the embodied emissions and non-renewable primary energy of buildings according to the method
of [Fonseca et al., 2015] and [Thoma et al., 2014]. The calculation method assumes a 60 year payoff for the embodied
energy and emissions of a building, after which both values become zero.
The results are provided in total as well as per square meter:
- embodied non-renewable primary energy: E_nre_pen_GJ and E_nre_pen_MJm2
- embodied greenhouse gas emissions: E_ghg_ton and E_ghg_kgm2
As part of the algorithm, the following files are read from InputLocator:
- architecture.shp: shapefile with the architecture of each building
locator.get_building_architecture()
- occupancy.shp: shapefile with the occupancy types of each building
locator.get_building_occupancy()
- age.shp: shapefile with the age and retrofit date of each building
locator.get_building_age()
- zone.shp: shapefile with the geometry of each building in the zone of study
locator.get_zone_geometry()
- Archetypes_properties: csv file with the database of archetypes including embodied energy and emissions
locator.get_archetypes_properties()
As a result, the following file is created:
- Total_LCA_embodied: .csv
csv file of yearly primary energy and grey emissions per building stored in locator.get_lca_embodied()
:param year_to_calculate: year between 1900 and 2100 indicating when embodied energy is evaluated
to account for emissions already offset from building construction and retrofits more than 60 years ago.
:type year_to_calculate: int
:param locator: an instance of InputLocator set to the scenario
:type locator: InputLocator
:returns: This function does not return anything
:rtype: NoneType
.. [Fonseca et al., 2015] Fonseca et al. (2015) "Assessing the environmental impact of future urban developments at
neighborhood scale." CISBAT 2015.
.. [Thoma et al., 2014] Thoma et al. (2014). "Estimation of base-values for grey energy, primary energy, global
warming potential (GWP 100A) and Umweltbelastungspunkte (UBP 2006) for Swiss constructions from before 1920
until today." CUI 2014.
Files read / written from InputLocator:
get_building_architecture
get_building_occupancy
get_building_age
get_zone_geometry
get_archetypes_embodied_energy
get_archetypes_embodied_emissions
path_LCA_embodied_energy:
path to database of archetypes embodied energy file
Archetypes_embodied_energy.csv
path_LCA_embodied_emissions:
path to database of archetypes grey emissions file
Archetypes_embodied_emissions.csv
path_age_shp: string
path to building_age.shp
    path_occupancy_shp:
        path to building_occupancy.shp
    path_geometry_shp:
        path to building_geometry.shp
path_architecture_shp:
path to building_architecture.shp
path_results : string
path to demand results folder emissions
"""
# local variables
architecture_df = dbf_to_dataframe(locator.get_building_architecture())
prop_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
occupancy_df = pd.DataFrame(prop_occupancy_df.loc[:, (prop_occupancy_df != 0).any(axis=0)])
age_df = dbf_to_dataframe(locator.get_building_age())
geometry_df = Gdf.from_file(locator.get_zone_geometry())
geometry_df['footprint'] = geometry_df.area
geometry_df['perimeter'] = geometry_df.length
geometry_df = geometry_df.drop('geometry', axis=1)
# get list of uses
list_uses = list(occupancy_df.drop({'Name'}, axis=1).columns)
# define main use:
occupancy_df['mainuse'] = calc_mainuse(occupancy_df, list_uses)
# DataFrame with joined data for all categories
cat_df = occupancy_df.merge(age_df, on='Name').merge(geometry_df, on='Name').merge(architecture_df, on='Name')
# calculate building geometry
## total window area
average_wwr = [np.mean([a,b,c,d]) for a,b,c,d in zip(cat_df['wwr_south'],cat_df['wwr_north'],cat_df['wwr_west'],cat_df['wwr_east'])]
cat_df['windows_ag'] = average_wwr * cat_df['perimeter'] * (cat_df['height_ag'] * (1-cat_df['void_deck']))
## wall area above ground
cat_df['area_walls_ext_ag'] = cat_df['perimeter'] * (cat_df['height_ag'] * (1-cat_df['void_deck'])) - cat_df['windows_ag']
## wall area below ground
cat_df['area_walls_ext_bg'] = cat_df['perimeter'] * cat_df['height_bg']
## floor area above ground
cat_df['floor_area_ag'] = cat_df['footprint'] * cat_df['floors_ag']
## floor area below ground
cat_df['floor_area_bg'] = cat_df['footprint'] * cat_df['floors_bg']
## total floor area
cat_df['total_area'] = cat_df['floor_area_ag'] + cat_df['floor_area_bg']
# get categories for each year of construction/retrofit
## each building component gets categorized according to its occupancy type, construction year and retrofit year
## e.g., for an office building built in 1975, cat_df['cat_built'] = 'OFFICE3'
## e.g., for an office building with windows renovated in 1975, cat_df['cat_windows'] = 'OFFICE9'
# calculate contributions to embodied energy and emissions
## calculated by multiplying the area of the given component by the energy and emissions per square meter for the
## given category according to the data in the archetype database
result_energy = calculate_contributions('EMBODIED_ENERGY', cat_df, config, locator, year_to_calculate,
total_column='GEN_GJ', specific_column='GEN_MJm2')
result_emissions = calculate_contributions('EMBODIED_EMISSIONS', cat_df, config, locator, year_to_calculate,
total_column='CO2_ton', specific_column='CO2_kgm2')
# export the results for embodied emissions (E_ghg_) and non-renewable primary energy (E_nre_pen_) for each
# building, both total (in t CO2-eq. and GJ) and per square meter (in kg CO2-eq./m2 and MJ/m2)
fields_to_plot = ['Name', 'GFA_m2', 'E_nre_pen_GJ', 'E_nre_pen_MJm2', 'E_ghg_ton', 'E_ghg_kgm2']
pd.DataFrame(
{'Name': result_energy.Name, 'E_nre_pen_GJ': result_energy.GEN_GJ, 'E_nre_pen_MJm2': result_energy.GEN_MJm2,
'E_ghg_ton': result_emissions.CO2_ton, 'E_ghg_kgm2': result_emissions.CO2_kgm2,
'GFA_m2': result_energy.total_area}).to_csv(locator.get_lca_embodied(),
columns=fields_to_plot, index=False, float_format='%.2f')
print('done!')
def calculate_contributions(archetype, cat_df, config, locator, year_to_calculate, total_column, specific_column):
"""
Calculate the embodied energy/emissions for each building based on their construction year, and the area and
renovation year of each building component.
:param archetype: String that defines whether the 'EMBODIED_ENERGY' or 'EMBODIED_EMISSIONS' are being calculated.
:type archetype: str
:param cat_df: DataFrame with joined data of all categories for each building, that is: occupancy, age, geometry,
architecture, building component area, construction category and renovation category for each building component
:type cat_df: DataFrame
:param locator: an InputLocator instance set to the scenario to work on
:type locator: InputLocator
:param year_to_calculate: year in which the calculation is done; since the embodied energy and emissions are
calculated over 60 years, if the year of calculation is more than 60 years after construction, the results
will be 0
:type year_to_calculate: int
:param total_column: label for the column with the total results (e.g., 'GEN_GJ')
:type total_column: str
:param specific_column: label for the column with the results per square meter (e.g., 'GEN_MJm2')
:type specific_column: str
:return result: DataFrame with the calculation results (i.e., the total and specific embodied energy or emisisons
for each building)
:rtype result: DataFrame
"""
# get archetype properties from the database
database_df = pd.read_excel(locator.get_life_cycle_inventory_building_systems(config.region), archetype)
database_df['Code'] = database_df.apply(lambda x: calc_code(x['building_use'], x['year_start'],
x['year_end'], x['standard']), axis=1)
cat_df['cat_built'] = calc_category(database_df, cat_df, 'built', 'C')
retro_cat = ['envelope', 'roof', 'windows', 'partitions', 'basement', 'HVAC']
for cat in retro_cat:
cat_df['cat_' + cat] = calc_category(database_df, cat_df, cat, 'R')
# merge databases according to category
built_df = cat_df.merge(database_df, left_on='cat_built', right_on='Code')
envelope_df = cat_df.merge(database_df, left_on='cat_envelope', right_on='Code')
roof_df = cat_df.merge(database_df, left_on='cat_roof', right_on='Code')
windows_df = cat_df.merge(database_df, left_on='cat_windows', right_on='Code')
partitions_df = cat_df.merge(database_df, left_on='cat_partitions', right_on='Code')
basement_df = cat_df.merge(database_df, left_on='cat_basement', right_on='Code')
HVAC_df = cat_df.merge(database_df, left_on='cat_HVAC', right_on='Code')
    # check in case some or all buildings do not have a match in the database;
    # this happens when a building has not been retrofitted.
# calculate the embodied energy/emissions due to construction
# these include: external walls, roof, windows, interior floors, partitions, HVAC systems, and excavation
    ## calculate how many years before the calculation year the building was built
built_df['delta_year'] = year_to_calculate - built_df['built']
## if it was built more than 60 years before, the embodied energy/emissions have been "paid off" and are set to 0
built_df['confirm'] = built_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)
## if it was built less than 60 years before, the contribution from each building component is calculated
built_df['contrib'] = (((built_df['Wall_ext_ag'] * built_df['area_walls_ext_ag']) +
(built_df['Roof'] * built_df['footprint']) +
(built_df['windows_ag'] * built_df['Win_ext']) +
(built_df['floor_area_ag'] * built_df['Floor_int'] +
built_df['floor_area_ag'] * built_df['Wall_int_sup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO +
built_df['footprint'] * built_df['Wall_int_nosup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO) +
(basement_df['footprint'] * basement_df['Floor_g'] +
basement_df['Wall_ext_bg'] * basement_df['area_walls_ext_bg']) +
(built_df['footprint'] * built_df['Excavation'])) / SERVICE_LIFE_OF_BUILDINGS +
((HVAC_df['floor_area_ag'] + HVAC_df['footprint']) * HVAC_df[
'Services']) / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS) * built_df['confirm']
# calculate the embodied energy/emissions due to retrofits
# if a component was retrofitted more than 60 years before, its contribution has been "paid off" and is set to 0
## contributions due to envelope retrofit
envelope_df['delta_year'] = year_to_calculate - envelope_df['envelope']
envelope_df['confirm'] = envelope_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)
envelope_df['contrib'] = (envelope_df['Wall_ext_ag'] * envelope_df['area_walls_ext_ag']) * envelope_df[
'confirm'] / (SERVICE_LIFE_OF_BUILDINGS)
## contributions due to roof retrofit
roof_df['delta_year'] = year_to_calculate - roof_df['roof']
roof_df['confirm'] = roof_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)
roof_df['contrib'] = roof_df['Roof'] * roof_df['footprint'] * roof_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS
## contributions due to windows retrofit
windows_df['delta_year'] = year_to_calculate - windows_df['windows']
windows_df['confirm'] = windows_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)
windows_df['contrib'] = windows_df['windows_ag'] * windows_df['Win_ext'] * windows_df[
'confirm'] / SERVICE_LIFE_OF_BUILDINGS
## contributions due to partitions retrofit
partitions_df['delta_year'] = year_to_calculate - partitions_df['partitions']
partitions_df['confirm'] = partitions_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS),
axis=1)
partitions_df['contrib'] = (partitions_df['floor_area_ag'] * partitions_df['Floor_int'] +
partitions_df['floor_area_ag'] * partitions_df['Wall_int_sup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO +
partitions_df['footprint'] * partitions_df['Wall_int_nosup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO) * \
partitions_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS
## contributions due to basement_df
basement_df['delta_year'] = year_to_calculate - basement_df['basement']
basement_df['confirm'] = basement_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)
basement_df['contrib'] = ((basement_df['footprint'] * basement_df['Floor_g'] +
basement_df['Wall_ext_bg'] * basement_df['area_walls_ext_bg'])
* basement_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS)
## contributions due to HVAC_df
HVAC_df['delta_year'] = year_to_calculate - HVAC_df['HVAC']
HVAC_df['confirm'] = HVAC_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_TECHNICAL_SYSTEMS), axis=1)
HVAC_df['contrib'] = ((HVAC_df['floor_area_ag'] + HVAC_df['footprint']) * HVAC_df['Services']) * HVAC_df[
'confirm'] / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS
# the total embodied energy/emissions are calculated as a sum of the contributions from construction and retrofits
built_df[total_column] = (HVAC_df['contrib'] + basement_df['contrib'] + partitions_df['contrib']
+ built_df['contrib'] + roof_df['contrib'] + envelope_df['contrib']
+ windows_df['contrib']) / 1000
built_df[specific_column] = built_df[total_column] * 1000 / built_df['total_area']
# the total and specific embodied energy/emissions are returned
result = built_df[['Name', total_column, specific_column, 'total_area']]
return result
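# Worked example of the unit conversion done just above (numbers are
# hypothetical): a building with a total embodied value of 120 GJ and a gross
# floor area of 1,000 m2 gets 120 * 1000 / 1000 = 120 MJ/m2 in the specific
# column, since 1 GJ equals 1,000 MJ.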
def calc_if_existing(x, y):
"""
    Function to verify whether the first value is less than or equal to the second (return 1) or not (return 0). This is used
to verify whether a building's construction or retrofits happened more than 60 years before the year to calculate.
Since the embodied energy and emissions are calculated over 60 years, if the year of calculation is more than 60
years after construction, the results will be 0.
:param x: Number of years since construction/retrofit
:type x: long
:param y: Number of years over which the embodied energy/emissions calculation is carried out (i.e., 60)
:type y: int
:return value: 1 if x <= y; 0 otherwise
:rtype value: int
"""
if x <= y:
return 1
else:
return 0
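# Doctest-style illustration of calc_if_existing (hypothetical inputs, assuming
# SERVICE_LIFE_OF_BUILDINGS is 60 as stated in the docstrings above):
#   >>> calc_if_existing(40, 60)   # built 40 years before the calculation year
#   1
#   >>> calc_if_existing(75, 60)   # older than the 60-year service life
#   0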
def calc_code(code1, code2, code3, code4):
return str(code1) + str(code2) + str(code3) + str(code4)
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
print('Running embodied-energy with scenario = %s' % config.scenario)
print('Running embodied-energy with year-to-calculate = %s' % config.emissions.year_to_calculate)
lca_embodied(locator=locator, year_to_calculate=config.emissions.year_to_calculate, config=config)
if __name__ == '__main__':
main(cea.config.Configuration())
| 55.038462
| 136
| 0.70615
|
21646d61871d21cac8f865dfa95d1409472ce8a9
| 8,923
|
py
|
Python
|
rllib/agents/dreamer/dreamer.py
|
Crissman/ray
|
2092b097eab41b118a117fdfadd0fe664db41f63
|
[
"Apache-2.0"
] | 3
|
2021-06-22T19:57:41.000Z
|
2021-06-23T07:16:44.000Z
|
rllib/agents/dreamer/dreamer.py
|
h453693821/ray
|
9eb79727aa6ad94b01f8b660b83e1182555a89f6
|
[
"Apache-2.0"
] | 84
|
2021-03-06T08:02:56.000Z
|
2022-03-05T08:07:19.000Z
|
rllib/agents/dreamer/dreamer.py
|
h453693821/ray
|
9eb79727aa6ad94b01f8b660b83e1182555a89f6
|
[
"Apache-2.0"
] | 2
|
2021-05-05T21:05:16.000Z
|
2021-06-22T21:16:03.000Z
|
import logging
import random
import numpy as np
from ray.rllib.agents import with_common_config
from ray.rllib.agents.dreamer.dreamer_torch_policy import DreamerTorchPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
LEARNER_INFO, _get_shared_metrics
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.agents.dreamer.dreamer_model import DreamerModel
from ray.rllib.execution.rollout_ops import ParallelRollouts
from ray.rllib.utils.typing import SampleBatchType
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# PlaNET Model LR
"td_model_lr": 6e-4,
# Actor LR
"actor_lr": 8e-5,
# Critic LR
"critic_lr": 8e-5,
# Grad Clipping
"grad_clip": 100.0,
# Discount
"discount": 0.99,
# Lambda
"lambda": 0.95,
# Clipping is done inherently via policy tanh.
"clip_actions": False,
# Training iterations per data collection from real env
"dreamer_train_iters": 100,
    # Horizon for Environment (1000 for Mujoco/DMC)
"horizon": 1000,
# Number of episodes to sample for Loss Calculation
"batch_size": 50,
# Length of each episode to sample for Loss Calculation
"batch_length": 50,
# Imagination Horizon for Training Actor and Critic
"imagine_horizon": 15,
# Free Nats
"free_nats": 3.0,
# KL Coeff for the Model Loss
"kl_coeff": 1.0,
# Distributed Dreamer not implemented yet
"num_workers": 0,
# Prefill Timesteps
"prefill_timesteps": 5000,
# This should be kept at 1 to preserve sample efficiency
"num_envs_per_worker": 1,
# Exploration Gaussian
"explore_noise": 0.3,
# Batch mode
"batch_mode": "complete_episodes",
# Custom Model
"dreamer_model": {
"custom_model": DreamerModel,
# RSSM/PlaNET parameters
"deter_size": 200,
"stoch_size": 30,
# CNN Decoder Encoder
"depth_size": 32,
# General Network Parameters
"hidden_size": 400,
# Action STD
"action_init_std": 5.0,
},
"env_config": {
# Repeats action send by policy for frame_skip times in env
"frame_skip": 2,
}
})
# __sphinx_doc_end__
# yapf: enable
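# Minimal usage sketch (the config overrides and the env id "DreamerEnv-v0" are
# placeholder assumptions, not values shipped with this module):
#
#   config = DEFAULT_CONFIG.copy()
#   config["dreamer_train_iters"] = 50
#   trainer = DREAMERTrainer(config=config, env="DreamerEnv-v0")
#   result = trainer.train()
#
# DREAMERTrainer is the trainer class built at the bottom of this file via
# build_trainer().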
class EpisodicBuffer(object):
def __init__(self, max_length: int = 1000, length: int = 50):
"""Data structure that stores episodes and samples chunks
of size length from episodes
Args:
max_length: Maximum episodes it can store
            length: Episode chunking length in sample()
"""
# Stores all episodes into a list: List[SampleBatchType]
self.episodes = []
self.max_length = max_length
self.timesteps = 0
self.length = length
def add(self, batch: SampleBatchType):
"""Splits a SampleBatch into episodes and adds episodes
to the episode buffer
Args:
batch: SampleBatch to be added
"""
self.timesteps += batch.count
episodes = batch.split_by_episode()
for i, e in enumerate(episodes):
episodes[i] = self.preprocess_episode(e)
self.episodes.extend(episodes)
if len(self.episodes) > self.max_length:
delta = len(self.episodes) - self.max_length
# Drop oldest episodes
self.episodes = self.episodes[delta:]
def preprocess_episode(self, episode: SampleBatchType):
"""Batch format should be in the form of (s_t, a_(t-1), r_(t-1))
        When t=0, the reset obs is paired with an action and reward of 0.
Args:
episode: SampleBatch representing an episode
"""
obs = episode["obs"]
new_obs = episode["new_obs"]
action = episode["actions"]
reward = episode["rewards"]
act_shape = action.shape
act_reset = np.array([0.0] * act_shape[-1])[None]
rew_reset = np.array(0.0)[None]
obs_end = np.array(new_obs[act_shape[0] - 1])[None]
batch_obs = np.concatenate([obs, obs_end], axis=0)
batch_action = np.concatenate([act_reset, action], axis=0)
batch_rew = np.concatenate([rew_reset, reward], axis=0)
new_batch = {
"obs": batch_obs,
"rewards": batch_rew,
"actions": batch_action
}
return SampleBatch(new_batch)
def sample(self, batch_size: int):
"""Samples [batch_size, length] from the list of episodes
Args:
batch_size: batch_size to be sampled
"""
episodes_buffer = []
while len(episodes_buffer) < batch_size:
rand_index = random.randint(0, len(self.episodes) - 1)
episode = self.episodes[rand_index]
if episode.count < self.length:
continue
available = episode.count - self.length
index = int(random.randint(0, available))
episodes_buffer.append(episode.slice(index, index + self.length))
batch = {}
for k in episodes_buffer[0].keys():
batch[k] = np.stack([e[k] for e in episodes_buffer], axis=0)
return SampleBatch(batch)
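# Illustrative flow (hypothetical variable names; mirrors how execution_plan
# below feeds the buffer):
#
#   buffer = EpisodicBuffer(length=50)
#   buffer.add(sample_batch)          # split into episodes and stored
#   train_batch = buffer.sample(50)   # SampleBatch shaped [50, 50, ...] per key
#
# Each sampled chunk is a contiguous slice of `length` timesteps from a randomly
# chosen stored episode.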
def total_sampled_timesteps(worker):
return worker.policy_map[DEFAULT_POLICY_ID].global_timestep
class DreamerIteration:
def __init__(self, worker, episode_buffer, dreamer_train_iters, batch_size,
act_repeat):
self.worker = worker
self.episode_buffer = episode_buffer
self.dreamer_train_iters = dreamer_train_iters
self.repeat = act_repeat
self.batch_size = batch_size
def __call__(self, samples):
# Dreamer Training Loop
for n in range(self.dreamer_train_iters):
print(n)
batch = self.episode_buffer.sample(self.batch_size)
if n == self.dreamer_train_iters - 1:
batch["log_gif"] = True
fetches = self.worker.learn_on_batch(batch)
# Custom Logging
policy_fetches = self.policy_stats(fetches)
if "log_gif" in policy_fetches:
gif = policy_fetches["log_gif"]
policy_fetches["log_gif"] = self.postprocess_gif(gif)
# Metrics Calculation
metrics = _get_shared_metrics()
metrics.info[LEARNER_INFO] = fetches
metrics.counters[STEPS_SAMPLED_COUNTER] = self.episode_buffer.timesteps
metrics.counters[STEPS_SAMPLED_COUNTER] *= self.repeat
res = collect_metrics(local_worker=self.worker)
res["info"] = metrics.info
res["info"].update(metrics.counters)
res["timesteps_total"] = metrics.counters[STEPS_SAMPLED_COUNTER]
self.episode_buffer.add(samples)
return res
def postprocess_gif(self, gif: np.ndarray):
gif = np.clip(255 * gif, 0, 255).astype(np.uint8)
B, T, C, H, W = gif.shape
frames = gif.transpose((1, 2, 3, 0, 4)).reshape((1, T, C, H, B * W))
return frames
def policy_stats(self, fetches):
return fetches[DEFAULT_POLICY_ID]["learner_stats"]
def execution_plan(workers, config):
# Special Replay Buffer for Dreamer agent
episode_buffer = EpisodicBuffer(length=config["batch_length"])
local_worker = workers.local_worker()
# Prefill episode buffer with initial exploration (uniform sampling)
while total_sampled_timesteps(local_worker) < config["prefill_timesteps"]:
samples = local_worker.sample()
episode_buffer.add(samples)
batch_size = config["batch_size"]
dreamer_train_iters = config["dreamer_train_iters"]
act_repeat = config["action_repeat"]
rollouts = ParallelRollouts(workers)
rollouts = rollouts.for_each(
DreamerIteration(local_worker, episode_buffer, dreamer_train_iters,
batch_size, act_repeat))
return rollouts
def get_policy_class(config):
return DreamerTorchPolicy
def validate_config(config):
config["action_repeat"] = config["env_config"]["frame_skip"]
if config["framework"] != "torch":
raise ValueError("Dreamer not supported in Tensorflow yet!")
if config["batch_mode"] != "complete_episodes":
raise ValueError("truncate_episodes not supported")
if config["num_workers"] != 0:
raise ValueError("Distributed Dreamer not supported yet!")
if config["clip_actions"]:
raise ValueError("Clipping is done inherently via policy tanh!")
if config["action_repeat"] > 1:
config["horizon"] = config["horizon"] / config["action_repeat"]
DREAMERTrainer = build_trainer(
name="Dreamer",
default_config=DEFAULT_CONFIG,
default_policy=DreamerTorchPolicy,
get_policy_class=get_policy_class,
execution_plan=execution_plan,
validate_config=validate_config)
| 33.171004
| 79
| 0.654376
|
79911fd7c68ebfd4fb1adf3557b1f4c3eb36d7f2
| 8,061
|
py
|
Python
|
analysis/models/nodes/filters/damage_node.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
analysis/models/nodes/filters/damage_node.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
analysis/models/nodes/filters/damage_node.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
import itertools
import operator
from functools import reduce
from typing import Optional
from django.db import models
from django.db.models.query_utils import Q
from analysis.models.nodes.analysis_node import AnalysisNode
from annotation.models.damage_enums import PathogenicityImpact
from annotation.models.models import VariantAnnotation
class DamageNode(AnalysisNode):
impact_min = models.CharField(max_length=1, choices=PathogenicityImpact.CHOICES, null=True, blank=True)
impact_required = models.BooleanField(default=False)
splice_min = models.FloatField(null=True, blank=True)
splice_required = models.BooleanField(default=False)
splice_allow_null = models.BooleanField(default=True)
cadd_score_min = models.IntegerField(null=True, blank=True)
cadd_score_required = models.BooleanField(default=False)
cadd_score_allow_null = models.BooleanField(default=True)
revel_score_min = models.FloatField(null=True, blank=True)
revel_score_required = models.BooleanField(default=False)
revel_score_allow_null = models.BooleanField(default=True)
cosmic_count_min = models.IntegerField(null=True, blank=True)
cosmic_count_required = models.BooleanField(default=False)
damage_predictions_min = models.IntegerField(null=True, blank=True)
damage_predictions_required = models.BooleanField(default=False)
damage_predictions_allow_null = models.BooleanField(default=True)
protein_domain = models.BooleanField(default=False)
protein_domain_required = models.BooleanField(default=False)
published = models.BooleanField(default=False)
published_required = models.BooleanField(default=False)
# TODO: Remove these 2
always_keep_splice_variants_regardless_of_impact = models.BooleanField(default=True)
allow_null = models.BooleanField(default=False)
def modifies_parents(self):
return any([self.impact_min, self.splice_min, self.cadd_score_min, self.revel_score_min,
self.cosmic_count_min, self.damage_predictions_min, self.protein_domain, self.published])
def has_required(self):
return any([self.impact_required, self.splice_required, self.cadd_score_required, self.revel_score_required,
self.cosmic_count_required, self.damage_predictions_required,
self.protein_domain_required, self.published_required])
def _get_node_q(self) -> Optional[Q]:
or_filters = []
and_filters = []
if self.impact_min is not None:
q_impact = PathogenicityImpact.get_q(self.impact_min)
if self.impact_required:
and_filters.append(q_impact)
else:
or_filters.append(q_impact)
if self.splice_min is not None:
# [consequence contains 'splice' OR not null splice region] AND [variant class not SNV]
q_splice_indels = Q(variantannotation__consequence__contains='splice') | Q(variantannotation__splice_region__isnull=False)
q_splice_indels &= ~Q(variantannotation__variant_class='SN')
splicing_q_list = [
q_splice_indels,
Q(variantannotation__dbscsnv_ada_score__gte=self.splice_min),
Q(variantannotation__dbscsnv_rf_score__gte=self.splice_min),
]
if self.splice_required and self.splice_allow_null:
splicing_q_list.extend([
Q(variantannotation__dbscsnv_ada_score__isnull=True),
Q(variantannotation__dbscsnv_rf_score__isnull=True),
])
for _, (ds, _) in VariantAnnotation.SPLICEAI_DS_DP.items():
q_spliceai = Q(**{f"variantannotation__{ds}__gte": self.splice_min})
splicing_q_list.append(q_spliceai)
if self.splice_required and self.splice_allow_null:
q_spliceai_null = Q(**{f"variantannotation__{ds}__isnull": True})
splicing_q_list.append(q_spliceai_null)
q_splicing = reduce(operator.or_, splicing_q_list)
if self.splice_required:
and_filters.append(q_splicing)
else:
or_filters.append(q_splicing)
if self.cadd_score_min is not None:
q_cadd = Q(variantannotation__cadd_phred__gte=self.cadd_score_min)
if self.cadd_score_required:
if self.cadd_score_allow_null:
q_cadd |= Q(variantannotation__cadd_phred__isnull=True)
and_filters.append(q_cadd)
else:
or_filters.append(q_cadd)
if self.revel_score_min:
q_revel = Q(variantannotation__revel_score__gte=self.revel_score_min)
if self.revel_score_required:
if self.revel_score_allow_null:
q_revel |= Q(variantannotation__revel_score__isnull=True)
and_filters.append(q_revel)
else:
or_filters.append(q_revel)
if self.cosmic_count_min is not None:
q_cosmic_count = Q(variantannotation__cosmic_count__gte=self.cosmic_count_min)
if self.cosmic_count_required:
and_filters.append(q_cosmic_count)
else:
or_filters.append(q_cosmic_count)
if self.damage_predictions_min is not None:
q_damage = Q(variantannotation__predictions_num_pathogenic__gte=self.damage_predictions_min)
if self.damage_predictions_required:
if self.damage_predictions_allow_null:
max_benign = len(VariantAnnotation.PATHOGENICITY_FIELDS) - self.damage_predictions_min
q_damage = Q(variantannotation__predictions_num_benign__lte=max_benign)
and_filters.append(q_damage)
else:
or_filters.append(q_damage)
if self.protein_domain:
protein_domains_fields_or = []
for f in ["interpro_domain", "domains"]: # TODO: What about UniProt??
q_domain_field = Q(**{f"variantannotation__{f}__isnull": False})
protein_domains_fields_or.append(q_domain_field)
q_protein_domain = reduce(operator.or_, protein_domains_fields_or)
if self.protein_domain_required:
and_filters.append(q_protein_domain)
else:
or_filters.append(q_protein_domain)
if self.published:
published_fields_or = []
for f in itertools.chain(["pubmed"], VariantAnnotation.MASTERMIND_FIELDS.keys()):
q_published_field = Q(**{f"variantannotation__{f}__isnull": False})
published_fields_or.append(q_published_field)
q_published = reduce(operator.or_, published_fields_or)
if self.published_required:
and_filters.append(q_published)
else:
or_filters.append(q_published)
if or_filters:
q_or = reduce(operator.or_, or_filters)
and_filters.append(q_or)
if and_filters:
q = reduce(operator.and_, and_filters)
else:
q = None
return q
def _get_method_summary(self):
if self.modifies_parents():
method_summary = self.get_node_name()
else:
method_summary = 'No filters applied.'
return method_summary
def get_node_name(self):
name = ''
if self.modifies_parents():
if self.damage_predictions_min:
name = f"{self.damage_predictions_min} of {len(VariantAnnotation.PATHOGENICITY_FIELDS)}"
return name
@staticmethod
def get_help_text() -> str:
return "Impact, damage predictions, conservation and splicing filter"
def get_css_classes(self):
css_classes = super().get_css_classes()
if self.splice_min is not None:
css_classes.append("EffectNodeSplicing")
return css_classes
@staticmethod
def get_node_class_label():
return "EffectNode"
| 42.204188
| 134
| 0.66952
|
1b8be3b8c47f6ca3d7b589a4f8bf29550687b7bd
| 55,981
|
py
|
Python
|
towhee/models/collaborative_experts/collaborative_experts.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
towhee/models/collaborative_experts/collaborative_experts.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
towhee/models/collaborative_experts/collaborative_experts.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
# Built on top of the original implementation at https://github.com/albanie/collaborative-experts
#
# Modifications by Copyright 2022 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Dict
from towhee.models.collaborative_experts.util import expert_tensor_storage
from towhee.models.collaborative_experts.net_vlad import NetVLAD
from torch.autograd import Variable
from torch import nn
import torch
import torch.nn.functional as F
import numpy as np
import itertools
class Mish(nn.Module):
"""
Applies the mish function element-wise:
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
SRC: https://github.com/digantamisra98/Mish/blob/master/Mish/Torch/mish.py
"""
def forward(self, input_):
"""
Forward pass of the function.
"""
return input_ * torch.tanh(F.softplus(input_))
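# Quick illustrative check (added, not part of the original model code): Mish should
# match the closed form x * tanh(ln(1 + exp(x))) element-wise.
def _mish_sanity_check():
    x = torch.linspace(-3.0, 3.0, steps=7)
    expected = x * torch.tanh(torch.log1p(torch.exp(x)))
    assert torch.allclose(Mish()(x), expected, atol=1e-6)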
def kronecker_prod(t1, t2):
# kronecker is performed along the last dim
kron = torch.bmm(t1.view(-1, t1.size(-1), 1), t2.contiguous().view(-1, 1, t2.size(-1)))
return kron.view(t1.shape[0], t1.shape[1], -1)
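# Illustrative shape check (added, not part of the original code): for inputs of
# shape (B, N, D1) and (B, N, D2), kronecker_prod returns shape (B, N, D1 * D2).
def _kronecker_prod_shape_check():
    t1 = torch.randn(2, 4, 5)
    t2 = torch.randn(2, 4, 3)
    assert kronecker_prod(t1, t2).shape == (2, 4, 15)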
def drop_nans(x, ind, validate_missing):
"""Remove nans, which we expect to find at missing indices.
Args:
x (torch.Tensor): features
ind (torch.Tensor): binary values denoting whether or not a given feature is
present
validate_missing (bool): whether to validate that the missing location contains
a nan.
Returns:
(torch.tensor): the features, with the missing values masked to zero.
"""
missing = torch.nonzero(ind == 0).flatten()
if missing.numel():
if validate_missing:
vals = x[missing[0]]
assert vals.view(-1)[0], "expected nans at missing locations"
x_ = x
x_[missing] = 0
x = x_
return x
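# Illustrative example (added, not part of the original code): rows whose indicator
# is 0 are zeroed out; with validate_missing=True they are expected to hold nans.
def _drop_nans_example():
    x = torch.tensor([[1.0, 2.0], [float("nan"), float("nan")]])
    ind = torch.tensor([1, 0])
    cleaned = drop_nans(x, ind, validate_missing=True)
    assert torch.equal(cleaned[1], torch.zeros(2))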
class CENet(nn.Module):
"""
Collaborative Experts Module.
"""
def __init__(
self,
task,
use_ce,
text_dim,
l2renorm,
expert_dims,
vlad_clusters,
ghost_clusters,
disable_nan_checks,
keep_missing_modalities,
test_caption_mode,
randomise_feats,
feat_aggregation,
ce_shared_dim,
trn_config,
trn_cat,
include_self,
use_mish,
use_bn_reason,
num_h_layers,
num_g_layers,
kron_dets=False,
freeze_weights=False,
geometric_mlp=False,
rand_proj=False,
mimic_ce_dims=False,
coord_dets=False,
concat_experts=False,
spatial_feats=False,
concat_mix_experts=False,
verbose=False,
num_classes=None):
super().__init__()
self.l2renorm = l2renorm
self.task = task
self.geometric_mlp = geometric_mlp
self.feat_aggregation = feat_aggregation
self.expert_dims = expert_dims
self.num_h_layers = num_h_layers
self.num_g_layers = num_g_layers
self.use_mish = use_mish
self.use_bn_reason = use_bn_reason
self.include_self = include_self
self.kron_dets = kron_dets
self.rand_proj = rand_proj
self.coord_dets = coord_dets
self.disable_nan_checks = disable_nan_checks
self.trn_config = trn_config
self.trn_cat = trn_cat
if randomise_feats:
self.random_feats = set(x for x in randomise_feats.split(","))
else:
self.random_feats = set()
# sanity checks on the features that may be vladded
pre_vlad_feat_sizes = {"ocr": 300, "audio": 128, "speech": 300}
pre_vlad_feat_sizes = {key: val for key, val in pre_vlad_feat_sizes.items()
if feat_aggregation[key]["temporal"] == "vlad"}
# we basically disable safety checks for detection-sem
if spatial_feats:
spatial_feat_dim = 16
else:
spatial_feat_dim = 5
if self.geometric_mlp:
self.geometric_mlp_model = SpatialMLP(spatial_feat_dim)
if kron_dets:
sem_det_dim = 300 * spatial_feat_dim
elif coord_dets:
sem_det_dim = spatial_feat_dim
elif rand_proj:
sem_det_dim = 300 + 300
self.proj = nn.Linear(spatial_feat_dim, 300)
else:
sem_det_dim = 300 + spatial_feat_dim
self.spatial_feat_dim = spatial_feat_dim
pre_vlad_feat_sizes["detection-sem"] = sem_det_dim
if "detection-sem" in expert_dims:
new_in_dim = sem_det_dim * vlad_clusters["detection-sem"]
expert_dims["detection-sem"] = (new_in_dim, expert_dims["detection-sem"][1])
vlad_feat_sizes = dict(vlad_clusters.items())
self.pooling = nn.ModuleDict()
for mod, expected in pre_vlad_feat_sizes.items():
if mod in expert_dims.keys():
feature_size = expert_dims[mod][0] // vlad_clusters[mod]
msg = f"expected {expected} for {mod} features atm"
assert feature_size == expected, msg
self.pooling[mod] = NetVLAD(
feature_size=feature_size,
cluster_size=vlad_clusters[mod],
)
if "retrieval" in self.task:
if vlad_clusters["text"] == 0:
self.text_pooling = nn.Sequential()
else:
self.text_pooling = NetVLAD(
feature_size=text_dim,
cluster_size=vlad_clusters["text"],
ghost_clusters=ghost_clusters["text"],
)
text_dim = self.text_pooling.out_dim
else:
self.num_classes = num_classes
text_dim = None
self.tensor_storage = expert_tensor_storage(
experts=self.expert_dims.keys(),
feat_aggregation=self.feat_aggregation,
)
self.ce = CEModule(
use_ce=use_ce,
task=self.task,
verbose=verbose,
l2renorm=l2renorm,
trn_cat=self.trn_cat,
trn_config=self.trn_config,
random_feats=self.random_feats,
freeze_weights=freeze_weights,
text_dim=text_dim,
test_caption_mode=test_caption_mode,
concat_experts=concat_experts,
concat_mix_experts=concat_mix_experts,
expert_dims=expert_dims,
vlad_feat_sizes=vlad_feat_sizes,
disable_nan_checks=disable_nan_checks,
keep_missing_modalities=keep_missing_modalities,
mimic_ce_dims=mimic_ce_dims,
include_self=include_self,
use_mish=use_mish,
use_bn_reason=use_bn_reason,
num_h_layers=num_h_layers,
num_g_layers=num_g_layers,
num_classes=num_classes,
same_dim=ce_shared_dim,
)
def randomise_feats(self, experts, key):
if key in self.random_feats:
# keep expected nans
nan_mask = torch.isnan(experts[key])
experts[key] = torch.randn_like(experts[key])
if not self.disable_nan_checks:
nans = torch.tensor(float("nan")) # pylint: disable=not-callable
experts[key][nan_mask] = nans.to(experts[key].device)
return experts
def forward(self, experts, ind, text=None, raw_captions=None, text_token_mask=None):
aggregated_experts = OrderedDict()
if "detection-sem" in self.expert_dims:
det_sem = experts["detection-sem"]
box_feats = det_sem[:, :, :self.spatial_feat_dim]
sem_feats = det_sem[:, :, self.spatial_feat_dim:]
if self.geometric_mlp:
x = box_feats.view(-1, box_feats.shape[-1])
x = self.geometric_mlp_model(x)
box_feats = x.view(box_feats.shape)
if self.kron_dets:
feats = kronecker_prod(box_feats, sem_feats)
elif self.coord_dets:
feats = box_feats.contiguous()
elif self.rand_proj:
feats = box_feats.contiguous()
projected = self.proj(feats)
feats = torch.cat((projected, sem_feats.contiguous()), dim=2)
else:
feats = torch.cat((box_feats, sem_feats.contiguous()), dim=2)
experts["detection-sem"] = feats
# Handle all nan-checks
for mod in self.expert_dims:
experts = self.randomise_feats(experts, mod)
experts[mod] = drop_nans(x=experts[mod], ind=ind[mod], validate_missing=True)
if mod in self.tensor_storage["fixed"]:
aggregated_experts[mod] = experts[mod]
elif mod in self.tensor_storage["variable"]:
aggregated_experts[mod] = self.pooling[mod](experts[mod])
if "retrieval" in self.task:
bb, captions_per_video, max_words, text_feat_dim = text.size()
text = text.view(bb * captions_per_video, max_words, text_feat_dim)
if isinstance(self.text_pooling, NetVLAD):
kwargs = {"mask": text_token_mask}
else:
kwargs = {}
text = self.text_pooling(text, **kwargs)
text = text.view(bb, captions_per_video, -1)
else:
text = None
return self.ce(text, aggregated_experts, ind, raw_captions)
class TemporalAttention(torch.nn.Module):
"""
TemporalAttention Module
"""
def __init__(self, img_feature_dim, num_attention):
super().__init__()
self.weight = Variable(
torch.randn(img_feature_dim, num_attention),
requires_grad=True).cuda() # d*seg
self.img_feature_dim = img_feature_dim
self.num_attention = num_attention
def forward(self, input_):
record = []
input_avg = torch.mean(input_.clone(), dim=1)
input_max = torch.max(input_.clone(), dim=1)
record.append(input_avg)
record.append(input_max[0])
output = torch.matmul(input_, self.weight)
attentions = F.softmax(output, dim=1)
for idx in range(attentions.shape[-1]):
temp = attentions[:, :, idx]
temp_output = torch.sum(temp.unsqueeze(2) * input_, dim=1)
norm = temp_output.norm(p=2, dim=-1, keepdim=True)
temp_output = temp_output.div(norm)
record.append(temp_output)
act_all = torch.cat((record), 1)
return act_all
class RelationModuleMultiScale(torch.nn.Module):
"""
RelationModuleMultiScale Module
"""
# Temporal Relation module at multiple scales, summing over
# [2-frame relation, 3-frame relation, ..., n-frame relation]
def __init__(self, img_feature_dim, num_frames, num_class):
super().__init__()
self.subsample_num = 3 # how many relations selected to sum up
self.img_feature_dim = img_feature_dim
# generate the multiple frame relations
self.scales = list(range(num_frames, 1, -1))
self.relations_scales = []
self.subsample_scales = []
for scale in self.scales:
relations_scale = self.return_relationset(num_frames, scale)
self.relations_scales.append(relations_scale)
# how many samples of relation to select in each forward pass
self.subsample_scales.append(min(self.subsample_num, len(relations_scale)))
self.num_class = num_class
self.num_frames = num_frames
num_bottleneck = 256
self.fc_fusion_scales = nn.ModuleList() # high-tech modulelist
for i in range(len(self.scales)):
scale = self.scales[i]
fc_fusion = nn.Sequential(
nn.ReLU(),
nn.Linear(scale * self.img_feature_dim, num_bottleneck),
nn.ReLU(),
nn.Linear(num_bottleneck, self.num_class),
)
self.fc_fusion_scales += [fc_fusion]
def forward(self, input_):
# the first one is the largest scale
act_all = input_[:, self.relations_scales[0][0], :]
act_all = act_all.view(act_all.size(0), self.scales[0] * self.img_feature_dim)
act_all = self.fc_fusion_scales[0](act_all)
for scale_id in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(
len(self.relations_scales[scale_id]),
self.subsample_scales[scale_id],
replace=False,
)
for idx in idx_relations_randomsample:
act_relation = input_[:, self.relations_scales[scale_id][idx], :]
act_relation = act_relation.view(act_relation.size(0), self.scales[scale_id] * self.img_feature_dim)
act_relation = self.fc_fusion_scales[scale_id](act_relation)
act_all += act_relation
return act_all
def return_relationset(self, num_frames, num_frames_relation):
return list(itertools.combinations(list(range(num_frames)), num_frames_relation))
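# Illustrative example (added, not part of the original code): return_relationset
# simply enumerates frame-index combinations, e.g. all 2-frame relations over 4 frames.
def _relationset_example():
    combos = RelationModuleMultiScale.return_relationset(None, 4, 2)
    assert combos == [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]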
class RelationModuleMultiScale_Cat(torch.nn.Module): # pylint: disable=invalid-name
"""
RelationModuleMultiScale_Cat Module
"""
# Temporal Relation module at multiple scales, summing over [2-frame relation, 3-frame relation, ..., n-frame relation]
def __init__(self, img_feature_dim, num_frames, num_class):
super().__init__()
self.subsample_num = 3 # how many relations selected to sum up
self.img_feature_dim = img_feature_dim
self.scales = list(range(num_frames, 1, -1)) # generate the multiple frame relations
self.relations_scales = []
self.subsample_scales = []
for scale in self.scales:
relations_scale = self.return_relationset(num_frames, scale)
self.relations_scales.append(relations_scale)
self.subsample_scales.append(min(self.subsample_num,
len(relations_scale))) # how many samples of relation to select in each forward pass
self.num_class = num_class
self.num_frames = num_frames
num_bottleneck = 256
self.fc_fusion_scales = nn.ModuleList() # high-tech modulelist
for i in range(len(self.scales)):
scale = self.scales[i]
fc_fusion = nn.Sequential(
nn.ReLU(),
nn.Linear(scale * self.img_feature_dim, num_bottleneck),
nn.ReLU(),
nn.Linear(num_bottleneck, self.num_class),
)
self.fc_fusion_scales += [fc_fusion]
def forward(self, input_):
record = []
# the first one is the largest scale
act_all = input_[:, self.relations_scales[0][0], :]
act_all = act_all.view(act_all.size(0), self.scales[0] * self.img_feature_dim)
act_all = self.fc_fusion_scales[0](act_all)
norm = act_all.norm(p=2, dim=-1, keepdim=True)
act_all = act_all.div(norm)
record.append(act_all)
for scale_id in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(len(self.relations_scales[scale_id]),
self.subsample_scales[scale_id], replace=False)
act_all = 0
for idx in idx_relations_randomsample:
act_relation = input_[:, self.relations_scales[scale_id][idx], :]
act_relation = act_relation.view(act_relation.size(0), self.scales[scale_id] * self.img_feature_dim)
act_relation = self.fc_fusion_scales[scale_id](act_relation)
act_all += act_relation
norm = act_all.norm(p=2, dim=-1, keepdim=True)
act_all = act_all.div(norm)
record.append(act_all)
act_all = torch.cat((record), 1)
return act_all
def return_relationset(self, num_frames, num_frames_relation):
return list(itertools.combinations(list(range(num_frames)), num_frames_relation))
class CEModule(nn.Module):
"""
CE Module
"""
def __init__(self, expert_dims, text_dim, use_ce, verbose, l2renorm, num_classes,
trn_config, trn_cat, use_mish, include_self, num_h_layers, num_g_layers,
disable_nan_checks, random_feats, test_caption_mode, mimic_ce_dims,
concat_experts, concat_mix_experts, freeze_weights, task,
keep_missing_modalities, vlad_feat_sizes, same_dim, use_bn_reason):
super().__init__()
modalities = list(expert_dims.keys())
self.expert_dims = expert_dims
self.modalities = modalities
self.disable_nan_checks = disable_nan_checks
self.mimic_ce_dims = mimic_ce_dims
self.concat_experts = concat_experts
self.same_dim = same_dim
self.use_mish = use_mish
self.use_bn_reason = use_bn_reason
self.num_h_layers = num_h_layers
self.num_g_layers = num_g_layers
self.include_self = include_self
self.num_classes = num_classes
self.task = task
self.vlad_feat_sizes = vlad_feat_sizes
self.concat_mix_experts = concat_mix_experts
self.test_caption_mode = test_caption_mode
self.reduce_dim = 64
self.moe_cg = ContextGating
self.freeze_weights = freeze_weights
self.random_feats = random_feats
self.use_ce = use_ce
self.verbose = verbose
self.keep_missing_modalities = keep_missing_modalities
self.l2renorm = l2renorm
self.trn_config = trn_config
self.trn_cat = trn_cat
if self.use_mish:
self.non_lin = Mish()
else:
self.non_lin = nn.ReLU()
if "retrieval" in self.task:
num_mods = len(expert_dims)
self.moe_fc = nn.Linear(text_dim, len(expert_dims))
self.moe_weights = torch.ones(1, num_mods) / num_mods
use_bns = [True for _ in self.modalities]
self.trn_list = nn.ModuleList()
self.repeat_temporal = {}
for mod in modalities:
self.repeat_temporal[mod] = 1
if self.trn_cat == 2:
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[
mod]  # This is exactly how many different attention weights are used
num_frames = 1 # mimic simple avg and max based on segments
# num_class = expert_dims[mod][0]
self.trn_list += [TemporalAttention(img_feature_dim, num_frames)]
self.repeat_temporal[mod] = num_frames + 2
elif self.trn_cat == 1:
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[mod] # hard code
num_class = expert_dims[mod][0]
self.trn_list += [
RelationModuleMultiScale_Cat(img_feature_dim, num_frames, num_class)
]
self.repeat_temporal[mod] = len(list(range(num_frames, 1, -1)))
elif self.trn_cat == 0:
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[mod] # hard code
num_class = expert_dims[mod][0]
self.trn_list += [
RelationModuleMultiScale(img_feature_dim, num_frames,
num_class)
]
else:
raise NotImplementedError()
in_dims = [expert_dims[mod][0] * self.repeat_temporal[mod] for mod in modalities]
agg_dims = [expert_dims[mod][1] * self.repeat_temporal[mod] for mod in modalities]
if self.use_ce or self.mimic_ce_dims:
dim_reducers = [ReduceDim(in_dim, same_dim) for in_dim in in_dims]
self.video_dim_reduce = nn.ModuleList(dim_reducers)
if self.use_ce:
# The g_reason module has a first layer that is specific to the design choice
# (e.g. triplet vs pairwise), then a shared component which is common to all
# designs.
if self.use_ce in {"pairwise", "pairwise-star", "triplet"}:
num_inputs = 3 if self.use_ce == "triplet" else 2
self.g_reason_1 = nn.Linear(same_dim * num_inputs, same_dim)
elif self.use_ce == "pairwise-star-specific":
num_inputs = 2
g_reason_unshared_weights = [G_reason(same_dim, num_inputs, self.non_lin)
for mod in modalities]
self.g_reason_unshared_weights = nn.ModuleList(g_reason_unshared_weights)
elif self.use_ce in {"pairwise-star-tensor"}:
reduce_dim = self.reduce_dim
self.dim_reduce = nn.Linear(same_dim, reduce_dim)
self.g_reason_1 = nn.Linear(self.reduce_dim * reduce_dim, same_dim)
else:
raise ValueError(f"unrecognised CE config: {self.use_ce}")
g_reason_shared = []
for _ in range(self.num_g_layers - 1):
if self.use_bn_reason:
g_reason_shared.append(nn.BatchNorm1d(same_dim))
g_reason_shared.append(self.non_lin)
g_reason_shared.append(nn.Linear(same_dim, same_dim))
self.g_reason_shared = nn.Sequential(*g_reason_shared)
h_reason = []
for _ in range(self.num_h_layers):
if self.use_bn_reason:
h_reason.append(nn.BatchNorm1d(same_dim))
h_reason.append(self.non_lin)
h_reason.append(nn.Linear(same_dim, same_dim))
self.h_reason = nn.Sequential(*h_reason)
gated_vid_embds = [GatedEmbeddingUnitReasoning(same_dim) for _ in in_dims]
text_out_dims = [same_dim for _ in agg_dims]
elif self.mimic_ce_dims: # ablation study
gated_vid_embds = [MimicCEGatedEmbeddingUnit(same_dim, same_dim, use_bn=True)
for _ in modalities]
text_out_dims = [same_dim for _ in agg_dims]
elif self.concat_mix_experts: # ablation study
# use a single large GEU to mix the experts - the output will be the sum
# of the aggregation sizes
in_dim, out_dim = sum(in_dims), sum(agg_dims)
gated_vid_embds = [GatedEmbeddingUnit(in_dim, out_dim, use_bn=True)]
elif self.concat_experts: # ablation study
# We do not use learnable parameters for the video combination, (we simply
# use a high dimensional inner product).
gated_vid_embds = []
else:
gated_vid_embds = [GatedEmbeddingUnit(in_dim, dim, use_bn) for
in_dim, dim, use_bn in zip(in_dims, agg_dims, use_bns)]
text_out_dims = agg_dims
self.video_GU = nn.ModuleList(gated_vid_embds) # pylint: disable=invalid-name
if "retrieval" in self.task:
if self.concat_experts:
gated_text_embds = [nn.Sequential()]
elif self.concat_mix_experts:
# As with the video inputs, we similarly use a single large GEU for the
# text embedding
gated_text_embds = [GatedEmbeddingUnit(text_dim, sum(agg_dims),
use_bn=True)]
else:
gated_text_embds = [GatedEmbeddingUnit(text_dim, dim, use_bn=True) for
dim in text_out_dims]
self.text_GU = nn.ModuleList(gated_text_embds) # pylint: disable=invalid-name
else:
total_dim = 0
for mod in self.expert_dims.keys():
total_dim += self.expert_dims[mod][1] * self.repeat_temporal[mod]
self.classifier = nn.Linear(total_dim, self.num_classes)
def compute_moe_weights(self, text, ind):
_ = ind
# compute weights for all captions (including when assigned K captions to
# the same video)
bb, kk, dd = text.shape
mm = len(self.modalities)
msg = f"expected between 1 and 10 modalities, found {mm} ({self.modalities})"
assert 1 <= mm <= 10, msg
# Treat each caption independently in the softmax (which runs over modalities)
text = text.view(bb * kk, dd)
if self.freeze_weights:
moe_weights = self.moe_weights.repeat(bb, kk, 1)
if text.is_cuda:
moe_weights = moe_weights.cuda()
else:
# if False:
# print("USING BIGGER WEIGHT PREDS")
# moe_weights = self.moe_fc_bottleneck1(text)
# moe_weights = self.moe_cg(moe_weights)
# moe_weights = self.moe_fc_proj(moe_weights)
# moe_weights = moe_weights * 1
# else:
moe_weights = self.moe_fc(text) # BK x D -> BK x M
moe_weights = F.softmax(moe_weights, dim=1)
moe_weights = moe_weights.view(bb, kk, mm)
if self.verbose:
print("--------------------------------")
for idx, key in enumerate(self.modalities):
msg = "{}: mean: {:.3f}, std: {:.3f}, min: {:.3f}, max: {:.3f}"
msg = msg.format(
key,
moe_weights[:, :, idx].mean().item(),
moe_weights[:, :, idx].std().item(),
moe_weights[:, :, idx].min().item(),
moe_weights[:, :, idx].max().item(),
)
print(msg)
return moe_weights
def forward(self, text, experts, ind, raw_captions):
"""Compute joint embeddings and, if requested, a confusion matrix between
video and text representations in the minibatch.
Notation: B = batch size, M = number of modalities
"""
if "retrieval" in self.task:
# Pass text embeddings through gated units
text_embd = {}
# Unroll repeated captions into present minibatch
bb, captions_per_video, feat_dim = text.size()
text = text.view(bb * captions_per_video, feat_dim)
for modality, layer in zip(self.modalities, self.text_GU):
# NOTE: Due to the batch norm, the gated units are sensitive to passing
# in a lot of zeroes, so we do the masking step after the forwards pass
text_ = layer(text)
# We always assume that text is available for retrieval
text_ = text_.view(bb, captions_per_video, -1)
if "text" in self.random_feats:
text_ = torch.rand_like(text_)
text_embd[modality] = text_
text = text.view(bb, captions_per_video, -1)
# vladded nans are handled earlier (during pooling)
# We also avoid zeroing random features, since this will leak information
# exclude = list(self.vlad_feat_sizes.keys()) + list(self.random_feats)
# experts = self.mask_missing_embeddings(experts, ind, exclude=exclude)
# MOE weights computation + normalization - note that we use the first caption
# sample to predict the weights
moe_weights = self.compute_moe_weights(text, ind=ind)
if self.l2renorm:
for modality in self.modalities:
norm = experts[modality].norm(p=2, dim=-1, keepdim=True)
experts[modality] = experts[modality].div(norm)
for modality, layer in zip(self.modalities, self.trn_list):
experts[modality] = layer(experts[modality])
if hasattr(self, "video_dim_reduce"):
# Embed all features to a common dimension
for modality, layer in zip(self.modalities, self.video_dim_reduce):
experts[modality] = layer(experts[modality])
if self.use_ce:
dev = experts[self.modalities[0]].device
if self.include_self:
all_combinations = list(itertools.product(experts, repeat=2))
else:
all_combinations = list(itertools.permutations(experts, 2))
assert len(self.modalities) > 1, "use_ce requires multiple modalities"
if self.use_ce in {"pairwise-star", "pairwise-star-specific",
"pairwise-star-tensor"}:
sum_all = 0
sum_ind = 0
for mod0 in experts.keys():
sum_all += (experts[mod0] * ind[mod0].float().to(dev).unsqueeze(1))
sum_ind += ind[mod0].float().to(dev).unsqueeze(1)
avg_modality = sum_all / sum_ind
for ii, l in enumerate(self.video_GU):
mask_num = 0
curr_mask = 0
temp_dict = {}
avai_dict = {}
curr_modality = self.modalities[ii]
if self.use_ce == "pairwise-star":
fused = torch.cat((experts[curr_modality], avg_modality), 1) # -> B x 2D
temp = self.g_reason_1(fused) # B x 2D -> B x D
temp = self.g_reason_shared(temp) # B x D -> B x D
curr_mask = temp * ind[curr_modality].float().to(dev).unsqueeze(1)
elif self.use_ce == "pairwise-star-specific":
fused = torch.cat((experts[curr_modality], avg_modality), 1) # -> B x 2D
temp = self.g_reason_unshared_weights[ii](fused)
temp = self.g_reason_shared(temp) # B x D -> B x D
curr_mask = temp * ind[curr_modality].float().to(dev).unsqueeze(1)
elif self.use_ce == "pairwise-star-tensor":
mod0_reduce = self.dim_reduce(experts[curr_modality])
mod0_reduce = mod0_reduce.unsqueeze(2) # B x reduced_dim x1
mod1_reduce = self.dim_reduce(avg_modality)
mod1_reduce = mod1_reduce.unsqueeze(1) # B x1 x reduced_dim
flat_dim = self.reduce_dim * self.reduce_dim
fused = torch.matmul(mod0_reduce, mod1_reduce).view(-1, flat_dim)
temp = self.g_reason_1(fused) # B x 2D -> B x D
temp = self.g_reason_shared(temp) # B x D -> B x D
curr_mask = temp * ind[curr_modality].float().to(dev).unsqueeze(1)
elif self.use_ce in {"pairwise", "triplet"}:
for modality_pair in all_combinations:
mod0, mod1 = modality_pair
if self.use_ce == "pairwise":
if mod0 == curr_modality:
new_key = f"{mod0}_{mod1}"
fused = torch.cat((experts[mod0], experts[mod1]), 1)
temp = self.g_reason_1(fused) # B x 2D -> B x D
temp = self.g_reason_shared(temp)
temp_dict[new_key] = temp
avail = (ind[mod0].float() * ind[mod1].float())
avai_dict[new_key] = avail.to(dev)
elif self.use_ce == "triplet":
if (curr_modality not in {mod0, mod1}) or self.include_self:
new_key = f"{curr_modality}_{mod0}_{mod1}"
fused = torch.cat((experts[curr_modality], experts[mod0],
experts[mod1]), 1) # -> B x 2D
temp = self.g_reason_1(fused) # B x 2D -> B x D
temp = self.g_reason_shared(temp)
temp_dict[new_key] = temp
avail = (ind[curr_modality].float() * ind[mod0].float() *
ind[mod1].float()).to(dev)
avai_dict[new_key] = avail
# Combine the paired features into a mask through elementwise sum
for mm, value in temp_dict.items():
curr_mask += value * avai_dict[mm].unsqueeze(1)
mask_num += avai_dict[mm]
curr_mask = torch.div(curr_mask, (mask_num + 0.00000000001).unsqueeze(1))
else:
raise ValueError(f"Unknown CE mechanism: {self.use_ce}")
curr_mask = self.h_reason(curr_mask)
experts[curr_modality] = l(experts[curr_modality], curr_mask)
elif self.concat_mix_experts:
concatenated = torch.cat(tuple(experts.values()), dim=1)
vid_embd_ = self.video_GU[0](concatenated)
text_embd_ = text_embd[self.modalities[0]]
text_embd_ = text_embd_.view(-1, text_embd_.shape[-1])
elif self.concat_experts:
vid_embd_ = torch.cat(tuple(experts.values()), dim=1)
text_embd_ = text_embd[self.modalities[0]]
text_embd_ = text_embd_.view(-1, text_embd_.shape[-1])
else:
for modality, layer in zip(self.modalities, self.video_GU):
experts[modality] = layer(experts[modality])
if self.training:
merge_caption_similiarities = "avg"
else:
merge_caption_similiarities = self.test_caption_mode
if self.task == "classification":
# for modality, layer in zip(self.modalities, self.video_dim_reduce_later):
# attempt to perform affordable classifier, might be removed later
# experts[modality] = layer(experts[modality])
concatenated = torch.cat(tuple(experts.values()), dim=1)
preds = self.classifier(concatenated)
return {"modalities": self.modalities, "class_preds": preds}
elif self.concat_experts or self.concat_mix_experts:
# zero pad to accommodate mismatch in sizes (after first setting the number
# of VLAD clusters for the text to get the two vectors as close as possible
# in size)
if text_embd_.shape[1] > vid_embd_.shape[1]:
sz = (vid_embd_.shape[0], text_embd_.shape[1])
dtype, device = text_embd_.dtype, text_embd_.device
vid_embd_padded = torch.zeros(size=sz, dtype=dtype, device=device)
vid_embd_padded[:, :vid_embd_.shape[1]] = vid_embd_
vid_embd_ = vid_embd_padded
else:
sz = (text_embd_.shape[0], vid_embd_.shape[1])
dtype, device = text_embd_.dtype, text_embd_.device
text_embd_padded = torch.zeros(size=sz, dtype=dtype, device=device)
text_embd_padded[:, :text_embd_.shape[1]] = text_embd_
text_embd_ = text_embd_padded
cross_view_conf_matrix = torch.matmul(text_embd_, vid_embd_.t())
elif self.task == "compute_video_embeddings":
return {"modalities": self.modalities, "embeddings": experts}
else:
cross_view_conf_matrix = sharded_cross_view_inner_product(
ind=ind,
vid_embds=experts,
text_embds=text_embd,
keep_missing_modalities=self.keep_missing_modalities,
l2renorm=self.l2renorm,
text_weights=moe_weights,
subspaces=self.modalities,
raw_captions=raw_captions,
merge_caption_similiarities=merge_caption_similiarities,
)
return {
"modalities": self.modalities,
"cross_view_conf_matrix": cross_view_conf_matrix,
"text_embds": text_embd,
"vid_embds": experts,
}
class GatedEmbeddingUnit(nn.Module):
"""
GatedEmbeddingUnit
"""
def __init__(self, input_dimension, output_dimension, use_bn):
super().__init__()
self.fc = nn.Linear(input_dimension, output_dimension)
self.cg = ContextGating(output_dimension, add_batch_norm=use_bn)
def forward(self, x):
x = self.fc(x)
x = self.cg(x)
x = F.normalize(x)
return x
class MimicCEGatedEmbeddingUnit(nn.Module):
def __init__(self, input_dimension, output_dimension, use_bn):
super().__init__()
_ = output_dimension
self.cg = ContextGating(input_dimension, add_batch_norm=use_bn)
def forward(self, x):
x = self.cg(x)
x = F.normalize(x)
return x
class ReduceDim(nn.Module):
"""
ReduceDim Module
"""
def __init__(self, input_dimension, output_dimension):
super().__init__()
self.fc = nn.Linear(input_dimension, output_dimension)
# self.fc = nn.Linear(input_dimension, 512)
# self.fc2 = nn.Linear(512, output_dimension)
def forward(self, x):
x = self.fc(x)
# x = self.fc2(F.relu(x))
x = F.normalize(x)
return x
class ContextGating(nn.Module):
"""
ContextGating Module
"""
def __init__(self, dimension, add_batch_norm=True):
super().__init__()
self.fc = nn.Linear(dimension, dimension)
self.add_batch_norm = add_batch_norm
self.batch_norm = nn.BatchNorm1d(dimension)
def forward(self, x):
x1 = self.fc(x)
if self.add_batch_norm:
x1 = self.batch_norm(x1)
x = torch.cat((x, x1), 1)
return F.glu(x, 1)
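# Illustrative shape check (added, not part of the original code): ContextGating
# concatenates the input with a gating projection and applies a GLU, so the output
# dimension equals the input dimension.
def _context_gating_shape_check():
    gate = ContextGating(dimension=8, add_batch_norm=False)
    x = torch.randn(4, 8)
    assert gate(x).shape == (4, 8)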
class GatedEmbeddingUnitReasoning(nn.Module):
def __init__(self, output_dimension):
super().__init__()
self.cg = ContextGatingReasoning(output_dimension)
def forward(self, x, mask):
x = self.cg(x, mask)
x = F.normalize(x)
return x
class SpatialMLP(nn.Module):
def __init__(self, dimension):
super().__init__()
self.cg1 = ContextGating(dimension)
self.cg2 = ContextGating(dimension)
def forward(self, x):
x = self.cg1(x)
return self.cg2(x)
class ContextGatingReasoning(nn.Module):
"""
ContextGatingReasoning
"""
def __init__(self, dimension, add_batch_norm=True):
super().__init__()
self.fc = nn.Linear(dimension, dimension)
self.add_batch_norm = add_batch_norm
self.batch_norm = nn.BatchNorm1d(dimension)
self.batch_norm2 = nn.BatchNorm1d(dimension)
def forward(self, x, x1):
x2 = self.fc(x)
if self.add_batch_norm:
x1 = self.batch_norm(x1)
x2 = self.batch_norm2(x2)
t = x1 + x2
x = torch.cat((x, t), 1)
return F.glu(x, 1)
class G_reason(nn.Module): # pylint: disable=invalid-name
"""
G_reason Module
"""
def __init__(self, same_dim, num_inputs, non_lin):
super().__init__()
self.g_reason_1_specific = nn.Linear(same_dim * num_inputs, same_dim)
self.g_reason_2_specific = nn.Linear(same_dim, same_dim)
self.non_lin = non_lin
def forward(self, x):
x = self.g_reason_1_specific(x) # B x 2D -> B x D
x = self.non_lin(x)
x = self.g_reason_2_specific(x)
return x
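# Illustrative shape check (added, not part of the original code): G_reason maps a
# concatenation of num_inputs same_dim-sized vectors back to same_dim.
def _g_reason_shape_check():
    g = G_reason(same_dim=8, num_inputs=2, non_lin=nn.ReLU())
    x = torch.randn(4, 16)
    assert g(x).shape == (4, 8)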
def sharded_cross_view_inner_product(vid_embds, text_embds, text_weights,
subspaces, l2renorm, ind,
keep_missing_modalities,
merge_caption_similiarities="avg", tol=1E-5,
raw_captions=None):
"""Compute a similarity matrix from sharded vectors.
Args:
    vid_embds (dict[str:torch.Tensor]): the video sub-embeddings that, when
        concatenated, form the whole video embedding. The ith shard has shape
        `B x F_i` (i.e. shards can differ in the last dimension).
    text_embds (dict[str:torch.Tensor]): the text sub-embeddings, each of shape
        `T x num_caps x F_i`.
    text_weights (torch.Tensor): mixture-of-experts weights for the shards in
        `text_embds`.
    l2renorm (bool): whether to l2 renormalize the full embeddings.
Returns:
    (torch.Tensor): a similarity matrix of size `(T * num_caps) x B`, reduced to
    `B x B` when multiple captions are merged with the "avg" mode.
NOTE: If multiple captions are provided, we can aggregate their similarities to
provide a single video-text similarity score.
"""
_ = raw_captions
bb = vid_embds[subspaces[0]].size(0)
tt, num_caps, _ = text_embds[subspaces[0]].size()
device = vid_embds[subspaces[0]].device
# unroll separate captions onto first dimension and treat them separately
sims = torch.zeros(tt * num_caps, bb, device=device)
text_weights = text_weights.view(tt * num_caps, -1)
if keep_missing_modalities:
# assign every expert/text inner product the same weight, even if the expert
# is missing
text_weight_tensor = torch.ones(tt * num_caps, bb, len(subspaces),
dtype=text_weights.dtype,
device=text_weights.device)
else:
# mark expert availabilities along the second axis
available = torch.ones(1, bb, len(subspaces), dtype=text_weights.dtype)
for ii, modality in enumerate(subspaces):
available[:, :, ii] = ind[modality]
available = available.to(text_weights.device)
msg = "expected `available` modality mask to only contain 0s or 1s"
assert set(torch.unique(available).cpu().numpy()).issubset(set([0, 1])), msg
# set the text weights along the first axis and combine with availabilities to
# produce a <T x B x num_experts> tensor
text_weight_tensor = text_weights.view(tt * num_caps, 1, len(subspaces)) * available
# normalise to account for missing experts
normalising_weights = text_weight_tensor.sum(2).view(tt * num_caps, bb, 1)
text_weight_tensor = torch.div(text_weight_tensor, normalising_weights)
if l2renorm:
raise NotImplementedError("Do not use renorm until availability fix is complete")
else:
l2_mass_text, l2_mass_vid = 1, 1
for idx, modality in enumerate(subspaces):
vid_embd_ = vid_embds[modality].reshape(bb, -1) / l2_mass_vid
text_embd_ = text_embds[modality].view(tt * num_caps, -1)
msg = "expected weights to be applied to text embeddings"
assert text_embd_.shape[0] == text_weights.shape[0], msg
text_embd_ = text_embd_ / l2_mass_text
weighting = text_weight_tensor[:, :, idx]
sims += weighting * torch.matmul(text_embd_, vid_embd_.t()) # (T x num_caps) x (B)
if l2renorm:
# if not (sims.max() < 1 + tol):
# import ipdb; ipdb.set_trace()
assert sims.max() < 1 + tol, "expected cosine similarities to be < 1"
assert sims.min() > -1 - tol, "expected cosine similarities to be > -1"
if torch.isnan(sims).sum().item():
raise ValueError("Found nans in similarity matrix!")
if num_caps > 1:
# aggregate similarities from different captions
if merge_caption_similiarities == "avg":
sims = sims.view(bb, num_caps, bb)
sims = torch.mean(sims, dim=1)
sims = sims.view(bb, bb)
elif merge_caption_similiarities == "indep":
pass
else:
msg = "unrecognised merge mode: {}"
raise ValueError(msg.format(merge_caption_similiarities))
return sims
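# Minimal illustrative call (added, not part of the original code): two modalities,
# one caption per video, uniform MoE weights, and every expert marked available.
# The modality names "rgb" and "audio" are arbitrary example keys.
def _sharded_similarity_example():
    bb, dim = 3, 4
    vid = {"rgb": torch.randn(bb, dim), "audio": torch.randn(bb, dim)}
    txt = {"rgb": torch.randn(bb, 1, dim), "audio": torch.randn(bb, 1, dim)}
    weights = torch.full((bb, 1, 2), 0.5)
    ind = {"rgb": torch.ones(bb), "audio": torch.ones(bb)}
    sims = sharded_cross_view_inner_product(
        vid_embds=vid, text_embds=txt, text_weights=weights,
        subspaces=["rgb", "audio"], l2renorm=False, ind=ind,
        keep_missing_modalities=False)
    assert sims.shape == (bb, bb)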
def sharded_single_view_inner_product(embds, subspaces, text_weights=None,
l2renorm=True):
"""Compute a similarity matrix from sharded vectors.
Args:
embds (dict[str:torch.Tensor]): the set of sub-embeddings that, when concatenated,
form the whole. The ith shard has shape `B x K x F_i` (i.e. they can
differ in the last dimension), or shape `B x F_i`
l2renorm (bool): whether to l2 normalize the full embedding.
Returns:
(torch.tensor): similarity matrix of size `BK x BK`.
"""
_ = subspaces
subspaces = list(embds.keys())
device = embds[subspaces[0]].device
shape = embds[subspaces[0]].shape
if len(shape) == 3:
bb, kk, _ = shape
num_embds = bb * kk
assert text_weights is not None, "Expected 3-dim tensors for text (+ weights)"
assert text_weights.shape[0] == bb
assert text_weights.shape[1] == kk
elif len(shape) == 2:
bb, _ = shape
num_embds = bb
assert text_weights is None, "Expected 2-dim tensors for non-text (no weights)"
else:
raise ValueError("input tensor with {} dims unrecognised".format(len(shape)))
sims = torch.zeros(num_embds, num_embds, device=device)
if l2renorm:
l2_mass = 0
for idx, modality in enumerate(subspaces):
embd_ = embds[modality]
if text_weights is not None:
# text_weights (i.e. moe_weights) are shared among subspace for video
embd_ = text_weights[:, :, idx:idx + 1] * embd_
embd_ = embds[modality].reshape(num_embds, -1)
l2_mass += embd_.pow(2).sum(1)
l2_mass = torch.sqrt(l2_mass.clamp(min=1E-6)).unsqueeze(1)
else:
l2_mass = 1
for idx, modality in enumerate(subspaces):
embd_ = embds[modality]
if text_weights is not None:
embd_ = text_weights[:, :, idx:idx + 1] * embd_
embd_ = embd_.reshape(num_embds, -1) / l2_mass
sims += torch.matmul(embd_, embd_.t())
if torch.isnan(sims).sum().item():
raise ValueError("Found nans in similarity matrix!")
return sims
def create_model(config: Dict = None, weights_path: str = None, device: str = None):
"""
Create CENet model.
Args:
config (`Dict`):
Config dict.
weights_path (`str`):
Pretrained checkpoint path, if None, build a model without pretrained weights.
device (`str`):
Model device, `cuda` or `cpu`.
Returns:
(`CENet`):
CENet model.
"""
if config is None:
config = {
"task": "retrieval",
"use_ce": "pairwise",
"text_dim": 768,
"l2renorm": False,
"expert_dims": OrderedDict([("audio", (1024, 768)), ("face", (512, 768)), ("i3d.i3d.0", (1024, 768)),
("imagenet.resnext101_32x48d.0", (2048, 768)),
("imagenet.senet154.0", (2048, 768)),
("ocr", (12900, 768)), ("r2p1d.r2p1d-ig65m.0", (512, 768)),
("scene.densenet161.0", (2208, 768)), ("speech", (5700, 768))]),
"vlad_clusters": {"ocr": 43, "text": 28, "audio": 8, "speech": 19, "detection-sem": 50},
"ghost_clusters": {"text": 1, "ocr": 1, "audio": 1, "speech": 1},
"disable_nan_checks": False,
"keep_missing_modalities": False,
"test_caption_mode": "indep",
"randomise_feats": "",
"feat_aggregation": {
"imagenet.senet154.0": {"fps": 25, "stride": 1, "pixel_dim": 256, "aggregate-axis": 1, "offset": 0,
"temporal": "avg", "aggregate": "concat", "type": "embed",
"feat_dims": {"embed": 2048, "logits": 1000}},
"trn.moments-trn.0": {"fps": 25, "offset": 0, "stride": 8, "pixel_dim": 256, "inner_stride": 5,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 1792, "logits": 339}},
"scene.densenet161.0": {"stride": 1, "fps": 25, "offset": 0, "temporal": "avg", "pixel_dim": 256,
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 2208, "logits": 1000}},
"i3d.i3d.0": {"fps": 25, "offset": 0, "stride": 25, "inner_stride": 1, "pixel_dim": 256,
"temporal": "avg",
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 1024, "logits": 400}},
"i3d.i3d.1": {"fps": 25, "offset": 0, "stride": 4, "inner_stride": 1, "pixel_dim": 256,
"temporal": "avg",
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 1024, "logits": 400}},
"moments_3d.moments-resnet3d50.0": {"fps": 25, "offset": 1, "stride": 8, "pixel_dim": 256,
"inner_stride": 5, "temporal": "avg", "aggregate": "concat",
"aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 2048, "logits": 3339}},
"s3dg.s3dg.1": {"fps": 10, "offset": 0, "stride": 8, "num_segments": None, "pixel_dim": 224,
"inner_stride": 1, "temporal": "avg", "aggregate": "concat", "aggregate-axis": 1,
"type": "embed", "feat_dims": {"embed": 1024, "logits": 512}},
"s3dg.s3dg.0": {"fps": 10, "offset": 0, "stride": 16, "num_segments": None, "pixel_dim": 256,
"inner_stride": 1, "temporal": "avg", "aggregate": "concat", "aggregate-axis": 1,
"type": "embed", "feat_dims": {"embed": 1024, "logits": 512}},
"r2p1d.r2p1d-ig65m.0": {"fps": 30, "offset": 0, "stride": 32, "inner_stride": 1, "pixel_dim": 256,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 512, "logits": 359}},
"r2p1d.r2p1d-ig65m.1": {"fps": 30, "offset": 0, "stride": 32, "inner_stride": 1, "pixel_dim": 256,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 512, "logits": 359}},
"r2p1d.r2p1d-ig65m-kinetics.0": {"fps": 30, "offset": 0, "stride": 32, "inner_stride": 1,
"pixel_dim": 256,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1,
"type": "embed", "feat_dims": {"embed": 512, "logits": 400}},
"r2p1d.r2p1d-ig65m-kinetics.1": {"fps": 30, "offset": 0, "stride": 8, "inner_stride": 1,
"pixel_dim": 256,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1,
"type": "embed", "feat_dims": {"embed": 512, "logits": 400}},
"moments_2d.resnet50.0": {"fps": 25, "stride": 1, "offset": 0, "pixel_dim": 256, "temporal": "avg",
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 2048, "logits": 1000}},
"imagenet.resnext101_32x48d.0": {"fps": 25, "stride": 1, "offset": 0, "pixel_dim": 256,
"temporal": "avg",
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 2048, "logits": 1000}},
"imagenet.resnext101_32x48d.1": {"fps": 25, "stride": 1, "offset": 0, "pixel_dim": 256,
"temporal": "avg",
"aggregate": "concat", "aggregate-axis": 1, "type": "embed",
"feat_dims": {"embed": 2048, "logits": 1000}},
"ocr": {"model": "yang", "temporal": "vlad", "type": "embed", "flaky": True, "binarise": False,
"feat_dims": {"embed": 300}},
"audio.vggish.0": {"model": "vggish", "flaky": True, "temporal": "vlad", "type": "embed",
"binarise": False},
"audio": {"model": "vggish", "flaky": True, "temporal": "vlad", "type": "embed", "binarise": False},
"antoine-rgb": {"model": "antoine", "temporal": "avg", "type": "embed", "feat_dims": {"embed": 2048}},
"flow": {"model": "antoine", "temporal": "avg", "type": "embed", "feat_dims": {"embed": 1024}},
"speech": {"model": "w2v", "flaky": True, "temporal": "vlad", "type": "embed", "binarise": False,
"feat_dims": {"embed": 300}},
"face": {"model": "antoine", "temporal": "avg", "flaky": True, "binarise": False},
"detection-sem": {"fps": 1, "stride": 3, "temporal": "vlad", "feat_type": "sem", "model": "detection",
"type": "embed"},
"moments-static.moments-resnet50.0": {"fps": 25, "stride": 1, "offset": 3, "pixel_dim": 256,
"temporal": "avg", "aggregate": "concat", "aggregate-axis": 1,
"type": "embed", "feat_dims": {"embed": 2048, "logits": 1000}}},
"ce_shared_dim": 768,
"trn_config": {},
"trn_cat": 0,
"include_self": 1,
"use_mish": 1,
"use_bn_reason": 1,
"num_h_layers": 0,
"num_g_layers": 3,
"kron_dets": False,
"freeze_weights": False,
"geometric_mlp": False,
"rand_proj": False,
"mimic_ce_dims": 0,
"coord_dets": False,
"concat_experts": False,
"spatial_feats": False,
"concat_mix_experts": False,
"verbose": False,
"num_classes": None,
}
ce_net_model = CENet(**config)
if weights_path is not None:
checkpoint = torch.load(weights_path, map_location="cpu")
state_dict = checkpoint["state_dict"]
# support backwards compatibility
deprecated = ["ce.moe_fc_bottleneck1", "ce.moe_cg", "ce.moe_fc_proj"]
for mod in deprecated:
for suffix in ("weight", "bias"):
key = f"{mod}.{suffix}"
if key in state_dict:
print(f"WARNING: Removing deprecated key {key} from model")
state_dict.pop(key)
ce_net_model.load_state_dict(state_dict)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
ce_net_model.to(device)
return ce_net_model
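# Illustrative helper (added, not part of the original file): builds an untrained
# CENet on CPU using the default config above; no particular checkpoint is assumed.
def _build_default_cenet():
    return create_model(config=None, weights_path=None, device="cpu")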
| 44.749001
| 130
| 0.563316
|
d8620e879e3a4bca7143f884ff106be7f63e3e21
| 1,824
|
py
|
Python
|
Task8.py
|
venkatchinmai/OPL-Assignments
|
71084f4029b9322c0415b0497abed2be061dfa75
|
[
"MIT"
] | null | null | null |
Task8.py
|
venkatchinmai/OPL-Assignments
|
71084f4029b9322c0415b0497abed2be061dfa75
|
[
"MIT"
] | null | null | null |
Task8.py
|
venkatchinmai/OPL-Assignments
|
71084f4029b9322c0415b0497abed2be061dfa75
|
[
"MIT"
] | null | null | null |
class add:
def __init__(self,number1,number2):
self.number1=number1
self.number2=number2
def __str__(self):
return "(+ "+str(self.number1)+" "+str(self.number2)+" )"
class mul:
def __init__(self,number3,number4):
self.number3=number3
self.number4=number4
def __str__(self):
return "(* "+str(self.number3)+" "+str(self.number4)+" )"
class num:
def __init__(self,number5):
self.number5=number5
def __str__(self):
return str(self.number5)
class cons:
def __init__(self,left,right):
self.left=left
self.right=right
class atom:
def __init__(self,at):
self.at=at
class E:
def __init__(self,opt):
self.opt=opt
def __str__(self):
if len(self.opt)==1:
return self.opt
elif len(self.opt) == 4:
    return str(iff(str(self.opt[1]), str(self.opt[2]), str(self.opt[3])))
else:
return 0
class iff:
def __init__(self,op1,op2,op3):
self.op1=op1
self.op2=op2
self.op3=op3
def __str__(self):
return "if(" + str(self.op1) + "," +str(self.op2)+ "," + str(self.op3) + ")"
def ee(*do):
for i in do:
opt=i
return(opt)
class val:
def __init__(self,a):
self.a=a
def __str__(self):
return str(self.a)
def value(self):
return str(self.a)
class Boo:
def __init__(self,bo):
self.bo=bo
def __str__(self):
return str(self.bo)
class prim:
def __init__(self,pr):
self.pr=pr
def value(self):
return str(self.pr)
# j1 = iff(Boo(True), num(6), num(6))
print(iff('>',iff("<",num(7),num(6)),num(8)))
# Cons( Atom("if"), Cons( Atom("true"), Cons( Atom("5", Cons( Atom("6"), Empty()))))
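# Illustrative example (added): the expression classes compose into prefix-style
# strings; the line below prints "(+ 3 (* 4 5 ) )".
print(add(num(3), mul(num(4), num(5))))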
| 21.458824
| 88
| 0.546601
|
c30ee7b543edc97e803f5f48f2accdff24f00aed
| 18,356
|
py
|
Python
|
graphsage/unsupervised_train.py
|
pyalex/GraphSAGE
|
683df699586a2724720fed60dc25c3523c4bf675
|
[
"MIT"
] | 3
|
2019-03-20T08:44:34.000Z
|
2021-02-26T12:30:38.000Z
|
graphsage/unsupervised_train.py
|
pyalex/GraphSAGE
|
683df699586a2724720fed60dc25c3523c4bf675
|
[
"MIT"
] | null | null | null |
graphsage/unsupervised_train.py
|
pyalex/GraphSAGE
|
683df699586a2724720fed60dc25c3523c4bf675
|
[
"MIT"
] | 1
|
2019-05-29T08:15:25.000Z
|
2019-05-29T08:15:25.000Z
|
from __future__ import division
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
from graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel
from graphsage.minibatch import EdgeMinibatchIterator
from graphsage.neigh_samplers import UniformNeighborSampler
from graphsage.utils import load_data_from_graph
from graph_tool import topology
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
# core params..
flags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.')
flags.DEFINE_float('learning_rate', 0.001, 'initial learning rate.')
flags.DEFINE_string("model_size", "small", "Can be big or small; model specific def'ns")
flags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.')
# left to default values in main experiments
flags.DEFINE_integer('epochs', 3, 'number of epochs to train.')
flags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.')
flags.DEFINE_integer('max_degree', 128, 'maximum node degree.')
flags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1')
flags.DEFINE_integer('samples_2', 25, 'number of samples in layer 2')
flags.DEFINE_integer('dim_1', 256, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_integer('dim_2', 256, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_boolean('random_context', False, 'Whether to use random context or direct edges')
flags.DEFINE_integer('neg_sample_size', 80, 'number of negative samples')
flags.DEFINE_integer('batch_size', 128, 'minibatch size.')
flags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.')
flags.DEFINE_integer('identity_dim', 0,
'Set to positive value to use identity embedding features of that dimension. Default 0.')
# logging, saving, validation settings etc.
flags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training')
flags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings')
flags.DEFINE_integer('validate_iter', 500, "how often to run a validation minibatch.")
flags.DEFINE_integer('validate_batch_size', 2048, "how many nodes per validation sample.")
flags.DEFINE_integer('gpu', 1, "which gpu to use.")
flags.DEFINE_integer('print_every', 50, "How often to print training info.")
flags.DEFINE_integer('max_total_steps', 10 ** 10, "Maximum total number of iterations")
flags.DEFINE_integer('walks_per_user', 20, "Walks per user")
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
def log_dir():
log_dir = FLAGS.base_log_dir + "/unsup-" + FLAGS.train_prefix.split("/")[-1]
log_dir += "/{model:s}_{model_size:s}_{lr:0.6f}/".format(
model=FLAGS.model,
model_size=FLAGS.model_size,
lr=FLAGS.learning_rate)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
# Define model evaluation function
def evaluate(sess, model, minibatch_iter, size=None, feed=None):
t_test = time.time()
feed_dict_val = minibatch_iter.val_feed_dict(size)
feed_dict_val.update(feed or {})
outs_val = sess.run([model.loss, model.ranks, model.mrr],
feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test)
def incremental_evaluate(sess, model, minibatch_iter, size):
t_test = time.time()
finished = False
val_losses = []
val_mrrs = []
iter_num = 0
while not finished:
feed_dict_val, finished, _ = minibatch_iter.incremental_val_feed_dict(size, iter_num)
iter_num += 1
outs_val = sess.run([model.loss, model.ranks, model.mrr],
feed_dict=feed_dict_val)
val_losses.append(outs_val[0])
val_mrrs.append(outs_val[2])
return np.mean(val_losses), np.mean(val_mrrs), (time.time() - t_test)
def save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod="", feed=None):
val_embeddings = []
finished = False
seen = set([])
nodes = []
iter_num = 0
name = "val"
while not finished:
feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num)
feed_dict_val.update(feed or {})
iter_num += 1
outs_val = sess.run([model.loss, model.mrr, model.outputs1],
feed_dict=feed_dict_val)
# ONLY SAVE FOR embeds1 because of planetoid
for i, edge in enumerate(edges):
if not edge[0] in seen:
val_embeddings.append(outs_val[-1][i, :])
nodes.append(edge[0])
seen.add(edge[0])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
val_embeddings = np.vstack(val_embeddings)
np.save(out_dir + name + mod + ".npy", val_embeddings)
with open(out_dir + name + mod + ".txt", "w") as fp:
fp.write("\n".join(map(str, nodes)))
def construct_placeholders():
# Define placeholders
placeholders = {
'batch1': tf.placeholder(tf.int32, shape=(None), name='batch1'),
'batch2': tf.placeholder(tf.int32, shape=(None), name='batch2'),
#'features': tf.placeholder(tf.float32, shape=emb_dim, name='features'),
# negative samples for all nodes in the batch
'neg_samples': tf.placeholder(tf.int32, shape=(None,), name='neg_samples'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
}
return placeholders
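# Illustrative sketch (added, not part of the original script): the placeholders are
# fed with integer node-id batches plus the scalar batch size; the ids below are
# arbitrary example values.
def _example_feed_dict():
    placeholders = construct_placeholders()
    return {placeholders['batch1']: [0, 1, 2],
            placeholders['batch2']: [3, 4, 5],
            placeholders['neg_samples']: [6, 7, 8, 9],
            placeholders['batch_size']: 3}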
def train(train_data, test_data=None):
G = train_data[0]
features_np = train_data[1]
id_map = train_data[2]
walks = train_data[-1]
reverse_id_map = {idx: v for v, idx in id_map.items()}
if features_np is not None:
# pad with dummy zero vector
features_np = np.vstack([features_np, np.zeros((features_np.shape[1],))])
placeholders = construct_placeholders()
minibatch = EdgeMinibatchIterator(
G,
id_map,
placeholders,
context_pairs=walks,
batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size
)
adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
adj_info = tf.Variable(adj_info_ph, trainable=False, name="adj_info")
features_ph = tf.placeholder(tf.float32, shape=features_np.shape)
features = tf.Variable(features_ph, trainable=False, name="features")
if FLAGS.model == 'graphsage_mean':
# Create model
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
model_size=FLAGS.model_size,
identity_dim=FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'gcn':
# Create model
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, 2 * FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, 2 * FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="gcn",
model_size=FLAGS.model_size,
identity_dim=FLAGS.identity_dim,
concat=False,
logging=True)
elif FLAGS.model == 'graphsage_seq':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
identity_dim=FLAGS.identity_dim,
aggregator_type="seq",
model_size=FLAGS.model_size,
logging=True)
elif FLAGS.model == 'graphsage_maxpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="maxpool",
model_size=FLAGS.model_size,
identity_dim=FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'graphsage_meanpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="meanpool",
model_size=FLAGS.model_size,
identity_dim=FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'n2v':
model = Node2VecModel(placeholders, features.shape[0],
minibatch.deg,
# 2x because graphsage uses concat
nodevec_dim=2 * FLAGS.dim_1,
lr=FLAGS.learning_rate)
else:
raise Exception('Error: model name unrecognized.')
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
config.allow_soft_placement = True
# Initialize session
sess = tf.Session(config=config)
merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)
# Init variables
sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj,
features_ph: features_np
})
# Train model
train_shadow_mrr = None
shadow_mrr = None
total_steps = 0
avg_time = 0.0
epoch_val_costs = []
up_adj_info = tf.assign(adj_info, adj_info_ph, name='up_adj')
up_features = tf.assign(features, features_ph, name='up_features')
for epoch in range(FLAGS.epochs):
minibatch.shuffle()
iter = 0
print('Epoch: %04d' % (epoch + 1))
epoch_val_costs.append(0)
while not minibatch.end():
# Construct feed dictionary
feed_dict = minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
t = time.time()
# Training step
outs = sess.run([
merged, model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1,
model.inputs1], feed_dict=feed_dict)
train_cost = outs[2]
train_mrr = outs[5]
if train_shadow_mrr is None:
                train_shadow_mrr = train_mrr
else:
train_shadow_mrr -= (1 - 0.99) * (train_shadow_mrr - train_mrr)
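            # the two branches above maintain an exponential moving average:
            # shadow <- 0.99 * shadow + 0.01 * value, i.e. a smoothed train MRR with decay 0.99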
if iter % FLAGS.validate_iter == 0:
# Validation
# inputs1, neg, true_loss, negative_loss = outs[-4:]
# distances = np.vstack([
# topology.shortest_distance(G, G.vertex(reverse_id_map[v]),
# [G.vertex(reverse_id_map[n]) for n in neg], directed=False)
# for v in inputs1
# ])
#
# print('Debug: true loss, negative loss', np.mean(true_loss), np.mean(negative_loss), distances)
sess.run(up_adj_info.op, feed_dict={adj_info_ph: minibatch.test_adj})
val_cost, ranks, val_mrr, duration = evaluate(sess, model, minibatch,
size=FLAGS.validate_batch_size)
epoch_val_costs[-1] += val_cost
sess.run(up_adj_info.op, feed_dict={adj_info_ph: minibatch.adj})
if shadow_mrr is None:
shadow_mrr = val_mrr
else:
shadow_mrr -= (1 - 0.99) * (shadow_mrr - val_mrr)
if total_steps % FLAGS.print_every == 0:
summary_writer.add_summary(outs[0], total_steps)
# Print results
avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
if total_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % iter,
"train_loss=", "{:.5f}".format(train_cost),
"train_mrr=", "{:.5f}".format(train_mrr),
"train_mrr_ema=", "{:.5f}".format(train_shadow_mrr), # exponential moving average
"val_loss=", "{:.5f}".format(val_cost),
"val_mrr=", "{:.5f}".format(val_mrr),
"val_mrr_ema=", "{:.5f}".format(shadow_mrr), # exponential moving average
"time=", "{:.5f}".format(avg_time))
iter += 1
total_steps += 1
if total_steps > FLAGS.max_total_steps:
break
if total_steps > FLAGS.max_total_steps:
break
print("Optimization Finished!")
tf.saved_model.simple_save(
sess, log_dir() + '/saved_model', placeholders, {'output': model.outputs1}
)
if FLAGS.model == "n2v":
# stopping the gradient for the already trained nodes
train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']],
dtype=tf.int32)
test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']],
dtype=tf.int32)
update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids))
no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids))
update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds))
no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds)))
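        # only the val/test embeddings (update_nodes) keep gradients; the train embeddings are
        # re-inserted through stop_gradient so their values stay fixed during n2v retraining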
model.context_embeds = update_nodes + no_update_nodes
sess.run(model.context_embeds)
# run random walks
from graphsage.utils import run_random_walks
nodes = [n for n in G.nodes_iter() if G.node[n]["val"] or G.node[n]["test"]]
start_time = time.time()
pairs = run_random_walks(G, nodes, num_walks=50)
walk_time = time.time() - start_time
test_minibatch = EdgeMinibatchIterator(G,
id_map,
placeholders, batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size,
context_pairs = pairs,
n2v_retrain=True,
fixed_n2v=True)
start_time = time.time()
print("Doing test training for n2v.")
test_steps = 0
for epoch in range(FLAGS.n2v_test_epochs):
test_minibatch.shuffle()
while not test_minibatch.end():
feed_dict = test_minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1], feed_dict=feed_dict)
if test_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % test_steps,
"train_loss=", "{:.5f}".format(outs[1]),
"train_mrr=", "{:.5f}".format(outs[-2]))
test_steps += 1
train_time = time.time() - start_time
save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod="-test")
print("Total time: ", train_time+walk_time)
print("Walk time: ", walk_time)
print("Train time: ", train_time)
def main(argv=None):
print("Loading training data..")
# train_data = load_data(FLAGS.train_prefix, load_walks=True)
train_data = load_data_from_graph(
graph_file=FLAGS.train_prefix + '/graph.gt',
features_file=FLAGS.train_prefix + '/doc2vec.npy',
labels_file=None,
map_file=FLAGS.train_prefix + '/id_map',
walks_file=FLAGS.train_prefix + '/revisions.json',
walks_per_user=FLAGS.walks_per_user
)
print("Done loading training data..")
train(train_data)
if __name__ == '__main__':
tf.app.run()
| 42.688372 | 120 | 0.590216 |
04772287f43a21a4e69da47db18178d4944fb412 | 8,599 | py | Python | datumaro/tests/test_RISE.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | ["MIT"] | 2 | 2020-03-16T03:41:27.000Z | 2020-03-16T03:53:01.000Z | datumaro/tests/test_RISE.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | ["MIT"] | 24 | 2020-11-13T18:43:15.000Z | 2022-03-12T00:21:52.000Z | datumaro/tests/test_RISE.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | ["MIT"] | 9 | 2021-05-17T07:00:03.000Z | 2021-06-26T02:15:10.000Z |
from collections import namedtuple
import numpy as np
from unittest import TestCase
from datumaro.components.extractor import Label, Bbox
from datumaro.components.launcher import Launcher
from datumaro.components.algorithms.rise import RISE
class RiseTest(TestCase):
def test_rise_can_be_applied_to_classification_model(self):
class TestLauncher(Launcher):
def __init__(self, class_count, roi, **kwargs):
self.class_count = class_count
self.roi = roi
def launch(self, inputs):
for inp in inputs:
yield self._process(inp)
def _process(self, image):
roi = self.roi
roi_area = (roi[1] - roi[0]) * (roi[3] - roi[2])
if 0.5 * roi_area < np.sum(image[roi[0]:roi[1], roi[2]:roi[3], 0]):
cls = 0
else:
cls = 1
cls_conf = 0.5
other_conf = (1.0 - cls_conf) / (self.class_count - 1)
return [
Label(i, attributes={
'score': cls_conf if cls == i else other_conf }) \
for i in range(self.class_count)
]
roi = [70, 90, 7, 90]
model = TestLauncher(class_count=3, roi=roi)
rise = RISE(model, max_samples=(7 * 7) ** 2, mask_width=7, mask_height=7)
image = np.ones((100, 100, 3))
heatmaps = next(rise.apply(image))
self.assertEqual(1, len(heatmaps))
heatmap = heatmaps[0]
self.assertEqual(image.shape[:2], heatmap.shape)
h_sum = np.sum(heatmap)
h_area = np.prod(heatmap.shape)
roi_sum = np.sum(heatmap[roi[0]:roi[1], roi[2]:roi[3]])
roi_area = (roi[1] - roi[0]) * (roi[3] - roi[2])
roi_den = roi_sum / roi_area
hrest_den = (h_sum - roi_sum) / (h_area - roi_area)
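        # RISE should attribute more saliency inside the ROI that drives the label,
        # so the mean heat inside the ROI must exceed the mean heat elsewhere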
self.assertLess(hrest_den, roi_den)
def test_rise_can_be_applied_to_detection_model(self):
ROI = namedtuple('ROI',
['threshold', 'x', 'y', 'w', 'h', 'label'])
class TestLauncher(Launcher):
def __init__(self, rois, class_count, fp_count=4, pixel_jitter=20, **kwargs):
self.rois = rois
self.roi_base_sums = [None, ] * len(rois)
self.class_count = class_count
self.fp_count = fp_count
self.pixel_jitter = pixel_jitter
@staticmethod
def roi_value(roi, image):
return np.sum(
image[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w, :])
def launch(self, inputs):
for inp in inputs:
yield self._process(inp)
def _process(self, image):
detections = []
for i, roi in enumerate(self.rois):
roi_sum = self.roi_value(roi, image)
roi_base_sum = self.roi_base_sums[i]
first_run = roi_base_sum is None
if first_run:
roi_base_sum = roi_sum
self.roi_base_sums[i] = roi_base_sum
cls_conf = roi_sum / roi_base_sum
if roi.threshold < roi_sum / roi_base_sum:
cls = roi.label
detections.append(
Bbox(roi.x, roi.y, roi.w, roi.h,
label=cls, attributes={'score': cls_conf})
)
if first_run:
continue
for j in range(self.fp_count):
if roi.threshold < cls_conf:
cls = roi.label
else:
cls = (i + j) % self.class_count
box = [roi.x, roi.y, roi.w, roi.h]
offset = (np.random.rand(4) - 0.5) * self.pixel_jitter
detections.append(
Bbox(*(box + offset),
label=cls, attributes={'score': cls_conf})
)
return detections
rois = [
ROI(0.3, 10, 40, 30, 10, 0),
ROI(0.5, 70, 90, 7, 10, 0),
ROI(0.7, 5, 20, 40, 60, 2),
ROI(0.9, 30, 20, 10, 40, 1),
]
        model = TestLauncher(class_count=3, rois=rois)
rise = RISE(model, max_samples=(7 * 7) ** 2, mask_width=7, mask_height=7)
image = np.ones((100, 100, 3))
heatmaps = next(rise.apply(image))
heatmaps_class_count = len(set([roi.label for roi in rois]))
self.assertEqual(heatmaps_class_count + len(rois), len(heatmaps))
# import cv2
# roi_image = image.copy()
# for i, roi in enumerate(rois):
# cv2.rectangle(roi_image, (roi.x, roi.y), (roi.x + roi.w, roi.y + roi.h), (32 * i) * 3)
# cv2.imshow('img', roi_image)
for c in range(heatmaps_class_count):
class_roi = np.zeros(image.shape[:2])
for i, roi in enumerate(rois):
if roi.label != c:
continue
class_roi[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w] \
+= roi.threshold
heatmap = heatmaps[c]
roi_pixels = heatmap[class_roi != 0]
h_sum = np.sum(roi_pixels)
h_area = np.sum(roi_pixels != 0)
h_den = h_sum / h_area
rest_pixels = heatmap[class_roi == 0]
r_sum = np.sum(rest_pixels)
r_area = np.sum(rest_pixels != 0)
r_den = r_sum / r_area
# print(r_den, h_den)
# cv2.imshow('class %s' % c, heatmap)
self.assertLess(r_den, h_den)
for i, roi in enumerate(rois):
heatmap = heatmaps[heatmaps_class_count + i]
h_sum = np.sum(heatmap)
h_area = np.prod(heatmap.shape)
roi_sum = np.sum(heatmap[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w])
roi_area = roi.h * roi.w
roi_den = roi_sum / roi_area
hrest_den = (h_sum - roi_sum) / (h_area - roi_area)
# print(hrest_den, h_den)
# cv2.imshow('roi %s' % i, heatmap)
self.assertLess(hrest_den, roi_den)
# cv2.waitKey(0)
@staticmethod
def DISABLED_test_roi_nms():
ROI = namedtuple('ROI',
['conf', 'x', 'y', 'w', 'h', 'label'])
class_count = 3
noisy_count = 3
rois = [
ROI(0.3, 10, 40, 30, 10, 0),
ROI(0.5, 70, 90, 7, 10, 0),
ROI(0.7, 5, 20, 40, 60, 2),
ROI(0.9, 30, 20, 10, 40, 1),
]
pixel_jitter = 10
detections = []
for i, roi in enumerate(rois):
detections.append(
Bbox(roi.x, roi.y, roi.w, roi.h,
label=roi.label, attributes={'score': roi.conf})
)
for j in range(noisy_count):
cls_conf = roi.conf * j / noisy_count
cls = (i + j) % class_count
box = [roi.x, roi.y, roi.w, roi.h]
offset = (np.random.rand(4) - 0.5) * pixel_jitter
detections.append(
Bbox(*(box + offset),
label=cls, attributes={'score': cls_conf})
)
import cv2
image = np.zeros((100, 100, 3))
for i, det in enumerate(detections):
roi = ROI(det.attributes['score'], *det.get_bbox(), det.label)
p1 = (int(roi.x), int(roi.y))
p2 = (int(roi.x + roi.w), int(roi.y + roi.h))
c = (0, 1 * (i % (1 + noisy_count) == 0), 1)
cv2.rectangle(image, p1, p2, c)
cv2.putText(image, 'd%s-%s-%.2f' % (i, roi.label, roi.conf),
p1, cv2.FONT_HERSHEY_SIMPLEX, 0.25, c)
cv2.imshow('nms_image', image)
cv2.waitKey(0)
nms_boxes = RISE.nms(detections, iou_thresh=0.25)
print(len(detections), len(nms_boxes))
for i, det in enumerate(nms_boxes):
roi = ROI(det.attributes['score'], *det.get_bbox(), det.label)
p1 = (int(roi.x), int(roi.y))
p2 = (int(roi.x + roi.w), int(roi.y + roi.h))
c = (0, 1, 0)
cv2.rectangle(image, p1, p2, c)
cv2.putText(image, 'p%s-%s-%.2f' % (i, roi.label, roi.conf),
p1, cv2.FONT_HERSHEY_SIMPLEX, 0.25, c)
cv2.imshow('nms_image', image)
cv2.waitKey(0)
| 37.225108 | 100 | 0.485522 |
08d1197bb0e66f87ae50de028f57b286cf564b17 | 9,660 | py | Python | atkinson/errors/reporters/trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | ["MIT"] | null | null | null | atkinson/errors/reporters/trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | ["MIT"] | 6 | 2018-10-18T17:03:05.000Z | 2021-12-08T15:03:56.000Z | atkinson/errors/reporters/trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | ["MIT"] | 4 | 2018-07-11T16:11:57.000Z | 2019-02-27T11:04:07.000Z |
#! /usr/bin/env python
""" Trello Card error reporter """
import re
from trollo import TrelloApi
from atkinson.config.manager import ConfigManager
from atkinson.errors.reporters import BaseReport
def get_trello_api():
"""
Construct an interface to trello using the trollo api
"""
conf = ConfigManager('trello.yml').config
return TrelloApi(conf['api_key'], conf['token'])
def get_columns(api, config):
"""
Get the trello column ids for a trello board given a api handle and the id
of the board.
:param api: A trollo Boards api handle.
:param dict config: The configuration data (see note below for details).
:return: A tuple of column trello ids
.. note::
Required configuration keys and values
* board_id The trello board unique id
* new_column The name(str) of the column where new cards are added
This would be the same name as seen in the Trello webui
* close_column The name(str) of the column where closed/inactive
are moved.
This would be the same name as seen in the Trello webui
"""
for key in ['board_id', 'new_column', 'close_column']:
if key not in config:
raise KeyError(f"A required key '{key}' is missing in the config")
column_data = {x['name']: x['id']
for x in api.get_list(config['board_id'])}
return (column_data[config['new_column']],
column_data[config['close_column']])
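# A minimal usage sketch, assuming a trello.yml with valid credentials; the board id and
# column names below are hypothetical, for illustration only:
#
#     api = get_trello_api()
#     config = {'board_id': '5d1234abcd',
#               'new_column': 'New Issues',
#               'close_column': 'Done'}
#     new_col_id, close_col_id = get_columns(api.boards, config)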
class TrelloCard(BaseReport):
""" Trello error card base class"""
def __init__(self, card_id, api, new_column, close_column):
"""
Class constructor
:param str card_id: The trello unique id for the card to work on
:param api: A trollo TrelloApi instance
:param str new_column: The trello unique id for a column to place
new cards
:param str close_column: The trello unique id for a column to place
completed cards
"""
self.__card_id = card_id
self.__card_data = None
self.__checklist_items = {}
self.__new_column = new_column
self.__close_column = close_column
self.__api = api
# seed data from trello
self._get_card_data()
self._get_checklist_data()
@classmethod
def new(cls, title, description, config):
"""
Create a TrelloCard instance
:param str title: A title for the trello card
:param str description: A description for the trello card
:param dict config: A configuration dictionary
:returns: A TrelloCard instance
.. note::
Required configuration keys and values
* board_id The trello board unique id
* new_column The name(str) of the column where new cards are added
This would be the same name as seen in the Trello webui
* close_column The name(str) of the column where closed/inactive
are moved.
This would be the same name as seen in the Trello webui
"""
api = get_trello_api()
# Get the trello ids for our columns in the config
new_column, close_column = get_columns(api.boards, config)
card_id = api.cards.new(title, new_column, description)['id']
return cls(card_id, api, new_column, close_column)
@classmethod
def get(cls, report_id, config):
"""
Get a report object based on the report id.
:param str report_id: The id of the report to construct
:type report_id: Trello unique id
:param dict config: Reporter configuration dictionary
:return: A TrelloCard instance based on the report_id
.. note::
Required configuration keys and values
* board_id The trello board unique id
* new_column The name(str) of the column where new cards are added
This would be the same name as seen in the Trello webui
* close_column The name(str) of the column where closed/inactive
are moved.
This would be the same name as seen in the Trello webui
"""
api = get_trello_api()
# Get the trello ids for our columns in the config
new_column, close_column = get_columns(api.boards, config)
return cls(report_id, api, new_column, close_column)
def _get_card_data(self):
""" Fetch the cards data from Trello """
self.__card_data = self.__api.cards.get(self.__card_id)
def _get_checklist_data(self):
""" Fetch the checklist data from Trello """
for checklist in self.__card_data.get('idChecklists', []):
data = self.__api.checklists.get(checklist)
check_data = {'id': data['id'], 'items': {}}
for item in data['checkItems']:
name, link = self._markdown_to_tuple(item['name'])
check_data['items'][name] = item
check_data['items'][name]['link'] = link
self.__checklist_items[data['name']] = check_data
    def _markdown_to_tuple(self, markdown):
        """ Extract a (name, link) tuple from a markdown formatted link """
        if markdown.find('[') != -1:
            match = re.search(r'\[(.+)\]\((.*)\)', markdown)
            if match:
                return (match.group(1), match.group(2))
        # fall back to the raw text (also covers a malformed markdown link)
        return (markdown, '')
def _dict_to_markdown(self, data):
""" Format a dict into a markdown link """
return f"[{data['name']}]({data.get('link', '')})"
def _add_checklist(self, checklist_name):
""" Add a new checklist """
self.__api.cards.new_checklist(self.__card_id, checklist_name)
def _update_checklist(self, checklist_items):
""" Check or uncheck checklist items """
# Makes sure we have the latest data from the card.
self._get_card_data()
self._get_checklist_data()
for checklist_name, list_items in checklist_items.items():
current_list = self.__checklist_items.get(checklist_name)
incoming = {x['name']: x['link'] for x in list_items}
            # Process the checklist items
on_card = {x for x in current_list['items']}
sorted_items = set(list(incoming))
checked = {x for x in current_list['items']
if current_list['items'][x]['state'] == 'complete'}
to_move_bottom = [current_list['items'][x]['id'] for x in checked]
# items to add
for item in sorted(sorted_items - on_card):
data = self._dict_to_markdown({'name': item,
'link': incoming[item]})
self.__api.checklists.new_checkItem(current_list['id'],
data)
# items to check
for item in sorted((on_card - checked) - sorted_items):
item_id = current_list['items'][item]['id']
self.__api.cards.check_checkItem(self.__card_id, item_id)
if item_id not in to_move_bottom:
to_move_bottom.append(item_id)
# items to uncheck and/or update
for item in sorted(checked & sorted_items):
work_item = current_list['items'][item]
self.__api.cards.uncheck_checkItem(self.__card_id,
work_item['id'])
self.__api.cards.move_checkItem(self.__card_id,
work_item['id'], 'top')
to_move_bottom.remove(work_item['id'])
# Check for naming updates
for item in sorted(on_card & sorted_items):
work_item = current_list['items'][item]
if work_item['link'] != incoming[item]:
new_name = self._dict_to_markdown({'name': item,
'link': incoming[item]})
self.__api.cards.rename_checkItem(self.__card_id,
work_item['id'],
new_name)
for item in to_move_bottom:
self.__api.cards.move_checkItem(self.__card_id,
item, 'bottom')
@property
def report_id(self):
return self.__card_id
def update(self, **kwargs):
""" Update the current report
:param kwargs: A dictionary of report items to update
"""
if 'description' in kwargs:
self.__api.cards.update_desc(self.__card_id, kwargs['description'])
if 'checklist' in kwargs:
for checklist_name in kwargs['checklist']:
if checklist_name not in self.__checklist_items:
self._add_checklist(checklist_name)
self._update_checklist(kwargs['checklist'])
# refresh card information.
self._get_card_data()
self._get_checklist_data()
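    # A hedged usage sketch (the description, checklist name, item names and links below
    # are purely illustrative):
    #
    #     card.update(description='Build failures for run 42',
    #                 checklist={'Failed packages': [
    #                     {'name': 'foo-1.0', 'link': 'https://example.com/foo'}]})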
def close(self):
""" Close report """
current_column = self.__card_data['idList']
clear_checklist = {x: {} for x in self.__checklist_items}
self._update_checklist(clear_checklist)
if current_column != self.__close_column:
self.__api.cards.update_idList(self.__card_id,
self.__close_column)
self.__api.cards.update_pos(self.__card_id, 'bottom')
| 39.753086 | 82 | 0.578261 |
6319a6fb2e0a8a1767f37d99b5d9dc59f4fcbaad | 2,958 | py | Python | sentry/web/frontend/generic.py | davedash/sentry | 8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94 | ["BSD-3-Clause"] | null | null | null | sentry/web/frontend/generic.py | davedash/sentry | 8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94 | ["BSD-3-Clause"] | null | null | null | sentry/web/frontend/generic.py | davedash/sentry | 8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94 | ["BSD-3-Clause"] | null | null | null |
"""
sentry.web.frontend.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.http import HttpResponseRedirect, Http404, HttpResponseNotModified, \
HttpResponse
from django.core.urlresolvers import reverse
from sentry.conf import settings
from sentry.permissions import can_create_projects
from sentry.web.decorators import login_required
from sentry.web.helpers import get_login_url, get_project_list, \
render_to_response
@login_required
def dashboard(request):
project_list = get_project_list(request.user, key='slug')
has_projects = len(project_list) > 1 or (len(project_list) == 1 and project_list.values()[0].pk != settings.PROJECT)
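    # has_projects is True when the user can see any project besides the default settings.PROJECT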
if not has_projects:
if not request.user.is_authenticated():
request.session['_next'] = request.build_absolute_uri()
return HttpResponseRedirect(get_login_url())
elif can_create_projects(request.user):
return HttpResponseRedirect(reverse('sentry-new-project'))
return render_to_response('sentry/dashboard.html', {}, request)
def static_media(request, path, root=None):
"""
Serve static files below a given point in the directory structure.
"""
from django.utils.http import http_date
from django.views.static import was_modified_since
import mimetypes
import os.path
import posixpath
import stat
import urllib
document_root = root or os.path.join(settings.MODULE_ROOT, 'static')
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
raise Http404("Directory indexes are not allowed here.")
if not os.path.exists(fullpath):
raise Http404('"%s" does not exist' % fullpath)
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
return HttpResponseNotModified(mimetype=mimetype)
    with open(fullpath, 'rb') as fp:
        contents = fp.read()
    response = HttpResponse(contents, mimetype=mimetype)
response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
response["Content-Length"] = len(contents)
return response
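# A minimal wiring sketch for static_media (illustrative only -- the URL prefix, urls
# module and pattern name below are assumptions, not part of this file):
#
#     from django.conf.urls.defaults import patterns, url
#
#     urlpatterns = patterns('',
#         url(r'^_media/(?P<path>.*)$', 'sentry.web.frontend.generic.static_media',
#             name='sentry-media'),
#     )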
| 37.443038 | 120 | 0.68526 |
9728de65224d5ef1838bb9f47b6810f1fe7f7264 | 68,183 | py | Python | src/bin/train_nstages-sparse-wavernn_dualgru_compact_lpcseg.py | ml-applications/cyclevae-vc-neuralvoco | a1976c127eaf9d2a3ef7a8a783839743ffb69c5c | ["Apache-2.0"] | 1 | 2020-08-27T14:05:38.000Z | 2020-08-27T14:05:38.000Z | src/bin/train_nstages-sparse-wavernn_dualgru_compact_lpcseg.py | ml-applications/cyclevae-vc-neuralvoco | a1976c127eaf9d2a3ef7a8a783839743ffb69c5c | ["Apache-2.0"] | null | null | null | src/bin/train_nstages-sparse-wavernn_dualgru_compact_lpcseg.py | ml-applications/cyclevae-vc-neuralvoco | a1976c127eaf9d2a3ef7a8a783839743ffb69c5c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
from dateutil.relativedelta import relativedelta
from distutils.util import strtobool
import logging
import os
import sys
import time
from collections import defaultdict
from tensorboardX import SummaryWriter
import numpy as np
import six
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
from utils import find_files
from utils import read_hdf5
from utils import read_txt
from vcneuvoco import GRU_WAVE_DECODER_DUALGRU_COMPACT_LPCSEG
from vcneuvoco import encode_mu_law
from radam import RAdam
from dataset import FeatureDatasetNeuVoco, padding
#import warnings
#warnings.filterwarnings('ignore')
#np.set_printoptions(threshold=np.inf)
#torch.set_printoptions(threshold=np.inf)
def data_generator(dataloader, device, batch_size, upsampling_factor, limit_count=None, seg=5, batch_sizes=None):
"""TRAINING BATCH GENERATOR
Args:
wav_list (str): list of wav files
feat_list (str): list of feat files
batch_size (int): batch size
wav_transform (func): preprocessing function for waveform
Return:
(object): generator instance
"""
while True:
# process over all of files
c_idx = 0
count = 0
for idx, batch in enumerate(dataloader):
slens = batch['slen'].data.numpy()
flens = batch['flen'].data.numpy()
max_slen = np.max(slens) ## get max samples length
max_flen = np.max(flens) ## get max samples length
xs = batch['x'][:,:max_slen].to(device)
feat = batch['feat'][:,:max_flen].to(device)
featfiles = batch['featfile']
n_batch_utt = feat.size(0)
if batch_sizes is not None:
batch_size = batch_sizes[np.random.randint(3)]
len_frm = max_flen
len_smpl = max_slen
x_ss = 0
f_ss = 0
x_bs = batch_size*upsampling_factor
f_bs = batch_size
delta_smpl = batch_size*upsampling_factor
delta_frm = batch_size
flens_acc = np.array(flens)
slens_acc = np.array(slens)
while True:
del_index_utt = []
idx_select = []
idx_select_full = []
for i in range(n_batch_utt):
if flens_acc[i] <= 0:
del_index_utt.append(i)
if len(del_index_utt) > 0:
xs = torch.LongTensor(np.delete(xs.cpu().data.numpy(), del_index_utt, axis=0)).to(device)
feat = torch.FloatTensor(np.delete(feat.cpu().data.numpy(), del_index_utt, axis=0)).to(device)
featfiles = np.delete(featfiles, del_index_utt, axis=0)
flens_acc = np.delete(flens_acc, del_index_utt, axis=0)
slens_acc = np.delete(slens_acc, del_index_utt, axis=0)
n_batch_utt -= len(del_index_utt)
for i in range(n_batch_utt):
if flens_acc[i] < f_bs:
idx_select.append(i)
if len(idx_select) > 0:
idx_select_full = torch.LongTensor(np.delete(np.arange(n_batch_utt), idx_select, axis=0)).to(device)
idx_select = torch.LongTensor(idx_select).to(device)
yield xs, feat, c_idx, idx, featfiles, x_bs, f_bs, x_ss, f_ss, n_batch_utt, del_index_utt, max_slen, \
slens_acc, idx_select, idx_select_full
for i in range(n_batch_utt):
flens_acc[i] -= delta_frm
slens_acc[i] -= delta_smpl
count += 1
if limit_count is not None and count > limit_count:
break
#len_frm -= delta_frm
len_smpl -= delta_smpl
#if len_frm > 0:
if len_smpl >= seg:
x_ss += delta_smpl
f_ss += delta_frm
else:
break
if limit_count is not None and count > limit_count:
break
c_idx += 1
#if c_idx > 0:
#if c_idx > 1:
#if c_idx > 2:
# break
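        # sentinel batch: c_idx == -1 signals the caller that a full pass over the data
        # (one epoch) has finished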
yield [], [], -1, -1, [], [], [], [], [], [], [], [], [], [], []
#def eval_generator(dataloader, device, batch_size, upsampling_factor, limit_count=None, seg=5):
def eval_generator(dataloader, device, batch_size, upsampling_factor, limit_count=None):
"""TRAINING BATCH GENERATOR
Args:
wav_list (str): list of wav files
feat_list (str): list of feat files
batch_size (int): batch size
wav_transform (func): preprocessing function for waveform
Return:
(object): generator instance
"""
while True:
# process over all of files
c_idx = 0
count = 0
for idx, batch in enumerate(dataloader):
slens = batch['slen'].data.numpy()
flens = batch['flen'].data.numpy()
max_slen = np.max(slens) ## get max samples length
max_flen = np.max(flens) ## get max samples length
xs = batch['x'][:,:max_slen].to(device)
feat = batch['feat'][:,:max_flen].to(device)
featfiles = batch['featfile']
n_batch_utt = feat.size(0)
len_frm = max_flen
len_smpl = max_slen
x_ss = 0
f_ss = 0
x_bs = batch_size*upsampling_factor
f_bs = batch_size
delta_smpl = batch_size*upsampling_factor
delta_frm = batch_size
flens_acc = np.array(flens)
while True:
del_index_utt = []
for i in range(n_batch_utt):
if flens_acc[i] >= f_bs:
flens_acc[i] -= delta_frm
else:
del_index_utt.append(i)
if len(del_index_utt) > 0:
xs = torch.LongTensor(np.delete(xs.cpu().data.numpy(), del_index_utt, axis=0)).to(device)
feat = torch.FloatTensor(np.delete(feat.cpu().data.numpy(), del_index_utt, axis=0)).to(device)
featfiles = np.delete(featfiles, del_index_utt, axis=0)
flens_acc = np.delete(flens_acc, del_index_utt, axis=0)
n_batch_utt -= len(del_index_utt)
yield xs, feat, c_idx, idx, featfiles, x_bs, f_bs, x_ss, f_ss, n_batch_utt, del_index_utt, max_slen
count += 1
if limit_count is not None and count > limit_count:
break
len_frm -= delta_frm
#len_smpl -= delta_smpl
if len_frm >= f_bs:
#if len_frm >= seg:
x_ss += delta_smpl
f_ss += delta_frm
else:
break
if limit_count is not None and count > limit_count:
break
c_idx += 1
#if c_idx > 0:
#if c_idx > 1:
#if c_idx > 2:
# break
yield [], [], -1, -1, [], [], [], [], [], [], [], []
def save_checkpoint(checkpoint_dir, model_waveform,
optimizer, numpy_random_state, torch_random_state, iterations):
"""FUNCTION TO SAVE CHECKPOINT
Args:
checkpoint_dir (str): directory to save checkpoint
model (torch.nn.Module): pytorch model instance
optimizer (Optimizer): pytorch optimizer instance
iterations (int): number of current iterations
"""
model_waveform.cpu()
checkpoint = {
"model_waveform": model_waveform.state_dict(),
"optimizer": optimizer.state_dict(),
"numpy_random_state": numpy_random_state,
"torch_random_state": torch_random_state,
"iterations": iterations}
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
torch.save(checkpoint, checkpoint_dir + "/checkpoint-%d.pkl" % iterations)
model_waveform.cuda()
logging.info("%d-iter checkpoint created." % iterations)
def write_to_tensorboard(writer, steps, loss):
"""Write to tensorboard."""
for key, value in loss.items():
writer.add_scalar(key, value, steps)
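# sparsify() below prunes the recurrent GRU weights (and optionally the output conv) in
# 1x16 blocks: per gate sub-matrix it keeps the highest-energy blocks plus the diagonal
# and zeroes the rest, annealing the kept density from the previous stage's value toward
# the target with the LPCNet-style schedule
#     density(t) = d_prev - (d_prev - d_target) * (1 - r)**5,  r = 1 - (t - t_start) / (t_end - t_start)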
def sparsify(model_waveform, iter_idx, t_start, t_end, interval, densities, densities_p=None, density_conv_s_c=None, density_conv_s_c_p=None,
density_out=None, density_out_p=None):
if iter_idx < t_start or ((iter_idx-t_start) % interval != 0 and iter_idx < t_end):
pass
else:
logging.info('sparsify: %ld %ld %ld %ld' % (iter_idx, t_start, t_end, interval))
p = model_waveform.gru.weight_hh_l0 #recurrent
nb = p.shape[0] // p.shape[1]
N = p.shape[0] // nb
N_16 = N // 16
ones = torch.diag(torch.ones(N, device=p.device))
#q = model_waveform.gru.weight_ih_l0 #input
#M = q.shape[0] // nb
#M_16 = M // 16
#M_row = q.shape[1]
#if M == M_row:
# if M == N:
# ones_ = ones
# else:
# ones_ = torch.diag(torch.ones(M, device=q.device))
if densities_p is not None:
for k in range(nb):
density = densities[k]
if iter_idx < t_end:
r = 1 - (iter_idx-t_start)/(t_end - t_start)
density = densities_p[k] - (densities_p[k]-densities[k])*(1 - r)**5
logging.info('%ld: %lf %lf %lf' % (k+1, densities_p[k], densities[k], density))
#recurrent weight
A = p[k*N:(k+1)*N, :]
L = (A - torch.diag(torch.diag(A))).transpose(1, 0).reshape(N, N_16, 16) #horizontal block structure (16) in input part, in real-time simultaneously computed for each 16 output using 2 registers (256x2 bits)
S = torch.sum(L*L, -1)
SS, _ = torch.sort(S.reshape(-1))
thresh = SS[round(N*N_16*(1-density))]
mask = torch.clamp(torch.repeat_interleave((S>=thresh).float(), 16, dim=1) + ones, max=1).transpose(1,0)
p[k*N:(k+1)*N, :] = p[k*N:(k+1)*N, :]*mask
# #input weight
# A = q[k*M:(k+1)*M, :]
# if M == M_row:
# L = (A - torch.diag(torch.diag(A))).transpose(1, 0).reshape(M, M_16, 16) #horizontal block structure (16) in input part, in real-time simultaneously computed for each 16 output using 2 registers (256x2 bits)
# else:
# L = A.transpose(1, 0).reshape(M_row, M_16, 16) #horizontal block structure (16) in input part, in real-time simultaneously computed for each 16 output using 2 registers (256x2 bits)
# S = torch.sum(L*L, -1)
# SS, _ = torch.sort(S.reshape(-1))
# thresh = SS[round(M_row*M_16*(1-density))]
# if M == M_row:
# mask = torch.clamp(torch.repeat_interleave((S>=thresh).float(), 16, dim=1) + ones_, max=1).transpose(1,0)
# else:
# mask = torch.repeat_interleave((S>=thresh).float(), 16, dim=1).transpose(1,0)
# q[k*M:(k+1)*M, :] = q[k*M:(k+1)*M, :]*mask
else:
for k in range(nb):
density = densities[k]
if iter_idx < t_end:
r = 1 - (iter_idx-t_start)/(t_end - t_start)
density = 1 - (1-densities[k])*(1 - r)**5
logging.info('%ld: 1 %lf %lf' % (k+1, densities[k], density))
#recurrent weight
A = p[k*N:(k+1)*N, :]
L = (A - torch.diag(torch.diag(A))).transpose(1, 0).reshape(N, N_16, 16)
S = torch.sum(L*L, -1)
SS, _ = torch.sort(S.reshape(-1))
thresh = SS[round(N*N_16*(1-density))]
mask = torch.clamp(torch.repeat_interleave((S>=thresh).float(), 16, dim=1) + ones, max=1).transpose(1,0)
p[k*N:(k+1)*N, :] = p[k*N:(k+1)*N, :]*mask
# #input weight
# A = q[k*M:(k+1)*M, :]
# if M == M_row:
# L = (A - torch.diag(torch.diag(A))).transpose(1, 0).reshape(M, M_16, 16)
# else:
# L = A.transpose(1, 0).reshape(M_row, M_16, 16)
# S = torch.sum(L*L, -1)
# SS, _ = torch.sort(S.reshape(-1))
# thresh = SS[round(M_row*M_16*(1-density))]
# if M == M_row:
# mask = torch.clamp(torch.repeat_interleave((S>=thresh).float(), 16, dim=1) + ones_, max=1).transpose(1,0)
# else:
# mask = torch.repeat_interleave((S>=thresh).float(), 16, dim=1).transpose(1,0)
# q[k*M:(k+1)*M, :] = q[k*M:(k+1)*M, :]*mask
#out.out
if density_out is not None:
s = model_waveform.out.out.weight #conv after upsampling before multiplication with waveform for GRU input
O = s.shape[0]
O_16 = O // 16
P = s.shape[1]
density = density_out
if iter_idx < t_end:
r = 1 - (iter_idx-t_start)/(t_end - t_start)
if density_out_p is not None:
density = density_out_p - (density_out_p-density_out)*(1 - r)**5
else:
density = 1 - (1-density_out)*(1 - r)**5
if density_out_p is not None:
logging.info('%lf %lf %lf' % (density_out_p, density_out, density))
else:
logging.info('1 %lf %lf' % (density_out, density))
A = s[:, :, 0]
L = A.transpose(1, 0).reshape(P, O_16, 16) #horizontal block structure (16) in input part, in real-time simultaneously computed for each 16 output using 2 registers (256x2 bits)
S = torch.sum(L*L, -1)
SS, _ = torch.sort(S.reshape(-1))
thresh = SS[round(P*O_16*(1-density))]
mask = torch.repeat_interleave((S>=thresh).float(), 16, dim=1).transpose(1,0)
s[:, :, 0] = s[:, :, 0]*mask
def main():
parser = argparse.ArgumentParser()
# path setting
parser.add_argument("--waveforms",
type=str, help="directory or list of wav files")
parser.add_argument("--waveforms_eval",
type=str, help="directory or list of evaluation wav files")
parser.add_argument("--feats", required=True,
type=str, help="directory or list of wav files")
parser.add_argument("--feats_eval", required=True,
type=str, help="directory or list of evaluation feat files")
parser.add_argument("--stats", required=True,
type=str, help="directory or list of evaluation wav files")
parser.add_argument("--expdir", required=True,
type=str, help="directory to save the model")
# network structure setting
parser.add_argument("--upsampling_factor", default=120,
type=int, help="number of dimension of aux feats")
parser.add_argument("--hidden_units_wave", default=384,
type=int, help="depth of dilation")
parser.add_argument("--hidden_units_wave_2", default=32,
type=int, help="depth of dilation")
parser.add_argument("--kernel_size_wave", default=7,
type=int, help="kernel size of dilated causal convolution")
parser.add_argument("--dilation_size_wave", default=1,
type=int, help="kernel size of dilated causal convolution")
parser.add_argument("--mcep_dim", default=50,
type=int, help="kernel size of dilated causal convolution")
parser.add_argument("--seg", default=2,
type=int, help="kernel size of dilated causal convolution")
parser.add_argument("--lpc", default=4,
type=int, help="kernel size of dilated causal convolution")
parser.add_argument("--right_size", default=0,
type=int, help="kernel size of dilated causal convolution")
# network training setting
parser.add_argument("--lr", default=1e-4,
type=float, help="learning rate")
parser.add_argument("--batch_size", default=10,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--epoch_count", default=100,
type=int, help="number of training epochs")
parser.add_argument("--do_prob", default=0,
type=float, help="dropout probability")
parser.add_argument("--batch_size_utt", default=5,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--batch_size_utt_eval", default=5,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--n_workers", default=2,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--n_quantize", default=256,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--causal_conv_wave", default=False,
type=strtobool, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--n_stage", default=4,
type=int, help="number of sparsification stages")
parser.add_argument("--t_start", default=20001,
type=int, help="iter idx to start sparsify")
parser.add_argument("--t_end", default=2520000,
type=int, help="iter idx to finish densitiy sparsify")
parser.add_argument("--interval", default=100,
type=int, help="interval in finishing densitiy sparsify")
parser.add_argument("--densities", default="0.27-0.27-0.39",
type=str, help="final densitiy of reset, update, new hidden gate matrices")
# other setting
parser.add_argument("--pad_len", default=3000,
type=int, help="seed number")
parser.add_argument("--save_interval_iter", default=5000,
type=int, help="interval steps to logr")
parser.add_argument("--save_interval_epoch", default=10,
type=int, help="interval steps to logr")
parser.add_argument("--log_interval_steps", default=50,
type=int, help="interval steps to logr")
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--resume", default=None,
type=str, help="model path to restart training")
parser.add_argument("--pretrained", default=None,
type=str, help="model path to restart training")
parser.add_argument("--string_path", default=None,
type=str, help="model path to restart training")
parser.add_argument("--GPU_device", default=None,
type=int, help="selection of GPU device")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
if args.GPU_device is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_device)
# make experimental directory
if not os.path.exists(args.expdir):
os.makedirs(args.expdir)
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if str(device) == "cpu":
raise ValueError('ERROR: Training by CPU is not acceptable.')
torch.backends.cudnn.benchmark = True #faster
#if args.pretrained is None:
if 'mel' in args.string_path:
mean_stats = torch.FloatTensor(read_hdf5(args.stats, "/mean_melsp"))
scale_stats = torch.FloatTensor(read_hdf5(args.stats, "/scale_melsp"))
args.excit_dim = 0
#mean_stats = torch.FloatTensor(np.r_[read_hdf5(args.stats, "/mean_feat_mceplf0cap")[:2], read_hdf5(args.stats, "/mean_melsp")])
#scale_stats = torch.FloatTensor(np.r_[read_hdf5(args.stats, "/scale_feat_mceplf0cap")[:2], read_hdf5(args.stats, "/scale_melsp")])
#args.excit_dim = 2
#mean_stats = torch.FloatTensor(np.r_[read_hdf5(args.stats, "/mean_feat_mceplf0cap")[:6], read_hdf5(args.stats, "/mean_melsp")])
#scale_stats = torch.FloatTensor(np.r_[read_hdf5(args.stats, "/scale_feat_mceplf0cap")[:6], read_hdf5(args.stats, "/scale_melsp")])
#args.excit_dim = 6
else:
mean_stats = torch.FloatTensor(read_hdf5(args.stats, "/mean_"+args.string_path.replace("/","")))
scale_stats = torch.FloatTensor(read_hdf5(args.stats, "/scale_"+args.string_path.replace("/","")))
if mean_stats.shape[0] > args.mcep_dim+2:
if 'feat_org_lf0' in args.string_path:
args.cap_dim = mean_stats.shape[0]-(args.mcep_dim+2)
args.excit_dim = 2+args.cap_dim
else:
args.cap_dim = mean_stats.shape[0]-(args.mcep_dim+3)
args.excit_dim = 2+1+args.cap_dim
#args.cap_dim = mean_stats.shape[0]-(args.mcep_dim+2)
#args.excit_dim = 2+args.cap_dim
else:
args.cap_dim = None
args.excit_dim = 2
# save args as conf
if args.n_stage < 3:
args.n_stage = 3
elif args.n_stage > 5:
args.n_stage = 5
#if args.batch_size < 10:
# args.batch_size = 10
torch.save(args, args.expdir + "/model.conf")
#batch_sizes = [None]*3
#batch_sizes[0] = int(args.batch_size*0.5)
#batch_sizes[1] = int(args.batch_size)
#batch_sizes[2] = int(args.batch_size*1.5)
#logging.info(batch_sizes)
# define network
model_waveform = GRU_WAVE_DECODER_DUALGRU_COMPACT_LPCSEG(
feat_dim=args.mcep_dim+args.excit_dim,
upsampling_factor=args.upsampling_factor,
hidden_units=args.hidden_units_wave,
hidden_units_2=args.hidden_units_wave_2,
kernel_size=args.kernel_size_wave,
dilation_size=args.dilation_size_wave,
seg=args.seg,
lpc=args.lpc,
causal_conv=args.causal_conv_wave,
right_size=args.right_size,
do_prob=args.do_prob)
logging.info(model_waveform)
criterion_ce = torch.nn.CrossEntropyLoss(reduction='none')
criterion_l1 = torch.nn.L1Loss(reduction='none')
# send to gpu
if torch.cuda.is_available():
model_waveform.cuda()
criterion_ce.cuda()
criterion_l1.cuda()
mean_stats = mean_stats.cuda()
scale_stats = scale_stats.cuda()
else:
logging.error("gpu is not available. please check the setting.")
sys.exit(1)
model_waveform.train()
model_waveform.scale_in.weight = torch.nn.Parameter(torch.unsqueeze(torch.diag(1.0/scale_stats.data),2))
model_waveform.scale_in.bias = torch.nn.Parameter(-(mean_stats.data/scale_stats.data))
for param in model_waveform.parameters():
param.requires_grad = True
for param in model_waveform.scale_in.parameters():
param.requires_grad = False
parameters = filter(lambda p: p.requires_grad, model_waveform.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000
logging.info('Trainable Parameters (waveform): %.3f million' % parameters)
module_list = list(model_waveform.conv.parameters())
#module_list += list(model_waveform.conv_s_c.parameters()) + list(model_waveform.conv_s_x.parameters())
module_list += list(model_waveform.conv_s_c.parameters()) + list(model_waveform.embed_wav.parameters())
module_list += list(model_waveform.gru.parameters()) + list(model_waveform.gru_2.parameters())
module_list += list(model_waveform.out.parameters())
optimizer = RAdam(module_list, lr=args.lr)
#optimizer = torch.optim.Adam(module_list, lr=args.lr)
# resume
if args.pretrained is not None:
checkpoint = torch.load(args.pretrained)
model_waveform.load_state_dict(checkpoint["model_waveform"])
epoch_idx = checkpoint["iterations"]
logging.info("pretrained from %d-iter checkpoint." % epoch_idx)
epoch_idx = 0
elif args.resume is not None:
checkpoint = torch.load(args.resume)
model_waveform.load_state_dict(checkpoint["model_waveform"])
optimizer.load_state_dict(checkpoint["optimizer"])
epoch_idx = checkpoint["iterations"]
logging.info("restored from %d-iter checkpoint." % epoch_idx)
else:
epoch_idx = 0
def zero_wav_pad(x): return padding(x, args.pad_len*args.upsampling_factor, value=0.0) # noqa: E704
def zero_feat_pad(x): return padding(x, args.pad_len, value=0.0) # noqa: E704
pad_wav_transform = transforms.Compose([zero_wav_pad])
pad_feat_transform = transforms.Compose([zero_feat_pad])
wav_transform = transforms.Compose([lambda x: encode_mu_law(x, args.n_quantize)])
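    # encode_mu_law() maps each waveform sample to one of args.n_quantize (default 256)
    # discrete mu-law classes, which are the targets of the cross-entropy loss below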
# define generator training
if os.path.isdir(args.waveforms):
filenames = sorted(find_files(args.waveforms, "*.wav", use_dir_name=False))
wav_list = [args.waveforms + "/" + filename for filename in filenames]
elif os.path.isfile(args.waveforms):
wav_list = read_txt(args.waveforms)
else:
logging.error("--waveforms should be directory or list.")
sys.exit(1)
if os.path.isdir(args.feats):
feat_list = [args.feats + "/" + filename for filename in filenames]
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
assert len(wav_list) == len(feat_list)
logging.info("number of training data = %d." % len(feat_list))
dataset = FeatureDatasetNeuVoco(wav_list, feat_list, pad_wav_transform, pad_feat_transform, args.upsampling_factor,
args.string_path, wav_transform=wav_transform)
#args.string_path, wav_transform=wav_transform, with_excit=False)
#args.string_path, wav_transform=wav_transform, with_excit=True)
dataloader = DataLoader(dataset, batch_size=args.batch_size_utt, shuffle=True, num_workers=args.n_workers)
#generator = data_generator(dataloader, device, args.batch_size, args.upsampling_factor, limit_count=1, seg=args.seg, batch_sizes=batch_sizes)
#generator = data_generator(dataloader, device, args.batch_size, args.upsampling_factor, limit_count=None, seg=args.seg, batch_sizes=batch_sizes)
#generator = data_generator(dataloader, device, args.batch_size, args.upsampling_factor, limit_count=1, seg=args.seg)
generator = data_generator(dataloader, device, args.batch_size, args.upsampling_factor, limit_count=None, seg=args.seg)
# define generator evaluation
if os.path.isdir(args.waveforms_eval):
filenames = sorted(find_files(args.waveforms_eval, "*.wav", use_dir_name=False))
        wav_list_eval = [args.waveforms_eval + "/" + filename for filename in filenames]
elif os.path.isfile(args.waveforms_eval):
wav_list_eval = read_txt(args.waveforms_eval)
else:
logging.error("--waveforms_eval should be directory or list.")
sys.exit(1)
if os.path.isdir(args.feats_eval):
feat_list_eval = [args.feats_eval + "/" + filename for filename in filenames]
    elif os.path.isfile(args.feats_eval):
feat_list_eval = read_txt(args.feats_eval)
else:
logging.error("--feats_eval should be directory or list.")
sys.exit(1)
assert len(wav_list_eval) == len(feat_list_eval)
logging.info("number of evaluation data = %d." % len(feat_list_eval))
dataset_eval = FeatureDatasetNeuVoco(wav_list_eval, feat_list_eval, pad_wav_transform, pad_feat_transform, args.upsampling_factor,
args.string_path, wav_transform=wav_transform)
#args.string_path, wav_transform=wav_transform, with_excit=False)
#args.string_path, wav_transform=wav_transform, with_excit=True)
dataloader_eval = DataLoader(dataset_eval, batch_size=args.batch_size_utt_eval, shuffle=False, num_workers=args.n_workers)
##generator_eval = eval_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=1, seg=args.seg)
#generator_eval = eval_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=None, seg=args.seg)
#generator_eval = eval_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=1)
#generator_eval = eval_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=None)
#generator_eval = data_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=1)
generator_eval = data_generator(dataloader_eval, device, args.batch_size, args.upsampling_factor, limit_count=None)
writer = SummaryWriter(args.expdir)
total_train_loss = defaultdict(list)
total_eval_loss = defaultdict(list)
flag_conv_s_c = False
#conv_s_c_params = model_waveform.conv_s_c.weight.shape[0]*model_waveform.conv_s_c.weight.shape[1]
#densities_conv_s_c = [None]*args.n_stage
#density_conv_s_c = float(16384/conv_s_c_params)
#if conv_s_c_params > 16384:
# density_delta_conv_s_c = (1-density_conv_s_c)/args.n_stage
# logging.info(density_conv_s_c)
# logging.info(density_delta_conv_s_c)
# densities_conv_s_c[0] = 1-density_delta_conv_s_c
# for i in range(1,args.n_stage):
# if i < args.n_stage-1:
# densities_conv_s_c[i] = densities_conv_s_c[i-1]-density_delta_conv_s_c
# else:
# densities_conv_s_c[i] = density_conv_s_c
# logging.info(densities_conv_s_c)
# flag_conv_s_c = True
flag_out = False
out_params = model_waveform.out.out.weight.shape[0]*model_waveform.out.out.weight.shape[1]
densities_out = [None]*args.n_stage
density_out = float(8192/out_params)
if out_params > 8192:
density_delta_out = (1-density_out)/args.n_stage
logging.info(density_out)
logging.info(density_delta_out)
densities_out[0] = 1-density_delta_out
for i in range(1,args.n_stage):
if i < args.n_stage-1:
densities_out[i] = densities_out[i-1]-density_delta_out
else:
densities_out[i] = density_out
logging.info(densities_out)
flag_out = True
#sparsify = lpcnet.Sparsify(2000, 40000, 400, (0.05, 0.05, 0.2))
density_deltas_ = args.densities.split('-')
density_deltas = [None]*len(density_deltas_)
for i in range(len(density_deltas_)):
density_deltas[i] = (1-float(density_deltas_[i]))/args.n_stage
t_deltas = [None]*args.n_stage
t_starts = [None]*args.n_stage
t_ends = [None]*args.n_stage
densities = [None]*args.n_stage
t_delta = args.t_end - args.t_start + 1
#t_deltas[0] = round((1/(args.n_stage-1))*0.6*t_delta)
if args.n_stage > 3:
t_deltas[0] = round((1/2)*0.2*t_delta)
else:
t_deltas[0] = round(0.2*t_delta)
t_starts[0] = args.t_start
t_ends[0] = args.t_start + t_deltas[0] - 1
densities[0] = [None]*len(density_deltas)
for j in range(len(density_deltas)):
densities[0][j] = 1-density_deltas[j]
for i in range(1,args.n_stage):
if i < args.n_stage-1:
#t_deltas[i] = round((1/(args.n_stage-1))*0.6*t_delta)
if args.n_stage > 3:
if i < 2:
t_deltas[i] = round((1/2)*0.2*t_delta)
else:
if args.n_stage > 4:
t_deltas[i] = round((1/2)*0.3*t_delta)
else:
t_deltas[i] = round(0.3*t_delta)
else:
t_deltas[i] = round(0.3*t_delta)
else:
#t_deltas[i] = round(0.4*t_delta)
t_deltas[i] = round(0.5*t_delta)
t_starts[i] = t_ends[i-1] + 1
t_ends[i] = t_starts[i] + t_deltas[i] - 1
densities[i] = [None]*len(density_deltas)
if i < args.n_stage-1:
for j in range(len(density_deltas)):
densities[i][j] = densities[i-1][j]-density_deltas[j]
else:
for j in range(len(density_deltas)):
densities[i][j] = float(density_deltas_[j])
logging.info(t_delta)
logging.info(t_deltas)
logging.info(t_starts)
logging.info(t_ends)
logging.info(args.interval)
logging.info(densities)
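    # worked example with the default --densities 0.27-0.27-0.39 and n_stage=4:
    # density_deltas = [0.1825, 0.1825, 0.1525], so the reset/update/new-gate densities
    # anneal 1.0 -> 0.8175/0.8175/0.8475 -> 0.635/0.635/0.695 -> ... -> 0.27/0.27/0.39,
    # with the four stages spanning roughly 10%/10%/30%/50% of [t_start, t_end]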
idx_stage = 0
# train
total = 0
iter_count = 0
batch_x_i = [None]*model_waveform.seg
batch_x_output_i = [None]*model_waveform.seg
batch_loss_ce = [None]*model_waveform.seg
batch_loss_prc = [None]*model_waveform.seg
batch_loss_ce_sum_select_ = [None]*model_waveform.seg
batch_loss_prc_sum_select_ = [None]*model_waveform.seg
loss_ce = [None]*model_waveform.seg
loss_prc = [None]*model_waveform.seg
eval_loss_ce = [None]*model_waveform.seg
eval_loss_ce_std = [None]*model_waveform.seg
eval_loss_prc = [None]*model_waveform.seg
eval_loss_prc_std = [None]*model_waveform.seg
min_eval_loss_ce = [None]*model_waveform.seg
min_eval_loss_ce_std = [None]*model_waveform.seg
min_eval_loss_prc = [None]*model_waveform.seg
min_eval_loss_prc_std = [None]*model_waveform.seg
for i in range(model_waveform.seg):
loss_ce[i] = []
loss_prc[i] = []
prev_n_batch_utt = args.batch_size_utt
min_eval_loss_ce[-1] = 99999999.99
min_eval_loss_ce_std[-1] = 99999999.99
iter_idx = 0
min_idx = -1
#min_eval_loss_ce[0] = 2.199576
#min_eval_loss_ce_std[0] = 0.817393
#iter_idx = 69908
#min_idx = 6
while idx_stage < args.n_stage-1 and iter_idx + 1 >= t_starts[idx_stage+1]:
idx_stage += 1
logging.info(idx_stage)
if args.resume is not None:
np.random.set_state(checkpoint["numpy_random_state"])
torch.set_rng_state(checkpoint["torch_random_state"])
logging.info("==%d EPOCH==" % (epoch_idx+1))
logging.info("Training data")
while epoch_idx < args.epoch_count:
start = time.time()
batch_x, batch_feat, c_idx, utt_idx, featfile, x_bs, f_bs, x_ss, f_ss, n_batch_utt, \
del_index_utt, max_slen, slens_acc, idx_select, idx_select_full = next(generator)
if c_idx < 0: # summarize epoch
# save current epoch model
numpy_random_state = np.random.get_state()
torch_random_state = torch.get_rng_state()
# report current epoch
text_log = "(EPOCH:%d) average optimization loss =" % (epoch_idx + 1)
for i in range(model_waveform.seg_1,-1,-1):
text_log += " [%d] %.6f (+- %.6f) %.6f (+- %.6f) %% ;" % (i+1, \
np.mean(loss_ce[i]), np.std(loss_ce[i]), np.mean(loss_prc[i]), np.std(loss_prc[i]))
logging.info("%s; (%.3f min., %.3f sec / batch)" % (text_log, total / 60.0, total / iter_count))
logging.info("estimated time until max. epoch = {0.days:02}:{0.hours:02}:{0.minutes:02}:"\
"{0.seconds:02}".format(relativedelta(seconds=int((args.epoch_count - (epoch_idx + 1)) * total))))
# compute loss in evaluation data
total = 0
iter_count = 0
for i in range(model_waveform.seg):
loss_ce[i] = []
loss_prc[i] = []
model_waveform.eval()
for param in model_waveform.parameters():
param.requires_grad = False
logging.info("Evaluation data")
while True:
with torch.no_grad():
start = time.time()
batch_x, batch_feat, c_idx, utt_idx, featfile, x_bs, f_bs, x_ss, f_ss, n_batch_utt, \
del_index_utt, max_slen, slens_acc, idx_select, idx_select_full = next(generator_eval)
if c_idx < 0:
break
x_es = x_ss+x_bs
f_es = f_ss+f_bs
logging.info("%d %d %d %d %d" % (max_slen, x_ss, x_bs, f_ss, f_bs))
if max_slen >= x_es:
if model_waveform.lpc > 0:
# lpc from 1st group to segth group (index t-(seg-1) to t) --> index lpc: t-(seg-1)-n_lpc, seg-1+lpc = lpc_offset
if x_ss-model_waveform.lpc >= 0:
batch_x_lpc = batch_x[:,x_ss-model_waveform.lpc:x_es-1]
else:
batch_x_lpc = F.pad(batch_x[:,:x_es-1], (-(x_ss-model_waveform.lpc), 0), "constant", args.n_quantize // 2)
# seg+seg-1 -> generate seg group samples [1st,2nd,segth_samples] in seg, seg-1+seg = seg_offset
if x_ss-model_waveform.seg >= 0:
batch_x_prev = batch_x[:,x_ss-model_waveform.seg:x_es-model_waveform.seg]
else:
batch_x_prev = F.pad(batch_x[:,:x_es-model_waveform.seg], (-(x_ss-model_waveform.seg), 0), "constant", args.n_quantize // 2)
batch_x = batch_x[:,x_ss:x_es]
batch_feat = batch_feat[:,f_ss:f_es]
else:
if model_waveform.lpc > 0:
# lpc from 1st group to segth group (index t-(seg-1) to t) --> index lpc: t-(seg-1)-n_lpc, seg-1+lpc = lpc_offset
if x_ss-model_waveform.lpc >= 0:
batch_x_lpc = batch_x[:,x_ss-model_waveform.lpc:-1]
else:
batch_x_lpc = F.pad(batch_x[:,:-1], (-(x_ss-model_waveform.lpc), 0), "constant", args.n_quantize // 2)
# seg+seg-1 -> generate seg group samples [1st,2nd,segth_samples] in seg, seg-1+seg = seg_offset
if x_ss-model_waveform.seg_offset >= 0:
batch_x_prev = batch_x[:,x_ss-model_waveform.seg:-model_waveform.seg]
else:
batch_x_prev = F.pad(batch_x[:,:-model_waveform.seg], (-(x_ss-model_waveform.seg), 0), "constant", args.n_quantize // 2)
batch_x = batch_x[:,x_ss:]
batch_feat = batch_feat[:,f_ss:]
# prev ground-truth wave. for prediction calc. for each 1 shift segment
if model_waveform.lpc > 0:
# B x T --> B x T x K --> B x T_seg x K x seg --> B x T_seg x seg x K
x_lpc = batch_x_lpc.unfold(1, model_waveform.lpc, 1).unfold(1, model_waveform.seg, model_waveform.seg).permute(0,1,3,2)
# feedforward
if f_ss > 0:
if len(del_index_utt) > 0:
h_x = torch.FloatTensor(np.delete(h_x.cpu().data.numpy(), del_index_utt, axis=1)).to(device)
h_x_2 = torch.FloatTensor(np.delete(h_x_2.cpu().data.numpy(), del_index_utt, axis=1)).to(device)
if model_waveform.lpc > 0:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, x_lpc=x_lpc, h=h_x, h_2=h_x_2, shift1=False)
else:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, h=h_x, h_2=h_x_2, shift1=False)
else:
if model_waveform.lpc > 0:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, x_lpc=x_lpc, shift1=False)
else:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, shift1=False)
j = np.random.randint(0, batch_x_output.shape[0])
logging.info("%s" % (os.path.join(os.path.basename(os.path.dirname(featfile[j])),os.path.basename(featfile[j]))))
# handle short ending
if len(idx_select) > 0:
logging.info('len_idx_select: '+str(len(idx_select)))
batch_loss_ce_sum_select = 0
batch_loss_prc_sum_select = 0
for j in range(len(idx_select)):
k = idx_select[j]
slens_utt = slens_acc[k]
logging.info('%s %d' % (featfile[k], slens_utt))
batch_x_i_ = batch_x[k,:slens_utt]
batch_x_output_i_ = batch_x_output[k,:slens_utt]
# loss
batch_loss_ce_sum_select += torch.mean(criterion_ce(batch_x_output_i_, batch_x_i_))
batch_loss_prc_sum_select += torch.mean(torch.sum(100*criterion_l1(F.softmax(batch_x_output_i_, dim=-1), F.one_hot(batch_x_i_, num_classes=args.n_quantize).float()), -1))
batch_loss_ce_sum_select /= len(idx_select)
batch_loss_prc_sum_select /= len(idx_select)
total_eval_loss["eval/loss_ce-%d"%(model_waveform.seg)].append(batch_loss_ce_sum_select.item())
loss_ce[-1].append(batch_loss_ce_sum_select.item())
total_eval_loss["eval/loss_prc-%d"%(model_waveform.seg)].append(batch_loss_prc_sum_select.item())
loss_prc[-1].append(batch_loss_prc_sum_select.item())
if len(idx_select_full) > 0:
logging.info('len_idx_select_full: '+str(len(idx_select_full)))
batch_x = torch.index_select(batch_x, 0, idx_select_full)
batch_x_output = torch.index_select(batch_x_output, 0, idx_select_full)
else:
text_log = "batch loss_select %.3f %.3f " % (batch_loss_ce_sum_select, batch_loss_prc_sum_select)
logging.info("%s (%.3f sec)" % (text_log, time.time() - start))
iter_count += 1
total += time.time() - start
continue
# loss
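                    # ce: mean cross-entropy over the n_quantize classes of the current chunk;
                    # prc: 100 * L1 distance between the softmax output and the one-hot target, logged as a percentage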
batch_loss_ce[-1] = torch.mean(criterion_ce(batch_x_output.reshape(-1, args.n_quantize), batch_x.reshape(-1))).item()
total_eval_loss["eval/loss_ce-%d"%(model_waveform.seg)].append(batch_loss_ce[-1])
loss_ce[-1].append(batch_loss_ce[-1])
batch_loss_prc[-1] = torch.mean(torch.sum(100*criterion_l1(F.softmax(batch_x_output, dim=-1), F.one_hot(batch_x, num_classes=args.n_quantize).float()), -1)).item()
total_eval_loss["eval/loss_prc-%d"%(model_waveform.seg)].append(batch_loss_prc[-1])
loss_prc[-1].append(batch_loss_prc[-1])
text_log = "batch eval loss [%d] %d %d %d %d %d :" % (c_idx+1, max_slen, x_ss, x_bs, f_ss, f_bs)
text_log += " %.3f %.3f %% ;" % (batch_loss_ce[-1], batch_loss_prc[-1])
logging.info("%s; (%.3f sec)" % (text_log, time.time() - start))
iter_count += 1
total += time.time() - start
            logging.info('summarize evaluation')
for key in total_eval_loss.keys():
total_eval_loss[key] = np.mean(total_eval_loss[key])
logging.info(f"(Steps: {iter_idx}) {key} = {total_eval_loss[key]:.4f}.")
write_to_tensorboard(writer, iter_idx, total_eval_loss)
total_eval_loss = defaultdict(list)
eval_loss_ce[-1] = np.mean(loss_ce[-1])
eval_loss_ce_std[-1] = np.std(loss_ce[-1])
eval_loss_prc[-1] = np.mean(loss_prc[-1])
eval_loss_prc_std[-1] = np.std(loss_prc[-1])
text_log = "(EPOCH:%d) average evaluation loss =" % (epoch_idx + 1)
text_log += " %.6f (+- %.6f) %.6f (+- %.6f) %% ;" % (eval_loss_ce[-1], eval_loss_ce_std[-1], \
eval_loss_prc[-1], eval_loss_prc_std[-1])
logging.info("%s; (%.3f min., %.3f sec / batch)" % (text_log, total / 60.0, total / iter_count))
if (eval_loss_ce[-1]+eval_loss_ce_std[-1]) <= (min_eval_loss_ce[-1]+min_eval_loss_ce_std[-1]) \
or (eval_loss_ce[-1] <= min_eval_loss_ce[-1]):
min_eval_loss_ce[-1] = eval_loss_ce[-1]
min_eval_loss_ce_std[-1] = eval_loss_ce_std[-1]
min_eval_loss_prc[-1] = eval_loss_prc[-1]
min_eval_loss_prc_std[-1] = eval_loss_prc_std[-1]
min_idx = epoch_idx
change_min_flag = True
if change_min_flag:
text_log = "min_eval_loss ="
text_log += " %.6f (+- %.6f) %.6f (+- %.6f) %% ;" % (min_eval_loss_ce[-1], min_eval_loss_ce_std[-1], \
min_eval_loss_prc[-1], min_eval_loss_prc_std[-1])
logging.info("%s; min_idx=%d" % (text_log, min_idx + 1))
#if ((epoch_idx + 1) % args.save_interval_epoch == 0) or (epoch_min_flag):
# logging.info('save epoch:%d' % (epoch_idx+1))
# save_checkpoint(args.expdir, model_waveform, optimizer, numpy_random_state, torch_random_state, epoch_idx + 1)
logging.info('save epoch:%d' % (epoch_idx+1))
save_checkpoint(args.expdir, model_waveform, optimizer, numpy_random_state, torch_random_state, epoch_idx + 1)
prev_n_batch_utt = args.batch_size_utt
total = 0
iter_count = 0
for i in range(model_waveform.seg):
loss_ce[i] = []
loss_prc[i] = []
epoch_idx += 1
np.random.set_state(numpy_random_state)
torch.set_rng_state(torch_random_state)
model_waveform.train()
for param in model_waveform.parameters():
param.requires_grad = True
for param in model_waveform.scale_in.parameters():
param.requires_grad = False
# start next epoch
if epoch_idx < args.epoch_count:
start = time.time()
logging.info("==%d EPOCH==" % (epoch_idx+1))
logging.info("Training data")
batch_x, batch_feat, c_idx, utt_idx, featfile, x_bs, f_bs, x_ss, f_ss, n_batch_utt, \
del_index_utt, max_slen, slens_acc, idx_select, idx_select_full = next(generator)
# feedforward and backpropagate current batch
if epoch_idx < args.epoch_count:
logging.info("%d iteration [%d]" % (iter_idx+1, epoch_idx+1))
x_es = x_ss+x_bs
f_es = f_ss+f_bs
logging.info("%d %d %d %d %d" % (max_slen, x_ss, x_bs, f_ss, f_bs))
if max_slen >= x_es:
if model_waveform.lpc > 0:
# lpc from 1st group to segth group (index t-(seg-1) to t) --> index lpc: t-(seg-1)-n_lpc, seg-1+lpc = lpc_offset
if x_ss-model_waveform.lpc_offset >= 0:
batch_x_lpc = batch_x[:,x_ss-model_waveform.lpc_offset:x_es-1]
else:
batch_x_lpc = F.pad(batch_x[:,:x_es-1], (-(x_ss-model_waveform.lpc_offset), 0), "constant", args.n_quantize // 2)
# seg+seg-1 -> generate seg group samples [1st,2nd,segth_samples] in seg, seg-1+seg = seg_offset
if x_ss-model_waveform.seg_offset >= 0:
batch_x_prev = batch_x[:,x_ss-model_waveform.seg_offset:x_es-model_waveform.seg]
else:
batch_x_prev = F.pad(batch_x[:,:x_es-model_waveform.seg], (-(x_ss-model_waveform.seg_offset), 0), "constant", args.n_quantize // 2)
batch_x = batch_x[:,x_ss:x_es]
if f_ss > 0:
batch_feat = batch_feat[:,f_ss-1:f_es]
else:
batch_feat = batch_feat[:,:f_es]
else:
if model_waveform.lpc > 0:
# lpc from 1st group to segth group (index t-(seg-1) to t) --> index lpc: t-(seg-1)-n_lpc, seg-1+lpc = lpc_offset
if x_ss-model_waveform.lpc_offset >= 0:
batch_x_lpc = batch_x[:,x_ss-model_waveform.lpc_offset:-1]
else:
batch_x_lpc = F.pad(batch_x[:,:-1], (-(x_ss-model_waveform.lpc_offset), 0), "constant", args.n_quantize // 2)
# seg+seg-1 -> generate seg group samples [1st,2nd,segth_samples] in seg, seg-1+seg = seg_offset
if x_ss-model_waveform.seg_offset >= 0:
batch_x_prev = batch_x[:,x_ss-model_waveform.seg_offset:-model_waveform.seg]
else:
batch_x_prev = F.pad(batch_x[:,:-model_waveform.seg], (-(x_ss-model_waveform.seg_offset), 0), "constant", args.n_quantize // 2)
batch_x = batch_x[:,x_ss:]
if f_ss > 0:
batch_feat = batch_feat[:,f_ss-1:]
# prev ground-truth wave. for prediction calc. for each 1 shift segment
if model_waveform.lpc > 0:
# B x T --> B x T x K --> B x T_seg x K x seg --> B x T_seg x seg x K
x_lpc = batch_x_lpc[:,:-model_waveform.seg_1].unfold(1, model_waveform.lpc, 1).unfold(1, model_waveform.seg, model_waveform.seg).permute(0,1,3,2)
for i in range(1,model_waveform.seg):
if i < model_waveform.seg_1:
x_lpc = torch.cat((x_lpc, batch_x_lpc[:,i:-model_waveform.seg_1+i].unfold(1, model_waveform.lpc, 1).unfold(1, model_waveform.seg, model_waveform.seg).permute(0,1,3,2)), 0)
else:
x_lpc = torch.cat((x_lpc, batch_x_lpc[:,i:].unfold(1, model_waveform.lpc, 1).unfold(1, model_waveform.seg, model_waveform.seg).permute(0,1,3,2)), 0)
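            # the seg shifted lag-window views are stacked along the batch axis so they line up with the
            # segment-shifted output groups that the model returns (sliced per group further below)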
# feedforward
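            # carry over the recurrent hidden states (h_x, h_x_2) from the previous sub-batch of the same
            # utterances; states of utterances that already ended (del_index_utt) are dropped per segment group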
if f_ss > 0:
if len(del_index_utt) > 0:
idx_batch_seg_s = 0
idx_batch_seg_e = prev_n_batch_utt
# handle hidden state per group of batch (because of 1 shift even though segment output)
for i in range(model_waveform.seg):
if i > 0:
h_x_ = torch.cat((h_x_, torch.FloatTensor(np.delete(h_x[:,idx_batch_seg_s:idx_batch_seg_e].cpu().data.numpy(), del_index_utt, axis=1)).to(device)), 1)
h_x_2_ = torch.cat((h_x_2_, torch.FloatTensor(np.delete(h_x_2[:,idx_batch_seg_s:idx_batch_seg_e].cpu().data.numpy(), del_index_utt, axis=1)).to(device)), 1)
else:
h_x_ = torch.FloatTensor(np.delete(h_x[:,idx_batch_seg_s:idx_batch_seg_e].cpu().data.numpy(), del_index_utt, axis=1)).to(device)
h_x_2_ = torch.FloatTensor(np.delete(h_x_2[:,idx_batch_seg_s:idx_batch_seg_e].cpu().data.numpy(), del_index_utt, axis=1)).to(device)
idx_batch_seg_s = idx_batch_seg_e
idx_batch_seg_e += prev_n_batch_utt
h_x = h_x_
h_x_2 = h_x_2_
if model_waveform.lpc > 0:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, h=h_x, h_2=h_x_2, x_lpc=x_lpc, do=True)
else:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, h=h_x, h_2=h_x_2, do=True)
else:
if model_waveform.lpc > 0:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, x_lpc=x_lpc, do=True, first=True)
else:
batch_x_output, h_x, h_x_2 = model_waveform(batch_feat, batch_x_prev, do=True, first=True)
prev_n_batch_utt = n_batch_utt
batch_loss = 0
# handle short ending
if len(idx_select) > 0:
logging.info('len_idx_select: '+str(len(idx_select)))
batch_loss_ce_sum_select = 0
for i in range(model_waveform.seg):
batch_loss_ce_sum_select_[i] = 0
batch_loss_prc_sum_select_[i] = 0
for j in range(len(idx_select)):
k = idx_select[j]
slens_utt = slens_acc[k]
logging.info('%s %d' % (featfile[k], slens_utt))
for i in range(model_waveform.seg): #from t-(seg-1) to t for 1 shift segment grouping
batch_x_i_ = batch_x[k,:slens_utt-model_waveform.seg_1+i] # ground truth not include seg_offset at first
# discard the leading index for t-(seg-1) due to shift 1 segment output
batch_x_output_i_ = batch_x_output[k,model_waveform.seg_1-i:slens_utt]
# loss
batch_loss_ce_sum_select__ = torch.mean(criterion_ce(batch_x_output_i_, batch_x_i_))
batch_loss_ce_sum_select += batch_loss_ce_sum_select__
batch_loss_ce_sum_select_[i] += batch_loss_ce_sum_select__
batch_loss_prc_sum_select_[i] += torch.mean(torch.sum(100*criterion_l1(F.softmax(batch_x_output_i_, dim=-1), F.one_hot(batch_x_i_, num_classes=args.n_quantize).float()), -1))
batch_loss += batch_loss_ce_sum_select
for i in range(model_waveform.seg):
batch_loss_ce_sum_select_[i] /= len(idx_select)
total_train_loss["train/loss_ce-%d"%(i+1)].append(batch_loss_ce_sum_select_[i].item())
loss_ce[i].append(batch_loss_ce_sum_select_[i].item())
batch_loss_prc_sum_select_[i] /= len(idx_select)
total_train_loss["train/loss_prc-%d"%(i+1)].append(batch_loss_prc_sum_select_[i].item())
loss_prc[i].append(batch_loss_prc_sum_select_[i].item())
if len(idx_select_full) > 0:
logging.info('len_idx_select_full: '+str(len(idx_select_full)))
batch_x = torch.index_select(batch_x, 0, idx_select_full)
idx_batch_seg_s = 0
idx_batch_seg_e = n_batch_utt
for i in range(model_waveform.seg):
if i > 0:
batch_x_output_ = torch.cat((batch_x_output_, torch.index_select(batch_x_output[idx_batch_seg_s:idx_batch_seg_e], 0, idx_select_full)), 0)
else:
batch_x_output_ = torch.index_select(batch_x_output[idx_batch_seg_s:idx_batch_seg_e], 0, idx_select_full)
idx_batch_seg_s = idx_batch_seg_e
idx_batch_seg_e += n_batch_utt
batch_x_output = batch_x_output_
else:
optimizer.zero_grad()
batch_loss.backward()
flag = False
for name, param in model_waveform.named_parameters():
if param.requires_grad:
grad_norm = param.grad.norm()
if torch.isnan(grad_norm) or torch.isinf(grad_norm):
flag = True
if flag:
logging.info("explode grad")
optimizer.zero_grad()
continue
torch.nn.utils.clip_grad_norm_(model_waveform.parameters(), 10)
optimizer.step()
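                    # staged sparsification: once iter_idx reaches the next stage's start step, advance idx_stage
                    # and prune the model weights (and optionally the conv_s_c / output layers) toward the target
                    # densities configured for that stage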
with torch.no_grad():
if idx_stage < args.n_stage-1 and iter_idx + 1 == t_starts[idx_stage+1]:
idx_stage += 1
if not flag_conv_s_c:
if not flag_out:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage])
else:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_out=densities_out[idx_stage], density_out_p=densities_out[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_out=densities_out[idx_stage])
else:
if not flag_out:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_conv_s_c_p=densities_conv_s_c[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_conv_s_c=densities_conv_s_c[idx_stage])
else:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_conv_s_c_p=densities_conv_s_c[idx_stage-1], \
density_out=densities_out[idx_stage], density_out_p=densities_out[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_out=densities_out[idx_stage])
text_log = "batch loss_select %lf " % (batch_loss.item())
logging.info("%s (%.3f sec)" % (text_log, time.time() - start))
iter_idx += 1
if iter_idx % args.save_interval_iter == 0:
logging.info('save iter:%d' % (iter_idx))
save_checkpoint(args.expdir, model_waveform, optimizer, np.random.get_state(), \
torch.get_rng_state(), iter_idx)
iter_count += 1
if iter_idx % args.log_interval_steps == 0:
                        logging.info('summarize training')
for key in total_train_loss.keys():
total_train_loss[key] = np.mean(total_train_loss[key])
logging.info(f"(Steps: {iter_idx}) {key} = {total_train_loss[key]:.4f}.")
write_to_tensorboard(writer, iter_idx, total_train_loss)
total_train_loss = defaultdict(list)
total += time.time() - start
continue
# batch segment handling
if len(idx_select) > 0:
n_batch_utt = len(idx_select_full)
idx_batch_seg_s = 0
idx_batch_seg_e = n_batch_utt
for i in range(model_waveform.seg): #from t-(seg-1) to t for 1 shift segment grouping
if i < model_waveform.seg_1:
batch_x_i[i] = batch_x[:,:-model_waveform.seg_1+i] # ground truth not include seg_offset at first
else:
batch_x_i[i] = batch_x
# discard the leading index for t-(seg-1) due to shift 1 segment output
batch_x_output_i[i] = batch_x_output[idx_batch_seg_s:idx_batch_seg_e,model_waveform.seg_1-i:]
idx_batch_seg_s = idx_batch_seg_e
idx_batch_seg_e += n_batch_utt
# loss
batch_loss_ce_sum = 0
for i in range(model_waveform.seg_1,-1,-1):
batch_loss_ce_ = torch.mean(criterion_ce(batch_x_output_i[i].reshape(-1, args.n_quantize), batch_x_i[i].reshape(-1)).reshape(batch_x_output_i[i].shape[0], -1), -1)
batch_loss_ce[i] = batch_loss_ce_.mean().item()
total_train_loss["train/loss_ce-%d"%(i+1)].append(batch_loss_ce[i])
loss_ce[i].append(batch_loss_ce[i])
batch_loss_ce_sum += batch_loss_ce_.sum()
batch_loss_prc[i] = torch.mean(torch.sum(100*criterion_l1(F.softmax(batch_x_output_i[i], dim=-1), F.one_hot(batch_x_i[i], num_classes=args.n_quantize).float()), -1)).item()
total_train_loss["train/loss_prc-%d"%(i+1)].append(batch_loss_prc[i])
loss_prc[i].append(batch_loss_prc[i])
                if i == model_waveform.seg_1:
                    # log one randomly picked utterance of the batch (separate index so the loop variable is not clobbered)
                    j = np.random.randint(0, batch_x_output_i[i].shape[0])
                    logging.info("%s" % (os.path.join(os.path.basename(os.path.dirname(featfile[j])),os.path.basename(featfile[j]))))
batch_loss += batch_loss_ce_sum
optimizer.zero_grad()
batch_loss.backward()
flag = False
for name, param in model_waveform.named_parameters():
if param.requires_grad:
grad_norm = param.grad.norm()
if torch.isnan(grad_norm) or torch.isinf(grad_norm):
flag = True
if flag:
logging.info("explode grad")
optimizer.zero_grad()
continue
torch.nn.utils.clip_grad_norm_(model_waveform.parameters(), 10)
optimizer.step()
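            # same staged sparsification schedule as in the short-ending branch above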
with torch.no_grad():
if idx_stage < args.n_stage-1 and iter_idx + 1 == t_starts[idx_stage+1]:
idx_stage += 1
if not flag_conv_s_c:
if not flag_out:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage])
else:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_out=densities_out[idx_stage], density_out_p=densities_out[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_out=densities_out[idx_stage])
else:
if not flag_out:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_conv_s_c_p=densities_conv_s_c[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_conv_s_c=densities_conv_s_c[idx_stage])
else:
if idx_stage > 0:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], densities_p=densities[idx_stage-1], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_conv_s_c_p=densities_conv_s_c[idx_stage-1], \
density_out=densities_out[idx_stage], density_out_p=densities_out[idx_stage-1])
else:
sparsify(model_waveform, iter_idx + 1, t_starts[idx_stage], t_ends[idx_stage], args.interval, densities[idx_stage], \
density_conv_s_c=densities_conv_s_c[idx_stage], density_out=densities_out[idx_stage])
text_log = "batch loss [%d] %d %d %d %d %d :" % (c_idx+1, max_slen, x_ss, x_bs, f_ss, f_bs)
for i in range(model_waveform.seg_1,-1,-1):
text_log += " [%d] %.3f %.3f %% ;" % (i+1, batch_loss_ce[i], batch_loss_prc[i])
logging.info("%s; (%.3f sec)" % (text_log, time.time() - start))
iter_idx += 1
if iter_idx % args.save_interval_iter == 0:
logging.info('save iter:%d' % (iter_idx))
save_checkpoint(args.expdir, model_waveform, optimizer, np.random.get_state(), torch.get_rng_state(), iter_idx)
iter_count += 1
if iter_idx % args.log_interval_steps == 0:
                logging.info('summarize training')
for key in total_train_loss.keys():
total_train_loss[key] = np.mean(total_train_loss[key])
logging.info(f"(Steps: {iter_idx}) {key} = {total_train_loss[key]:.4f}.")
write_to_tensorboard(writer, iter_idx, total_train_loss)
total_train_loss = defaultdict(list)
total += time.time() - start
# save final model
model_waveform.cpu()
torch.save({"model_waveform": model_waveform.state_dict()}, args.expdir + "/checkpoint-final.pkl")
logging.info("final checkpoint created.")
if __name__ == "__main__":
main()
| 54.5464
| 228
| 0.572415
|
15023a4765a5f5697f5bb6a63ada8e3acba50482
| 236
|
py
|
Python
|
abing/backend/abing/crud/__init__.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 5
|
2020-12-04T14:15:26.000Z
|
2020-12-30T09:11:09.000Z
|
abing/backend/abing/crud/__init__.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 8
|
2020-12-20T16:33:30.000Z
|
2021-01-06T01:56:55.000Z
|
abing/backend/abing/crud/__init__.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 1
|
2021-01-06T15:25:19.000Z
|
2021-01-06T15:25:19.000Z
|
from .crud_user import user
from .crud_experiment import experiment
from .crud_arm import arm
from .crud_feature import feature
from .crud_allocation import allocation
from .crud_event import event
from .crud_event_log import event_log
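# Note (assumed layout): each name imported above is the CRUD helper object defined in its crud_* module,
# so callers can use e.g. `from abing import crud; crud.user` (hypothetical usage) as a single entry point.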
| 29.5
| 39
| 0.851695
|
6fa2cb92217d32b16a38dd87411f67b02acdf195
| 2,840
|
py
|
Python
|
MODULES/Discovery_RemoteSystemDiscovery_GetDomainIPAddress.py
|
hellorubbish/viperpython
|
42d9ea572fb914059039656b176daca7d7fd5a77
|
[
"BSD-3-Clause"
] | 1
|
2022-01-07T01:24:32.000Z
|
2022-01-07T01:24:32.000Z
|
MODULES/Discovery_RemoteSystemDiscovery_GetDomainIPAddress.py
|
hellorubbish/viperpython
|
42d9ea572fb914059039656b176daca7d7fd5a77
|
[
"BSD-3-Clause"
] | null | null | null |
MODULES/Discovery_RemoteSystemDiscovery_GetDomainIPAddress.py
|
hellorubbish/viperpython
|
42d9ea572fb914059039656b176daca7d7fd5a77
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# @File : SimplePostPowershellModule.py
# @Date : 2019/1/12
# @Desc :
from Lib.ModuleAPI import *
class PostModule(PostMSFPowershellFunctionModule):
NAME = "获取域主机的IP信息"
DESC = "默认收集所控主机的IP地址.\n如果需要收集域内其他主机,如域控或其他域用户ip,请输入主机名作为参数"
MODULETYPE = TAG2CH.Discovery
PLATFORM = ["Windows"] # 平台
PERMISSIONS = ["Administrator", "SYSTEM", ] # 所需权限
ATTCK = ["T1018"] # ATTCK向量
README = ["https://www.yuque.com/funnywolfdoc/viperdoc/hk22tf"]
REFERENCES = ["https://attack.mitre.org/techniques/T1018/"]
AUTHOR = "Viper"
OPTIONS = register_options([
        OptionStr(name='ComputerName', name_tag="主机名", desc="需要查询的主机名"),  # name_tag: "Hostname"; desc: "hostname to query"
        OptionBool(name='AllComputer', name_tag="所有主机", desc="查询域内所有主机的IP地址", default=False),  # name_tag: "All hosts"; desc: "query the IP addresses of all hosts in the domain"
])
def __init__(self, sessionid, hid, custom_param):
super().__init__(sessionid, hid, custom_param)
self.set_script("PowerView_dev.ps1") # 设置目标机执行的脚本文件
def check(self):
"""执行前的检查函数"""
session = Session(self._sessionid)
if session.is_windows is not True:
return False, "此模块只支持Windows的Meterpreter"
all_computer = self.param('AllComputer')
computerName = self.param('ComputerName')
if all_computer == True:
if session.is_in_domain:
execute_string = "Get-DomainComputer | select name | Resolve-IPAddress | ConvertTo-JSON -maxDepth 2"
else:
return False, "获取域内其他主机IP地址时,此Session必须在域中"
else:
if computerName is not None:
execute_string = "Resolve-IPAddress -ComputerName {} | ConvertTo-JSON -maxDepth 2".format(computerName)
else:
execute_string = "Resolve-IPAddress|ConvertTo-JSON -maxDepth 2"
self.set_execute_string(execute_string)
return True, None
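        # Note: every PowerView command above pipes through ConvertTo-JSON, so callback() can parse the
        # result via deal_powershell_json_result (a JSON array for several hosts, a single object for one).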
def callback(self, status, message, data):
if status:
powershell_json_output = self.deal_powershell_json_result(data)
if powershell_json_output is not None:
if isinstance(powershell_json_output, list):
for one in powershell_json_output:
ouputstr = "主机名: {} IP地址:{}".format(one.get('ComputerName'), one.get('IPAddress'))
self.log_good(ouputstr)
elif isinstance(powershell_json_output, dict):
ouputstr = "主机名: {} IP地址:{}".format(powershell_json_output.get('ComputerName'),
powershell_json_output.get('IPAddress'))
self.log_good(ouputstr)
else:
self.log_error("脚本无有效输出")
else:
self.log_error("脚本无有效输出")
else:
self.log_error("模块执行失败")
self.log_error(message)
| 39.444444
| 119
| 0.601056
|
07e85f03a301e701b835b0e0ef561fac76ce3cca
| 126
|
py
|
Python
|
024-raw-sql/student/admin.py
|
karuvally/Django-ORM-Mastery-DJ003
|
5792d717185b231449d41bd4ef82d6b4367d4722
|
[
"MIT"
] | 33
|
2021-06-08T21:49:24.000Z
|
2022-03-06T22:31:59.000Z
|
024-raw-sql/student/admin.py
|
WilliamOtieno/Django-ORM-Mastery-DJ003
|
0eca2d2408bfc1112b7092fbdce1c5f188a428d3
|
[
"MIT"
] | null | null | null |
024-raw-sql/student/admin.py
|
WilliamOtieno/Django-ORM-Mastery-DJ003
|
0eca2d2408bfc1112b7092fbdce1c5f188a428d3
|
[
"MIT"
] | 33
|
2021-06-09T12:43:17.000Z
|
2022-03-29T08:16:12.000Z
|
from django.contrib import admin
from . import models
admin.site.register(models.Student)
admin.site.register(models.Teacher)
| 25.2
| 35
| 0.825397
|
fbfc60a4538ffde2e57e490294f23492856c7edf
| 2,252
|
py
|
Python
|
LDAP/scripts/modify_add_users_to_group.py
|
IdahoLabCuttingBoard/LinuxSA
|
a4475623e34f40a47c11d87a4c642fe59810707e
|
[
"MIT"
] | null | null | null |
LDAP/scripts/modify_add_users_to_group.py
|
IdahoLabCuttingBoard/LinuxSA
|
a4475623e34f40a47c11d87a4c642fe59810707e
|
[
"MIT"
] | null | null | null |
LDAP/scripts/modify_add_users_to_group.py
|
IdahoLabCuttingBoard/LinuxSA
|
a4475623e34f40a47c11d87a4c642fe59810707e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Battelle Energy Alliance, LLC
import subprocess
import getpass
import argparse
import sys
import re
import textwrap
from argparse import RawTextHelpFormatter
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../lib'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../SSH/lib'))
import myldap
import myssh
### Arguments ######################################################################################################################
parser = argparse.ArgumentParser(
description='Add users to existing group',
epilog=textwrap.dedent('''
Examples:
%(prog)s -g GROUP -u user1 user2 user3
'''),
formatter_class=RawTextHelpFormatter
)
parser.add_argument('-g', '--group', help="Group that users will be added to")
parser.add_argument('-u', '--users', nargs='+', help="Add space separated list for each user: -u USERNAME user1 user2")
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if not (args.group):
parser.error('Must supply a group to which users will be added with -g or --group')
if not (args.users):
parser.error('Must supply a list of space separated new users for the specified group with -u or --users')
##########################################################################################################################################
### Pass #####################################################################################
gen_file = "/path/to//gen_pass.pyc"
base_pass = getpass.getpass('Please Enter Password:')
ldap_pass = subprocess.check_output(['python', gen_file, base_pass, 'SERVER']).rstrip()
ldap_obj = myldap.MyLDAP()
conn = ldap_obj.manager_login(provided_passwd=ldap_pass)
group = args.group
users = args.users
group_entries = ldap_obj.group_search(conn, group, exact=1)
if len(group_entries) != 1:
sys.exit("\n\nCould not find exactly one group with the name of {}!\n".format(group))
group_entry_obj = group_entries[0]
dn = group_entry_obj.dn
print(dn)
for user in users:
ldap_obj.add_to_ldap_list(conn, dn, user, user)
| 32.171429
| 139
| 0.585702
|
afb75e9fe19f963cffee9c7a11e01cf4345281da
| 1,148
|
py
|
Python
|
tests/test_download.py
|
rickh94/airtable_local_backup
|
69dfc1aef30cd92a3f8e0ddc0cae8a9ba5122243
|
[
"MIT"
] | 29
|
2017-10-23T07:57:52.000Z
|
2021-11-10T01:39:24.000Z
|
tests/test_download.py
|
rickh94/airtable_local_backup
|
69dfc1aef30cd92a3f8e0ddc0cae8a9ba5122243
|
[
"MIT"
] | 2
|
2018-10-18T15:36:53.000Z
|
2019-11-22T07:28:19.000Z
|
tests/test_download.py
|
rickh94/airtable_local_backup
|
69dfc1aef30cd92a3f8e0ddc0cae8a9ba5122243
|
[
"MIT"
] | 8
|
2018-05-23T12:01:38.000Z
|
2022-03-02T17:41:16.000Z
|
from airtable import Airtable
import requests
import pytest
from airtable_local_backup import download
def rettrue(*args):
return True
def test_download_table(lots_of_fields_raw, lots_of_fields_hashes,
monkeypatch, filedata, lots_of_fields_correct):
def get_attach_patched(url):
class FakeDownload():
def __init__(self, data):
self.content = data.encode('utf-8')
return FakeDownload(filedata[url])
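    # requests.get is monkeypatched below so attachment downloads are served from the local `filedata`
    # fixture instead of the network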
def get_table_data(*args):
return lots_of_fields_raw
monkeypatch.setattr(Airtable, 'validate_session', rettrue)
monkeypatch.setattr(Airtable, 'get_all', get_table_data)
monkeypatch.setattr(requests, 'get', get_attach_patched)
monkeypatch.setenv('AIRTABLE_API_KEY', '')
table = download.DownloadTable(base_key='app12345', api_key='key12345',
table_name='lots of fields')
for item in table.download():
assert item in lots_of_fields_correct
filename = item['Attachments'][0]['filename']
assert item['Attachments'][0]['md5hash'] ==\
lots_of_fields_hashes[filename]
| 34.787879
| 75
| 0.676829
|
313f6ccb08593f0843b98f80747e2eb56383770b
| 1,704
|
py
|
Python
|
main.py
|
ilyamordasov/HAP-python
|
698eb612c35b5672c4aab9d7896093924cbd358c
|
[
"Apache-2.0"
] | 1
|
2018-09-23T20:44:46.000Z
|
2018-09-23T20:44:46.000Z
|
main.py
|
ilyamordasov/HAP-python
|
698eb612c35b5672c4aab9d7896093924cbd358c
|
[
"Apache-2.0"
] | 1
|
2019-10-02T11:12:13.000Z
|
2019-10-02T11:12:13.000Z
|
main.py
|
ilyamordasov/HAP-python
|
698eb612c35b5672c4aab9d7896093924cbd358c
|
[
"Apache-2.0"
] | null | null | null |
"""An example of how to setup and start an Accessory.
This is:
1. Create the Accessory object you want.
2. Add it to an AccessoryDriver, which will advertise it on the local network,
setup a server to answer client queries, etc.
"""
import logging
import signal
from pyhap.accessory import Bridge
from pyhap.accessory_driver import AccessoryDriver
import pyhap.loader as loader
# The below package can be found in the HAP-python github repo under accessories/
from accessories.TemperatureSensor import TemperatureSensor
from accessories.BMP180 import BMP180
from accessories.G201S import G201S
logging.basicConfig(level=logging.INFO)
def get_bridge(driver):
"""Call this method to get a Bridge instead of a standalone accessory."""
bridge = Bridge(driver, 'Bridge')
# temp_sensor = TemperatureSensor(driver, 'CurrentTemperature')
# bmp180_sensor = BMP180(driver, 'CurrentPressure')
g201s_sensor = G201S(driver, 'SmartKettle')
# bridge.add_accessory(temp_sensor)
# bridge.add_accessory(bmp180_sensor)
bridge.add_accessory(g201s_sensor)
return bridge
def get_accessory(driver):
"""Call this method to get a standalone Accessory."""
return TemperatureSensor(driver, 'MyTempSensor')
# Start the accessory on port 51826
driver = AccessoryDriver(port=51826)
# Change `get_accessory` to `get_bridge` if you want to run a Bridge.
# driver.add_accessory(accessory=get_accessory(driver))
driver.add_accessory(accessory=get_bridge(driver))
# We want SIGTERM (kill) to be handled by the driver itself,
# so that it can gracefully stop the accessory, server and advertising.
signal.signal(signal.SIGTERM, driver.signal_handler)
# Start it!
driver.start()
| 31.555556
| 81
| 0.772887
|
161313e2ac15da6a06d44695b818bca255260fbe
| 17,118
|
py
|
Python
|
peering/tables.py
|
2bithacker/peering-manager
|
5953c4b1f2cff2a370b68d418a98c5c9e3037de8
|
[
"Apache-2.0"
] | null | null | null |
peering/tables.py
|
2bithacker/peering-manager
|
5953c4b1f2cff2a370b68d418a98c5c9e3037de8
|
[
"Apache-2.0"
] | 1
|
2021-11-11T22:08:22.000Z
|
2021-11-11T22:08:22.000Z
|
peering/tables.py
|
2bithacker/peering-manager
|
5953c4b1f2cff2a370b68d418a98c5c9e3037de8
|
[
"Apache-2.0"
] | null | null | null |
import django_tables2 as tables
from django.utils.safestring import mark_safe
from net.models import Connection
from utils.tables import (
BaseTable,
BooleanColumn,
ButtonsColumn,
SelectColumn,
TagColumn,
)
from .models import (
AutonomousSystem,
BGPGroup,
Community,
Configuration,
DirectPeeringSession,
Email,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
BGP_RELATIONSHIP = "{{ record.relationship.get_html }}"
COMMUNITY_TYPE = "{{ record.get_type_html }}"
ROUTING_POLICY_TYPE = "{{ record.get_type_html }}"
class BGPSessionStateColumn(tables.TemplateColumn):
def __init__(self, *args, **kwargs):
default = kwargs.pop("default", "")
verbose_name = kwargs.pop("verbose_name", "State")
template_code = kwargs.pop("template_code", "{{ record.get_bgp_state_html }}")
super().__init__(
*args,
default=default,
verbose_name=verbose_name,
template_code=template_code,
**kwargs
)
class RoutingPolicyColumn(tables.ManyToManyColumn):
def __init__(self, *args, **kwargs):
super().__init__(
*args,
default=mark_safe('<span class="text-muted">—</span>'),
separator=" ",
transform=lambda p: p.get_type_html(display_name=True),
**kwargs
)
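# Note: RoutingPolicyColumn renders each related routing policy through its HTML type badge
# (get_type_html) and falls back to a muted dash placeholder when no policies are attached.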
class AutonomousSystemTable(BaseTable):
pk = SelectColumn()
asn = tables.Column(verbose_name="ASN")
name = tables.Column(linkify=True)
irr_as_set = tables.Column(verbose_name="IRR AS-SET", orderable=False)
ipv6_max_prefixes = tables.Column(verbose_name="IPv6 Max Prefixes")
ipv4_max_prefixes = tables.Column(verbose_name="IPv4 Max Prefixes")
import_routing_policies = RoutingPolicyColumn(verbose_name="Import Policies")
export_routing_policies = RoutingPolicyColumn(verbose_name="Export Policies")
directpeeringsession_count = tables.Column(
verbose_name="Direct Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
internetexchangepeeringsession_count = tables.Column(
verbose_name="IX Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
affiliated = BooleanColumn(
verbose_name="Affiliated",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:autonomoussystem_list")
buttons = ButtonsColumn(AutonomousSystem)
class Meta(BaseTable.Meta):
model = AutonomousSystem
fields = (
"pk",
"asn",
"name",
"irr_as_set",
"ipv6_max_prefixes",
"ipv4_max_prefixes",
"general_policy",
"import_routing_policies",
"export_routing_policies",
"directpeeringsession_count",
"internetexchangepeeringsession_count",
"affiliated",
"tags",
"buttons",
)
default_columns = (
"pk",
"asn",
"name",
"irr_as_set",
"directpeeringsession_count",
"internetexchangepeeringsession_count",
"buttons",
)
class BGPGroupTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
check_bgp_session_states = BooleanColumn(
verbose_name="Poll Session States",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
import_routing_policies = RoutingPolicyColumn(verbose_name="Import Policies")
export_routing_policies = RoutingPolicyColumn(verbose_name="Export Policies")
directpeeringsession_count = tables.Column(
verbose_name="Direct Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:bgpgroup_list")
buttons = ButtonsColumn(BGPGroup)
class Meta(BaseTable.Meta):
model = BGPGroup
fields = (
"pk",
"name",
"slug",
"check_bgp_session_states",
"import_routing_policies",
"export_routing_policies",
"directpeeringsession_count",
"tags",
"buttons",
)
default_columns = (
"pk",
"name",
"check_bgp_session_states",
"directpeeringsession_count",
"buttons",
)
class CommunityTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
type = tables.TemplateColumn(template_code=COMMUNITY_TYPE)
tags = TagColumn(url_name="peering:community_list")
buttons = ButtonsColumn(Community)
class Meta(BaseTable.Meta):
model = Community
fields = ("pk", "name", "slug", "value", "type", "tags", "buttons")
default_columns = ("pk", "name", "value", "type", "buttons")
class ConfigurationTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
jinja2_trim = BooleanColumn(
verbose_name="Trim",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
jinja2_lstrip = BooleanColumn(
verbose_name="Lstrip",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:configuration_list")
buttons = ButtonsColumn(Configuration)
class Meta(BaseTable.Meta):
model = Configuration
fields = (
"pk",
"name",
"jinja2_trim",
"jinja2_lstrip",
"updated",
"tags",
"buttons",
)
default_columns = ("pk", "name", "updated", "buttons")
class DirectPeeringSessionTable(BaseTable):
append_template = """
{% load helpers %}
{% if record.comments %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Peering Session Comments" data-content="{{ record.comments | markdown:True }}"><i class="fas fa-comment"></i></button>
{% endif %}
{% if record.autonomous_system.comments %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Autonomous System Comments" data-content="{{ record.autonomous_system.comments | markdown:True }}"><i class="fas fa-comments"></i></button>
{% endif %}
"""
pk = SelectColumn()
local_autonomous_system = tables.Column(verbose_name="Local AS", linkify=True)
autonomous_system = tables.Column(verbose_name="AS", linkify=True)
ip_address = tables.Column(verbose_name="IP Address", linkify=True)
bgp_group = tables.Column(
verbose_name="BGP Group", accessor="bgp_group", linkify=True
)
relationship = tables.TemplateColumn(
verbose_name="Relationship", template_code=BGP_RELATIONSHIP
)
enabled = BooleanColumn(
verbose_name="Status",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
service_reference = tables.Column(verbose_name="Service ID", linkify=True)
import_routing_policies = RoutingPolicyColumn(verbose_name="Import Policies")
export_routing_policies = RoutingPolicyColumn(verbose_name="Export Policies")
state = BGPSessionStateColumn(accessor="bgp_state")
router = tables.Column(verbose_name="Router", accessor="router", linkify=True)
tags = TagColumn(url_name="peering:directpeeringsession_list")
buttons = ButtonsColumn(DirectPeeringSession, append_template=append_template)
class Meta(BaseTable.Meta):
model = DirectPeeringSession
fields = (
"pk",
"service_reference",
"local_autonomous_system",
"autonomous_system",
"ip_address",
"bgp_group",
"relationship",
"enabled",
"import_routing_policies",
"export_routing_policies",
"state",
"last_established_state",
"received_prefix_count",
"advertised_prefix_count",
"router",
"tags",
"buttons",
)
default_columns = (
"pk",
"local_autonomous_system",
"autonomous_system",
"ip_address",
"bgp_group",
"relationship",
"enabled",
"router",
"buttons",
)
class EmailTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
jinja2_trim = BooleanColumn(
verbose_name="Trim",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
jinja2_lstrip = BooleanColumn(
verbose_name="Lstrip",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:configuration_list")
buttons = ButtonsColumn(Email)
class Meta(BaseTable.Meta):
model = Email
fields = (
"pk",
"name",
"subject",
"jinja2_trim",
"jinja2_lstrip",
"updated",
"tags",
"buttons",
)
default_columns = ("pk", "name", "updated", "buttons")
class InternetExchangeTable(BaseTable):
pk = SelectColumn()
local_autonomous_system = tables.Column(verbose_name="Local AS", linkify=True)
name = tables.Column(linkify=True)
check_bgp_session_states = BooleanColumn(
verbose_name="Check Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
bgp_session_states_update = tables.Column(verbose_name="Last Sessions Check")
import_routing_policies = RoutingPolicyColumn(verbose_name="Import Policies")
export_routing_policies = RoutingPolicyColumn(verbose_name="Export Policies")
internetexchangepeeringsession_count = tables.Column(
verbose_name="Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:internetexchange_list")
buttons = ButtonsColumn(InternetExchange)
class Meta(BaseTable.Meta):
model = InternetExchange
fields = (
"pk",
"local_autonomous_system",
"name",
"slug",
"import_routing_policies",
"export_routing_policies",
"check_bgp_session_states",
"bgp_session_states_update",
"internetexchangepeeringsession_count",
"tags",
"buttons",
)
default_columns = (
"pk",
"name",
"internetexchangepeeringsession_count",
"buttons",
)
class InternetExchangePeeringSessionTable(BaseTable):
append_template = """
{% load helpers %}
{% if record.comments %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Peering Session Comments" data-content="{{ record.comments | markdown:True }}"><i class="fas fa-comment"></i></button>
{% endif %}
{% if record.autonomous_system.comments %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Autonomous System Comments" data-content="{{ record.autonomous_system.comments | markdown:True }}"><i class="fas fa-comments"></i></button>
{% endif %}
{% if record.internet_exchange.comments %}
<button type="button" class="btn btn-xs btn-info popover-hover" data-toggle="popover" data-html="true" title="Internet Exchange Comments" data-content="{{ record.internet_exchange.comments | markdown:True }}"><i class="fas fa-comment-dots"></i></button>
{% endif %}
"""
pk = SelectColumn()
autonomous_system = tables.Column(
verbose_name="AS", accessor="autonomous_system", linkify=True
)
internet_exchange_point = tables.Column(
verbose_name="IXP",
accessor="ixp_connection__internet_exchange_point",
linkify=True,
)
ixp_connection = tables.Column(verbose_name="Connection", linkify=True)
ip_address = tables.Column(verbose_name="IP Address", linkify=True)
service_reference = tables.Column(verbose_name="Service ID", linkify=True)
is_route_server = BooleanColumn(
verbose_name="Route Server",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
enabled = BooleanColumn(
verbose_name="Enabled",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
import_routing_policies = RoutingPolicyColumn(verbose_name="Import Policies")
export_routing_policies = RoutingPolicyColumn(verbose_name="Export Policies")
state = BGPSessionStateColumn(accessor="bgp_state")
tags = TagColumn(url_name="peering:internetexchangepeeringsession_list")
buttons = ButtonsColumn(
InternetExchangePeeringSession, append_template=append_template
)
class Meta(BaseTable.Meta):
model = InternetExchangePeeringSession
fields = (
"pk",
"service_reference",
"autonomous_system",
"ixp_connection",
"internet_exchange_point",
"ip_address",
"is_route_server",
"enabled",
"import_routing_policies",
"export_routing_policies",
"state",
"last_established_state",
"received_prefix_count",
"advertised_prefix_count",
"tags",
"buttons",
)
default_columns = (
"pk",
"autonomous_system",
"ixp_connection",
"ip_address",
"is_route_server",
"enabled",
"buttons",
)
class RouterConnectionTable(BaseTable):
pk = SelectColumn()
ipv6_address = tables.Column(linkify=True, verbose_name="IPv6")
ipv4_address = tables.Column(linkify=True, verbose_name="IPv4")
internet_exchange_point = tables.LinkColumn()
buttons = ButtonsColumn(Connection)
class Meta(BaseTable.Meta):
model = Connection
fields = (
"pk",
"state",
"vlan",
"ipv6_address",
"ipv4_address",
"internet_exchange_point",
"interface",
"buttons",
)
default_columns = (
"pk",
"state",
"vlan",
"ipv6_address",
"ipv4_address",
"internet_exchange_point",
"buttons",
)
empty_text = "None"
class RouterTable(BaseTable):
pk = SelectColumn()
local_autonomous_system = tables.Column(verbose_name="Local AS", linkify=True)
name = tables.Column(linkify=True)
platform = tables.Column(linkify=True)
encrypt_passwords = BooleanColumn(
verbose_name="Encrypt Password",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
configuration_template = tables.Column(linkify=True, verbose_name="Configuration")
connection_count = tables.Column(
verbose_name="Connections",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
directpeeringsession_count = tables.Column(
verbose_name="Direct Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
internetexchangepeeringsession_count = tables.Column(
verbose_name="IX Sessions",
attrs={"td": {"class": "text-center"}, "th": {"class": "text-center"}},
)
tags = TagColumn(url_name="peering:router_list")
buttons = ButtonsColumn(Router)
class Meta(BaseTable.Meta):
model = Router
fields = (
"pk",
"local_autonomous_system",
"name",
"hostname",
"platform",
"encrypt_passwords",
"configuration_template",
"connection_count",
"directpeeringsession_count",
"internetexchangepeeringsession_count",
"device_state",
"tags",
"buttons",
)
default_columns = (
"pk",
"name",
"hostname",
"platform",
"encrypt_passwords",
"configuration_template",
"connection_count",
"device_state",
"buttons",
)
class RoutingPolicyTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
type = tables.TemplateColumn(template_code=ROUTING_POLICY_TYPE)
tags = TagColumn(url_name="peering:routingpolicy_list")
buttons = ButtonsColumn(RoutingPolicy)
class Meta(BaseTable.Meta):
model = RoutingPolicy
fields = ("pk", "name", "type", "weight", "address_family", "tags", "buttons")
default_columns = ("pk", "name", "type", "weight", "address_family", "buttons")
| 34.581818
| 257
| 0.603867
|
2e314cf11cf1c015ee618ee314645d4b295b389b
| 1,539
|
py
|
Python
|
tests/helpers_test.py
|
RobertoPrevato/Flask-three-template
|
00b7dd6dd299a1fd91f08f60007fe09235216096
|
[
"MIT"
] | 7
|
2015-11-19T20:45:16.000Z
|
2020-04-22T22:18:34.000Z
|
tests/helpers_test.py
|
RobertoPrevato/Flask-three-template
|
00b7dd6dd299a1fd91f08f60007fe09235216096
|
[
"MIT"
] | null | null | null |
tests/helpers_test.py
|
RobertoPrevato/Flask-three-template
|
00b7dd6dd299a1fd91f08f60007fe09235216096
|
[
"MIT"
] | 2
|
2016-10-08T20:09:06.000Z
|
2020-04-22T22:18:36.000Z
|
import server
import unittest
class HelpersTestCase(unittest.TestCase):
"""
Custom template helpers tests.
"""
def setUp(self):
"""
The code in the setUp() method is called before each individual test function is run.
"""
self.app = server.app.test_client()
def tearDown(self):
"""
        The code in the tearDown() method is called after each individual test function has run.
"""
pass
def test_resources_helper(self):
from app.helpers.resources import resources, load_resources_config
# verify that no exception happen while reading the current resources configuration
load_resources_config()
test_conf = {
"bundling": False,
"minification": False,
"sets": {
"libs": [
"/scripts/jquery.js",
"/scripts/knockout.js",
"/scripts/lodash.js"
]
}
}
a = resources("libs", test_conf)
assert a == '<script src="/scripts/jquery.js"></script>\n<script src="/scripts/knockout.js"></script>\n<script src="/scripts/lodash.js"></script>'
test_conf["bundling"] = True
a = resources("libs", test_conf)
assert a == '<script src="/scripts/libs.built.js"></script>'
test_conf["minification"] = True
a = resources("libs", test_conf)
assert a == '<script src="/scripts/libs.min.js"></script>'
assert True == True
| 29.596154
| 154
| 0.560754
|
c55dc6d2684dfbc66c856858b348c669e5be3174
| 3,747
|
py
|
Python
|
sdk/python/pulumi_azure_native/security/get_workspace_setting.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/security/get_workspace_setting.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/security/get_workspace_setting.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetWorkspaceSettingResult',
'AwaitableGetWorkspaceSettingResult',
'get_workspace_setting',
]
@pulumi.output_type
class GetWorkspaceSettingResult:
"""
Configures where to store the OMS agent data for workspaces under a scope
"""
def __init__(__self__, id=None, name=None, scope=None, type=None, workspace_id=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if workspace_id and not isinstance(workspace_id, str):
raise TypeError("Expected argument 'workspace_id' to be a str")
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scope(self) -> str:
"""
All the VMs in this scope will send their security data to the mentioned workspace unless overridden by a setting with more specific scope
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
"""
The full Azure ID of the workspace to save the data in
"""
return pulumi.get(self, "workspace_id")
class AwaitableGetWorkspaceSettingResult(GetWorkspaceSettingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceSettingResult(
id=self.id,
name=self.name,
scope=self.scope,
type=self.type,
workspace_id=self.workspace_id)
def get_workspace_setting(workspace_setting_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceSettingResult:
"""
Configures where to store the OMS agent data for workspaces under a scope
API Version: 2017-08-01-preview.
:param str workspace_setting_name: Name of the security setting
"""
__args__ = dict()
__args__['workspaceSettingName'] = workspace_setting_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:security:getWorkspaceSetting', __args__, opts=opts, typ=GetWorkspaceSettingResult).value
return AwaitableGetWorkspaceSettingResult(
id=__ret__.id,
name=__ret__.name,
scope=__ret__.scope,
type=__ret__.type,
workspace_id=__ret__.workspace_id)
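# Illustrative usage (the setting name below is hypothetical):
#
#     setting = get_workspace_setting(workspace_setting_name="default")
#     pulumi.export("workspaceId", setting.workspace_id)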
| 32.301724
| 146
| 0.642647
|
6a727db51b330233fc53a4457786c2a8e8c28735
| 244
|
py
|
Python
|
batch_script.py
|
aarjavchauhan/web_visualization
|
7a4b8a5d22d140762ae29ec808bb02dbc79763f4
|
[
"MIT"
] | 1
|
2020-10-29T03:28:08.000Z
|
2020-10-29T03:28:08.000Z
|
batch_script.py
|
aarjavchauhan/web_visualization
|
7a4b8a5d22d140762ae29ec808bb02dbc79763f4
|
[
"MIT"
] | null | null | null |
batch_script.py
|
aarjavchauhan/web_visualization
|
7a4b8a5d22d140762ae29ec808bb02dbc79763f4
|
[
"MIT"
] | null | null | null |
import os
import sys
folder = sys.argv[1]
script = sys.argv[2]
for root, dirs, files in os.walk(folder):
for filename in files:
data_file = "{}/{}".format(root,filename)
os.system("python3 {} {}".format(script,data_file))
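# Example (hypothetical paths): `python3 batch_script.py ./data ./process.py`
# runs `python3 ./process.py <file>` once for every file found under ./data.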
| 22.181818
| 59
| 0.639344
|
8128eb8f110a867e354a854e4556d9f3eb0fa4df
| 6,040
|
py
|
Python
|
pybind/nos/v7_1_0/rbridge_id/system_monitor/sfp/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/system_monitor/sfp/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/system_monitor/sfp/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import alert
class sfp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/system-monitor/sfp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__alert',)
_yang_name = 'sfp'
_rest_name = 'sfp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__alert = YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alerts for component:SFP', u'cli-compact-syntax': None, u'callpoint': u'smsetSfpAlert', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'system-monitor', u'sfp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'system-monitor', u'sfp']
def _get_alert(self):
"""
Getter method for alert, mapped from YANG variable /rbridge_id/system_monitor/sfp/alert (container)
"""
return self.__alert
def _set_alert(self, v, load=False):
"""
Setter method for alert, mapped from YANG variable /rbridge_id/system_monitor/sfp/alert (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_alert is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alert() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alerts for component:SFP', u'cli-compact-syntax': None, u'callpoint': u'smsetSfpAlert', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alert must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alerts for component:SFP', u'cli-compact-syntax': None, u'callpoint': u'smsetSfpAlert', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__alert = t
if hasattr(self, '_set'):
self._set()
def _unset_alert(self):
self.__alert = YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure alerts for component:SFP', u'cli-compact-syntax': None, u'callpoint': u'smsetSfpAlert', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
alert = __builtin__.property(_get_alert, _set_alert)
_pyangbind_elements = {'alert': alert, }
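# Illustrative usage sketch only (comments, not part of the generated module):
# with this pyangbind binding loaded, a backend would normally reach the
# presence-less 'alert' container through the generated accessors, e.g.
#   obj._get_alert()            # returns the YANGDynClass-wrapped container
#   obj._set_alert(container)   # how backends populate the node, per the docstring above
# where 'obj' is a hypothetical instance of this generated sfp class.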
| 48.709677
| 546
| 0.711589
|
0c6d2afccf2b28d4837037a0c214d860be6087a4
| 2,365
|
py
|
Python
|
sorting-numbers/custom_training_loop.py
|
AndreMaz/dnn-attention
|
86e834b90bd419646fd00c6ff4df910ab7874910
|
[
"MIT"
] | 1
|
2020-03-11T22:52:19.000Z
|
2020-03-11T22:52:19.000Z
|
sorting-numbers/custom_training_loop.py
|
AndreMaz/dnn-attention
|
86e834b90bd419646fd00c6ff4df910ab7874910
|
[
"MIT"
] | 3
|
2021-05-21T16:15:18.000Z
|
2022-02-10T01:11:23.000Z
|
sorting-numbers/custom_training_loop.py
|
AndreMaz/dnn-attention
|
86e834b90bd419646fd00c6ff4df910ab7874910
|
[
"MIT"
] | null | null | null |
# from dataset.dataset_generator import ArtificialDataset
from dataset.generator import generateDataset
from models.pointer_network.model import EagerModel
from models.inference import runSeq2SeqInference
from utils.read_configs import get_configs
from utils.tester import tester
import tensorflow as tf
import numpy as np
import sys
def main(plotAttention=False) -> None:
# Get the configs
configs = get_configs(sys.argv)
print('Generating Dataset')
# generate training dataset
trainEncoderInput, trainDecoderInput, trainDecoderOutput = generateDataset(
configs['num_samples_training'],
configs['sample_length'],
configs['min_value'],
configs['max_value'],
configs['SOS_CODE'],
configs['EOS_CODE'],
configs['vocab_size']
)
print('Dataset Generated!')
loss_fn = tf.losses.CategoricalCrossentropy()
optimizer = tf.optimizers.Adam()
model = EagerModel(
configs['vocab_size'],
configs['embedding_dims'],
configs['lstm_units']
)
losses = []
print('Training...')
batch_size = configs['batch_size']
num_batches = int(configs['num_samples_training'] / batch_size)
for epoch in range(configs['num_epochs']):
loss_per_epoch = []
        for i in range(num_batches - 1):  # iterate over all but the final batch
enc_in_batch = trainEncoderInput[i *
batch_size: (i+1) * batch_size]
dec_in_batch = trainDecoderInput[i *
batch_size: (i+1) * batch_size]
dec_out_batch = trainDecoderOutput[i *
batch_size: (i+1) * batch_size]
with tf.GradientTape() as tape:
predicted = model(enc_in_batch, dec_in_batch)
loss = loss_fn(dec_out_batch, predicted)
# Store the loss
loss_per_epoch.append(loss)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
epoch_loss = np.asarray(loss_per_epoch).mean()
print(f"Epoch: {epoch+1} avg. loss: {epoch_loss}")
losses.append(epoch_loss)
# print(losses)
print('Testing...')
tester(model, configs, eager=True)
if __name__ == "__main__":
main()
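# Illustrative note (comments only): after training, a batch can be scored the
# same way the loop above does it, e.g. probs = model(enc_in_batch, dec_in_batch)
# followed by tf.argmax(probs, axis=-1) to recover the predicted ordering; the
# full decoding/evaluation helpers come from models.inference and utils.tester.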
| 30.714286
| 79
| 0.621987
|
fbfeb5384b522069a6909b2575e7e76984231ade
| 993
|
py
|
Python
|
TopQuarkAnalysis/TopEventProducers/test/ttDecaySubset_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
TopQuarkAnalysis/TopEventProducers/test/ttDecaySubset_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
TopQuarkAnalysis/TopEventProducers/test/ttDecaySubset_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
## add message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.ParticleListDrawer=dict()
## define input
from TopQuarkAnalysis.TopEventProducers.tqafInputFiles_cff import relValTTbar
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(relValTTbar)
)
## define maximal number of events to loop over
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
## configure process options
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
## produce decaySubset
process.load("TopQuarkAnalysis.TopEventProducers.producers.TopDecaySubset_cfi")
process.task = cms.Task(process.decaySubset)
## produce printout of particle listings (for debugging)
process.load("TopQuarkAnalysis.TopEventProducers.sequences.printGenParticles_cff")
## path
process.p = cms.Path(process.printDecaySubset, process.task)
| 29.205882
| 82
| 0.794562
|
5a45edca79d0635facf0e9de957a35fdc251c9f2
| 2,391
|
py
|
Python
|
sps/api/v1/router.py
|
tantexian/sps-2014-12-4
|
0cdab186cb3bf148656c4c214a18215643b4969c
|
[
"Apache-2.0"
] | 1
|
2018-07-27T15:16:14.000Z
|
2018-07-27T15:16:14.000Z
|
sps/api/v1/router.py
|
tantexian/sps-2014-12-4
|
0cdab186cb3bf148656c4c214a18215643b4969c
|
[
"Apache-2.0"
] | null | null | null |
sps/api/v1/router.py
|
tantexian/sps-2014-12-4
|
0cdab186cb3bf148656c4c214a18215643b4969c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sps.api.v1 import demos
from sps.common import wsgi
class API(wsgi.Router):
"""WSGI router for sps v1 API requests."""
def __init__(self, mapper):
demos_resource = demos.create_resource()
mapper.connect("/",
controller=demos_resource,
action="index")
mapper.connect("/demos",
controller=demos_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect("/demos",
controller=demos_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect("/demos/detail",
controller=demos_resource,
action='detail',
conditions={'method': ['GET', 'HEAD']})
mapper.connect("/demos/{id}",
controller=demos_resource,
action="meta",
conditions=dict(method=["HEAD"]))
mapper.connect("/demos/{id}",
controller=demos_resource,
action="show",
conditions=dict(method=["GET"]))
mapper.connect("/demos/{id}",
controller=demos_resource,
action="update",
conditions=dict(method=["PUT"]))
mapper.connect("/demos/{id}",
controller=demos_resource,
action="delete",
conditions=dict(method=["DELETE"]))
        # Add your API routes here, e.g. define your demos2_resource = demos2.create_resource(); see the sketch below.
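        # Illustrative sketch only, assuming a hypothetical sps.api.v1.demos2
        # module that exposes create_resource():
        #
        #   demos2_resource = demos2.create_resource()
        #   mapper.connect("/demos2",
        #                  controller=demos2_resource,
        #                  action="index",
        #                  conditions={'method': ['GET']})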
super(API, self).__init__(mapper)
| 37.952381
| 93
| 0.543287
|
22b29763094b61bd03b1f025ef4d6ef7fcbbef3f
| 10,595
|
py
|
Python
|
curie/reaction.py
|
jtmorrell/curie
|
cf63d7771432a58ab79ee6dfb83b9c211ee33a1f
|
[
"MIT"
] | 1
|
2021-01-15T15:33:24.000Z
|
2021-01-15T15:33:24.000Z
|
curie/reaction.py
|
jtmorrell/curie
|
cf63d7771432a58ab79ee6dfb83b9c211ee33a1f
|
[
"MIT"
] | 1
|
2021-07-07T23:25:09.000Z
|
2021-07-07T23:25:09.000Z
|
curie/reaction.py
|
jtmorrell/curie
|
cf63d7771432a58ab79ee6dfb83b9c211ee33a1f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from .data import _get_connection
from .plotting import _init_plot, _draw_plot, colormap
from .isotope import Isotope
from .library import Library
class Reaction(object):
"""Cross section data for nuclear reactions
Contains reaction cross sections as a function of incident energy,
and some useful methods for manipulating cross section data, such as
flux-averages, integrated cross-sections, and interpolation. All
cross sections (and uncertainties) are in mb, and all energies are
in MeV.
Parameters
----------
reaction_name : str
Name of the reaction, in nuclear reaction notation. E.g. '115IN(n,g)',
'235U(n,f)', '139LA(p,x)134CE', 'Ra-226(n,2n)Ra-225', 'Al-27(n,a)', etc.
library : str, optional
Name of the library to use, or 'best' (default).
Attributes
----------
target : str
The target nucleus. Some libraries support natural elements, e.g. 'natEl'.
incident : str
Incident particle. E.g. 'n', 'p', 'd'.
outgoing : str
Outgoing particle, or reaction shorthand. E.g. '2n', 'd', 'f', 'inl', 'x'.
Will always be 'x' for (TENDL) residual product libraries.
product : str
The product isotope.
eng : np.ndarray
Incident particle energy, in MeV.
xs : np.ndarray
Reaction cross section, in mb.
unc_xs : np.ndarray
Uncertainty in the cross section, in mb. If not provided by the
library, default is zeros of same shape as xs.
name : str
Name of the reaction in nuclear reaction notation.
library : ci.Library
Nuclear reaction library. printing `rx.library.name` will give the
name of the library.
TeX : str
LaTeX formatted reaction name.
Examples
--------
>>> rx = ci.Reaction('226RA(n,2n)')
>>> print(rx.library.name)
ENDF/B-VII.1
>>> rx = ci.Reaction('226RA(n,x)225RA')
>>> print(rx.library.name)
TENDL-2015
>>> rx = ci.Reaction('115IN(n,inl)')
>>> print(rx.library.name)
IRDFF-II
"""
def __init__(self, reaction_name, library='best'):
self.target, p = tuple(reaction_name.split('('))
p, self.product = tuple(p.split(')'))
self.incident, self.outgoing = tuple(p.split(','))
self.incident, self.outgoing = self.incident.lower(), self.outgoing.lower()
self._rx = [self.target, self.incident, self.outgoing, self.product]
self.name = reaction_name
if library.lower()=='best':
if self.incident=='n':
for lb in ['irdff','endf','iaea','tendl','tendl_n_rp']:
self.library = Library(lb)
if lb=='tendl_n_rp':
self._check(True)
elif self._check():
break
elif self.incident in ['p','d']:
for lb in ['iaea','tendl_'+self.incident+'_rp']:
self.library = Library(lb)
if lb=='tendl_d_rp':
self._check(True)
elif self._check():
break
else:
self.library = Library('iaea')
self._check(True)
else:
self.library = Library(library)
self._check(True)
self.name = self.library.search(*self._rx)[0]
q = self.library.retrieve(*self._rx)
self.eng = q[:,0]
self.xs = q[:,1]
if q.shape[1]==3:
self.unc_xs = q[:,2]
else:
self.unc_xs = np.zeros(len(self.xs))
self._interp = None
self._interp_unc = None
try:
if 'nat' not in self.target:
tg = Isotope(self.target).TeX
else:
tg = r'$^{nat}$'+self.target[3:].title()
prd = Isotope(self.product).TeX if self.product else ''
self.TeX = '{0}({1},{2}){3}'.format(tg, self.incident, self.outgoing, prd)
except:
self.TeX = reaction_name
def _check(self, err=False):
c = len(self.library.search(*self._rx))==1
if err and not c:
raise ValueError('Reaction '+self.name+' not found or not unique.')
return c
def __str__(self):
return self.name
def interpolate(self, energy):
"""Interpolated cross section
Linear interpolation of the reaction cross section along the
input energy grid.
Parameters
----------
energy : array_like
Incident particle energy, in MeV.
Returns
-------
cross_section : np.ndarray
Interpolated cross section, in mb.
Examples
--------
>>> rx = ci.Reaction('115IN(n,g)', 'IRDFF')
>>> print(rx.interpolate(0.5))
161.41656650941306
>>> print(rx.interpolate([0.5, 1.0, 5.0]))
[161.41646651 171.81486757 8.8822]
"""
if self._interp is None:
kind = 'linear'
i = 0
if self.library.name.lower().startswith('tendl'):
kind = 'quadratic'
ix = np.where(self.xs>0)[0]
if len(ix)>0:
i = max((ix[0]-1, 0))
self._interp = interp1d(self.eng[i:], self.xs[i:], bounds_error=False, fill_value=0.0, kind=kind)
_interp = self._interp(energy)
return np.where(_interp>0, _interp, 0.0)
def interpolate_unc(self, energy):
"""Uncertainty in interpolated cross section
Linear interpolation of the uncertainty in the reaction cross section
along the input energy grid, for libraries where uncertainties are provided.
Parameters
----------
energy : array_like
Incident particle energy, in MeV.
Returns
-------
unc_cross_section : np.ndarray
Uncertainty in the interpolated cross section, in mb.
Examples
--------
>>> rx = ci.Reaction('115IN(n,g)', 'IRDFF')
>>> print(rx.interpolate_unc(0.5))
3.9542683715745546
>>> print(rx.interpolate_unc([0.5, 1.0, 5.0]))
[3.95426837 5.88023936 0.4654]
"""
if self._interp_unc is None:
self._interp_unc = interp1d(self.eng, self.unc_xs, bounds_error=False, fill_value=0.0)
return self._interp_unc(energy)
def integrate(self, energy, flux, unc=False):
"""Reaction flux integral
Integrate the product of the cross section and flux along the input energy grid.
Parameters
----------
energy : array_like
Incident particle energy, in MeV.
flux : array_like
Incident particle flux as a function of the input energy grid.
unc : bool, optional
If `True`, returns the both the flux integral and the uncertainty. If `False`,
just the flux integral is returned. Default `False`.
Returns
-------
xs_integral : float or tuple
Reaction flux integral if `unc=False` (default), or reaction flux integral
and uncertainty, if `unc=True`.
Examples
--------
        >>> rx = ci.Reaction('Ni-58(n,p)')
>>> eng = np.linspace(1, 5, 20)
>>> phi = np.ones(20)
>>> print(rx.integrate(eng, phi))
833.4435915974148
>>> print(rx.integrate(eng, phi, unc=True))
(833.4435915974148, 19.91851943674977)
"""
E = np.asarray(energy)
phisig = np.asarray(flux)*self.interpolate(E)
if unc:
unc_phisig = np.asarray(flux)*self.interpolate_unc(E)
return np.sum(0.5*(E[1:]-E[:-1])*(phisig[:-1]+phisig[1:])), np.sum(0.5*(E[1:]-E[:-1])*(unc_phisig[:-1]+unc_phisig[1:]))
return np.sum(0.5*(E[1:]-E[:-1])*(phisig[:-1]+phisig[1:]))
def average(self, energy, flux, unc=False):
"""Flux averaged reaction cross section
Calculates the flux-weighted average reaction cross section, using the
input flux and energy grid.
Parameters
----------
energy : array_like
Incident particle energy, in MeV.
flux : array_like
Incident particle flux as a function of the input energy grid.
unc : bool, optional
If `True`, returns the both the flux average cross section and the uncertainty. If `False`,
just the average cross section is returned. Default `False`.
Returns
-------
average_xs : float or tuple
Flux-averaged reaction cross section if `unc=False` (default), or average
and uncertainty, if `unc=True`.
Examples
--------
>>> rx = ci.Reaction('Ni-58(n,p)')
>>> eng = np.linspace(1, 5, 20)
>>> phi = np.ones(20)
>>> print(rx.average(eng, phi))
208.3608978993537
>>> print(rx.average(eng, phi, unc=True))
(208.3608978993537, 4.979629859187442)
"""
E, phi = np.asarray(energy), np.asarray(flux)
phisig = phi*self.interpolate(E)
dE = E[1:]-E[:-1]
if unc:
unc_phisig = np.asarray(flux)*self.interpolate_unc(E)
return np.sum(0.5*dE*(phisig[:-1]+phisig[1:]))/np.sum(0.5*dE*(phi[:-1]+phi[1:])), np.sum(0.5*dE*(unc_phisig[:-1]+unc_phisig[1:]))/np.sum(0.5*dE*(phi[:-1]+phi[1:]))
return np.sum(0.5*dE*(phisig[:-1]+phisig[1:]))/np.sum(0.5*dE*(phi[:-1]+phi[1:]))
def plot(self, energy=None, label='reaction', title=False, **kwargs):
"""Plot the cross section
Plots the energy differential cross section.
Parameters
----------
energy : array_like, optional
Energy grid along which to plot the cross section. If None, the
energy grid provided by the library will be used.
label : str, optional
Axes label. If label='reaction', the label will be the reaction name.
If 'library', it will be the name of the cross section library.
If 'both', then the reaction name and library will be given. If
none of these options, pyplot will be called with `ax.plot(..., label=label)`.
title : bool, optional
Display the reaction name as the plot title. Default, False.
Other Parameters
----------------
**kwargs
Optional keyword arguments for plotting. See the
plotting section of the curie API for a complete
list of kwargs.
Examples
--------
>>> rx = ci.Reaction('115IN(n,g)')
>>> rx.plot(scale='loglog')
>>> rx = ci.Reaction('35CL(n,p)')
>>> f, ax = rx.plot(return_plot=True)
>>> rx = ci.Reaction('35CL(n,el)')
>>> rx.plot(f=f, ax=ax, scale='loglog')
"""
f, ax = _init_plot(**kwargs)
if title:
ax.set_title(self.TeX)
if label is not None:
if label.lower() in ['both','library','reaction']:
label = {'both':'{0}\n({1})'.format(self.TeX, self.library.name),'library':self.library.name,'reaction':self.TeX}[label.lower()]
unc_xs = None
if energy is None:
if self.library.name.lower().startswith('tendl'):
eng = np.linspace(min(self.eng), max(self.eng), 801)
xs = self.interpolate(eng)
else:
eng, xs = self.eng, self.xs
if np.any(self.unc_xs>0):
unc_xs = self.unc_xs
else:
eng, xs = np.asarray(energy), self.interpolate(energy)
ux = self.interpolate_unc(energy)
if np.any(ux>0):
unc_xs = ux
line, = ax.plot(eng, xs, label=label)
if unc_xs is not None:
ax.fill_between(eng, xs+unc_xs, xs-unc_xs, facecolor=line.get_color(), alpha=0.5)
if self.library.name.lower().startswith('tendl'):
wh = np.where((self.eng>=min(eng))&(self.eng<=max(eng)))
elib = self.eng[wh]
xslib = self.xs[wh]
ax.plot(elib, xslib, ls='None', marker='o', color=line.get_color())
ax.set_xlabel('Incident Energy (MeV)')
ax.set_ylabel('Cross Section (mb)')
if label:
ax.legend(loc=0)
return _draw_plot(f, ax, **kwargs)
| 28.103448
| 166
| 0.657952
|
8e298bddff6827bb150122ae810a98f8ad3bf96b
| 530
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/coucou-28726
|
9e5ec0bd3aee99dacf2ec09c309bf424572daf2e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/coucou-28726
|
9e5ec0bd3aee99dacf2ec09c309bf424572daf2e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/coucou-28726
|
9e5ec0bd3aee99dacf2ec09c309bf424572daf2e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "coucou-28726.botics.co"
site_params = {
"name": "coucou",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.384615
| 61
| 0.65283
|
d0f5d09558b808324d9e99307490339e40324bb3
| 2,894
|
py
|
Python
|
tests/single_layer_tests.py
|
godspeed20/healthCheckMasseuse
|
72cc97776a6b28f606e1192694141f249619bf8d
|
[
"MIT"
] | null | null | null |
tests/single_layer_tests.py
|
godspeed20/healthCheckMasseuse
|
72cc97776a6b28f606e1192694141f249619bf8d
|
[
"MIT"
] | null | null | null |
tests/single_layer_tests.py
|
godspeed20/healthCheckMasseuse
|
72cc97776a6b28f606e1192694141f249619bf8d
|
[
"MIT"
] | null | null | null |
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from flask import jsonify
import server
import socket
from threading import Thread
class MockEmptyServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200, "")
self.end_headers()
return
# class MockValidServerRequestHandler(BaseHTTPRequestHandler):
# def do_GET(self):
# self.send_response(200, "sss")
# self.end_headers()
# return
#
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
class SingleJsonLayerTest(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def test_missing_fields(self):
rv = self.app.get('/single-layer')
self.assertEqual(400, rv.status_code)
self.assertEqual(b'Params healthCheckUrl and appName are mandatory', rv.data)
def test_missing_appName(self):
rv = self.app.get('/single-layer?healthCheckUrl=abc')
self.assertEqual(400, rv.status_code)
self.assertEqual(b'Params healthCheckUrl and appName are mandatory', rv.data)
def test_missing_healthCheckUrl(self):
rv = self.app.get('/single-layer?appName=abc')
self.assertEqual(400, rv.status_code)
self.assertEqual(b'Params healthCheckUrl and appName are mandatory', rv.data)
def test_empty_payload(self):
mock_server_port = get_free_port()
mock_server = HTTPServer(('localhost', mock_server_port), MockEmptyServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
health_check_url = "http://localhost:" + str(mock_server_port)
print(health_check_url)
rv = self.app.get('/single-layer?healthCheckUrl=' + health_check_url + '&appName=abc')
self.assertEqual(200, rv.status_code)
self.assertEqual(b'{\n "name": "abc"\n}\n', rv.data)
# def test_valid_payload(self):
# mock_server_port = get_free_port()
# mock_server = HTTPServer(('localhost', mock_server_port), MockValidServerRequestHandler)
# mock_server_thread = Thread(target=mock_server.serve_forever)
# mock_server_thread.setDaemon(True)
# mock_server_thread.start()
#
# health_check_url = "http://localhost:" + str(mock_server_port)
# print(health_check_url)
#
# rv = self.app.get('/single-layer?healthCheckUrl=' + health_check_url + '&appName=abc')
# self.assertEqual(200, rv.status_code)
# self.assertEqual(b'{\n "name": "abc"\n}\n', rv.data)
if __name__ == '__main__':
unittest.main()
| 34.452381
| 99
| 0.662751
|
00702fdc7cf80b65a8e5fa1b0116e4e6cf1bf6a9
| 2,068
|
py
|
Python
|
zeus/datasets/common/nasbench201.py
|
shaido987/vega
|
14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6
|
[
"MIT"
] | 240
|
2020-08-15T15:11:49.000Z
|
2022-03-28T07:26:23.000Z
|
zeus/datasets/common/nasbench201.py
|
WholeG/vega
|
d1ccf1c3ce68a118bdb6775594ceed0f895911e7
|
[
"MIT"
] | 20
|
2020-08-29T06:18:21.000Z
|
2022-03-21T04:35:57.000Z
|
zeus/datasets/common/nasbench201.py
|
WholeG/vega
|
d1ccf1c3ce68a118bdb6775594ceed0f895911e7
|
[
"MIT"
] | 69
|
2020-08-15T15:41:53.000Z
|
2022-03-16T08:27:47.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for Nasbench101 dataset."""
from zeus.common import ClassFactory, ClassType
from zeus.common import FileOps
from .utils.dataset import Dataset
from nas_201_api import NASBench201API as API
VALID_OPS = ['avg_pool_3x3', 'nor_conv_1x1', 'nor_conv_3x3', 'none', 'skip_connect']
VALID_DATASET = ["cifar10", "cifar100", "ImageNet16-120"]
@ClassFactory.register(ClassType.DATASET)
class Nasbench201(Dataset):
"""Nasbench201 Dataset."""
def __init__(self):
"""Construct the Nasbench201 class."""
super(Nasbench201, self).__init__()
self.args.data_path = FileOps.download_dataset(self.args.data_path)
        self.nasbench201_api = API(self.args.data_path)
def query(self, arch_str, dataset):
"""Query an item from the dataset according to the given arch_str and dataset .
:arch_str: arch_str to define the topology of the cell
:type path: str
:dataset: dataset type
:type dataset: str
:return: an item of the dataset, which contains the network info and its results like accuracy, flops and etc
:rtype: dict
"""
if dataset not in VALID_DATASET:
raise ValueError("Only cifar10, cifar100, and Imagenet dataset is supported.")
ops_list = self.nasbench201_api.str2lists(arch_str)
for op in ops_list:
if op not in VALID_OPS:
raise ValueError("{} is not in the nasbench201 space.".format(op))
index = self.nasbench201_api.query_index_by_arch(arch_str)
results = self.nasbench201_api.query_by_index(index, dataset)
return results
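# Illustrative usage sketch (comments only). It assumes the dataset file pointed
# to by args.data_path is available locally and that the usual zeus ClassFactory
# configuration has been loaded; a typical NAS-Bench-201 arch string looks like:
#   dataset = Nasbench201()
#   arch = '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|skip_connect~2|'
#   results = dataset.query(arch, 'cifar10')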
| 40.54902
| 117
| 0.700193
|
f1d3aa475377c42830af025e447a45fbb644b2cd
| 14,362
|
py
|
Python
|
terminal_player.py
|
o-santi/desemprego
|
b61cfde64d4c58efd4181d4e356062454418163d
|
[
"MIT"
] | 1
|
2021-01-23T12:40:14.000Z
|
2021-01-23T12:40:14.000Z
|
terminal_player.py
|
o-santi/desemprego
|
b61cfde64d4c58efd4181d4e356062454418163d
|
[
"MIT"
] | null | null | null |
terminal_player.py
|
o-santi/desemprego
|
b61cfde64d4c58efd4181d4e356062454418163d
|
[
"MIT"
] | null | null | null |
"""
Really bad video player made to play on the terminal.
Can also print images.
"""
from PIL import Image, UnidentifiedImageError
import numpy as np
from tqdm import tqdm
import time
from pynput import keyboard
import asyncio
import aiofiles
import argparse
import mmap
import threading
import concurrent.futures
import cv2
import filetype
import sys
class TerminalPlayer:
def __init__(self, filename, mode, char, console, fps_cap):
self.filename = filename
self.filetype = filetype.guess(self.filename)
self.console = console # if True, opens a new console to show the gif
self.fps_cap = fps_cap # if False, disables fps caps and plays as fast as it can
self.char_array = [" ", ".", "-", "*", "/", "=", "#"]
self.char = char
self.supported_filetypes = ["gif", "mp4"]
self.modes = ["ascii", "color", "color216"]
assert mode in self.modes
self.mode = mode # either ascii-characters or colored
self.image_frames_array = []
self.screen_array = (
[]
) # dimensions are width by height so width * height elements
def open_pillow_image(self, gif_object):
"""
maybe here the mmaps should already be used to read and write faster
TODO: idk, test this stupid idea later, maybe it is faster
"""
self.is_animated = getattr(gif_object, "is_animated", False)
self.width = gif_object.width
self.height = gif_object.height
self.frame_count = int(getattr(gif_object, "n_frames", 1))
self.duration = gif_object.info.get("duration", 41.6) / 1000 # default to 24fps
self.sound = False
for frame in range(self.frame_count):
gif_object.seek(frame)
if self.mode == "ascii":
frame = gif_object.convert("L").resize(self.frame_size).getdata()
frame = np.reshape(frame, (*self.frame_size, 1))
elif self.mode.startswith("color"):
frame = gif_object.convert("RGB").resize(self.frame_size).getdata()
frame = np.reshape(frame, (*self.frame_size, 3))
self.image_frames_array.append(frame)
def open_opencv_image(self, cap):
"""
Opens the video using opencv-python
"""
fps = cap.get(cv2.CAP_PROP_FPS)
self.duration = 1 / fps
self.width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.is_animated = True
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = cv2.resize(frame, self.frame_size)
if self.mode == "ascii":
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.reshape(frame, (*self.frame_size, 1))
elif self.mode.startswith("color"):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = np.reshape(frame, (*self.frame_size, 3))
self.image_frames_array.append(frame)
cap.release()
def handle_file_types(self):
"""
handles multiple filetype
sets the following aspect for the self object:
image_frames_array -> holds the arrays of the pixel values
each frame is a numpy array
frame_count -> number of frames in video
filetype -> filetype
width
height
duration -> time to sleep between frames
TODO: play sound lol
"""
try:
self.open_pillow_image(Image.open(self.filename)) # tries to open with pillow
except UnidentifiedImageError:
cap = cv2.VideoCapture(self.filename) # if didnt open, try it with cv2
if cap.isOpened():
self.open_opencv_image(cap)
else:
print("Sua imagem/video não pôde ser aberta.")
raise TypeError # TODO: create error type, this is just a place-holder
def create_terminal_window(self):
"""
creates the console screen buffer that can be printed to
this only works on windows now, but should be easier to implement on unix cuz windows KEKW
also, only worked for fixed console widths and heights, I still wasnt able to change it to
arbitrary size, so all the gifs will be stretched to this ratio for now
TODO: change the fucking window size
"""
if sys.platform == "win32":
import pywintypes
import win32gui, win32ui, win32api, win32console
if self.console:
win32console.FreeConsole() # detaches the process from the one that was used
win32console.AllocConsole() # and creates a new one
self.window = (
win32console.GetConsoleWindow()
) # gets the new window, just for grabs, not actually being used
self.console_handle = win32console.CreateConsoleScreenBuffer()
console_mode = self.console_handle.GetConsoleMode()
console_info = self.console_handle.GetConsoleScreenBufferInfo()
self.console_handle.SetConsoleMode(
int(console_mode | 0x004)
) # sets the Enable Virtual Terminal Console Mode
self.frame_size = (console_info["Size"].X, console_info["Size"].Y)
"""
elif sys.platform.startswith('linux'):
import curses
screen_curses_object = curses.initscr()
self.frame_size = self.screen_curses_object.getmaxyx()[:-1]
screen_curses_object.endwin()
"""
def map_video_buffer_to_threads(self):
"""
maps the video frames to the processing threads, to generate the frames
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
self.screen_array = list(
tqdm(
executor.map(
self.create_gif_buffer,
range(self.frame_count),
self.image_frames_array,
),
total=self.frame_count,
)
)
def create_gif_buffer(self, frame_index, pixel_array):
"""
creates the array that hold the actual pixel values (the colors of the chars)
this function only need the pixel_data, so that it is not dependent to any format
as long as it is properly formatted (correct width and height) it will work
"""
screen_array = ""
if self.mode == "color":
last_pixel = 0, 0, 0
elif self.mode == "color216":
last_pixel = 0
descriptor = len(f"{self.filename} -- 000 -- fps: 000") # descriptor of the gif
for line_index, line in enumerate(pixel_array):
if line_index == 0 and self.is_animated:
# write descriptor at first line
screen_array += (
f"\033[7m{self.filename} -- {frame_index:03d} -- fps: 000\033[0m"
)
line = line[descriptor:]
for pixel in line:
if self.mode == "color":
r, g, b = pixel
if np.any(last_pixel != pixel):
pixel_string = f"\033[48;2;{r};{g};{b}m" # color the background with the coloring
screen_array += pixel_string
last_pixel = pixel
screen_array += self.char
elif self.mode == "color216":
r, g, b = pixel
r_index = int(6 * r / 256)
g_index = int(6 * g / 256)
b_index = int(6 * b / 256)
number_code = 16 + (r_index * 36 + g_index * 6 + b_index)
if last_pixel != number_code:
pixel_string = f"\033[48;5;{number_code}m"
screen_array += pixel_string
last_pixel = number_code
screen_array += self.char
elif self.mode == "ascii":
pixel = pixel[0]
char = self.char_array[int(len(self.char_array) * pixel / 256)]
screen_array += char
#screen_array += "\n"
screen_array += "\033[H"
return screen_array
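    # Worked example of the 6x6x6 colour-cube mapping used above (arithmetic
    # only, no new behaviour): a pixel (255, 128, 0) gives r_index = int(6*255/256) = 5,
    # g_index = int(6*128/256) = 3 and b_index = 0, so number_code = 16 + (5*36 + 3*6 + 0) = 214
    # and the emitted escape sequence is "\033[48;5;214m".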
def create_frame_bytes(self):
"""
uses mmaps to read/write faster
@o_santi -> actually i wanted to create a virtual memory mapping of the stdout, so that the maximum read/write speed could be reached
but i think this would put me in the gray area of the programmers because it seems like a really shitty idea
either way, I will find a way to do this >:(
"""
self.frames_bytes = []
for index, string in enumerate(self.screen_array):
# bytes_array.write(bytes(self.screen_array[index], encoding='utf-8'))
mmap_buffer = mmap.mmap(
-1,
length=len(bytes(string, encoding="utf-8")),
access=mmap.ACCESS_WRITE,
) # create the map
mmap_buffer[:] = bytes(string, encoding="utf-8") # write the frame to it
mmap_buffer.flush() # flush it so that it is written to memory
self.frames_bytes.append(mmap_buffer)
async def blit_screen(self, frame_index):
"""
prints the index-th frame of the screen_array to the screen
"""
t1 = time.perf_counter_ns()
frame_mmap = self.frames_bytes[frame_index] # get current frame
frame_mmap.seek(0) # seek to the start of the frame
await self.file_object.write(frame_mmap.read()) # print
if (delta := (time.perf_counter_ns() - t1) / 10 ** 9) < self.duration and self.fps_cap:
await asyncio.sleep(
self.duration - delta
)
delta += self.duration - delta
# make sure that it only sleeps when it actually is printing faster than it should
# >>>perf_counter_ns is post python 3.7<<<
if self.is_animated:
fps = f"{int(1/delta):03d}"
self.frames_bytes[(frame_index + 1) % self.frame_count][
self.descriptor_len : self.descriptor_len + 3
] = bytes(
fps, encoding="utf-8"
) # write fps to position on the next frame
async def play_animated(self):
def on_key_press_stop(key):
if key == keyboard.Key.esc:
self.is_playing = False
listener.stop()
async with aiofiles.open("CONOUT$", "wb") as self.file_object:
self.descriptor_len = len(f"{self.filename} -- 000 -- fps: 000") + 1
# for some reason, the actual file-position is one char further than the actual len
# maybe because of all the ansi-code that is being written, honestly idk
if self.is_animated:
listener = keyboard.Listener(on_press=on_key_press_stop)
listener.start()
frame_index = 0
self.is_playing = True
while self.is_playing:
await self.blit_screen(frame_index)
frame_index += 1
frame_index %= self.frame_count
else:
await self.blit_screen(0) # print the first frame so that still images can also be shown
with keyboard.Listener(on_press=on_key_press_stop) as listener:
listener.join() # waits for the key to be pressed so that you can see the image
async def draw_to_screen_main(self):
self.create_terminal_window()
self.handle_file_types()
self.map_video_buffer_to_threads()
self.create_frame_bytes()
self.console_handle.SetConsoleActiveScreenBuffer()
await self.play_animated()
def close(self):
"""
stops everything that needs stopin'
and closes everything that needs closin'
"""
[mmap_obj.close() for mmap_obj in self.frames_bytes]
self.console_handle.Close()
print("Terminado com sucesso")
def play(self):
"""
runs the main loop to draw the gif and show it in the console
pretty simple
no biggies
"""
try:
asyncio.run(self.draw_to_screen_main())
except KeyboardInterrupt:
print(
"Interrupted by user. The intended way of closing is with the ESC key"
)
finally:
self.close()
def main():
parser = argparse.ArgumentParser(
description=__doc__,
epilog="written by @o_santi, follow me on twitter @o_santi_",
)
parser.add_argument(
"-c",
"-C",
"--console",
help="whether or not a new console is created to show the gif",
action="store_true",
)
parser.add_argument("filename", help="filename to the video or image to be shown")
parser.add_argument(
"mode",
help="'color' for 24-bit colors (best to display images); \
'color216' for 6-bit coloring (best to play videos); \
'ascii' for black and white text (best for aesthetics)",
)
parser.add_argument(
"--char", help="char to print when in colored mode", default=" "
)
parser.add_argument(
"--fps-cap",
"-f",
help="whether or not the video's normal fps should be respected. defaults to false",
action="store_false",
)
args = parser.parse_args()
if args.filename:
terminal_player = TerminalPlayer(args.filename, args.mode, args.char, args.console, args.fps_cap)
terminal_player.play()
if __name__ == "__main__":
main()
| 40.229692
| 142
| 0.561412
|
7753be620e491966a793ac56834450cf639191e2
| 2,121
|
py
|
Python
|
starter/migrations/0003_auto_20161017_0406.py
|
dwetterau/starter
|
e23bfe1c86a2c20687a763f359198f5903e655d4
|
[
"MIT"
] | 1
|
2021-03-30T11:36:50.000Z
|
2021-03-30T11:36:50.000Z
|
starter/migrations/0003_auto_20161017_0406.py
|
dwetterau/starter
|
e23bfe1c86a2c20687a763f359198f5903e655d4
|
[
"MIT"
] | 4
|
2020-09-04T10:38:37.000Z
|
2021-05-07T12:22:02.000Z
|
starter/migrations/0003_auto_20161017_0406.py
|
dwetterau/starter
|
e23bfe1c86a2c20687a763f359198f5903e655d4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-17 04:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('starter', '0002_tag_task'),
]
operations = [
migrations.AddField(
model_name='task',
name='state',
field=models.SmallIntegerField(default=0, verbose_name='The current state of the task'),
preserve_default=False,
),
migrations.AlterField(
model_name='tag',
name='name',
field=models.CharField(max_length=64, verbose_name='descriptive name of the tag'),
),
migrations.AlterField(
model_name='task',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='authored_tasks', to=settings.AUTH_USER_MODEL, verbose_name='Original author of the task'),
),
migrations.AlterField(
model_name='task',
name='description',
field=models.TextField(max_length=2000, verbose_name='Description of the task'),
),
migrations.AlterField(
model_name='task',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_tasks', to=settings.AUTH_USER_MODEL, verbose_name='Current owner of the task'),
),
migrations.AlterField(
model_name='task',
name='priority',
field=models.SmallIntegerField(verbose_name='The assigned priority of the task'),
),
migrations.AlterField(
model_name='task',
name='tags',
field=models.ManyToManyField(to='starter.Tag', verbose_name='The tags for the task'),
),
migrations.AlterField(
model_name='task',
name='title',
field=models.CharField(max_length=128, verbose_name='Title of the task'),
),
]
| 35.949153
| 185
| 0.612918
|
0f0dd9cac7600a14780a806d78fbfa73bb300b23
| 2,482
|
py
|
Python
|
analytics/management/commands/analyze_user_activity.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
analytics/management/commands/analyze_user_activity.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-06-06T00:51:42.000Z
|
2022-02-10T21:38:40.000Z
|
analytics/management/commands/analyze_user_activity.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import datetime
from typing import Any, Dict
from django.core.management.base import BaseCommand, CommandParser
from django.utils.timezone import utc
from zerver.lib.statistics import seconds_usage_between
from zerver.models import UserProfile
def analyze_activity(options: Dict[str, Any]) -> None:
day_start = datetime.datetime.strptime(options["date"], "%Y-%m-%d").replace(tzinfo=utc)
day_end = day_start + datetime.timedelta(days=options["duration"])
user_profile_query = UserProfile.objects.all()
if options["realm"]:
user_profile_query = user_profile_query.filter(realm__string_id=options["realm"])
print("Per-user online duration:\n")
total_duration = datetime.timedelta(0)
for user_profile in user_profile_query:
duration = seconds_usage_between(user_profile, day_start, day_end)
if duration == datetime.timedelta(0):
continue
total_duration += duration
print("%-*s%s" % (37, user_profile.email, duration,))
print("\nTotal Duration: %s" % (total_duration,))
print("\nTotal Duration in minutes: %s" % (total_duration.total_seconds() / 60.,))
print("Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,))
class Command(BaseCommand):
help = """Report analytics of user activity on a per-user and realm basis.
This command aggregates user activity data that is collected by each user using Yakkl. It attempts
to approximate how much each user has been using Yakkl per day, measured by recording each 15 minute
period where some activity has occurred (mouse move or keyboard activity).
It will correctly not count server-initiated reloads in the activity statistics.
The duration flag can be used to control how many days to show usage duration for
Usage: ./manage.py analyze_user_activity [--realm=yakkl] [--date=2013-09-10] [--duration=1]
By default, if no date is selected 2013-09-06 is used. If no realm is provided, information
is shown for all realms"""
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('--realm', action='store')
parser.add_argument('--date', action='store', default="2013-09-06")
parser.add_argument('--duration', action='store', default=1, type=int,
help="How many days to show usage information for")
def handle(self, *args: Any, **options: Any) -> None:
analyze_activity(options)
| 43.54386
| 100
| 0.707897
|
5c426a8e9a4e42d8f5f2bc9ac6cf1da87542e736
| 1,764
|
py
|
Python
|
personal/migrations/0001_initial.py
|
Mosesvalei/Personal-gallery-application
|
fb910cb720914e017ba3506ebb6d9e39126c629e
|
[
"MIT"
] | null | null | null |
personal/migrations/0001_initial.py
|
Mosesvalei/Personal-gallery-application
|
fb910cb720914e017ba3506ebb6d9e39126c629e
|
[
"MIT"
] | null | null | null |
personal/migrations/0001_initial.py
|
Mosesvalei/Personal-gallery-application
|
fb910cb720914e017ba3506ebb6d9e39126c629e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-09-14 09:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='SOMETHING STRONG', upload_to='images/')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('author', models.CharField(max_length=30)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('update', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personal.category')),
('location', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='personal.location')),
],
options={
'ordering': ['-timestamp'],
},
),
]
| 37.531915
| 128
| 0.565193
|
37561cba2e6d01c2f626110dc1ce7fc05a0ec556
| 279
|
py
|
Python
|
app/worker/tasks/get_current_count.py
|
toh995/fastapi-faust-example
|
f0dd84fb7d5fe1edfe8f2ce4c3178315a3b9ea8b
|
[
"MIT"
] | 36
|
2020-11-29T05:28:35.000Z
|
2022-02-18T12:39:11.000Z
|
app/worker/tasks/get_current_count.py
|
toh995/fastapi-faust-example
|
f0dd84fb7d5fe1edfe8f2ce4c3178315a3b9ea8b
|
[
"MIT"
] | 2
|
2021-01-28T02:56:27.000Z
|
2022-01-19T15:55:33.000Z
|
app/worker/tasks/get_current_count.py
|
toh995/fastapi-faust-example
|
f0dd84fb7d5fe1edfe8f2ce4c3178315a3b9ea8b
|
[
"MIT"
] | 8
|
2020-12-01T11:35:06.000Z
|
2022-03-05T16:58:36.000Z
|
from app.worker import get_faust_app
from app.worker.tables.count_table import count_table
faust_app = get_faust_app()
topic = faust_app.topic("get_current_count")
@faust_app.agent(topic)
async def agent(stream):
async for _ in stream:
yield count_table["count"]
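# Illustrative note (comments only): a caller such as a web endpoint could
# retrieve the current value with something like `await agent.ask(value=None)`,
# which publishes to the "get_current_count" topic and awaits the yielded reply;
# the exact request/reply wiring depends on how the surrounding app uses Faust.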
| 21.461538
| 53
| 0.763441
|
6d36b404bc9618e06ae720713cc42d8af605bd5a
| 10,165
|
py
|
Python
|
tests/build/scipy/scipy/weave/size_check.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | 26
|
2018-02-14T23:52:58.000Z
|
2021-08-16T13:50:03.000Z
|
tests/build/scipy/scipy/weave/size_check.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | null | null | null |
tests/build/scipy/scipy/weave/size_check.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | 10
|
2018-08-13T19:38:39.000Z
|
2020-04-19T03:02:00.000Z
|
from __future__ import absolute_import, print_function
from numpy import ones, ndarray, array, asarray, concatenate, zeros, shape, \
alltrue, equal, divide, arccos, arcsin, arctan, cos, cosh, \
sin, sinh, exp, ceil, floor, fabs, log, log10, sqrt, argmin, \
argmax, argsort, around, absolute, sign, negative, float32
import sys
numericTypes = (int, long, float, complex)
def isnumeric(t):
return isinstance(t, numericTypes)
def time_it():
import time
expr = "ex[:,1:,1:] = ca_x[:,1:,1:] * ex[:,1:,1:]" \
"+ cb_y_x[:,1:,1:] * (hz[:,1:,1:] - hz[:,:-1,1:])" \
"- cb_z_x[:,1:,1:] * (hy[:,1:,1:] - hy[:,1:,:-1])"
ex = ones((10,10,10),dtype=float32)
ca_x = ones((10,10,10),dtype=float32)
cb_y_x = ones((10,10,10),dtype=float32)
cb_z_x = ones((10,10,10),dtype=float32)
hz = ones((10,10,10),dtype=float32)
hy = ones((10,10,10),dtype=float32)
N = 1
t1 = time.time()
for i in range(N):
passed = check_expr(expr,locals())
t2 = time.time()
print('time per call:', (t2 - t1)/N)
print('passed:', passed)
def check_expr(expr,local_vars,global_vars={}):
""" Currently only checks expressions (not suites).
        Doesn't check that lhs = rhs; that is checked by the compiled function.
"""
values = {}
# first handle the globals
for var,val in global_vars.items():
if isinstance(val, ndarray):
values[var] = dummy_array(val,name=var)
elif isnumeric(val):
values[var] = val
# now handle the locals
for var,val in local_vars.items():
if isinstance(val, ndarray):
values[var] = dummy_array(val,name=var)
if isnumeric(val):
values[var] = val
exec(expr,values)
try:
exec(expr,values)
except:
try:
eval(expr,values)
except:
return 0
return 1
empty = array(())
empty_slice = slice(None)
def make_same_length(x,y):
try:
Nx = len(x)
except:
Nx = 0
try:
Ny = len(y)
except:
Ny = 0
if Nx == Ny == 0:
return empty,empty
elif Nx == Ny:
return asarray(x),asarray(y)
else:
diff = abs(Nx - Ny)
front = ones(diff, int)
if Nx > Ny:
return asarray(x), concatenate((front,y))
elif Ny > Nx:
return concatenate((front,x)),asarray(y)
def binary_op_size(xx,yy):
""" This returns the resulting size from operating on xx, and yy
with a binary operator. It accounts for broadcasting, and
throws errors if the array sizes are incompatible.
"""
x,y = make_same_length(xx,yy)
res = zeros(len(x))
for i in range(len(x)):
if x[i] == y[i]:
res[i] = x[i]
elif x[i] == 1:
res[i] = y[i]
elif y[i] == 1:
res[i] = x[i]
else:
# offer more information here about which variables.
raise ValueError("frames are not aligned")
return res
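# Worked example of the broadcasting rule above (comments only):
# binary_op_size((10, 1, 5), (10, 4, 5)) compares the axes element-wise and
# returns [10. 4. 5.], while binary_op_size((3, 4), (2, 4)) raises
# ValueError("frames are not aligned") because neither mismatched axis is 1.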
class dummy_array(object):
def __init__(self,ary,ary_is_shape=0,name=None):
self.name = name
if ary_is_shape:
self.shape = ary
# self.shape = asarray(ary)
else:
try:
self.shape = shape(ary)
except:
self.shape = empty
# self.value = ary
def binary_op(self,other):
try:
x = other.shape
except AttributeError:
x = empty
new_shape = binary_op_size(self.shape,x)
return dummy_array(new_shape,1)
def __cmp__(self,other):
# This isn't an exact compare, but does work for ==
# cluge for Numeric
if isnumeric(other):
return 0
if len(self.shape) == len(other.shape) == 0:
return 0
return not alltrue(equal(self.shape,other.shape),axis=0)
def __add__(self,other):
return self.binary_op(other)
def __radd__(self,other):
return self.binary_op(other)
def __sub__(self,other):
return self.binary_op(other)
def __rsub__(self,other):
return self.binary_op(other)
def __mul__(self,other):
return self.binary_op(other)
def __rmul__(self,other):
return self.binary_op(other)
def __div__(self,other):
return self.binary_op(other)
def __rdiv__(self,other):
return self.binary_op(other)
def __mod__(self,other):
return self.binary_op(other)
def __rmod__(self,other):
return self.binary_op(other)
def __lshift__(self,other):
return self.binary_op(other)
def __rshift__(self,other):
return self.binary_op(other)
# unary ops
def __neg__(self,other):
return self
def __pos__(self,other):
return self
def __abs__(self,other):
return self
def __invert__(self,other):
return self
# Not sure what to do with coersion ops. Ignore for now.
#
# not currently supported by compiler.
# __divmod__
# __pow__
# __rpow__
# __and__
# __or__
# __xor__
# item access and slicing
def __setitem__(self,indices,val):
# ignore for now
pass
def __len__(self):
return self.shape[0]
def __getslice__(self,i,j):
i = max(i, 0)
j = max(j, 0)
return self.__getitem__((slice(i,j),))
def __getitem__(self,indices):
# ayeyaya this is a mess
# print indices, type(indices), indices.shape
if not isinstance(indices, tuple):
indices = (indices,)
if Ellipsis in indices:
raise IndexError("Ellipsis not currently supported")
new_dims = []
dim = 0
for index in indices:
try:
dim_len = self.shape[dim]
except IndexError:
raise IndexError("To many indices specified")
# if (type(index) is SliceType and index.start == index.stop == index.step):
if (index is empty_slice):
slc_len = dim_len
elif isinstance(index, slice):
beg,end,step = index.start,index.stop,index.step
# handle if they are dummy arrays
# if hasattr(beg,'value') and type(beg.value) != ndarray:
# beg = beg.value
# if hasattr(end,'value') and type(end.value) != ndarray:
# end = end.value
# if hasattr(step,'value') and type(step.value) != ndarray:
# step = step.value
if beg is None:
beg = 0
if end == sys.maxint or end is None:
end = dim_len
if step is None:
step = 1
if beg < 0:
beg += dim_len
if end < 0:
end += dim_len
# the following is list like behavior,
# which isn't adhered to by arrays.
            # FIX THIS ANOMALY IN NUMERIC!
if beg < 0:
beg = 0
if beg > dim_len:
beg = dim_len
if end < 0:
end = 0
if end > dim_len:
end = dim_len
# This is rubbish.
if beg == end:
beg,end,step = 0,0,1
elif beg >= dim_len and step > 0:
beg,end,step = 0,0,1
# elif index.step > 0 and beg <= end:
elif step > 0 and beg <= end:
pass # slc_len = abs(divide(end-beg-1,step)+1)
# handle [::-1] and [-1::-1] correctly
# elif index.step > 0 and beg > end:
elif step > 0 and beg > end:
beg,end,step = 0,0,1
elif(step < 0 and index.start is None and index.stop is None):
beg,end,step = 0,dim_len,-step
elif(step < 0 and index.start is None):
# +1 because negative stepping is inclusive
beg,end,step = end+1,dim_len,-step
elif(step < 0 and index.stop is None):
beg,end,step = 0,beg+1,-step
elif(step < 0 and beg > end):
beg,end,step = end,beg,-step
elif(step < 0 and beg < end):
beg,end,step = 0,0,-step
slc_len = abs(divide(end-beg-1,step)+1)
new_dims.append(slc_len)
else:
if index < 0:
index += dim_len
if index >= 0 and index < dim_len:
# this reduces the array dimensions by one
pass
else:
raise IndexError("Index out of range")
dim += 1
new_dims.extend(self.shape[dim:])
if 0 in new_dims:
raise IndexError("Zero length slices not currently supported")
return dummy_array(new_dims,1)
def __repr__(self):
val = str((self.name, str(self.shape)))
return val
def unary(ary):
return ary
def not_implemented(ary):
return ary
# all imported from Numeric and need to be reassigned.
unary_op = [arccos, arcsin, arctan, cos, cosh, sin, sinh,
exp,ceil,floor,fabs,log,log10,sqrt]
unsupported = [argmin,argmax, argsort,around, absolute,sign,negative,floor]
for func in unary_op:
func = unary
for func in unsupported:
func = not_implemented
def reduction(ary,axis=0):
if axis < 0:
axis += len(ary.shape)
if axis < 0 or axis >= len(ary.shape):
raise ValueError("Dimension not in array")
new_dims = list(ary.shape[:axis]) + list(ary.shape[axis+1:])
return dummy_array(new_dims,1)
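# Comment-only example of the shape bookkeeping in reduction(): for a
# dummy_array of shape (3, 4, 5), reduction(ary, axis=1) drops the reduced axis
# and returns a dummy_array of shape [3, 5]; axis=-1 is first normalised to 2
# and yields [3, 4].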
# functions currently not supported by compiler
# reductions are gonna take some array reordering for the general case,
# so this is gonna take some thought (probably some tree manipulation).
def take(ary,axis=0):
raise NotImplemented
# and all the rest
| 29.635569
| 88
| 0.532907
|
500de0d61513c89f0eec538e534a0e21b2b6aab8
| 3,821
|
py
|
Python
|
models/num2eng.py
|
martinjwkim/numbers_api
|
eb06df2424aee0c77d86fadb378d6fa2493948aa
|
[
"MIT"
] | null | null | null |
models/num2eng.py
|
martinjwkim/numbers_api
|
eb06df2424aee0c77d86fadb378d6fa2493948aa
|
[
"MIT"
] | null | null | null |
models/num2eng.py
|
martinjwkim/numbers_api
|
eb06df2424aee0c77d86fadb378d6fa2493948aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''Convert number to English words
$./num2eng.py 1411893848129211
one quadrillion, four hundred and eleven trillion, eight hundred and ninety
three billion, eight hundred and forty eight million, one hundred and twenty
nine thousand, two hundred and eleven
$
Algorithm from http://mini.net/tcl/591
'''
# modified to exclude the "and" between hundreds and tens - mld
__author__ = 'Miki Tebeka <tebeka@cs.bgu.ac.il>'
__version__ = '$Revision: 7281 $'
# $Source$
import math
# Tokens from 1000 and up
_PRONOUNCE = [
'vigintillion',
'novemdecillion',
'octodecillion',
'septendecillion',
'sexdecillion',
'quindecillion',
'quattuordecillion',
'tredecillion',
'duodecillion',
'undecillion',
'decillion',
'nonillion',
'octillion',
'septillion',
'sextillion',
'quintillion',
'quadrillion',
'trillion',
'billion',
'million ',
'thousand ',
''
]
# Tokens up to 90
_SMALL = {
'0' : '',
'1' : 'one',
'2' : 'two',
'3' : 'three',
'4' : 'four',
'5' : 'five',
'6' : 'six',
'7' : 'seven',
'8' : 'eight',
'9' : 'nine',
'10' : 'ten',
'11' : 'eleven',
'12' : 'twelve',
'13' : 'thirteen',
'14' : 'fourteen',
'15' : 'fifteen',
'16' : 'sixteen',
'17' : 'seventeen',
'18' : 'eighteen',
'19' : 'nineteen',
'20' : 'twenty',
'30' : 'thirty',
'40' : 'forty',
'50' : 'fifty',
'60' : 'sixty',
'70' : 'seventy',
'80' : 'eighty',
'90' : 'ninety'
}
def get_num(num):
'''Get token <= 90, return '' if not matched'''
return _SMALL.get(num, '')
def triplets(l):
'''Split list to triplets. Pad last one with '' if needed'''
res = []
for i in range(int(math.ceil(len(l) / 3.0))):
sect = l[i * 3 : (i + 1) * 3]
if len(sect) < 3: # Pad last section
sect += [''] * (3 - len(sect))
res.append(sect)
return res
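# For example (comment only): triplets(['1', '2', '3', '4']) pads the last group
# and returns [['1', '2', '3'], ['4', '', '']]; num2eng() passes in the reversed
# digit list, so each triplet arrives as (ones, tens, hundreds).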
def norm_num(num):
"""Normelize number (remove 0's prefix). Return number and string"""
n = int(num)
return n, str(n)
def small2eng(num):
'''English representation of a number <= 999'''
n, num = norm_num(num)
hundred = ''
ten = ''
if len(num) == 3: # Got hundreds
hundred = get_num(num[0]) + ' hundred'
num = num[1:]
n, num = norm_num(num)
if (n > 20) and (n != (n / 10 * 10)): # Got ones
tens = get_num(num[0] + '0')
ones = get_num(num[1])
ten = tens + '-' + ones
else:
ten = get_num(num)
if hundred and ten:
return hundred + ' ' + ten
#return hundred + ' and ' + ten
else: # One of the below is empty
return hundred + ten
#FIXME: Currently num2eng(1012) -> 'one thousand, twelve'
# do we want to add last 'and'?
def num2eng(num):
'''English representation of a number'''
num = str(long(num)) # Convert to string, throw if bad number
if (len(num) / 3 >= len(_PRONOUNCE)): # Sanity check
raise ValueError('Number too big')
if num == '0': # Zero is a special case
return 'zero'
# Create reversed list
x = list(num)
x.reverse()
    pron = []  # Result accumulator
ct = len(_PRONOUNCE) - 1 # Current index
for a, b, c in triplets(x): # Work on triplets
p = small2eng(c + b + a)
if p:
pron.append(p + ' ' + _PRONOUNCE[ct])
ct -= 1
# Create result
pron.reverse()
# TODO: remove strip hack
return ', '.join(pron).strip()
if __name__ == '__main__':
from sys import argv, exit
from os.path import basename
if len(argv) < 2:
print 'usage: %s NUMBER[s]' % basename(argv[0])
exit(1)
for n in argv[1:]:
try:
print num2eng(n)
except ValueError, e:
print 'Error: %s' % e
| 24.49359
| 76
| 0.544098
|
7d5d2c96fb21c8c283ba5fb98bf19b17a8fa2308
| 396
|
py
|
Python
|
dlflow/utils/check.py
|
12860/dlflow
|
6fb974fd800649af82b20c5f4e40aea123559d10
|
[
"Apache-2.0"
] | 156
|
2020-04-22T10:59:26.000Z
|
2022-02-28T09:09:01.000Z
|
dlflow/utils/check.py
|
12860/dlflow
|
6fb974fd800649af82b20c5f4e40aea123559d10
|
[
"Apache-2.0"
] | 5
|
2020-07-10T05:39:48.000Z
|
2022-03-15T14:38:23.000Z
|
dlflow/utils/check.py
|
12860/dlflow
|
6fb974fd800649af82b20c5f4e40aea123559d10
|
[
"Apache-2.0"
] | 31
|
2020-04-22T12:51:32.000Z
|
2022-03-15T07:02:05.000Z
|
from dlflow.utils.locale import i18n
def env_version_check():
import tensorflow
import sys
v_major = sys.version_info[0]
v_minor = sys.version_info[1]
assert (v_major == 3 and v_minor >= 6) or v_major > 3, \
i18n("This program requires at least Python 3.6")
assert tensorflow.__version__.startswith("2."), \
i18n("This program require Tensorflow 2.0")
| 26.4
| 60
| 0.674242
|
d247fa8d7ab635f284fcd8cfc1ad4e62e036c080
| 2,075
|
py
|
Python
|
scripts/onnx/marian_to_onnx_example.py
|
delong-coder/marian-dev
|
2ef018e829e08a688eb02d3a56f29d23b284b901
|
[
"MIT"
] | 829
|
2017-06-05T12:14:34.000Z
|
2022-03-29T17:24:03.000Z
|
scripts/onnx/marian_to_onnx_example.py
|
delong-coder/marian-dev
|
2ef018e829e08a688eb02d3a56f29d23b284b901
|
[
"MIT"
] | 732
|
2017-07-21T15:32:27.000Z
|
2022-03-22T10:26:09.000Z
|
scripts/onnx/marian_to_onnx_example.py
|
delong-coder/marian-dev
|
2ef018e829e08a688eb02d3a56f29d23b284b901
|
[
"MIT"
] | 192
|
2017-06-27T10:17:26.000Z
|
2022-03-28T05:33:11.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Example program demonstrating how to convert a Marian model using the marian_to_onnx library
to a self-contained ONNX model that implements greedy search.
"""
import os, sys
import marian_to_onnx as mo
# The following variables would normally be command-line arguments.
# We use constants here to keep it simple. They reflect an example use. You must adjust these.
my_dir = os.path.expanduser("~/young/wngt 2019/")
marian_npz = my_dir + "model.base.npz" # path to the Marian model to convert
num_decoder_layers = 6 # number of decoder layers
marian_vocs = [my_dir + "en-de.wl"] * 2 # path to the vocabularies for source and target
onnx_model_path = my_dir + "model.base.opt.onnx" # resulting model gets written here
quantize_to_bits = 8 # None for no quantization
# export Marian model as multiple ONNX models
partial_models = mo.export_marian_model_components(marian_npz, marian_vocs)
# quantize if desired
if quantize_to_bits:
mo.quantize_models_in_place(partial_models, to_bits=quantize_to_bits)
# use the ONNX models in a greedy-search
# The result is a fully self-contained model that implements greedy search.
onnx_model = mo.compose_model_components_with_greedy_search(partial_models, num_decoder_layers)
# save as ONNX file
onnx_model.save(onnx_model_path)
# run a test sentence
w2is = [{ word.rstrip(): id for id, word in enumerate(open(voc_path, "r").readlines()) } for voc_path in marian_vocs]
i2ws = [{ id: tok for tok, id in w2i.items() } for w2i in w2is]
src_tokens = "▁Republican ▁leaders ▁justifie d ▁their ▁policy ▁by ▁the ▁need ▁to ▁combat ▁electoral ▁fraud ▁.".split()
src_ids = [w2is[0][tok] for tok in src_tokens]
print(src_tokens)
print(src_ids)
Y = mo.apply_model(greedy_search_fn=onnx_model,
source_ids=src_ids + [w2is[0]["</s>"]],
target_eos_id=w2is[1]["</s>"])
print(Y.shape, Y)
tgt_tokens = [i2ws[1][y] for y in Y]
print(" ".join(tgt_tokens))
| 43.229167
| 118
| 0.718072
|
c4bb9f52891757a3ba9a7cd0febb5d253b492118
| 4,388
|
py
|
Python
|
sahara/plugins/mapr/versions/v4_0_1_mrv1/version_handler.py
|
citrix-openstack-build/sahara
|
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
|
[
"Apache-2.0"
] | null | null | null |
sahara/plugins/mapr/versions/v4_0_1_mrv1/version_handler.py
|
citrix-openstack-build/sahara
|
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
|
[
"Apache-2.0"
] | null | null | null |
sahara/plugins/mapr/versions/v4_0_1_mrv1/version_handler.py
|
citrix-openstack-build/sahara
|
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sahara import context
from sahara.plugins.mapr.util import cluster_helper as clh_utils
import sahara.plugins.mapr.util.config_utils as cu
import sahara.plugins.mapr.util.names as n
from sahara.plugins.mapr.util import scaling
from sahara.plugins.mapr.util import start_helper as start_helper
import sahara.plugins.mapr.util.validation_utils as vu
import sahara.plugins.mapr.versions.base_context as bc
from sahara.plugins.mapr.versions import base_version_handler as bvh
import sahara.plugins.mapr.versions.v4_0_1_mrv1.cluster_configurer as cc
import sahara.plugins.utils as u
version = '4.0.1.mrv1'
SIXTY_SECONDS = 60
class VersionHandler(bvh.BaseVersionHandler):
def get_plugin_version(self):
return version
def start_cluster(self, cluster):
start_helper.exec_configure_sh_on_cluster(
cluster, self.get_configure_sh_string(cluster))
start_helper.wait_for_mfs_unlock(cluster, self.get_waiting_script())
start_helper.setup_maprfs_on_cluster(
cluster, self.get_disk_setup_script())
start_helper.start_zookeeper_nodes_on_cluster(cluster)
start_helper.start_warden_on_cldb_nodes(cluster)
context.sleep(SIXTY_SECONDS)
start_helper.start_warden_on_other_nodes(cluster)
start_helper.start_ecosystem(self.get_context(cluster))
def get_waiting_script(self):
return 'plugins/mapr/util/resources/waiting_script.sh'
def scale_cluster(self, cluster, instances):
scaling.scale_cluster(cluster, instances, self.get_disk_setup_script(),
self.get_waiting_script(),
self.get_context(cluster),
self.get_configure_sh_string(cluster), True)
def decommission_nodes(self, cluster, instances):
scaling.decommission_nodes(
cluster, instances, self.get_configure_sh_string(cluster))
def get_cluster_configurer(self, cluster, plugin_spec):
return cc.ClusterConfigurer(cluster, plugin_spec)
def get_cluster_validation_rules(self, cluster):
return [vu.not_less_than_count_component_vr(n.ZOOKEEPER, 1),
vu.not_less_than_count_component_vr(n.CLDB, 1),
vu.not_less_than_count_component_vr(n.TASK_TRACKER, 1),
vu.not_less_than_count_component_vr(n.FILE_SERVER, 1),
vu.not_more_than_count_component_vr(n.OOZIE, 1),
vu.not_more_than_count_component_vr(n.WEB_SERVER, 1),
vu.equal_count_component_vr(n.JOBTRACKER, 1),
vu.node_dependency_satisfied_vr(n.TASK_TRACKER, n.FILE_SERVER),
vu.node_dependency_satisfied_vr(n.CLDB, n.FILE_SERVER)]
def get_scaling_validation_rules(self):
return []
def get_edp_validation_rules(self):
return []
def get_configure_sh_string(self, cluster):
return ('/opt/mapr/server/configure.sh'
' -C ' + clh_utils.get_cldb_nodes_ip(cluster)
+ ' -Z ' + clh_utils.get_zookeeper_nodes_ip(cluster)
+ ' -f')
def get_context(self, cluster):
return Context(cluster)
class Context(bc.BaseContext):
m7_enabled_config = n.IS_M7_ENABLED
hive_version_config = 'Hive Version'
oozie_version_config = 'Oozie Version'
def __init__(self, cluster):
self.cluster = cluster
def get_cluster(self):
return self.cluster
def is_m7_enabled(self):
configs = cu.get_cluster_configs(self.get_cluster())
return configs[n.GENERAL][Context.m7_enabled_config]
def get_hadoop_version(self):
return '0.20.2'
def get_rm_instance(self):
return u.get_instance(self.get_cluster(), n.JOBTRACKER)
def get_rm_port(self):
return '9001'
| 38.156522
| 79
| 0.708067
|
4e74a3c9b1f670998305f28a74c16da2ed70dd8d
| 2,378
|
py
|
Python
|
sympy/utilities/tests/test_matchpy_connector.py
|
caley/sympy
|
7dfa5ceadf8b1500119583b33c70b618b59ca7ac
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/utilities/tests/test_matchpy_connector.py
|
caley/sympy
|
7dfa5ceadf8b1500119583b33c70b618b59ca7ac
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/utilities/tests/test_matchpy_connector.py
|
caley/sympy
|
7dfa5ceadf8b1500119583b33c70b618b59ca7ac
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import symbols, cos, sin
from sympy.external import import_module
from sympy.utilities.matchpy_connector import WildDot, WildPlus, WildStar
matchpy = import_module("matchpy")
x, y, z = symbols("x y z")
def _get_first_match(expr, pattern):
from matchpy import ManyToOneMatcher, Pattern
matcher = ManyToOneMatcher()
matcher.add(Pattern(pattern))
return next(iter(matcher.match(expr)))
def test_matchpy_connector():
if matchpy is None:
return
from multiset import Multiset
from matchpy import Pattern, Substitution
w_ = WildDot("w_")
w__ = WildPlus("w__")
w___ = WildStar("w___")
expr = x + y
pattern = x + w_
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w_': y})
expr = x + y + z
pattern = x + w__
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w__': Multiset([y, z])})
expr = x + y + z
pattern = x + y + z + w___
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w___': Multiset()})
def test_matchpy_optional():
if matchpy is None:
return
from matchpy import Pattern, Substitution
from matchpy import ManyToOneReplacer, ReplacementRule
p = WildDot("p", optional=1)
q = WildDot("q", optional=0)
pattern = p*x + q
expr1 = 2*x
pa, subst = _get_first_match(expr1, pattern)
assert pa == Pattern(pattern)
assert subst == Substitution({'p': 2, 'q': 0})
expr2 = x + 3
pa, subst = _get_first_match(expr2, pattern)
assert pa == Pattern(pattern)
assert subst == Substitution({'p': 1, 'q': 3})
expr3 = x
pa, subst = _get_first_match(expr3, pattern)
assert pa == Pattern(pattern)
assert subst == Substitution({'p': 1, 'q': 0})
expr4 = x*y + z
pa, subst = _get_first_match(expr4, pattern)
assert pa == Pattern(pattern)
assert subst == Substitution({'p': y, 'q': z})
replacer = ManyToOneReplacer()
replacer.add(ReplacementRule(Pattern(pattern), lambda p, q: sin(p)*cos(q)))
assert replacer.replace(expr1) == sin(2)*cos(0)
assert replacer.replace(expr2) == sin(1)*cos(3)
assert replacer.replace(expr3) == sin(1)*cos(0)
assert replacer.replace(expr4) == sin(y)*cos(z)
| 27.651163
| 79
| 0.6455
|
7991d86db1a558f195424797c3c192bb8b58b635
| 808
|
py
|
Python
|
iopipe/send_report.py
|
skeptycal/iopipe-python
|
f6afba36663751779cba55ce53c0e1f2042df0d7
|
[
"Apache-2.0"
] | 74
|
2016-08-18T14:26:50.000Z
|
2021-11-21T10:58:32.000Z
|
iopipe/send_report.py
|
vemel/iopipe-python
|
46c277f9447ddb00e544437ceaa7ba263a759c1d
|
[
"Apache-2.0"
] | 198
|
2016-08-18T18:52:43.000Z
|
2021-05-09T10:01:14.000Z
|
iopipe/send_report.py
|
vemel/iopipe-python
|
46c277f9447ddb00e544437ceaa7ba263a759c1d
|
[
"Apache-2.0"
] | 23
|
2016-08-04T23:22:21.000Z
|
2020-01-20T13:54:27.000Z
|
import logging
try:
import requests
except ImportError:
from botocore.vendored import requests
logger = logging.getLogger(__name__)
session = requests.Session()
def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully")
| 26.064516
| 80
| 0.657178
|
6b9cdefdca5da3b8f1f146fa0faa7d5e89ab271f
| 14,769
|
py
|
Python
|
lib/datasets/pascal_voc.py
|
user-never-lose/fpn.pytorch
|
21b15e1414db9bb8fb8c5574cfc0ce0d40e01286
|
[
"MIT"
] | null | null | null |
lib/datasets/pascal_voc.py
|
user-never-lose/fpn.pytorch
|
21b15e1414db9bb8fb8c5574cfc0ce0d40e01286
|
[
"MIT"
] | null | null | null |
lib/datasets/pascal_voc.py
|
user-never-lose/fpn.pytorch
|
21b15e1414db9bb8fb8c5574cfc0ce0d40e01286
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
import cPickle
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
from .imdb import imdb
from .imdb import ROOT_DIR
import ds_utils
from .voc_eval import voc_eval
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from model.utils.config import cfg
# <<<< obsolete
class pascal_voc(imdb):
def __init__(self, image_set, year, devkit_path=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
# self._roidb_handler = self.selective_search_roidb
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None,
'min_size': 2}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print 'loading {}'.format(filename)
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = cPickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
            difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
        use_07_metric = int(self._year) < 2010
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print '-----------------------------------------------------'
print 'Computing results with the official MATLAB eval code.'
print '-----------------------------------------------------'
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
| 39.701613
| 88
| 0.54716
|
8e73b2f34cc2e341b374b89c5369f817de6f3340
| 27,156
|
py
|
Python
|
rest_meets_djongo/serializers.py
|
jino-cod/rest_meets_djongo
|
bec3ee0843222d6c62b189673d07e426c789af01
|
[
"MIT"
] | 22
|
2019-10-01T15:31:42.000Z
|
2021-05-04T08:01:08.000Z
|
rest_meets_djongo/serializers.py
|
jino-cod/rest_meets_djongo
|
bec3ee0843222d6c62b189673d07e426c789af01
|
[
"MIT"
] | 10
|
2019-12-07T17:21:50.000Z
|
2021-06-10T17:49:53.000Z
|
rest_meets_djongo/serializers.py
|
jino-cod/rest_meets_djongo
|
bec3ee0843222d6c62b189673d07e426c789af01
|
[
"MIT"
] | 5
|
2020-05-12T16:11:06.000Z
|
2022-01-02T02:08:23.000Z
|
import copy
from collections import namedtuple
import traceback
from django.db import models as dja_fields
from djongo.models import fields as djm_fields
from rest_framework import fields as drf_fields
from rest_framework import serializers as drf_ser
from rest_framework.settings import api_settings
from rest_framework.utils.field_mapping import get_nested_relation_kwargs
from .fields import EmbeddedModelField, ArrayModelField, ObjectIdField
from .meta_manager import get_model_meta
from rest_meets_djongo import meta_manager, kwarg_manager
# Object to track and manage nested field customization attributes
Customization = namedtuple("Customization", [
'fields',
'exclude',
'extra_kwargs',
'validate_methods'
])
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
"""
Replacement for DRF, allows for Djongo fields to not throw errors
"""
# Make sure the field is a format which can be managed by the method
for field in serializer._writable_fields:
assert not (
isinstance(field, drf_ser.BaseSerializer) and
(field.source in validated_data) and
isinstance(validated_data[field.source], (list, dict)) and not
(isinstance(field, EmbeddedModelSerializer) or
isinstance(field, drf_ser.ListSerializer) or
isinstance(field, drf_fields.ListField)
)), (
'The method `{method_name}` does not support serialization of '
            '`{field_name}` fields in writable nested fields by default.\n'
'Write a custom version of the method for `{module}.{class_name}` '
'or set the field to `read_only=True`'.format(
field_name=field.__class__.__name__,
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
# Make sure dotted-source fields weren't passed
assert not any(
'.' in field.source and
(key in validated_data) and
isinstance(validated_data[key], (list, dict))
for key, field in serializer.fields.items()
), (
'The `.{method_name}()` method does not support writable dotted-source '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'dotted-source serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
class DjongoModelSerializer(drf_ser.ModelSerializer):
"""
A modification of DRF's ModelSerializer to allow for EmbeddedModelFields
to be easily handled.
Automatically generates fields for the model, accounting for embedded
model fields in the process
"""
serializer_field_mapping = {
# Original DRF field mappings (Django Derived)
dja_fields.AutoField: drf_fields.IntegerField,
dja_fields.BigIntegerField: drf_fields.IntegerField,
dja_fields.BooleanField: drf_fields.BooleanField,
dja_fields.CharField: drf_fields.CharField,
dja_fields.CommaSeparatedIntegerField: drf_fields.CharField,
dja_fields.DateField: drf_fields.DateField,
dja_fields.DateTimeField: drf_fields.DateTimeField,
dja_fields.DecimalField: drf_fields.DecimalField,
dja_fields.EmailField: drf_fields.EmailField,
dja_fields.Field: drf_fields.ModelField,
dja_fields.FileField: drf_fields.FileField,
dja_fields.FloatField: drf_fields.FloatField,
dja_fields.ImageField: drf_fields.ImageField,
dja_fields.IntegerField: drf_fields.IntegerField,
dja_fields.NullBooleanField: drf_fields.NullBooleanField,
dja_fields.PositiveIntegerField: drf_fields.IntegerField,
dja_fields.PositiveSmallIntegerField: drf_fields.IntegerField,
dja_fields.SlugField: drf_fields.SlugField,
dja_fields.SmallIntegerField: drf_fields.IntegerField,
dja_fields.TextField: drf_fields.CharField,
dja_fields.TimeField: drf_fields.TimeField,
dja_fields.URLField: drf_fields.URLField,
dja_fields.GenericIPAddressField: drf_fields.IPAddressField,
dja_fields.FilePathField: drf_fields.FilePathField,
# REST-meets-Djongo field mappings (Djongo Derived)
djm_fields.ArrayField: ArrayModelField,
djm_fields.EmbeddedField: EmbeddedModelField,
djm_fields.ObjectIdField: ObjectIdField,
}
# Class for creating fields for embedded models w/o a serializer
serializer_generic_embed = EmbeddedModelField
# Class for creating array model fields w/o a serializer
serializer_array_embed = ArrayModelField
# Class for creating nested fields for embedded model fields
# Defaults to our version of EmbeddedModelField or ArrayModelField
serializer_nested_embed = None
# Easy trigger variable for use in inherited classes (EmbeddedModels)
_saving_instances = True
def build_instance_data(self, validated_data, instance=None):
"""
Recursively traverses provided validated data, creating a
dictionary describing the target model in the process
Returns a dictionary of model data, for use w/ creating or
updating instances of the target model
"""
# Validated data = None -> Embedded Model with blank=True.
# As such, just return None (effectively resetting the field)
if validated_data is None:
return None
obj_data = {}
for key, val in validated_data.items():
try:
field = self.fields[key]
# Special case; null values can be None, regardless of type
if val is None and field.allow_null:
obj_data[key] = None
# For other embedded models, recursively build their fields too
elif isinstance(field, EmbeddedModelSerializer):
embed_instance = None
if instance:
field_obj = get_model_meta(instance).get_field(key)
embed_instance = field_obj.value_from_object(instance)
if embed_instance:
obj_data[key] = field.update(embed_instance, val)
else:
obj_data[key] = field.create(val)
# Build defaults for EmbeddedModelFields
elif isinstance(field, EmbeddedModelField):
obj_data[key] = field.model_field(**val)
# For lists of embedded models, build each object as above
elif ((isinstance(field, drf_ser.ListSerializer) or
isinstance(field, drf_ser.ListField)) and
isinstance(field.child, EmbeddedModelSerializer)):
obj_data[key] = []
for datum in val:
embed_instance = field.child.create(datum)
obj_data[key].append(embed_instance)
# Other values, such as common datatypes, assume the data is correct
else:
obj_data[key] = val
# Dynamic data (Shouldn't exist with current Djongo, but may
# appear in future)
except KeyError:
obj_data = val
return obj_data
def create(self, validated_data):
"""
Build a new instance of the target model w/ attributes matching
validated data for the model
"""
raise_errors_on_nested_writes('create', self, validated_data)
model_class = self.Meta.model
try:
data = self.build_instance_data(validated_data)
instance = model_class._default_manager.create(**data)
return instance
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.%s.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.%s.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
model_class.__name__,
model_class._default_manager.name,
model_class.__name__,
model_class._default_manager.name,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
def update(self, instance, validated_data):
"""
Update an existing instance of the target model w/ attributes
provided from validated data
"""
raise_errors_on_nested_writes('update', self, validated_data)
data = self.build_instance_data(validated_data, instance)
for key, val in data.items():
setattr(instance, key, val)
instance.save()
return instance
def to_internal_value(self, data):
"""
Borrows DRF's implementation, but creates initial and validated
data for EmbeddedModels so `build_instance_data` can use them
        Arbitrary data is silently dropped from validated data, so as to
avoid issues down the line (assignment to an attribute which
doesn't exist)
"""
# Initial pass through for initial data writing
for field in self._writable_fields:
if (isinstance(field, EmbeddedModelSerializer) and
field.field_name in data):
field.initial_data = data[field.field_name]
ret = super(DjongoModelSerializer, self).to_internal_value(data)
# Secondary, post conversion pass to add initial data to validated data
for field in self._writable_fields:
if (isinstance(field, EmbeddedModelSerializer) and
field.field_name in ret):
field._validated_data = ret[field.field_name]
return ret
def to_representation(self, instance):
super_repr = super().to_representation(instance)
return dict(super_repr)
def get_fields(self):
"""
An override of DRF's `get_fields` to enable EmbeddedModelFields
to be correctly caught and constructed
"""
if self.url_field_name is None:
self.url_field_name = api_settings.URL_FIELD_NAME
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
"Class {serializer_name} missing `Meta.model` attribute".format(
serializer_name=self.__class__.__name__
)
)
if meta_manager.is_model_abstract(self.Meta.model) and self._saving_instances:
raise ValueError(
"Cannot use DjongoModelSerializer w/ Abstract Models.\n"
"Consider using an EmbeddedModelSerializer instead."
)
# Fetch and check useful metadata parameters
declared_fields = copy.deepcopy(self._declared_fields)
model = getattr(self.Meta, 'model')
rel_depth = getattr(self.Meta, 'depth', 0)
emb_depth = getattr(self.Meta, 'embed_depth', 5)
assert rel_depth >= 0, "'depth' may not be negative"
assert rel_depth <= 10, "'depth' may not be greater than 10"
assert emb_depth >= 0, "'embed_depth' may not be negative"
# Fetch information about the fields for our model class
info = meta_manager.get_field_info(model)
field_names = self.get_field_names(declared_fields, info)
# Determine extra field arguments + hidden fields that need to
# be included
extra_kwargs = self.get_extra_kwargs()
extra_kwargs, hidden_fields = self.get_uniqueness_extra_kwargs(
field_names, declared_fields, extra_kwargs
)
# Find fields which are required for the serializer
fields = {}
for field_name in field_names:
# Fields explicitly declared should use those declared settings
if field_name in declared_fields:
fields[field_name] = declared_fields[field_name]
continue
extra_field_kwargs = extra_kwargs.get(field_name, {})
source = extra_field_kwargs.get('source', field_name)
if source == '*':
source = field_name
# Determine field class and keyword arguments
field_class, field_kwargs = self.build_field(
source, info, model, rel_depth, emb_depth
)
# Fetch any extra_kwargs specified by the meta
field_kwargs = self.include_extra_kwargs(
field_kwargs, extra_field_kwargs
)
# Create the serializer field
fields[field_name] = field_class(**field_kwargs)
# Update with any hidden fields
fields.update(hidden_fields)
return fields
def get_field_names(self, declared_fields, info):
"""
Override of DRF's `get_field_names` function, enabling
EmbeddedModelFields to be caught and handled.
Some slight optimization is also provided. (Useful given how
many nested model fields may need to be iterated over)
Will include only direct children of the serializer; no
grandchildren are included by default
"""
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)
# Confirm that both were not provided, which is invalid
assert not (fields and exclude), (
"Cannot set both 'fields' and 'exclude' options on "
"serializer {serializer_class}.".format(
serializer_class=self.__class__.__name__
)
)
# Construct the list of fields to be serialized
if fields is not None:
# If the user just wants all fields...
if fields == drf_ser.ALL_FIELDS:
return self.get_default_field_names(declared_fields, info)
# If the user specified fields explicitly...
elif isinstance(fields, (list, tuple)):
# Check to make sure all declared fields (required for creation)
# were specified by the user
required_field_names = set(declared_fields)
for cls in self.__class__.__bases__:
required_field_names -= set(getattr(cls, '_declared_fields', []))
for field_name in required_field_names:
assert field_name in fields, (
"The field '{field_name}' was declared on serializer "
"{serializer_class}, but has not been included in the "
"'fields' option.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
# If the user didn't provide a field set in the proper format...
else:
raise TypeError(
'The `fields` option must be a list or tuple or "__all__". '
'Got {cls_name}.'.format(cls_name=type(fields).__name__)
)
# Strip out designated fields for serialization
elif exclude is not None:
fields = self.get_default_field_names(declared_fields, info)
# Ignore nested field customization; they're handled later
for field_name in [name for name in exclude if '.' not in name]:
assert field_name not in self._declared_fields, (
"Cannot both declare the field '{field_name}' and include "
"it in the {serializer_class} 'exclude' option. Remove the "
"field or, if inherited from a parent serializer, disable "
"with `{field_name} = None`.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
assert field_name in fields, (
"The field '{field_name}' was included on serializer "
"{serializer_class} in the 'exclude' option, but does "
"not match any model field.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
fields.remove(field_name)
# If the user failed to specify a set of fields to include/exclude
else:
raise AssertionError(
"Creating a ModelSerializer without either the 'fields' attribute "
"or the 'exclude' attribute has been deprecated and is now "
"disallowed. Add an explicit fields = '__all__' to the "
"{serializer_class} serializer.".format(
serializer_class=self.__class__.__name__
)
)
# Filter out child fields, which would be contained in the child
# instance anyways
return [name for name in fields if '.' not in name]
def get_default_field_names(self, declared_fields, model_info):
"""Provide the list of fields included when `__all__` is used"""
return (
[model_info.pk.name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys()) +
list(model_info.embedded.keys())
)
def get_nested_field_customization(self, field_name):
"""
Fetches nested customization for Djongo unique fields
Extracts fields, exclude, extra_kwargs, and validation methods
for the parent serializer, related to the attributes of field
Used to enable automatic writable nested field construction
This should be called after self.get_fields(). Therefore, we
assume that most field validation has already been done
"""
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)
# String used to identify nested fields
leading_str = field_name + '.'
# Get nested fields/exclusions
if fields is not None:
nested_exclude = None
if fields == drf_ser.ALL_FIELDS:
nested_fields = drf_ser.ALL_FIELDS
else:
nested_fields = [field[len(leading_str):] for
field in fields if
field.startswith(leading_str)]
else:
nested_fields = None
nested_exclude = [field[len(leading_str):] for
field in exclude if
field.startswith(leading_str)]
# Get any user specified kwargs (including read-only)
extra_kwargs = self.get_extra_kwargs()
nested_extra_kwargs = {key[len(leading_str):]: value for
key, value in extra_kwargs.items() if
key.startswith(leading_str)}
# Fetch nested validations methods for the field
# Renames them so that they may be added to the serializer's
# validation dictionary without conflicts
nested_validate_methods = {}
for attr in dir(self.__class__):
valid_lead_str = 'validate_{}__'.format(field_name.replace('.', '__'))
if attr.startswith(valid_lead_str):
method = getattr(self.__class__, attr)
method_name = 'validate' + attr[len(valid_lead_str):]
nested_validate_methods[method_name] = method
return Customization(nested_fields, nested_exclude, nested_extra_kwargs,
nested_validate_methods)
# TODO: Make this use self instead of a serializer
# or move to a utility function
def apply_customization(self, serializer, customization):
"""
Applies customization from nested fields to the serializer
Assumes basic verification has already been done
"""
if customization.fields:
serializer.Meta.fields = customization.fields
elif customization.exclude:
serializer.Meta.exclude = customization.exclude
# Apply extra_kwargs
if customization.extra_kwargs is not None:
serializer.Meta.extra_kwargs = customization.extra_kwargs
# Apply validation methods
for method_name, method in customization.validate_methods.items():
setattr(serializer, method_name, method)
def build_field(self, field_name, info, model_class, nested_depth, embed_depth):
# Basic field construction
if field_name in info.fields_and_pk:
model_field = info.fields_and_pk[field_name]
return self.build_standard_field(field_name, model_field)
# Relational field construction
elif field_name in info.relations:
relation_info = info.relations[field_name]
if not nested_depth:
return self.build_relational_field(field_name, relation_info)
else:
return self.build_nested_relation_field(field_name, relation_info, nested_depth)
# Embedded field construction
elif field_name in info.embedded:
embed_info = info.embedded[field_name]
# If the field is in the deepest depth,
if embed_depth == 0:
return self.build_root_embed_field(field_name, embed_info)
else:
return self.build_nested_embed_field(field_name, embed_info, embed_depth)
# Property field construction
elif hasattr(model_class, field_name):
return self.build_property_field(field_name, model_class)
# URL field construction
elif field_name == self.url_field_name:
return self.build_url_field(field_name, model_class)
# If all mapping above fails,
return self.build_unknown_field(field_name, model_class)
def build_nested_relation_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward/reverse relations
        Slight tweak of DRF's variant, so as to allow the nested serializer
to use our specified field mappings
"""
class NestedRelationSerializer(DjongoModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedRelationSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
def build_root_embed_field(self, field_name, embed_info):
"""Build a field instance for when the max `embed_depth` is reached"""
if embed_info.is_array:
field_class = self.serializer_array_embed
else:
field_class = self.serializer_generic_embed
field_kwargs = kwarg_manager.get_generic_embed_kwargs(embed_info)
return field_class, field_kwargs
def build_nested_embed_field(self, field_name, embed_info, depth):
"""Create a serializer for nested embedded model fields"""
subclass = self.serializer_nested_embed or EmbeddedModelSerializer
class EmbeddedSerializer(subclass):
class Meta:
model = embed_info.model_field.model_container
fields = '__all__'
embed_depth = depth - 1
# Apply customization to the nested field, if any is provided
customization = self.get_nested_field_customization(field_name)
self.apply_customization(EmbeddedSerializer, customization)
field_class = EmbeddedSerializer
field_kwargs = kwarg_manager.get_nested_embed_kwargs(field_name, embed_info)
return field_class, field_kwargs
def get_unique_for_date_validators(self):
# Not currently supported
return []
class EmbeddedModelSerializer(DjongoModelSerializer):
_saving_instances = False
def get_default_field_names(self, declared_fields, model_info):
"""Modified to not include the `pk` attribute"""
return (
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys()) +
list(model_info.embedded.keys())
)
def create(self, validated_data):
"""
        Slight tweak to not push directly to the database; the containing
model does this for us
"""
raise_errors_on_nested_writes('create', self, validated_data)
model_class = self.Meta.model
try:
data = self.build_instance_data(validated_data)
return model_class(**data)
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.%s.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.%s.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
model_class.__name__,
model_class._default_manager.name,
model_class.__name__,
model_class._default_manager.name,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
def update(self, instance, validated_data):
"""
Does not push the updated model to the database; the containing
instance will do this for us.
"""
data = self.build_instance_data(validated_data, instance)
# If the validated data is None, the instance is being set to None
# This can occur when blank=True, and effectively resets the value
if data is None:
return None
# Otherwise the instance is being updated
for key, val in data.items():
setattr(instance, key, val)
return instance
def get_unique_together_validators(self):
# Skip these validators (may be added again in future)
return []
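# Hypothetical usage sketch (an addition, not part of the original module); the
# model and field names below are placeholders, while `model_container`,
# `fields = '__all__'`, and `embed_depth` are options the serializers above
# actually read. Kept as comments because declaring Django models requires a
# configured app and settings.
#
#   from djongo import models
#
#   class Address(models.Model):
#       street = models.CharField(max_length=100)
#       class Meta:
#           abstract = True
#
#   class Customer(models.Model):
#       name = models.CharField(max_length=100)
#       address = models.EmbeddedField(model_container=Address)
#
#   class CustomerSerializer(DjongoModelSerializer):
#       class Meta:
#           model = Customer
#           fields = '__all__'
#           embed_depth = 1  # nest one level of embedded models as serializers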
| 41.083207
| 96
| 0.617838
|
8a83c0d98d1158831f856ff51b50e69059146382
| 3,055
|
py
|
Python
|
venv/lib/python3.8/site-packages/sympy/strategies/core.py
|
gaabgonca/pseudocode_parser
|
b8c75856ca6fe2d213a681c966ba2f7a1f9f8524
|
[
"Apache-2.0"
] | 2
|
2021-01-09T23:11:25.000Z
|
2021-01-11T15:04:22.000Z
|
venv/lib/python3.8/site-packages/sympy/strategies/core.py
|
gaabgonca/pseudocode_parser
|
b8c75856ca6fe2d213a681c966ba2f7a1f9f8524
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/sympy/strategies/core.py
|
gaabgonca/pseudocode_parser
|
b8c75856ca6fe2d213a681c966ba2f7a1f9f8524
|
[
"Apache-2.0"
] | 2
|
2021-01-08T23:03:23.000Z
|
2021-01-13T18:57:02.000Z
|
""" Generic SymPy-Independent Strategies """
from __future__ import print_function, division
from sympy.core.compatibility import get_function_name
identity = lambda x: x
def exhaust(rule):
""" Apply a rule repeatedly until it has no effect """
def exhaustive_rl(expr):
new, old = rule(expr), expr
while new != old:
new, old = rule(new), new
return new
return exhaustive_rl
def memoize(rule):
""" Memoized version of a rule """
cache = {}
def memoized_rl(expr):
if expr in cache:
return cache[expr]
else:
result = rule(expr)
cache[expr] = result
return result
return memoized_rl
def condition(cond, rule):
""" Only apply rule if condition is true """
def conditioned_rl(expr):
if cond(expr):
return rule(expr)
else:
return expr
return conditioned_rl
def chain(*rules):
"""
Compose a sequence of rules so that they apply to the expr sequentially
"""
def chain_rl(expr):
for rule in rules:
expr = rule(expr)
return expr
return chain_rl
def debug(rule, file=None):
""" Print out before and after expressions each time rule is used """
if file is None:
from sys import stdout
file = stdout
def debug_rl(*args, **kwargs):
expr = args[0]
result = rule(*args, **kwargs)
if result != expr:
file.write("Rule: %s\n" % get_function_name(rule))
file.write("In: %s\nOut: %s\n\n"%(expr, result))
return result
return debug_rl
def null_safe(rule):
""" Return original expr if rule returns None """
def null_safe_rl(expr):
result = rule(expr)
if result is None:
return expr
else:
return result
return null_safe_rl
def tryit(rule, exception):
""" Return original expr if rule raises exception """
def try_rl(expr):
try:
return rule(expr)
except exception:
return expr
return try_rl
def do_one(*rules):
""" Try each of the rules until one works. Then stop. """
def do_one_rl(expr):
for rl in rules:
result = rl(expr)
if result != expr:
return result
return expr
return do_one_rl
def switch(key, ruledict):
""" Select a rule based on the result of key called on the function """
def switch_rl(expr):
rl = ruledict.get(key(expr), identity)
return rl(expr)
return switch_rl
def minimize(*rules, objective=identity):
""" Select result of rules that minimizes objective
>>> from sympy.strategies import minimize
>>> inc = lambda x: x + 1
>>> dec = lambda x: x - 1
>>> rl = minimize(inc, dec)
>>> rl(4)
3
>>> rl = minimize(inc, dec, objective=lambda x: -x) # maximize
>>> rl(4)
5
"""
def minrule(expr):
return min([rule(expr) for rule in rules], key=objective)
return minrule
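if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original module):
    # the combinators above work on plain Python values as well as SymPy objects.
    halve = lambda x: x // 2 if x % 2 == 0 else x
    inc = lambda x: x + 1
    assert exhaust(halve)(40) == 5     # 40 -> 20 -> 10 -> 5, then halve is a no-op
    assert chain(halve, inc)(8) == 5   # halve(8) = 4, then inc(4) = 5
    assert do_one(inc, halve)(3) == 4  # inc changes the value first, so halve is skipped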
| 26.336207
| 75
| 0.583633
|
a5e65150dbb6e46719fedb095fad5d23e8965ca9
| 1,502
|
py
|
Python
|
LinkGeneratorMain.py
|
lukaszs02/SublimeLinkGenerator
|
9df0a266b7c408a4a2c5f28d2ec7440c88689b4b
|
[
"MIT"
] | null | null | null |
LinkGeneratorMain.py
|
lukaszs02/SublimeLinkGenerator
|
9df0a266b7c408a4a2c5f28d2ec7440c88689b4b
|
[
"MIT"
] | null | null | null |
LinkGeneratorMain.py
|
lukaszs02/SublimeLinkGenerator
|
9df0a266b7c408a4a2c5f28d2ec7440c88689b4b
|
[
"MIT"
] | null | null | null |
from os.path import dirname, realpath
import os
import re
import sublime
import sublime_plugin
class GeneratelinkCommand(sublime_plugin.TextCommand):
def run(self, edit):
link = self.generateLink()
print(link)
sublime.set_clipboard(link)
def generateLink(self):
settings = sublime.load_settings('LinkGenerator.sublime-settings')
if(re.search("gitlab|github", settings.get("url"))):
link = settings.get("url") + self.projectName() + "/blob/" + self.getBranch(settings) + self.filePath() + settings.get("lineNumberSeparator") + str(self.currentLine())
else:
link = settings.get("url") + self.projectName() + self.filePath() + settings.get("lineNumberSeparator") + str(self.currentLine())
return link
def currentLine(self):
return (self.view.rowcol(self.view.sel()[0].begin())[0]) + 1
def projectName(self):
projectName = re.search(r'[^(\\|/)]*$', str(self.view.window().extract_variables()['folder'])).group(0)
return projectName
def filePath(self):
project_folder = self.projectName()
        file_path = re.search(r'(?:' + project_folder + r')(.*$)', str(self.view.window().extract_variables()['file'])).group(1)
        return file_path
def getBranch(self, settings):
branch = settings.get("branch", "master")
if(branch == "_auto"):
branch = "master" #TODO add geting current branch form repository
return branch
| 39.526316
| 179
| 0.637816
|
70ae38c98d1c979f36babf6d223e3c40e197b4cc
| 2,305
|
py
|
Python
|
book_crawler/tests/str2num.py
|
zheng-zy/book_crawler
|
a092607a097986e9cd242809066e0948e64d8bcb
|
[
"Apache-2.0"
] | null | null | null |
book_crawler/tests/str2num.py
|
zheng-zy/book_crawler
|
a092607a097986e9cd242809066e0948e64d8bcb
|
[
"Apache-2.0"
] | null | null | null |
book_crawler/tests/str2num.py
|
zheng-zy/book_crawler
|
a092607a097986e9cd242809066e0948e64d8bcb
|
[
"Apache-2.0"
] | null | null | null |
#!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2016/11/17.
CN_NUM = {
u'〇': 0,
u'一': 1,
u'二': 2,
u'三': 3,
u'四': 4,
u'五': 5,
u'六': 6,
u'七': 7,
u'八': 8,
u'九': 9,
u'零': 0,
u'壹': 1,
u'贰': 2,
u'叁': 3,
u'肆': 4,
u'伍': 5,
u'陆': 6,
u'柒': 7,
u'捌': 8,
u'玖': 9,
u'貮': 2,
u'两': 2,
}
CN_UNIT = {
u'十': 10,
u'拾': 10,
u'百': 100,
u'佰': 100,
u'千': 1000,
u'仟': 1000,
u'万': 10000,
u'萬': 10000,
u'亿': 100000000,
u'億': 100000000,
u'兆': 1000000000000,
}
def cn2dig(cn):
lcn = list(cn)
    unit = 0 # current unit (place-value multiplier)
    ldig = [] # temporary list of digit values
while lcn:
cndig = lcn.pop()
if CN_UNIT.has_key(cndig):
unit = CN_UNIT.get(cndig)
if unit == 10000:
                ldig.append('w') # marker for the 万 (10**4) place
unit = 1
elif unit == 100000000:
                ldig.append('y') # marker for the 亿 (10**8) place
unit = 1
            elif unit == 1000000000000: # marker for the 兆 (10**12) place
ldig.append('z')
unit = 1
continue
else:
dig = CN_NUM.get(cndig)
if unit:
dig = dig * unit
unit = 0
ldig.append(dig)
    if unit == 10: # handle numbers 10-19 (leading 十)
ldig.append(10)
ret = 0
tmp = 0
while ldig:
x = ldig.pop()
if x == 'w':
tmp *= 10000
ret += tmp
tmp = 0
elif x == 'y':
tmp *= 100000000
ret += tmp
tmp = 0
elif x == 'z':
tmp *= 1000000000000
ret += tmp
tmp = 0
else:
tmp += x
ret += tmp
return ret
# ldig.reverse()
# print ldig
# print CN_NUM[u'七']
if __name__ == '__main__':
test_dig = [u'九',
u'十一',
u'一百二十三',
u'一千二百零三',
u'一万一千一百零一',
u'十万零三千六百零九',
u'一百二十三万四千五百六十七',
u'一千一百二十三万四千五百六十七',
u'一亿一千一百二十三万四千五百六十七',
u'一百零二亿五千零一万零一千零三十八',
u'一千一百一十一亿一千一百二十三万四千五百六十七',
u'一兆一千一百一十一亿一千一百二十三万四千五百六十七',
]
for cn in test_dig:
print cn2dig(cn)
| 17.730769
| 47
| 0.385249
|
327a80aacb749d8a042c3b4ef313cdacd356b5d2
| 832
|
py
|
Python
|
senti_analysis_per_product.py
|
DiptarkBose/AmazoNLP
|
301e9bd4c14a6e696bdd2b7b2924f20999f0f2b9
|
[
"MIT"
] | 1
|
2018-08-13T02:53:27.000Z
|
2018-08-13T02:53:27.000Z
|
senti_analysis_per_product.py
|
DiptarkBose/AmazoNLP
|
301e9bd4c14a6e696bdd2b7b2924f20999f0f2b9
|
[
"MIT"
] | null | null | null |
senti_analysis_per_product.py
|
DiptarkBose/AmazoNLP
|
301e9bd4c14a6e696bdd2b7b2924f20999f0f2b9
|
[
"MIT"
] | null | null | null |
from textblob import TextBlob
from nltk import sent_tokenize
# repeat this code snippet 5 times, once for each of the top 5 products obtained
review_list=[]
n=0
review_count=0
total=[]
for i in review_list:
review=sent_tokenize(i)
pos=[]
count=0
for line in review:
line=TextBlob(line)
count=count+1
pos.append(line.sentiment.polarity)
polar_mean=0
for i in pos:
polar_mean=polar_mean+i
polar_mean=polar_mean/count
print ("Positivity for review"+ str(n+1)+ "= " + str(polar_mean*100) +"%")
total.append(polar_mean*100)
n=n+1
#will introduce parameters for positive, negative, neutral soon
#will include subjectivity
#textblob makes it so easy!!!
overall_score=0
for i in total:
overall_score=overall_score+i
overall_score=overall_score/(n)
print("Overall user satisfaction for this product: "+ str(overall_score) +"%")
| 22.486486
| 78
| 0.753606
|
3e462b408fa74b7edbe801fd4a9cc4fcfb47c0a1
| 6,151
|
py
|
Python
|
notebooks/Detection/1_Build_Dataset/Shapes/utils.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | 3
|
2018-11-03T20:01:12.000Z
|
2019-05-20T12:57:51.000Z
|
notebooks/Detection/1_Build_Dataset/Shapes/utils.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/Detection/1_Build_Dataset/Shapes/utils.py
|
hadim/maskflow
|
6a70725ba26c6e65189936fd5c242c5ab15d6952
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import cv2
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum supression and returns indicies of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
    # Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
        # returns indices into ixs[1:], so add 1 to get
        # indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
        # Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
def random_shape(height, width, class_names):
"""Generates specifications of a random shape that lies within
the given height and width boundaries.
    Returns a tuple of three values:
* The shape name (square, circle, ...)
* Shape color: a tuple of 3 values, RGB.
* Shape dimensions: A tuple of values that define the shape size
and location. Differs per shape type.
"""
# Shape
shape = np.random.choice(class_names)
# Color
color = tuple([np.random.randint(0, 255) for _ in range(3)])
# Center x, y
buffer = 20
y = np.random.randint(buffer, height - buffer - 1)
x = np.random.randint(buffer, width - buffer - 1)
# Size
s = np.random.randint(10, height // 5)
return shape, color, (x, y, s)
def random_image(height, width, min_n, max_n, class_names):
"""Creates random specifications of an image with multiple shapes.
Returns the background color of the image and a list of shape
specifications that can be used to draw the image.
"""
# Pick random background color
bg_color = np.array([np.random.randint(0, 255) for _ in range(3)])
# Generate a few random shapes and record their
# bounding boxes
shapes = []
boxes = []
N = np.random.randint(min_n, max_n + 1)
for _ in range(N):
shape, color, dims = random_shape(height, width, class_names)
shapes.append((shape, color, dims))
x, y, s = dims
boxes.append([y - s, x - s, y + s, x + s])
    # Apply non-max suppression with a 0.3 threshold to avoid
# shapes covering each other
if N > 0:
keep_ixs = non_max_suppression(np.array(boxes), np.arange(N), 0.3)
shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
return bg_color, shapes
def draw_shape(image, shape, dims, color):
"""Draws a shape from the given specs."""
# Get the center x, y and the size s
x, y, s = dims
if shape == 'square':
image = cv2.rectangle(image, (x - s, y - s),
(x + s, y + s), color, -1)
elif shape == "circle":
image = cv2.circle(image, (x, y), s, color, -1)
elif shape == "triangle":
points = np.array([[(x, y - s),
(x - s / np.sin(np.radians(60)), y + s),
(x + s / np.sin(np.radians(60)), y + s),
]], dtype=np.int32)
image = cv2.fillPoly(image, points, color)
else:
raise Exception(f"Wrong shape name {shape}")
return image
def generate_image(bg_color, height, width, shapes):
bg_color = np.array(bg_color).reshape([1, 1, 3])
image = np.ones([height, width, 3], dtype=np.uint8)
image = image * bg_color.astype(np.uint8)
for shape, color, dims in shapes:
image = draw_shape(image, shape, dims, color)
return image
def generate_mask(bg_color, height, width, shapes, class_names):
"""
"""
count = len(shapes)
masks = np.zeros([height, width, count], dtype=np.uint8)
for i, (shape, _, dims) in enumerate(shapes):
masks[:, :, i:i + 1] = draw_shape(masks[:, :, i:i + 1].copy(), shape, dims, 1)
# Handle occlusions
if masks.shape[-1] > 0:
occlusion = np.logical_not(masks[:, :, -1]).astype(np.uint8)
for i in range(count - 2, -1, -1):
masks[:, :, i] = masks[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(masks[:, :, i]))
# Map class names to class IDs.
class_ids = np.array([class_names.index(s[0]) for s in shapes])
masks = masks.swapaxes(0, 2)
masks = masks.swapaxes(1, 2)
    # Sometimes masks are empty (drop them).
cleaned_masks = []
cleaned_class_ids = []
for i, mask in enumerate(masks):
n_pixels = np.sum(mask > 0)
if n_pixels > 0:
cleaned_masks.append(mask)
cleaned_class_ids.append(class_ids[i])
return np.array(cleaned_masks), np.array(cleaned_class_ids)
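if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original module):
    # build one random image plus its per-instance masks and class ids.
    class_names = ["square", "circle", "triangle"]
    height, width = 128, 128
    bg_color, shapes = random_image(height, width, min_n=1, max_n=4,
                                    class_names=class_names)
    image = generate_image(bg_color, height, width, shapes)
    masks, class_ids = generate_mask(bg_color, height, width, shapes, class_names)
    print(image.shape, masks.shape, class_ids)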
| 36.613095
| 86
| 0.595513
|
d272c48a21249f7da033aeb95602d5e682274ca7
| 1,271
|
py
|
Python
|
tests/test_PT020_deprecated_yield_fixture.py
|
kianmeng/flake8-pytest-style
|
ea50dfeaa83cd0b102f1d149ffa9567eeabe4a8e
|
[
"MIT"
] | 125
|
2019-05-23T12:44:51.000Z
|
2022-03-31T17:29:32.000Z
|
tests/test_PT020_deprecated_yield_fixture.py
|
kianmeng/flake8-pytest-style
|
ea50dfeaa83cd0b102f1d149ffa9567eeabe4a8e
|
[
"MIT"
] | 169
|
2020-01-18T13:13:46.000Z
|
2022-03-30T02:21:52.000Z
|
tests/test_PT020_deprecated_yield_fixture.py
|
kianmeng/flake8-pytest-style
|
ea50dfeaa83cd0b102f1d149ffa9567eeabe4a8e
|
[
"MIT"
] | 12
|
2020-02-03T18:34:51.000Z
|
2022-03-09T15:33:39.000Z
|
from flake8_plugin_utils import assert_error, assert_not_error
from flake8_pytest_style.config import DEFAULT_CONFIG
from flake8_pytest_style.errors import DeprecatedYieldFixture
from flake8_pytest_style.visitors import FixturesVisitor
def test_ok_no_parameters():
code = """
import pytest
@pytest.fixture()
def my_fixture():
return 0
"""
assert_not_error(FixturesVisitor, code, config=DEFAULT_CONFIG)
def test_ok_without_parens():
code = """
import pytest
@pytest.fixture
def my_fixture():
return 0
"""
config = DEFAULT_CONFIG._replace(fixture_parentheses=False)
assert_not_error(FixturesVisitor, code, config=config)
def test_error_with_parens():
code = """
import pytest
@pytest.yield_fixture()
def my_fixture():
return 0
"""
assert_error(FixturesVisitor, code, DeprecatedYieldFixture, config=DEFAULT_CONFIG)
def test_error_without_parens():
code = """
import pytest
@pytest.yield_fixture
def my_fixture():
return 0
"""
config = DEFAULT_CONFIG._replace(fixture_parentheses=False)
assert_error(FixturesVisitor, code, DeprecatedYieldFixture, config=config)
| 24.442308
| 86
| 0.683714
|
7fe4e8c5c63d5091fedbe3ae32f73d8c88374680
| 542
|
py
|
Python
|
circulos.py
|
GabrielSilva2y3d/Pygame-BOOTCAMP-IGTI
|
4fb3c57a7f14c4dba7a60fd894bd97a7f7a86c44
|
[
"MIT"
] | 1
|
2021-09-21T22:54:38.000Z
|
2021-09-21T22:54:38.000Z
|
circulos.py
|
GabrielSilva2y3d/Snake-game-python-igti
|
4fb3c57a7f14c4dba7a60fd894bd97a7f7a86c44
|
[
"MIT"
] | null | null | null |
circulos.py
|
GabrielSilva2y3d/Snake-game-python-igti
|
4fb3c57a7f14c4dba7a60fd894bd97a7f7a86c44
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from sys import exit
from random import *
pygame.init()
screen = pygame.display.set_mode((640,480),0,32)
for _ in range(25):
random_color = (randint(0,255),randint(0,255),randint(0,255))
random_pos = (randint(0,639),randint(0,479))
random_radius = randint(1,200)
pygame.draw.circle(screen,random_color,random_pos,random_radius)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
            exit()
| 25.809524
| 68
| 0.673432
|
ef783119c0d413cfa7d1c801964c9a2aee3e5705
| 10,474
|
py
|
Python
|
toontown/toon/DistributedNPCToonAI.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 3
|
2020-01-02T08:43:36.000Z
|
2020-07-05T08:59:02.000Z
|
toontown/toon/DistributedNPCToonAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
toontown/toon/DistributedNPCToonAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from otp.ai.AIBaseGlobal import *
from direct.task.Task import Task
from pandac.PandaModules import *
from toontown.toon.DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCToonAI(DistributedNPCToonBaseAI):
FourthGagVelvetRopeBan = config.GetBool('want-ban-fourth-gag-velvet-rope', 0)
def __init__(self, air, npcId, questCallback = None, hq = 0):
DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.hq = hq
self.tutorial = 0
self.pendingAvId = None
self.task = None
def getTutorial(self):
return self.tutorial
def setTutorial(self, val):
self.tutorial = val
def getHq(self):
return self.hq
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
if self.isBusy():
self.freeAvatar(avId)
return
self.busy = avId
self.air.questManager.requestInteract(avId, self)
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(20, self.sendTimeoutMovie, self.task)
DistributedNPCToonBaseAI.avatarEnter(self)
def chooseQuest(self, questId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
if not self.pendingAvId:
self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
return
if self.pendingQuests is None:
self.notify.warning('chooseQuest: not expecting a quest choice from this avatar: %s' % avId)
self.air.writeServerEvent('suspicious', avId, 'unexpected chooseQuest')
return
if questId == 0:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarCancelled(self)
self.cancelChoseQuest(avId)
return
if questId == 401:
av = self.air.getDo(avId)
if not av:
self.notify.warning('chooseQuest: av not present: %s' % avId)
return
for quest in self.pendingQuests:
if questId == quest[0]:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarChoseQuest(avId, self, *quest)
return
self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
self.pendingAvId = None
self.pendingQuests = None
def chooseTrack(self, trackId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
if not self.pendingAvId:
self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
return
if self.pendingTracks is None:
self.notify.warning('chooseTrack: not expecting a track choice from this avatar: %s' % avId)
self.air.writeServerEvent('suspicious', avId, 'unexpected chooseTrack')
return
if trackId == -1:
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseTrack(avId)
return
for track in self.pendingTracks:
if trackId == track:
self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
def sendTimeoutMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
self.npcId,
self.busy,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
self.busy = 0
return Task.done
def sendClearMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.busy = 0
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
self.npcId,
0,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
return Task.done
def rejectAvatar(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def rejectAvatarTierNotDone(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.task)
def completeQuest(self, avId, questId, rewardId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
self.npcId,
avId,
[questId, rewardId, 0],
ClockDelta.globalClockDelta.getRealNetworkTime(bits=16)])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(540.0, self.sendTimeoutMovie, self.task)
def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
self.npcId,
avId,
[questId, completeStatus, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(540.0, self.sendTimeoutMovie, self.task)
def assignQuest(self, avId, questId, rewardId, toNpcId):
self.busy = avId
if self.questCallback:
self.questCallback()
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
self.npcId,
avId,
[questId, rewardId, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(540.0, self.sendTimeoutMovie, self.task)
def presentQuestChoice(self, avId, quests):
self.busy = avId
self.pendingAvId = avId
self.pendingQuests = quests
flatQuests = []
for quest in quests:
flatQuests.extend(quest)
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
self.npcId,
avId,
flatQuests,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(20.0, self.sendTimeoutMovie, self.task)
def presentTrackChoice(self, avId, questId, tracks):
self.busy = avId
self.pendingAvId = avId
self.pendingTracks = tracks
self.pendingTrackQuest = questId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
self.npcId,
avId,
tracks,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(20.0, self.sendTimeoutMovie, self.task)
def cancelChoseQuest(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.task)
def cancelChoseTrack(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
self.clearTasks()
self.task = self.uniqueName('clearMovie')
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.task)
def setMovieDone(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
if self.busy == avId:
self.clearTasks()
self.sendClearMovie(None)
elif self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
def __handleUnexpectedExit(self, avId):
self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
self.clearTasks()
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
def clearTasks(self):
if self.task:
taskMgr.remove(self.task)
self.task = None
| 38.3663
| 119
| 0.619916
|
881f4c9531a9269963aa4d53aeda30d3f27e1909
| 1,897
|
py
|
Python
|
npass_console.py
|
appath/NPass
|
ca19db2657bec506e602269d2e5f11dff8dbd5af
|
[
"MIT"
] | null | null | null |
npass_console.py
|
appath/NPass
|
ca19db2657bec506e602269d2e5f11dff8dbd5af
|
[
"MIT"
] | null | null | null |
npass_console.py
|
appath/NPass
|
ca19db2657bec506e602269d2e5f11dff8dbd5af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import uuid
import random
import argparse
parser = argparse.ArgumentParser(prog="NPass Console", formatter_class=argparse.RawDescriptionHelpFormatter, description="""\
To prevent your passwords from being cracked by social engineering,
brute-force or dictionary attacks, and to keep your
online accounts safe, note the following:
* Always use a unique password for each account.
* Do not use personal information (names, birthdays and the like) in your passwords.
* Passwords should be at least 15 characters long and contain letters, numbers and symbols.
* Avoid weak and commonly used passwords.
* Do not reuse near-identical passwords that differ by only a single word or character.
2020 Wizard Packed, Free Software
GitHub: https://github.com/appath""")
parser.add_argument("-v", "--version", help="print version", action="version", version="%(prog)s [version 1.12.0]")
parser.add_argument("-l", dest="length", type=int, help="length of password in integer, default is 8", default=8)
parser.add_argument("-c", dest="count", type=int, help="number of passwords to generate", default=1)
options = parser.parse_args()
length = options.length
count = options.count
def get_password():
salt = uuid.uuid4().hex
password_characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
return "".join(random.choice(salt + password_characters) for i in range(length))
def info_password_0():
print("Strong passwords are unique and random.\n")
def info_password_1():
print(f"\n:: Generate {count} a random string of fixed length")
print(f" Use the sampling function when you don’t want to repeat {length} characters in a random . . .")
if __name__ == "__main__":
info_password_0()
for str_count in range(count):
print(get_password())
info_password_1()
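# Hedged aside (not part of the original script): the guidance printed above
# recommends long passwords mixing letters, numbers and symbols. When
# cryptographic-quality randomness is wanted, the standard-library `secrets`
# module is the usual tool; the helper below is an illustrative sketch only,
# not the generator used by this script.
import secrets
import string

def get_password_secrets(n: int = 15) -> str:
    """Return an n-character password drawn from letters, digits and symbols."""
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return "".join(secrets.choice(alphabet) for _ in range(n))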
| 44.116279
| 126
| 0.712704
|
c64e64c31ced9837ccfffa0d1a66aa50fbd68f56
| 4,298
|
py
|
Python
|
optim/MNIST_example.py
|
201419/PersonalCodeRepository
|
e79ac1489fa424f1334e74aab74ea25d1246b40e
|
[
"Apache-2.0"
] | null | null | null |
optim/MNIST_example.py
|
201419/PersonalCodeRepository
|
e79ac1489fa424f1334e74aab74ea25d1246b40e
|
[
"Apache-2.0"
] | null | null | null |
optim/MNIST_example.py
|
201419/PersonalCodeRepository
|
e79ac1489fa424f1334e74aab74ea25d1246b40e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torchvision.datasets as dsets
import numpy as np
import random
import copy
import time
from svrg import SVRG
from data import load_mnist
# Get MNIST using autograd data loader
# Couldn't download MNIST on my system with PyTorch's built-in dataloader
N, train_images, train_labels, test_images, test_labels = load_mnist()
# Convert training data from Numpy arrays to pytorch tensors
x, y = torch.from_numpy(train_images), torch.from_numpy(train_labels)
x, y = x.type(torch.FloatTensor), y.type(torch.FloatTensor)
# Convert training data from Numpy arrays to pytorch tensors
x_test, y_test = torch.from_numpy(test_images), torch.from_numpy(test_labels)
x_test, y_test = x_test.type(torch.FloatTensor), y_test.type(torch.FloatTensor)
# MLP dimensions
D_in, H1, H2, D_out = 784, 200, 100, 10
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H1),
torch.nn.ReLU(),
torch.nn.Linear(H1,H2),
torch.nn.ReLU(),
torch.nn.Linear(H2,D_out)
)
model2 = torch.nn.Sequential(
torch.nn.Linear(D_in, H1),
torch.nn.ReLU(),
torch.nn.Linear(H1,H2),
torch.nn.ReLU(),
torch.nn.Linear(H2,D_out)
)
## model2 = copy.deepcopy(model) # Use same initial weights for both networks
loss_fn = torch.nn.CrossEntropyLoss()
alpha = 1e-2
freq = 100 # how often to recompute large gradient
# The optimizer will not update model parameters on iterations
# where the large batches are calculated
lg_batch = 3000 # size of large gradient batch
min_batch = 300 # size of mini batch
optimizer = SVRG(model.parameters(), lr = alpha, freq = freq)
optimizer2 = torch.optim.SGD(model2.parameters(), lr = alpha)
epochs = 50
iterations = int(epochs * (60000 / min_batch))
# SVRG Training
counter = 0
total = time.time()
while(counter < iterations):
# compute large batch gradient
temp = np.random.choice(x.size()[0], lg_batch, replace = False) # calculate batch indices
indices = torch.from_numpy(temp)
temp2 = torch.index_select(x, 0, indices)
y_pred = model(temp2)
yt = torch.index_select(y, 0, indices).type(torch.LongTensor)
loss = loss_fn(y_pred, torch.max(yt, 1)[1])
optimizer.zero_grad()
loss.backward()
counter+=1
optimizer.step()
# update models using mini batch gradients
for i in range(freq-1):
temp = np.random.choice(x.size()[0], min_batch, replace = False)
indices = torch.from_numpy(temp)
temp2 = torch.index_select(x, 0, indices)
y_pred = model(temp2)
yt = torch.index_select(y, 0, indices).type(torch.LongTensor)
loss = loss_fn(y_pred, torch.max(yt, 1)[1])
optimizer.zero_grad()
loss.backward()
optimizer.step()
counter += 1
if (counter == iterations):
break
print('time for SVRG ' + str(iterations) +' steps')
print(time.time()-total)
print('')
# SGD Training
total = time.time()
for t in range(iterations):
temp = np.random.choice(x.size()[0], min_batch, replace = False)
indices = torch.from_numpy(temp)
temp2 = torch.index_select(x, 0, indices)
y_pred = model2(temp2)
yt = torch.index_select(y, 0, indices).type(torch.LongTensor)
loss = loss_fn(y_pred, torch.max(yt, 1)[1])
optimizer2.zero_grad()
loss.backward()
optimizer2.step()
print('time for SGD ' + str(iterations) +' steps')
print(time.time()-total)
print('')
# print train accuracy SVRG
y_predn = model(x).data.numpy()
yn = y.data.numpy()
pred = np.argmax(y_predn, axis = 1)
goal = np.argmax(yn, axis = 1)
acc = np.sum(pred == goal)/60000
print('train acc SVRG')
print(acc)
print('')
# print train accuracy SGD
y_predn = model2(x).data.numpy()
yn = y.data.numpy()
pred = np.argmax(y_predn, axis = 1)
goal = np.argmax(yn, axis = 1)
acc = np.sum(pred == goal)/60000
print('train acc SGD')
print(acc)
print('')
# print test accuracy SVRG
y_predn = model(x_test).data.numpy()
yn = y_test.data.numpy()
pred = np.argmax(y_predn, axis = 1)
goal = np.argmax(yn, axis = 1)
acc = np.sum(pred == goal)/10000
print('test acc SVRG')
print(acc)
print('')
# print test accuracy SGD
y_predn = model2(x_test).data.numpy()
yn = y_test.data.numpy()
pred = np.argmax(y_predn, axis = 1)
goal = np.argmax(yn, axis = 1)
acc = np.sum(pred == goal)/10000
print('test acc SGD')
print(acc)
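# Illustrative note (not part of the original script): SVRG keeps a snapshot of
# the weights plus a large-batch gradient `mu` taken at that snapshot, and every
# `freq` steps the snapshot is refreshed (the large-batch pass above). The
# helper below restates the per-step update rule under the assumption that
# grad(w, batch) returns the mini-batch gradient; it describes the idea only and
# is not the implementation used by the imported SVRG optimizer.
def svrg_step(w, w_snapshot, mu, grad, batch, lr):
    """One variance-reduced step: w <- w - lr * (g(w) - g(w_snapshot) + mu)."""
    return w - lr * (grad(w, batch) - grad(w_snapshot, batch) + mu)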
| 29.040541
| 94
| 0.681712
|
3a3bca643839e95716721f438fb83ceded6c3cf8
| 3,140
|
py
|
Python
|
extractor/AICityReID/extract_features.py
|
muzhial/deep_sort
|
7d2d56e8c72c7cb447c0622192a798924c340174
|
[
"MIT"
] | null | null | null |
extractor/AICityReID/extract_features.py
|
muzhial/deep_sort
|
7d2d56e8c72c7cb447c0622192a798924c340174
|
[
"MIT"
] | null | null | null |
extractor/AICityReID/extract_features.py
|
muzhial/deep_sort
|
7d2d56e8c72c7cb447c0622192a798924c340174
|
[
"MIT"
] | null | null | null |
import os
from shutil import copyfile
import time
import math
import yaml
import argparse
from tqdm import tqdm
import numpy as np
import scipy.io
from sklearn.cluster import DBSCAN
from PIL import Image
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, models, transforms
import torch.optim as optim
from torch.optim import lr_scheduler
from .model import (
ft_net, ft_net_angle, ft_net_dense,
ft_net_NAS, PCB, PCB_test, CPB)
from .evaluate_gpu import calculate_result
from .evaluate_rerank import calculate_result_rerank
from .re_ranking import re_ranking, re_ranking_one
from .utils import load_network
from .losses import L2Normalization
class AICityReIDExtractor(object):
def __init__(self, model_path=None, cfg=None, use_cuda=True):
super().__init__()
self.device = 'cuda:0' if use_cuda else 'cpu'
if use_cuda:
torch.backends.cudnn.benchmark = True
self.height, self.width = cfg.AICITYREID.INPUT.SIZE_TEST
if self.height == self.width:
self.data_transforms = transforms.Compose([
transforms.Resize(
(round(cfg.AICITYREID.INPUTSIZE * 1.1),
round(cfg.AICITYREID.INPUTSIZE * 1.1)),
interpolation=3),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
self.data_transforms = transforms.Compose([
transforms.Resize(
(round(self.height * 1.1), round(self.width * 1.1)),
interpolation=3),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
self.net, _ = load_network(None, cfg)
self.net.classifier.classifier = nn.Sequential()
self.net.to(self.device).eval()
    def _preprocess(self, im_crops):
im_crops_ = [
Image.fromarray(np.uint8(dat)).convert('RGB')
for dat in im_crops]
im_batch = torch.cat(
[self.data_transforms(im).unsqueeze(0)
for im in im_crops_], dim=0).float()
return im_batch
def _fliplr(self, img):
"""flip horizontal
"""
inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().to(self.device)
img_flip = img.index_select(3, inv_idx)
return img_flip
def _extract_feature(self, data):
with torch.no_grad():
n, c, h, w = data.size()
data = data.to(self.device)
# ff = torch.tensor((n, 512), dtype=torch.float32).zero_().to(self.device)
flip_data = self._fliplr(data)
f = self.net(flip_data)
ff = f + self.net(data)
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
ff = ff.div(fnorm.expand_as(ff))
return ff.cpu().numpy()
def __call__(self, img_crops):
        im_batch = self._preprocess(img_crops)
feats = self._extract_feature(im_batch)
return feats
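# Usage sketch (illustrative, not from the original module): the extractor takes
# a list of HxWx3 uint8 crops and returns one L2-normalised feature row per
# crop. `cfg` below is a placeholder for the project's config object, which must
# expose the AICITYREID fields referenced in __init__:
#
#   crops = [np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)]
#   extractor = AICityReIDExtractor(cfg=cfg, use_cuda=False)
#   feats = extractor(crops)  # shape: (len(crops), feature_dim)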
| 32.371134
| 86
| 0.600637
|
361b80df56417d3fc763cc15d17ff2d284853c52
| 4,146
|
py
|
Python
|
examples/mxnet/tagcn/train.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | 6
|
2020-04-27T16:31:53.000Z
|
2022-03-24T16:27:51.000Z
|
examples/mxnet/tagcn/train.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | null | null | null |
examples/mxnet/tagcn/train.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | 4
|
2020-03-17T11:21:56.000Z
|
2020-07-02T09:42:24.000Z
|
import argparse, time
import numpy as np
import networkx as nx
import mxnet as mx
from mxnet import gluon
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from tagcn import TAGCN
def evaluate(model, features, labels, mask):
pred = model(features).argmax(axis=1)
accuracy = ((pred == labels) * mask).sum() / mask.sum().asscalar()
return accuracy.asscalar()
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
train_mask = mx.nd.array(data.train_mask)
val_mask = mx.nd.array(data.val_mask)
test_mask = mx.nd.array(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar()))
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
# create TAGCN model
model = TAGCN(g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
mx.nd.relu,
args.dropout)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(model.collect_params(), 'adam',
{'learning_rate': args.lr, 'wd': args.weight_decay})
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
pred = model(features)
loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
loss = loss.sum() / n_train_samples
loss.backward()
trainer.step(batch_size=1)
if epoch >= 3:
loss.asscalar()
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(
epoch, np.mean(dur), loss.asscalar(), acc, n_edges / np.mean(dur) / 1000))
print()
acc = evaluate(model, features, labels, val_mask)
print("Test accuracy {:.2%}".format(acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TAGCN')
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden tagcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden tagcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
parser.add_argument("--self-loop", action='store_true',
help="graph self-loop (default=False)")
parser.set_defaults(self_loop=False)
args = parser.parse_args()
print(args)
main(args)
| 32.390625
| 90
| 0.604438
|
acb6c8c217ab422b4a9d85ce070f1f7e7bba55ff
| 90,710
|
py
|
Python
|
test/functional/p2p-segwit.py
|
magnumopusnetwork/Alchimia
|
324e2aefd1b25061fd36e2472581004a31dd84d5
|
[
"MIT"
] | null | null | null |
test/functional/p2p-segwit.py
|
magnumopusnetwork/Alchimia
|
324e2aefd1b25061fd36e2472581004a31dd84d5
|
[
"MIT"
] | null | null | null |
test/functional/p2p-segwit.py
|
magnumopusnetwork/Alchimia
|
324e2aefd1b25061fd36e2472581004a31dd84d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
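# Worked example of the rounding above (illustrative, not from the original
# file): with base_size = 1000 and total_size = 1002 the weight is
# 3*1000 + 1002 = 4002, so the virtual size is ceil(4002 / 4) = 1001, and
# int((4002 + 3) / 4) = int(1001.25) = 1001 matches it.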
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_message["reject"].reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
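# Illustrative note (not from the original test): "vsize" is the ceiling of
# weight/4, where weight = 3*stripped_size + total_size (BIP141). For example,
# a transaction with stripped size 200 and total size 300 has weight
# 3*200 + 300 = 900 and vsize ceil(900/4) = 225; the "+ 3" in the calculation
# above implements that ceiling division.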
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
# Alchimia: Blocks with nVersion < VB_TOP_BITS are rejected
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if segwit_activated == False:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
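# Illustrative note: the same BIP141 accounting applies at the block level,
# i.e. weight = 3*strippedsize + size, so witness bytes are counted once
# while non-witness bytes are counted four times.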
# Upgraded node should not ask for blocks from unupgraded
# Alchimia: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
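# Illustrative note: coinbase outputs are subject to a 100-block maturity
# rule, i.e. a coinbase created at height H may only be spent at height
# H+100 or later. The witness coinbase above was mined at some height H;
# after generate(98) the spending block would land at height H+99, one short
# of maturity, so it is rejected. Mining one more block makes the spend valid.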
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
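# Illustrative note: the three base hashtypes (SIGHASH_ALL=1, SIGHASH_NONE=2,
# SIGHASH_SINGLE=3) may each be OR'd with SIGHASH_ANYONECANPAY (0x80), giving
# six distinct signing modes; the loop below picks them at random via
# random.randint(1, 3) | anyonecanpay. Under the version-1 (BIP143) sighash
# the signature also commits to the input amount, which is what the
# too-large/too-small value tests above rely on.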
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxo for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxo
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.start_node(node_id, extra_args=[])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
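# Illustrative note: under BIP141 sigop accounting, a witness OP_CHECKSIG
# costs 1 and a bare OP_CHECKMULTISIG (not preceded by a small-integer key
# count) costs 20, hence sigops_per_script = 20*5 + 193 above. Spending
# MAX_SIGOP_COST // sigops_per_script of these outputs leaves
# MAX_SIGOP_COST % sigops_per_script sigops of budget, which the "toomany"
# program overshoots by one and the "justright" program consumes exactly.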
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even though this getblocktemplate call doesn't indicate segwit support,
# a segwit-aware node (node0) will still be signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
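# Illustrative note: the default witness commitment is an output script of
# the form OP_RETURN followed by a 36-byte push: the 4-byte header 0xaa21a9ed
# and SHA256d(witness merkle root || witness nonce). Here the coinbase's
# wtxid is treated as all zeroes (ser_uint256(0) above) and the nonce passed
# to get_witness_script() is 0.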
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
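# Illustrative note: the four scripts above are sized to probe the P2WSH
# standardness limits exercised below: at most 100 witness stack items
# (excluding the witnessScript), at most 80 bytes per stack element, and a
# witnessScript of at most 3600 bytes (the 3601-byte variant is non-standard).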
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def test_reject_blocks(self):
print ("\tTesting rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(nVersion=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_reject_blocks()
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
# Alchimia: Disable test due to occasional travis issue
#self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
avg_line_length: 46.304237 | max_line_length: 150 | alphanum_fraction: 0.658626

hexsha: 0282ad7623f559db43460425abbffb5401f91337 | size: 59 | ext: py | lang: Python
max_stars_repo_path: ocr_cipher_solver/data_formats/colors.py | max_stars_repo_name: ocr-cipher-solver/ocr-cipher-solver | max_stars_repo_head_hexsha: 0932a631149164efb2abc28274b65ded6df79caf | max_stars_repo_licenses: ["MIT"] | max_stars_count: null
max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-10-09T16:20:24.000Z | max_issues_repo_issues_event_max_datetime: 2021-10-09T16:20:24.000Z
max_forks_count: null
from typing import Tuple
RGBA = Tuple[int, int, int, int]
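# Illustrative usage sketch (not part of the original module): it shows the
# RGBA alias annotating a small helper. The helper name `rgba_to_hex` is
# hypothetical and is not referenced elsewhere in this package.
def rgba_to_hex(color: RGBA) -> str:
    """Render an RGBA tuple as a '#RRGGBBAA' hex string."""
    return '#' + ''.join(format(channel, '02x') for channel in color)


# Example: rgba_to_hex((255, 0, 0, 255)) -> '#ff0000ff'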
avg_line_length: 14.75 | max_line_length: 32 | alphanum_fraction: 0.711864

hexsha: 2db0e5e9ed129382a1cba7c7e53a50291124ac5c | size: 26530 | ext: py | lang: Python
max_stars_repo_path: hpeOneView/connection.py | max_stars_repo_name: LaudateCorpus1/oneview-python | max_stars_repo_head_hexsha: 8373b144f0774f6b2193fe5bafdd099d1d21796a | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 18 | max_stars_repo_stars_event_min_datetime: 2019-12-13T16:55:14.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-24T15:37:32.000Z
max_issues_count: 55 | max_issues_repo_issues_event_min_datetime: 2019-12-20T09:56:18.000Z | max_issues_repo_issues_event_max_datetime: 2022-01-28T06:42:04.000Z
max_forks_count: 33 | max_forks_repo_forks_event_min_datetime: 2019-11-12T08:53:22.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-07T11:20:14.000Z
# -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
"""
connection.py
~~~~~~~~~~~~~~
This module maintains communication with the appliance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
import http.client
import json
import logging
import shutil # for shutil.copyfileobj()
import mmap # so we can upload the iso without having to load it in memory
import os
import ssl
import time
import traceback
from hpeOneView.exceptions import HPEOneViewException
logger = logging.getLogger(__name__)
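# Illustrative usage sketch (not part of the original module), assuming a
# reachable appliance and valid credentials; the host name and credential
# values below are placeholders:
#
#     conn = connection('oneview.example.com')
#     conn.login({'userName': 'administrator', 'password': '<password>'})
#     version = conn.get(uri['version'])
#     conn.logout()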
class connection(object):
def __init__(self, applianceIp, api_version=None, sslBundle=False, timeout=None):
self._session = None
self._host = applianceIp
self._cred = None
self._proxyHost = None
self._proxyPort = None
self._doProxy = False
self._sslTrustAll = True
self._sslBundle = sslBundle
self._sslTrustedBundle = self.set_trusted_ssl_bundle(sslBundle)
self._nextPage = None
self._prevPage = None
self._numTotalRecords = 0
self._numDisplayedRecords = 0
self._validateVersion = False
self._timeout = timeout
if not api_version:
api_version = self.get_default_api_version()
self._apiVersion = int(api_version)
self._headers = {
'X-API-Version': self._apiVersion,
'Accept': 'application/json',
'Content-Type': 'application/json'}
def get_default_api_version(self):
self._headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'}
version = self.get(uri['version'])
return version['currentVersion']
def validateVersion(self):
version = self.get(uri['version'])
if 'minimumVersion' in version:
if self._apiVersion < version['minimumVersion']:
raise HPEOneViewException('Unsupported API Version')
if 'currentVersion' in version:
if self._apiVersion > version['currentVersion']:
raise HPEOneViewException('Unsupported API Version')
self._validateVersion = True
def set_proxy(self, proxyHost, proxyPort):
self._proxyHost = proxyHost
self._proxyPort = proxyPort
self._doProxy = True
def set_trusted_ssl_bundle(self, sslBundle):
if sslBundle:
self._sslTrustAll = False
return sslBundle
def get_session(self):
return self._session
def get_session_id(self):
return self._headers.get('auth')
def set_session_id(self, session_id):
self._headers['auth'] = session_id
self._session = True
def get_host(self):
return self._host
def get_by_uri(self, xuri):
return self.get(xuri)
def make_url(self, path):
return 'https://%s%s' % (self._host, path)
def do_http(self, method, path, body, custom_headers=None):
http_headers = self._headers.copy()
if custom_headers:
http_headers.update(custom_headers)
bConnected = False
conn = None
while bConnected is False:
try:
conn = self.get_connection()
conn.request(method, path, body, http_headers)
resp = conn.getresponse()
tempbytes = ''
try:
tempbytes = resp.read()
tempbody = tempbytes.decode('utf-8')
except UnicodeDecodeError: # Might be binary data
tempbody = tempbytes
conn.close()
bConnected = True
return resp, tempbody
if tempbody:
try:
body = json.loads(tempbody)
except ValueError:
body = tempbody
conn.close()
bConnected = True
except http.client.BadStatusLine:
logger.warning('Bad Status Line. Trying again...')
if conn:
conn.close()
time.sleep(1)
continue
except http.client.HTTPException:
raise HPEOneViewException('Failure during login attempt.\n %s' % traceback.format_exc())
return resp, body
def download_to_stream(self, stream_writer, url, body='', method='GET', custom_headers=None):
http_headers = self._headers.copy()
if custom_headers:
http_headers.update(custom_headers)
chunk_size = 4096
conn = None
successful_connected = False
while not successful_connected:
try:
conn = self.get_connection()
conn.request(method, url, body, http_headers)
resp = conn.getresponse()
if resp.status >= 400:
self.__handle_download_error(resp, conn)
if resp.status == 302:
return self.download_to_stream(stream_writer=stream_writer,
url=resp.getheader('Location'),
body=body,
method=method,
custom_headers=http_headers)
tempbytes = True
while tempbytes:
tempbytes = resp.read(chunk_size)
if tempbytes: # filter out keep-alive new chunks
stream_writer.write(tempbytes)
conn.close()
successful_connected = True
except http.client.BadStatusLine:
logger.warning('Bad Status Line. Trying again...')
if conn:
conn.close()
time.sleep(1)
continue
except http.client.HTTPException:
raise HPEOneViewException('Failure during login attempt.\n %s' % traceback.format_exc())
return successful_connected
def __handle_download_error(self, resp, conn):
try:
tempbytes = resp.read()
tempbody = tempbytes.decode('utf-8')
try:
body = json.loads(tempbody)
except ValueError:
body = tempbody
except UnicodeDecodeError: # Might be binary data
body = tempbytes
conn.close()
if not body:
body = "Error " + str(resp.status)
conn.close()
raise HPEOneViewException(body)
def get_connection(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
if self._sslTrustAll is False:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(self._sslTrustedBundle)
if self._doProxy is False:
conn = http.client.HTTPSConnection(self._host,
context=context,
timeout=self._timeout)
else:
conn = http.client.HTTPSConnection(self._proxyHost,
self._proxyPort,
context=context,
timeout=self._timeout)
conn.set_tunnel(self._host, 443)
else:
context.verify_mode = ssl.CERT_NONE
if self._doProxy is False:
conn = http.client.HTTPSConnection(self._host,
context=context,
timeout=self._timeout)
else:
conn = http.client.HTTPSConnection(self._proxyHost,
self._proxyPort,
context=context,
timeout=self._timeout)
conn.set_tunnel(self._host, 443)
return conn
def _open(self, name, mode):
return open(name, mode)
def encode_multipart_formdata(self, fields, files, baseName, verbose=False):
"""
Fields is a sequence of (name, value) elements for regular form fields.
Files is a sequence of (name, filename, value) elements for data
to be uploaded as files
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
if verbose is True:
print(('Encoding ' + baseName + ' for upload...'))
fin = self._open(files, 'rb')
fout = self._open(files + '.b64', 'wb')
fout.write(bytearray('--' + BOUNDARY + CRLF, 'utf-8'))
fout.write(bytearray('Content-Disposition: form-data'
'; name="file"; filename="' + baseName + '"' + CRLF, "utf-8"))
fout.write(bytearray('Content-Type: application/octet-stream' + CRLF,
'utf-8'))
fout.write(bytearray(CRLF, 'utf-8'))
shutil.copyfileobj(fin, fout)
fout.write(bytearray(CRLF, 'utf-8'))
fout.write(bytearray('--' + BOUNDARY + '--' + CRLF, 'utf-8'))
fout.write(bytearray(CRLF, 'utf-8'))
fout.close()
fin.close()
return content_type
def post_multipart_with_response_handling(self, uri, file_path, baseName):
resp, body = self.post_multipart(uri, None, file_path, baseName)
if resp.status == 202:
task = self.__get_task_from_response(resp, body)
return task, body
if self.__body_content_is_task(body):
return body, body
return None, body
def post_multipart(self, uri, fields, files, baseName, verbose=False):
content_type = self.encode_multipart_formdata(fields, files, baseName,
verbose)
inputfile = self._open(files + '.b64', 'rb')
mappedfile = mmap.mmap(inputfile.fileno(), 0, access=mmap.ACCESS_READ)
if verbose is True:
print(('Uploading ' + files + '...'))
conn = self.get_connection()
# conn.set_debuglevel(1)
conn.connect()
conn.putrequest('POST', uri)
conn.putheader('uploadfilename', baseName)
conn.putheader('auth', self._headers['auth'])
conn.putheader('Content-Type', content_type)
totalSize = os.path.getsize(files + '.b64')
conn.putheader('Content-Length', totalSize)
conn.putheader('X-API-Version', self._apiVersion)
conn.endheaders()
while mappedfile.tell() < mappedfile.size():
# Send 1MB at a time
# NOTE: Be careful raising this value as the read chunk
# is stored in RAM
readSize = 1048576
conn.send(mappedfile.read(readSize))
if verbose is True:
print('%d bytes sent... \r' % mappedfile.tell())
mappedfile.close()
inputfile.close()
os.remove(files + '.b64')
response = conn.getresponse()
body = response.read().decode('utf-8')
if body:
try:
body = json.loads(body)
except ValueError:
body = response.read().decode('utf-8')
conn.close()
if response.status >= 400:
raise HPEOneViewException(body)
return response, body
###########################################################################
# Utility functions for making requests - the HTTP verbs
###########################################################################
def get(self, uri, custom_headers=None):
resp, body = self.do_http('GET', uri, '', custom_headers=custom_headers)
if resp.status >= 400:
raise HPEOneViewException(body)
if resp.status == 302:
body = self.get(resp.getheader('Location'))
if type(body) is dict:
if 'nextPageUri' in body:
self._nextPage = body['nextPageUri']
if 'prevPageUri' in body:
self._prevPage = body['prevPageUri']
if 'total' in body:
self._numTotalRecords = body['total']
if 'count' in body:
self._numDisplayedRecords = body['count']
return body
def getNextPage(self):
body = self.get(self._nextPage)
return get_members(body)
def getPrevPage(self):
body = self.get(self._prevPage)
return get_members(body)
def getLastPage(self):
while self._nextPage is not None:
members = self.getNextPage()
return members
def getFirstPage(self):
while self._prevPage is not None:
members = self.getPrevPage()
return members
def delete(self, uri, custom_headers=None):
return self.__do_rest_call('DELETE', uri, {}, custom_headers=custom_headers)
def put(self, uri, body, custom_headers=None):
return self.__do_rest_call('PUT', uri, body, custom_headers=custom_headers)
def post(self, uri, body, custom_headers=None):
return self.__do_rest_call('POST', uri, body, custom_headers=custom_headers)
def patch(self, uri, body, custom_headers=None):
return self.__do_rest_call('PATCH', uri, body, custom_headers=custom_headers)
def __body_content_is_task(self, body):
return isinstance(body, dict) and 'category' in body and body['category'] == 'tasks'
def __get_task_from_response(self, response, body):
location = response.getheader('Location')
if location:
task = self.get(location)
elif 'taskState' in body:
# This check is needed to handle a status response 202 without the location header,
# as is for PowerDevices. We are not sure if there are more resources with the same behavior.
task = body
else:
# For the resource Label the status is 202 but the response does not contain a task.
task = None
return task
def __do_rest_call(self, http_method, uri, body, custom_headers):
resp, body = self.do_http(method=http_method,
path=uri,
body=json.dumps(body),
custom_headers=custom_headers)
if resp.status >= 400:
raise HPEOneViewException(body)
if resp.status == 304:
if body and not isinstance(body, dict):
try:
body = json.loads(body)
except Exception:
pass
elif resp.status == 202:
task = self.__get_task_from_response(resp, body)
return task, body
if self.__body_content_is_task(body):
return body, body
return None, body
###########################################################################
# EULA
###########################################################################
def get_eula_status(self):
return self.get(uri['eulaStatus'])
def set_eula(self, supportAccess='yes'):
eula = make_eula_dict(supportAccess)
self.post(uri['eulaSave'], eula)
return
###########################################################################
# Initial Setup
###########################################################################
def change_initial_password(self, newPassword):
password = make_initial_password_change_dict('Administrator',
'admin', newPassword)
# This will throw an exception if the password is already changed
self.post(uri['changePassword'], password)
###########################################################################
# Login/Logout to/from appliance
###########################################################################
def login(self, cred, verbose=False):
try:
if self._validateVersion is False:
self.validateVersion()
except Exception:
raise(HPEOneViewException('Failure during login attempt.\n %s' % traceback.format_exc()))
cred['loginMsgAck'] = True # This will handle the login acknowledgement message
self._cred = cred
try:
if self._cred.get("sessionID"):
self.set_session_id(self._cred["sessionID"])
task, body = self.put(uri['loginSessions'], None)
else:
self._cred.pop("sessionID", None)
task, body = self.post(uri['loginSessions'], self._cred)
except HPEOneViewException:
logger.exception('Login failed')
raise
auth = body['sessionID']
# Add the auth ID to the headers dictionary
self._headers['auth'] = auth
self._session = True
if verbose is True:
print(('Session Key: ' + auth))
logger.info('Logged in successfully')
def logout(self, verbose=False):
# resp, body = self.do_http(method, uri['loginSessions'] \
# , body, self._headers)
try:
self.delete(uri['loginSessions'])
except HPEOneViewException:
logger.exception('Logout failed')
raise
if verbose is True:
print('Logged Out')
del self._headers['auth']
self._session = False
logger.info('Logged out successfully')
return None
def enable_etag_validation(self):
"""
Enable the concurrency control for the PUT and DELETE requests, in which the requests are conditionally
processed only if the provided entity tag in the body matches the latest entity tag stored for the resource.
The eTag validation is enabled by default.
"""
self._headers.pop('If-Match', None)
def disable_etag_validation(self):
"""
Disable the concurrency control for the PUT and DELETE requests. The requests will be forced without specifying
an explicit ETag. This method sets an If-Match header of "*".
"""
self._headers['If-Match'] = '*'
uri = {
# ------------------------------------
# Settings
# ------------------------------------
'globalSettings': '/rest/global-settings',
'vol-tmplate-policy': '/rest/global-settings/StorageVolumeTemplateRequired',
'eulaStatus': '/rest/appliance/eula/status',
'eulaSave': '/rest/appliance/eula/save',
'serviceAccess': '/rest/appliance/settings/enableServiceAccess',
'service': '/rest/appliance/settings/serviceaccess',
'applianceNetworkInterfaces': '/rest/appliance/network-interfaces',
'healthStatus': '/rest/appliance/health-status',
'version': '/rest/version',
'supportDump': '/rest/appliance/support-dumps',
'backups': '/rest/backups',
'archive': '/rest/backups/archive',
'dev-read-community-str': '/rest/appliance/device-read-community-string',
'licenses': '/rest/licenses',
'nodestatus': '/rest/appliance/nodeinfo/status',
'nodeversion': '/rest/appliance/nodeinfo/version',
'shutdown': '/rest/appliance/shutdown',
'trap': '/rest/appliance/trap-destinations',
'restores': '/rest/restores',
'domains': '/rest/domains',
'schema': '/rest/domains/schema',
'progress': '/rest/appliance/progress',
'appliance-firmware': '/rest/appliance/firmware/image',
'fw-pending': '/rest/appliance/firmware/pending',
# ------------------------------------
# Security
# ------------------------------------
'activeSessions': '/rest/active-user-sessions',
'loginSessions': '/rest/login-sessions',
'users': '/rest/users',
'userRole': '/rest/users/role',
'changePassword': '/rest/users/changePassword',
'roles': '/rest/roles',
'category-actions': '/rest/authz/category-actions',
'role-category-actions': '/rest/authz/role-category-actions',
'validator': '/rest/authz/validator',
# ------------------------------------
# Facilities
# ------------------------------------
'datacenters': '/rest/datacenters',
'powerDevices': '/rest/power-devices',
'powerDevicesDiscover': '/rest/power-devices/discover',
'racks': '/rest/racks',
# ------------------------------------
# Systems
# ------------------------------------
'servers': '/rest/server-hardware',
'server-hardware-types': '/rest/server-hardware-types',
'enclosures': '/rest/enclosures',
'enclosureGroups': '/rest/enclosure-groups',
'enclosurePreview': '/rest/enclosure-preview',
'fwUpload': '/rest/firmware-bundles',
'fwDrivers': '/rest/firmware-drivers',
# ------------------------------------
# Connectivity
# ------------------------------------
'conn': '/rest/connections',
'ct': '/rest/connection-templates',
'enet': '/rest/ethernet-networks',
'fcnet': '/rest/fc-networks',
'nset': '/rest/network-sets',
'li': '/rest/logical-interconnects',
'lig': '/rest/logical-interconnect-groups',
'ic': '/rest/interconnects',
'ictype': '/rest/interconnect-types',
'uplink-sets': '/rest/uplink-sets',
'ld': '/rest/logical-downlinks',
'idpool': '/rest/id-pools',
'vmac-pool': '/rest/id-pools/vmac',
'vwwn-pool': '/rest/id-pools/vwwn',
'vsn-pool': '/rest/id-pools/vsn',
# ------------------------------------
# Server Profiles
# ------------------------------------
'profiles': '/rest/server-profiles',
'profile-templates': '/rest/server-profile-templates',
'profile-networks': '/rest/server-profiles/available-networks',
'profile-networks-schema': '/rest/server-profiles/available-networks/schema',
'profile-available-servers': '/rest/server-profiles/available-servers',
'profile-available-servers-schema': '/rest/server-profiles/available-servers/schema',
'profile-available-storage-system': '/rest/server-profiles/available-storage-system',
'profile-available-storage-systems': '/rest/server-profiles/available-storage-systems',
'profile-available-targets': '/rest/server-profiles/available-targets',
'profile-messages-schema': '/rest/server-profiles/messages/schema',
'profile-ports': '/rest/server-profiles/profile-ports',
'profile-ports-schema': '/rest/server-profiles/profile-ports/schema',
'profile-schema': '/rest/server-profiles/schema',
# ------------------------------------
# Health
# ------------------------------------
'alerts': '/rest/alerts',
'events': '/rest/events',
'audit-logs': '/rest/audit-logs',
'audit-logs-download': '/rest/audit-logs/download',
# ------------------------------------
# Certificates
# ------------------------------------
'certificates': '/rest/certificates',
'ca': '/rest/certificates/ca',
'crl': '/rest/certificates/ca/crl',
'rabbitmq-kp': '/rest/certificates/client/rabbitmq/keypair',
'rabbitmq': '/rest/certificates/client/rabbitmq',
'cert-https': '/rest/certificates/https',
# ------------------------------------
# Searching and Indexing
# ------------------------------------
'resource': '/rest/index/resources',
'association': '/rest/index/associations',
'tree': '/rest/index/trees',
'search-suggestion': '/rest/index/search-suggestions',
# ------------------------------------
# Logging and Tracking
# ------------------------------------
'task': '/rest/tasks',
# ------------------------------------
# Storage
# ------------------------------------
'storage-pools': '/rest/storage-pools',
'storage-systems': '/rest/storage-systems',
'storage-volumes': '/rest/storage-volumes',
'vol-templates': '/rest/storage-volume-templates',
'connectable-vol': '/rest/storage-volume-templates/connectable-volume-templates',
'attachable-volumes': '/rest/storage-volumes/attachable-volumes',
# ------------------------------------
# FC-SANS
# ------------------------------------
'device-managers': '/rest/fc-sans/device-managers',
'managed-sans': '/rest/fc-sans/managed-sans',
'providers': '/rest/fc-sans/providers',
# ------------------------------------
# Metrics
# ------------------------------------
'metricsCapabilities': '/rest/metrics/capability',
'metricsConfiguration': '/rest/metrics/configuration',
# ------------------------------------
# Uncategorized
# ------------------------------------
'unmanaged-devices': '/rest/unmanaged-devices',
# ------------------------------------
# Hypervisors
# ------------------------------------
'hypervisor-managers': '/rest/hypervisor-managers'
}
############################################################################
# Utility to print resource to standard output
############################################################################
def get_members(mlist):
if not mlist:
return []
if not mlist['members']:
return []
return mlist['members']
def get_member(mlist):
if not mlist:
return None
if not mlist['members']:
return None
return mlist['members'][0]
def make_eula_dict(supportAccess):
return {'supportAccess': supportAccess}
def make_initial_password_change_dict(userName, oldPassword, newPassword):
return {
'userName': userName,
'oldPassword': oldPassword,
'newPassword': newPassword}
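# Illustrative sketch (not part of the library): exercising the small helpers
# above with made-up data. The collection shape mirrors what the REST API
# returns ({'members': [...]}), but the values are invented for the example.
def _example_member_helpers():
    collection = {'members': [{'name': 'enc1'}, {'name': 'enc2'}]}
    assert get_members(collection) == [{'name': 'enc1'}, {'name': 'enc2'}]
    assert get_member(collection) == {'name': 'enc1'}
    assert get_members({}) == []
    assert make_eula_dict('yes') == {'supportAccess': 'yes'}
    assert make_initial_password_change_dict('Administrator', 'admin', 'newpass') == {
        'userName': 'Administrator',
        'oldPassword': 'admin',
        'newPassword': 'newpass'}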
| 38.449275 | 119 | 0.547908 |
f62bdac439a6b0a8eef843ff2cbbfa436af1f101 | 81991 | py | Python | qcodes/dataset/sqlite_base.py | joe1gi/Qcodes | 62f18d7ac1aaf50eb17b9c5e5e5fb1f803cabf05 | ["MIT"] | null | null | null | qcodes/dataset/sqlite_base.py | joe1gi/Qcodes | 62f18d7ac1aaf50eb17b9c5e5e5fb1f803cabf05 | ["MIT"] | null | null | null | qcodes/dataset/sqlite_base.py | joe1gi/Qcodes | 62f18d7ac1aaf50eb17b9c5e5e5fb1f803cabf05 | ["MIT"] | null | null | null |
import sys
from contextlib import contextmanager
import logging
import sqlite3
import time
import io
import warnings
from typing import (Any, List, Optional, Tuple, Union, Dict, cast, Callable,
Sequence, DefaultDict)
import itertools
from functools import wraps
from collections import defaultdict
from tqdm import tqdm
from numbers import Number
from numpy import ndarray
import numpy as np
from distutils.version import LooseVersion
import wrapt
import qcodes as qc
import unicodedata
from qcodes.dataset.dependencies import InterDependencies
from qcodes.dataset.descriptions import RunDescriber
from qcodes.dataset.param_spec import ParamSpec
from qcodes.dataset.guids import generate_guid, parse_guid
log = logging.getLogger(__name__)
# represent the type of data we can/want map to sqlite column
VALUE = Union[str, Number, List, ndarray, bool]
VALUES = List[VALUE]
# Functions decorated as 'upgrader' are inserted into this dict
# The newest database version is thus determined by the number of upgrades
# in this module
# The key is the TARGET VERSION of the upgrade, i.e. the first key is 1
_UPGRADE_ACTIONS: Dict[int, Callable] = {}
_experiment_table_schema = """
CREATE TABLE IF NOT EXISTS experiments (
-- this will autoincrement by default if
-- no value is specified on insert
exp_id INTEGER PRIMARY KEY,
name TEXT,
sample_name TEXT,
start_time INTEGER,
end_time INTEGER,
-- this is the last counter registered
-- 1 based
run_counter INTEGER,
-- this is the format string used to construct
-- the run name
format_string TEXT
-- TODO: maybe I had a good reason for this double primary key
-- PRIMARY KEY (exp_id, start_time, sample_name)
);
"""
_runs_table_schema = """
CREATE TABLE IF NOT EXISTS runs (
-- this will autoincrement by default if
-- no value is specified on insert
run_id INTEGER PRIMARY KEY,
exp_id INTEGER,
-- friendly name for the run
name TEXT,
-- the name of the table which stores
-- the actual results
result_table_name TEXT,
-- this is the run counter in its experiment 0 based
result_counter INTEGER,
---
run_timestamp INTEGER,
completed_timestamp INTEGER,
is_completed BOOL,
parameters TEXT,
-- metadata fields are added dynamically
FOREIGN KEY(exp_id)
REFERENCES
experiments(exp_id)
);
"""
_layout_table_schema = """
CREATE TABLE IF NOT EXISTS layouts (
layout_id INTEGER PRIMARY KEY,
run_id INTEGER,
-- name matching column name in result table
parameter TEXT,
label TEXT,
unit TEXT,
inferred_from TEXT,
FOREIGN KEY(run_id)
REFERENCES
runs(run_id)
);
"""
_dependencies_table_schema = """
CREATE TABLE IF NOT EXISTS dependencies (
dependent INTEGER,
independent INTEGER,
axis_num INTEGER
);
"""
_unicode_categories = ('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nd', 'Pc', 'Pd', 'Zs')
# in the current version, these are the standard columns of the "runs" table
# Everything else is metadata
RUNS_TABLE_COLUMNS = ["run_id", "exp_id", "name", "result_table_name",
"result_counter", "run_timestamp", "completed_timestamp",
"is_completed", "parameters", "guid",
"run_description"]
def sql_placeholder_string(n: int) -> str:
"""
Return an SQL value placeholder string for n values.
Example: sql_placeholder_string(5) returns '(?,?,?,?,?)'
"""
return '(' + ','.join('?'*n) + ')'
class ConnectionPlus(wrapt.ObjectProxy):
"""
A class to extend the sqlite3.Connection object. Since sqlite3.Connection
has no __dict__, we cannot add attributes to its instances directly.
It is not allowed to instantiate a new `ConnectionPlus` object from a
`ConnectionPlus` object.
Attributes:
atomic_in_progress: a bool describing whether the connection is
currently in the middle of an atomic block of transactions, thus
allowing `atomic` context managers to be nested
"""
atomic_in_progress: bool = False
def __init__(self, sqlite3_connection: sqlite3.Connection):
super(ConnectionPlus, self).__init__(sqlite3_connection)
if isinstance(sqlite3_connection, ConnectionPlus):
raise ValueError('Attempted to create `ConnectionPlus` from a '
'`ConnectionPlus` object which is not allowed.')
def upgrader(func: Callable[[ConnectionPlus], None]):
"""
Decorator for database version upgrade functions. An upgrade function
must have the name `perform_db_upgrade_N_to_M` where N = M-1. For
simplicity, an upgrade function must take a single argument of type
`ConnectionPlus`. The upgrade function must either perform the upgrade
and return (no return values allowed) or fail to perform the upgrade,
in which case it must raise a RuntimeError. A failed upgrade must be
completely rolled back before the RuntimeError is raised.
The decorator takes care of logging about the upgrade and managing the
database versioning.
"""
name_comps = func.__name__.split('_')
if not len(name_comps) == 6:
raise NameError('Decorated function not a valid upgrader. '
'Must have name "perform_db_upgrade_N_to_M"')
if not ''.join(name_comps[:3]+[name_comps[4]]) == 'performdbupgradeto':
raise NameError('Decorated function not a valid upgrader. '
'Must have name "perform_db_upgrade_N_to_M"')
from_version = int(name_comps[3])
to_version = int(name_comps[5])
if not to_version == from_version+1:
raise ValueError(f'Invalid upgrade versions in function name: '
f'{func.__name__}; upgrade from version '
f'{from_version} to version {to_version}.'
' Can only upgrade from version N'
' to version N+1')
@wraps(func)
def do_upgrade(conn: ConnectionPlus) -> None:
log.info(f'Starting database upgrade version {from_version} '
f'to {to_version}')
start_version = get_user_version(conn)
if start_version != from_version:
log.info(f'Skipping upgrade {from_version} -> {to_version} as'
f' current database version is {start_version}.')
return
# This function either raises or returns
func(conn)
set_user_version(conn, to_version)
log.info(f'Successfully performed upgrade {from_version} '
f'-> {to_version}')
_UPGRADE_ACTIONS[to_version] = do_upgrade
return do_upgrade
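# Illustrative sketch (not part of the module): the naming contract that
# `upgrader` enforces. A hypothetical upgrade from version 3 to 4 would have
# to look like the stub below; it is shown as a comment only, because
# decorating it would actually register it in _UPGRADE_ACTIONS.
#
#     @upgrader
#     def perform_db_upgrade_3_to_4(conn: ConnectionPlus) -> None:
#         with atomic(conn) as conn:
#             transaction(conn, "ALTER TABLE runs ADD COLUMN some_column TEXT")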
# utility function to allow sqlite/numpy type
def _adapt_array(arr: ndarray) -> sqlite3.Binary:
"""
See this:
https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def _convert_array(text: bytes) -> ndarray:
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
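# Illustrative sketch (not part of the module): the adapter/converter pair
# above round-trips a numpy array through its sqlite BLOB representation.
def _example_array_roundtrip() -> bool:
    original = np.arange(6).reshape(2, 3)
    blob = _adapt_array(original)            # ndarray -> sqlite3.Binary
    restored = _convert_array(bytes(blob))   # bytes -> ndarray
    return bool(np.array_equal(original, restored))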
this_session_default_encoding = sys.getdefaultencoding()
def _convert_numeric(value: bytes) -> Union[float, int, str]:
"""
This is a converter for sqlite3 'numeric' type class.
This converter is capable of deducing whether a number is a float or an
int.
Note that sqlite3 allows saving data to columns even if their type is not
compatible with the table type class (for example, it is possible to save
integers into 'text' columns). Due to this fact, and for the reasons of
flexibility, the numeric converter is also made capable of handling
strings. An obvious exception to this is 'nan' (case insensitive) which
gets converted to `np.nan`.
"""
try:
# First, try to convert bytes to float
numeric = float(value)
except ValueError as e:
# If an exception has been raised, we first need to find out
# if the reason was the conversion to float, and, if so, we are sure
# that we need to return a string
if "could not convert string to float" in str(e):
return str(value, encoding=this_session_default_encoding)
else:
# otherwise, the exception is forwarded up the stack
raise e
# If that worked, e.g. did not raise an exception, then we check if the
# outcome is 'nan'
if np.isnan(numeric):
return numeric
# If it is not 'nan', then we need to see if the value is really an
# integer or with floating point digits
numeric_int = int(numeric)
if numeric != numeric_int:
return numeric
else:
return numeric_int
def _adapt_float(fl: float) -> Union[float, str]:
if np.isnan(fl):
return "nan"
return float(fl)
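# Illustrative sketch (not part of the module): expected behaviour of the
# numeric converter on the byte strings sqlite hands back.
def _example_convert_numeric() -> None:
    assert _convert_numeric(b'1.0') == 1          # integral floats come back as int
    assert _convert_numeric(b'1.5') == 1.5        # true floats stay float
    assert np.isnan(_convert_numeric(b'nan'))     # 'nan' becomes np.nan
    assert _convert_numeric(b'hello') == 'hello'  # non-numeric text stays text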
def one(curr: sqlite3.Cursor, column: Union[int, str]) -> Any:
"""Get the value of one column from one row
Args:
curr: cursor to operate on
column: name of the column
Returns:
the value
"""
res = curr.fetchall()
if len(res) > 1:
raise RuntimeError("Expected only one row")
elif len(res) == 0:
raise RuntimeError("Expected one row")
else:
return res[0][column]
def many(curr: sqlite3.Cursor, *columns: str) -> List[Any]:
"""Get the values of many columns from one row
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of values
"""
res = curr.fetchall()
if len(res) > 1:
raise RuntimeError("Expected only one row")
else:
return [res[0][c] for c in columns]
def many_many(curr: sqlite3.Cursor, *columns: str) -> List[List[Any]]:
"""Get all values of many columns
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of lists of values
"""
res = curr.fetchall()
results = []
for r in res:
results.append([r[c] for c in columns])
return results
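# Illustrative sketch (not part of the module): how `one`, `many` and
# `many_many` read a cursor. A throwaway in-memory database is used; the row
# factory must be sqlite3.Row for the string-based column access to work.
def _example_cursor_helpers() -> None:
    conn = sqlite3.connect(':memory:')
    conn.row_factory = sqlite3.Row
    conn.execute("CREATE TABLE t (x INTEGER, y TEXT)")
    conn.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b')")
    assert one(conn.execute("SELECT x FROM t WHERE y = 'a'"), 'x') == 1
    assert many(conn.execute("SELECT x, y FROM t WHERE x = 2"), 'x', 'y') == [2, 'b']
    assert many_many(conn.execute("SELECT x, y FROM t"), 'x', 'y') == [[1, 'a'], [2, 'b']]
    conn.close()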
def connect(name: str, debug: bool = False,
version: int = -1) -> ConnectionPlus:
"""
Connect to or create the database. If debug is True, the queries will be echoed back.
This function takes care of registering the numpy/sqlite type
converters that we need.
Args:
name: name or path to the sqlite file
debug: whether or not to turn on tracing
version: which version to create. We count from 0. -1 means 'latest'.
Should always be left at -1 except when testing.
Returns:
conn: connection object to the database (note, it is
`ConnectionPlus`, not `sqlite3.Connection`
"""
# register numpy->binary(TEXT) adapter
# the typing here is ignored due to what we think is a flaw in typeshed
# see https://github.com/python/typeshed/issues/2429
sqlite3.register_adapter(np.ndarray, _adapt_array) # type: ignore
# register binary(TEXT) -> numpy converter
# for some reason mypy complains about this
sqlite3.register_converter("array", _convert_array)
sqlite3_conn = sqlite3.connect(name, detect_types=sqlite3.PARSE_DECLTYPES)
conn = ConnectionPlus(sqlite3_conn)
latest_supported_version = _latest_available_version()
db_version = get_user_version(conn)
if db_version > latest_supported_version:
raise RuntimeError(f"Database {name} is version {db_version} but this "
f"version of QCoDeS supports up to "
f"version {latest_supported_version}")
# sqlite3 options
conn.row_factory = sqlite3.Row
# Make sure numpy ints and floats types are inserted properly
for numpy_int in [
np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64
]:
sqlite3.register_adapter(numpy_int, int)
sqlite3.register_converter("numeric", _convert_numeric)
for numpy_float in [np.float, np.float16, np.float32, np.float64]:
sqlite3.register_adapter(numpy_float, _adapt_float)
if debug:
conn.set_trace_callback(print)
init_db(conn)
perform_db_upgrade(conn, version=version)
return conn
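# Illustrative sketch (not part of the module): connecting to a scratch
# in-memory database. `connect` registers the type adapters/converters,
# creates the schema and upgrades it to the newest version.
def _example_connect() -> None:
    conn = connect(':memory:')
    assert get_user_version(conn) == _latest_available_version()
    conn.close()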
def perform_db_upgrade(conn: ConnectionPlus, version: int=-1) -> None:
"""
This is intended to perform all upgrades as needed to bring the
db from version 0 to the most current version (or the version specified).
All the perform_db_upgrade_X_to_Y functions must raise if they cannot
upgrade and be a NOOP if the current version is higher than their target.
Args:
conn: object for connection to the database
version: Which version to upgrade to. We count from 0. -1 means
'newest version'
"""
version = _latest_available_version() if version == -1 else version
current_version = get_user_version(conn)
if current_version < version:
log.info("Commencing database upgrade")
for target_version in sorted(_UPGRADE_ACTIONS)[:version]:
_UPGRADE_ACTIONS[target_version](conn)
@upgrader
def perform_db_upgrade_0_to_1(conn: ConnectionPlus) -> None:
"""
Perform the upgrade from version 0 to version 1
Add a GUID column to the runs table and assign guids for all existing runs
"""
sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='runs'"
cur = atomic_transaction(conn, sql)
n_run_tables = len(cur.fetchall())
if n_run_tables == 1:
with atomic(conn) as conn:
sql = "ALTER TABLE runs ADD COLUMN guid TEXT"
transaction(conn, sql)
# now assign GUIDs to existing runs
cur = transaction(conn, 'SELECT run_id FROM runs')
run_ids = [r[0] for r in many_many(cur, 'run_id')]
for run_id in run_ids:
query = f"""
SELECT run_timestamp
FROM runs
WHERE run_id == {run_id}
"""
cur = transaction(conn, query)
timestamp = one(cur, 'run_timestamp')
timeint = int(np.round(timestamp*1000))
sql = f"""
UPDATE runs
SET guid = ?
where run_id == {run_id}
"""
sampleint = 3736062718 # 'deafcafe'
cur.execute(sql, (generate_guid(timeint=timeint,
sampleint=sampleint),))
else:
raise RuntimeError(f"found {n_run_tables} runs tables expected 1")
@upgrader
def perform_db_upgrade_1_to_2(conn: ConnectionPlus) -> None:
"""
Perform the upgrade from version 1 to version 2
Add two indices on the runs table, one for exp_id and one for GUID
"""
sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='runs'"
cur = atomic_transaction(conn, sql)
n_run_tables = len(cur.fetchall())
if n_run_tables == 1:
_IX_runs_exp_id = """
CREATE INDEX
IF NOT EXISTS IX_runs_exp_id
ON runs (exp_id DESC)
"""
_IX_runs_guid = """
CREATE INDEX
IF NOT EXISTS IX_runs_guid
ON runs (guid DESC)
"""
with atomic(conn) as conn:
transaction(conn, _IX_runs_exp_id)
transaction(conn, _IX_runs_guid)
else:
raise RuntimeError(f"found {n_run_tables} runs tables expected 1")
def _2to3_get_result_tables(conn: ConnectionPlus) -> Dict[int, str]:
rst_query = "SELECT run_id, result_table_name FROM runs"
cur = conn.cursor()
cur.execute(rst_query)
data = cur.fetchall()
cur.close()
results = {}
for row in data:
results[row['run_id']] = row['result_table_name']
return results
def _2to3_get_layout_ids(conn: ConnectionPlus) -> DefaultDict[int, List[int]]:
query = """
select runs.run_id, layouts.layout_id
FROM layouts
INNER JOIN runs ON runs.run_id == layouts.run_id
"""
cur = conn.cursor()
cur.execute(query)
data = cur.fetchall()
cur.close()
results: DefaultDict[int, List[int]] = defaultdict(list)
for row in data:
run_id = row['run_id']
layout_id = row['layout_id']
results[run_id].append(layout_id)
return results
def _2to3_get_indeps(conn: ConnectionPlus) -> DefaultDict[int, List[int]]:
query = """
SELECT layouts.run_id, layouts.layout_id
FROM layouts
INNER JOIN dependencies
ON layouts.layout_id==dependencies.independent
"""
cur = conn.cursor()
cur.execute(query)
data = cur.fetchall()
cur.close()
results: DefaultDict[int, List[int]] = defaultdict(list)
for row in data:
run_id = row['run_id']
layout_id = row['layout_id']
results[run_id].append(layout_id)
return results
def _2to3_get_deps(conn: ConnectionPlus) -> DefaultDict[int, List[int]]:
query = """
SELECT layouts.run_id, layouts.layout_id
FROM layouts
INNER JOIN dependencies
ON layouts.layout_id==dependencies.dependent
"""
cur = conn.cursor()
cur.execute(query)
data = cur.fetchall()
cur.close()
results: DefaultDict[int, List[int]] = defaultdict(list)
for row in data:
run_id = row['run_id']
layout_id = row['layout_id']
results[run_id].append(layout_id)
return results
def _2to3_get_dependencies(conn: ConnectionPlus) -> DefaultDict[int, List[int]]:
query = """
SELECT dependent, independent
FROM dependencies
ORDER BY dependent, axis_num ASC
"""
cur = conn.cursor()
cur.execute(query)
data = cur.fetchall()
cur.close()
results: DefaultDict[int, List[int]] = defaultdict(list)
if len(data) == 0:
return results
for row in data:
dep = row['dependent']
indep = row['independent']
results[dep].append(indep)
return results
def _2to3_get_layouts(conn: ConnectionPlus) -> Dict[int,
Tuple[str, str, str, str]]:
query = """
SELECT layout_id, parameter, label, unit, inferred_from
FROM layouts
"""
cur = conn.cursor()
cur.execute(query)
results: Dict[int, Tuple[str, str, str, str]] = {}
for row in cur.fetchall():
results[row['layout_id']] = (row['parameter'],
row['label'],
row['unit'],
row['inferred_from'])
return results
def _2to3_get_paramspecs(conn: ConnectionPlus,
layout_ids: List[int],
layouts: Dict[int, Tuple[str, str, str, str]],
dependencies: Dict[int, List[int]],
deps: Sequence[int],
indeps: Sequence[int],
result_table_name: str) -> Dict[int, ParamSpec]:
paramspecs: Dict[int, ParamSpec] = {}
the_rest = set(layout_ids).difference(set(deps).union(set(indeps)))
# We ensure that we first retrieve the ParamSpecs on which other ParamSpecs
# depend, then the dependent ParamSpecs and finally the rest
for layout_id in list(indeps) + list(deps) + list(the_rest):
(name, label, unit, inferred_from) = layouts[layout_id]
# get the data type
sql = f'PRAGMA TABLE_INFO("{result_table_name}")'
c = transaction(conn, sql)
for row in c.fetchall():
if row['name'] == name:
paramtype = row['type']
break
# first possibility: another parameter depends on this parameter
if layout_id in indeps:
paramspec = ParamSpec(name=name, paramtype=paramtype,
label=label, unit=unit,
inferred_from=inferred_from)
paramspecs[layout_id] = paramspec
# second possibility: this parameter depends on another parameter
elif layout_id in deps:
setpoints = dependencies[layout_id]
depends_on = [paramspecs[idp].name for idp in setpoints]
paramspec = ParamSpec(name=name,
paramtype=paramtype,
label=label, unit=unit,
depends_on=depends_on,
inferred_from=inferred_from)
paramspecs[layout_id] = paramspec
# third possibility: no dependencies
else:
paramspec = ParamSpec(name=name,
paramtype=paramtype,
label=label, unit=unit,
depends_on=[],
inferred_from=[])
paramspecs[layout_id] = paramspec
return paramspecs
@upgrader
def perform_db_upgrade_2_to_3(conn: ConnectionPlus) -> None:
"""
Perform the upgrade from version 2 to version 3
Insert a new column, run_description, into the runs table and fill it out
for existing runs with information retrieved from the layouts and
dependencies tables represented as the to_json output of a RunDescriber
object
"""
no_of_runs_query = "SELECT max(run_id) FROM runs"
no_of_runs = one(atomic_transaction(conn, no_of_runs_query), 'max(run_id)')
no_of_runs = no_of_runs or 0
# If one run fails, we want the whole upgrade to roll back, hence the
# entire upgrade is one atomic transaction
with atomic(conn) as conn:
sql = "ALTER TABLE runs ADD COLUMN run_description TEXT"
transaction(conn, sql)
result_tables = _2to3_get_result_tables(conn)
layout_ids_all = _2to3_get_layout_ids(conn)
indeps_all = _2to3_get_indeps(conn)
deps_all = _2to3_get_deps(conn)
layouts = _2to3_get_layouts(conn)
dependencies = _2to3_get_dependencies(conn)
pbar = tqdm(range(1, no_of_runs+1))
pbar.set_description("Upgrading database")
for run_id in pbar:
if run_id in layout_ids_all:
result_table_name = result_tables[run_id]
layout_ids = list(layout_ids_all[run_id])
if run_id in indeps_all:
independents = tuple(indeps_all[run_id])
else:
independents = ()
if run_id in deps_all:
dependents = tuple(deps_all[run_id])
else:
dependents = ()
paramspecs = _2to3_get_paramspecs(conn,
layout_ids,
layouts,
dependencies,
dependents,
independents,
result_table_name)
interdeps = InterDependencies(*paramspecs.values())
desc = RunDescriber(interdeps=interdeps)
json_str = desc.to_json()
else:
json_str = RunDescriber(InterDependencies()).to_json()
sql = f"""
UPDATE runs
SET run_description = ?
WHERE run_id == ?
"""
cur = conn.cursor()
cur.execute(sql, (json_str, run_id))
log.debug(f"Upgrade in transition, run number {run_id}: OK")
def _latest_available_version() -> int:
"""Return latest available database schema version"""
return len(_UPGRADE_ACTIONS)
def get_db_version_and_newest_available_version(path_to_db: str) -> Tuple[int,
int]:
"""
Connect to a DB without performing any upgrades and get the version of
that database file along with the newest available version (the one that
a normal "connect" will automatically upgrade to)
Args:
path_to_db: the absolute path to the DB file
Returns:
A tuple of (db_version, latest_available_version)
"""
conn = connect(path_to_db, version=0)
db_version = get_user_version(conn)
return db_version, _latest_available_version()
def transaction(conn: ConnectionPlus,
sql: str, *args: Any) -> sqlite3.Cursor:
"""Perform a transaction.
The transaction needs to be committed or rolled back.
Args:
conn: database connection
sql: formatted string
*args: arguments to use for parameter substitution
Returns:
sqlite cursor
"""
c = conn.cursor()
if len(args) > 0:
c.execute(sql, args)
else:
c.execute(sql)
return c
def atomic_transaction(conn: ConnectionPlus,
sql: str, *args: Any) -> sqlite3.Cursor:
"""Perform an **atomic** transaction.
The transaction is committed if there are no exceptions else the
transaction is rolled back.
NB: 'BEGIN' is by default only inserted before INSERT/UPDATE/DELETE/REPLACE
but we want to guard any transaction that modifies the database (e.g. also
ALTER). 'BEGIN' marks a place to commit from/roll back to
Args:
conn: database connection
sql: formatted string
*args: arguments to use for parameter substitution
Returns:
sqlite cursor
"""
with atomic(conn) as atomic_conn:
c = transaction(atomic_conn, sql, *args)
return c
@contextmanager
def atomic(conn: ConnectionPlus):
"""
Guard a series of transactions as atomic.
If one transaction fails, all the previous transactions are rolled back
and no more transactions are performed.
NB: 'BEGIN' is by default only inserted before INSERT/UPDATE/DELETE/REPLACE
but we want to guard any transaction that modifies the database (e.g. also
ALTER)
Args:
conn: connection to guard
"""
if not isinstance(conn, ConnectionPlus):
raise ValueError('atomic context manager only accepts ConnectionPlus '
'database connection objects.')
is_outmost = not(conn.atomic_in_progress)
if conn.in_transaction and is_outmost:
raise RuntimeError('SQLite connection has uncommitted transactions. '
'Please commit those before starting an atomic '
'transaction.')
old_atomic_in_progress = conn.atomic_in_progress
conn.atomic_in_progress = True
try:
if is_outmost:
old_level = conn.isolation_level
conn.isolation_level = None
conn.cursor().execute('BEGIN')
yield conn
except Exception as e:
conn.rollback()
log.exception("Rolling back due to unhandled exception")
raise RuntimeError("Rolling back due to unhandled exception") from e
else:
if is_outmost:
conn.commit()
finally:
if is_outmost:
conn.isolation_level = old_level
conn.atomic_in_progress = old_atomic_in_progress
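# Illustrative sketch (not part of the module): `atomic` blocks can be nested.
# Only the outermost block issues BEGIN and commits, so an exception anywhere
# inside rolls back every statement of the outer block. Assumes `conn` is a
# ConnectionPlus whose schema has been initialised (e.g. via `connect`).
def _example_nested_atomic(conn: ConnectionPlus) -> None:
    with atomic(conn) as outer:
        transaction(outer, "INSERT INTO experiments (name) VALUES (?)", 'exp1')
        with atomic(outer) as inner:  # nested: no second BEGIN is issued
            transaction(inner, "INSERT INTO experiments (name) VALUES (?)", 'exp2')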
def make_connection_plus_from(conn: Union[sqlite3.Connection, ConnectionPlus]
) -> ConnectionPlus:
"""
Makes a ConnectionPlus connection object out of a given argument.
If the given connection is already a ConnectionPlus, then it is returned
without any changes.
Args:
conn: an sqlite database connection object
Returns:
the "same" connection but as ConnectionPlus object
"""
if not isinstance(conn, ConnectionPlus):
conn_plus = ConnectionPlus(conn)
else:
conn_plus = conn
return conn_plus
def init_db(conn: ConnectionPlus)->None:
with atomic(conn) as conn:
transaction(conn, _experiment_table_schema)
transaction(conn, _runs_table_schema)
transaction(conn, _layout_table_schema)
transaction(conn, _dependencies_table_schema)
def is_run_id_in_database(conn: ConnectionPlus,
*run_ids) -> Dict[int, bool]:
"""
Look up run_ids and return a dictionary with the answers to the question
"is this run_id in the database?"
Args:
conn: the connection to the database
run_ids: the run_ids to look up
Returns:
a dict with the run_ids as keys and bools as values. True means that
the run_id DOES exist in the database
"""
run_ids = np.unique(run_ids)
placeholders = sql_placeholder_string(len(run_ids))
query = f"""
SELECT run_id
FROM runs
WHERE run_id in {placeholders}
"""
cursor = conn.cursor()
cursor.execute(query, run_ids)
rows = cursor.fetchall()
existing_ids = [row[0] for row in rows]
return {run_id: (run_id in existing_ids) for run_id in run_ids}
def is_column_in_table(conn: ConnectionPlus, table: str, column: str) -> bool:
"""
A look-before-you-leap function to look up if a table has a certain column.
Intended for the 'runs' table where columns might be dynamically added
via `add_meta_data`/`insert_meta_data` functions.
Args:
conn: The connection
table: the table name
column: the column name
"""
cur = atomic_transaction(conn, f"PRAGMA table_info({table})")
for row in cur.fetchall():
if row['name'] == column:
return True
return False
def insert_column(conn: ConnectionPlus, table: str, name: str,
paramtype: Optional[str] = None) -> None:
"""Insert new column to a table
Args:
conn: database connection
table: destination for the insertion
name: column name
paramtype: sqlite type of the column
"""
# first check that the column is not already there
# and do nothing if it is
query = f'PRAGMA TABLE_INFO("{table}");'
cur = atomic_transaction(conn, query)
columns = many_many(cur, "name")
if name in [col[0] for col in columns]:
return
with atomic(conn) as conn:
if paramtype:
transaction(conn,
f'ALTER TABLE "{table}" ADD COLUMN "{name}" '
f'{paramtype}')
else:
transaction(conn,
f'ALTER TABLE "{table}" ADD COLUMN "{name}"')
def select_one_where(conn: ConnectionPlus, table: str, column: str,
where_column: str, where_value: Any) -> Any:
query = f"""
SELECT {column}
FROM
{table}
WHERE
{where_column} = ?
"""
cur = atomic_transaction(conn, query, where_value)
res = one(cur, column)
return res
def select_many_where(conn: ConnectionPlus, table: str, *columns: str,
where_column: str, where_value: Any) -> Any:
_columns = ",".join(columns)
query = f"""
SELECT {_columns}
FROM
{table}
WHERE
{where_column} = ?
"""
cur = atomic_transaction(conn, query, where_value)
res = many(cur, *columns)
return res
def _massage_dict(metadata: Dict[str, Any]) -> Tuple[str, List[Any]]:
"""
{key:value, key2:value} -> ["key=?, key2=?", [value, value]]
"""
template = []
values = []
for key, value in metadata.items():
template.append(f"{key} = ?")
values.append(value)
return ','.join(template), values
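# Illustrative sketch (not part of the module): the SET-clause template and
# value list that `_massage_dict` produces for `update_where`.
def _example_massage_dict() -> None:
    template, values = _massage_dict({'name': 'run1', 'is_completed': 1})
    assert template == 'name = ?,is_completed = ?'
    assert values == ['run1', 1]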
def update_where(conn: ConnectionPlus, table: str,
where_column: str, where_value: Any, **updates) -> None:
_updates, values = _massage_dict(updates)
query = f"""
UPDATE
'{table}'
SET
{_updates}
WHERE
{where_column} = ?
"""
atomic_transaction(conn, query, *values, where_value)
def insert_values(conn: ConnectionPlus,
formatted_name: str,
columns: List[str],
values: VALUES,
) -> int:
"""
Inserts values for the specified columns.
Will pad with null if not all parameters are specified.
NOTE this needs to be committed before closing the connection.
"""
_columns = ",".join(columns)
_values = ",".join(["?"] * len(columns))
query = f"""INSERT INTO "{formatted_name}"
({_columns})
VALUES
({_values})
"""
c = atomic_transaction(conn, query, *values)
return c.lastrowid
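# Illustrative sketch (not part of the module): inserting a single row. The
# table and column names below are made up for the example.
def _example_insert_values(conn: ConnectionPlus) -> int:
    atomic_transaction(conn,
                       'CREATE TABLE IF NOT EXISTS "results-demo" '
                       '(id INTEGER PRIMARY KEY, x NUMERIC, y NUMERIC)')
    return insert_values(conn, 'results-demo', ['x', 'y'], [0.1, 0.2])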
def insert_many_values(conn: ConnectionPlus,
formatted_name: str,
columns: List[str],
values: List[VALUES],
) -> int:
"""
Inserts many values for the specified columns.
Example input:
columns: ['xparam', 'yparam']
values: [[x1, y1], [x2, y2], [x3, y3]]
NOTE this needs to be committed before closing the connection.
"""
# We demand that all values have the same length
lengths = [len(val) for val in values]
if len(np.unique(lengths)) > 1:
raise ValueError('Wrong input format for values. Must specify the '
'same number of values for all columns. Received'
f' lengths {lengths}.')
no_of_rows = len(lengths)
no_of_columns = lengths[0]
# The TOTAL number of inserted values in one query
# must be less than the SQLITE_MAX_VARIABLE_NUMBER
# Version check cf.
# "https://stackoverflow.com/questions/9527851/sqlite-error-
# too-many-terms-in-compound-select"
version = qc.SQLiteSettings.settings['VERSION']
# According to the SQLite changelog, the version number
# to check against below
# ought to be 3.7.11, but that fails on Travis
if LooseVersion(str(version)) <= LooseVersion('3.8.2'):
max_var = qc.SQLiteSettings.limits['MAX_COMPOUND_SELECT']
else:
max_var = qc.SQLiteSettings.limits['MAX_VARIABLE_NUMBER']
rows_per_transaction = int(int(max_var)/no_of_columns)
_columns = ",".join(columns)
_values = "(" + ",".join(["?"] * len(values[0])) + ")"
a, b = divmod(no_of_rows, rows_per_transaction)
chunks = a*[rows_per_transaction] + [b]
if chunks[-1] == 0:
chunks.pop()
start = 0
stop = 0
with atomic(conn) as conn:
for ii, chunk in enumerate(chunks):
_values_x_params = ",".join([_values] * chunk)
query = f"""INSERT INTO "{formatted_name}"
({_columns})
VALUES
{_values_x_params}
"""
stop += chunk
# we need to make values a flat list from a list of list
flattened_values = list(
itertools.chain.from_iterable(values[start:stop]))
c = transaction(conn, query, *flattened_values)
if ii == 0:
return_value = c.lastrowid
start += chunk
return return_value
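# Illustrative sketch (not part of the module): the chunking arithmetic used
# above. With, say, max_var = 999 bind variables and 3 columns, at most 333
# rows fit into one INSERT, so 1000 rows are split as [333, 333, 333, 1].
def _example_chunking(no_of_rows: int = 1000, no_of_columns: int = 3,
                      max_var: int = 999) -> List[int]:
    rows_per_transaction = int(max_var / no_of_columns)
    a, b = divmod(no_of_rows, rows_per_transaction)
    chunks = a * [rows_per_transaction] + [b]
    if chunks[-1] == 0:
        chunks.pop()
    return chunks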
def modify_values(conn: ConnectionPlus,
formatted_name: str,
index: int,
columns: List[str],
values: VALUES,
) -> int:
"""
Modify values for the specified columns.
If a column is in the table but not in the columns list, it is
left untouched.
If a column is mapped to None, it will be a null value.
"""
name_val_template = []
for name in columns:
name_val_template.append(f"{name}=?")
name_val_templates = ",".join(name_val_template)
query = f"""
UPDATE "{formatted_name}"
SET
{name_val_templates}
WHERE
rowid = {index+1}
"""
c = atomic_transaction(conn, query, *values)
return c.rowcount
def modify_many_values(conn: ConnectionPlus,
formatted_name: str,
start_index: int,
columns: List[str],
list_of_values: List[VALUES],
) -> None:
"""
Modify many values for the specified columns.
If a column is in the table but not in the columns list, it is
left untouched.
If a column is mapped to None, it will be a null value.
"""
_len = length(conn, formatted_name)
len_requested = start_index + len(list_of_values[0])
available = _len - start_index
if len_requested > _len:
reason = f"""Modify operation Out of bounds.
Trying to modify {len(list_of_values)} results,
but there are only {available} results.
"""
raise ValueError(reason)
for values in list_of_values:
modify_values(conn, formatted_name, start_index, columns, values)
start_index += 1
def length(conn: ConnectionPlus,
formatted_name: str
) -> int:
"""
Return the length of the table
Args:
conn: the connection to the sqlite database
formatted_name: name of the table
Returns:
the length of the table
"""
query = f"select MAX(id) from '{formatted_name}'"
c = atomic_transaction(conn, query)
_len = c.fetchall()[0][0]
if _len is None:
return 0
else:
return _len
def _build_data_query( table_name: str,
columns: List[str],
start: Optional[int] = None,
end: Optional[int] = None,
) -> str:
_columns = ",".join(columns)
query = f"""
SELECT {_columns}
FROM "{table_name}"
"""
start_specified = start is not None
end_specified = end is not None
where = ' WHERE' if start_specified or end_specified else ''
start_condition = f' rowid >= {start}' if start_specified else ''
end_condition = f' rowid <= {end}' if end_specified else ''
and_ = ' AND' if start_specified and end_specified else ''
query += where + start_condition + and_ + end_condition
return query
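# Illustrative sketch (not part of the module): the query that
# `_build_data_query` generates for a bounded range; the table name here is
# made up. The result selects x,y FROM "results-demo" WHERE rowid >= 10
# AND rowid <= 20.
def _example_build_data_query() -> str:
    return _build_data_query('results-demo', ['x', 'y'], start=10, end=20)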
def get_data(conn: ConnectionPlus,
table_name: str,
columns: List[str],
start: Optional[int] = None,
end: Optional[int] = None,
) -> List[List[Any]]:
"""
Get data from the columns of a table.
Allows to specify a range of rows (1-based indexing, both ends are
included).
Args:
conn: database connection
table_name: name of the table
columns: list of columns
start: start of range; if None, then starts from the top of the table
end: end of range; if None, then ends at the bottom of the table
Returns:
the data requested in the format of list of rows of values
"""
if len(columns) == 0:
warnings.warn(
'get_data: requested data without specifying parameters/columns.'
'Returning empty list.'
)
return [[]]
query = _build_data_query(table_name, columns, start, end)
c = atomic_transaction(conn, query)
res = many_many(c, *columns)
return res
def get_parameter_data(conn: ConnectionPlus,
table_name: str,
columns: Sequence[str]=(),
start: Optional[int]=None,
end: Optional[int]=None) -> \
Dict[str, Dict[str, np.ndarray]]:
"""
Get data for one or more parameters and its dependencies. The data
is returned as numpy arrays within 2 layers of nested dicts. The keys of
the outermost dict are the requested parameters and the keys of the second
level are the loaded parameters (requested parameter followed by its
dependencies). Start and end allow one to specify a range of rows
(1-based indexing, both ends are included).
Note that this assumes that all array type parameters have the same length.
This should always be the case for a parameter and its dependencies.
Note that all numeric data will at the moment be returned as floating point
values.
Args:
conn: database connection
table_name: name of the table
columns: list of columns
start: start of range; if None, then starts from the top of the table
end: end of range; if None, then ends at the bottom of the table
"""
sql = """
SELECT run_id FROM runs WHERE result_table_name = ?
"""
c = atomic_transaction(conn, sql, table_name)
run_id = one(c, 'run_id')
output = {}
if len(columns) == 0:
columns = get_non_dependencies(conn, run_id)
# loop over all the requested parameters
for output_param in columns:
# find all the dependencies of this param
paramspecs = get_parameter_dependencies(conn, output_param, run_id)
param_names = [param.name for param in paramspecs]
types = [param.type for param in paramspecs]
res = get_data(conn, table_name, param_names, start=start, end=end)
# if we have array type parameters expand all other parameters
# to arrays
if 'array' in types and ('numeric' in types or 'text' in types):
first_array_element = types.index('array')
numeric_elms = [i for i, x in enumerate(types)
if x == "numeric"]
text_elms = [i for i, x in enumerate(types)
if x == "text"]
for row in res:
for element in numeric_elms:
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.float)
# todo should we handle int/float types here
# we would in practice have to perform another
# loop to check that all elements of a given column can be cast to
# int without losing precision before choosing an integer
# representation of the array
for element in text_elms:
strlen = len(row[element])
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=f'U{strlen}')
# Benchmarking shows that transposing the data with python types is
# faster than transposing the data using np.array.transpose
res_t = list(map(list, zip(*res)))
output[output_param] = {name: np.array(column_data)
for name, column_data
in zip(param_names, res_t)}
return output
def get_values(conn: ConnectionPlus,
table_name: str,
param_name: str) -> List[List[Any]]:
"""
Get the not-null values of a parameter
Args:
conn: Connection to the database
table_name: Name of the table that holds the data
param_name: Name of the parameter to get the setpoints of
Returns:
The values
"""
sql = f"""
SELECT {param_name} FROM "{table_name}"
WHERE {param_name} IS NOT NULL
"""
c = atomic_transaction(conn, sql)
res = many_many(c, param_name)
return res
def get_setpoints(conn: ConnectionPlus,
table_name: str,
param_name: str) -> Dict[str, List[List[Any]]]:
"""
Get the setpoints for a given dependent parameter
Args:
conn: Connection to the database
table_name: Name of the table that holds the data
param_name: Name of the parameter to get the setpoints of
Returns:
A dict mapping each setpoint name to its values. Each value is a list
of rows, where every row is a single-element list holding the setpoint
value at a point where the dependent parameter is not null.
"""
# TODO: We do this in no less than 5 table lookups, surely
# this number can be reduced
# get run_id
sql = """
SELECT run_id FROM runs WHERE result_table_name = ?
"""
c = atomic_transaction(conn, sql, table_name)
run_id = one(c, 'run_id')
# get the parameter layout id
sql = """
SELECT layout_id FROM layouts
WHERE parameter = ?
and run_id = ?
"""
c = atomic_transaction(conn, sql, param_name, run_id)
layout_id = one(c, 'layout_id')
# get the setpoint layout ids
sql = """
SELECT independent FROM dependencies
WHERE dependent = ?
"""
c = atomic_transaction(conn, sql, layout_id)
indeps = many_many(c, 'independent')
indeps = [idp[0] for idp in indeps]
# get the setpoint names
sql = f"""
SELECT parameter FROM layouts WHERE layout_id
IN {str(indeps).replace('[', '(').replace(']', ')')}
"""
c = atomic_transaction(conn, sql)
setpoint_names_temp = many_many(c, 'parameter')
setpoint_names = [spn[0] for spn in setpoint_names_temp]
setpoint_names = cast(List[str], setpoint_names)
# get the actual setpoint data
output: Dict[str, List[List[Any]]] = {}
for sp_name in setpoint_names:
sql = f"""
SELECT {sp_name}
FROM "{table_name}"
WHERE {param_name} IS NOT NULL
"""
c = atomic_transaction(conn, sql)
sps = many_many(c, sp_name)
output[sp_name] = sps
return output
def get_runid_from_expid_and_counter(conn: ConnectionPlus, exp_id: int,
counter: int) -> int:
"""
Get the run_id of a run in the specified experiment with the specified
counter
Args:
conn: connection to the database
exp_id: the exp_id of the experiment containing the run
counter: the intra-experiment run counter of that run
"""
sql = """
SELECT run_id
FROM runs
WHERE result_counter= ? AND
exp_id = ?
"""
c = transaction(conn, sql, counter, exp_id)
run_id = one(c, 'run_id')
return run_id
def get_runid_from_guid(conn: ConnectionPlus, guid: str) -> Union[int, None]:
"""
Get the run_id of a run based on the guid
Args:
conn: connection to the database
guid: the guid to look up
Returns:
The run_id if found, else -1.
Raises:
RuntimeError if more than one run with the given GUID exists
"""
query = """
SELECT run_id
FROM runs
WHERE guid = ?
"""
cursor = conn.cursor()
cursor.execute(query, (guid,))
rows = cursor.fetchall()
if len(rows) == 0:
run_id = -1
elif len(rows) > 1:
errormssg = ('Critical consistency error: multiple runs with'
f' the same GUID found! {len(rows)} runs have GUID '
f'{guid}')
log.critical(errormssg)
raise RuntimeError(errormssg)
else:
run_id = int(rows[0]['run_id'])
return run_id
def get_layout(conn: ConnectionPlus,
layout_id) -> Dict[str, str]:
"""
Get the layout of a single parameter for plotting it
Args:
conn: The database connection
layout_id: The layout_id as in the layouts table
Returns:
A dict with name, label, and unit
"""
sql = """
SELECT parameter, label, unit FROM layouts WHERE layout_id=?
"""
c = atomic_transaction(conn, sql, layout_id)
t_res = many(c, 'parameter', 'label', 'unit')
res = dict(zip(['name', 'label', 'unit'], t_res))
return res
def get_layout_id(conn: ConnectionPlus,
parameter: Union[ParamSpec, str],
run_id: int) -> int:
"""
Get the layout id of a parameter in a given run
Args:
conn: The database connection
parameter: A ParamSpec or the name of the parameter
run_id: The run_id of the run in question
"""
# get the parameter layout id
sql = """
SELECT layout_id FROM layouts
WHERE parameter = ?
and run_id = ?
"""
if isinstance(parameter, ParamSpec):
name = parameter.name
elif isinstance(parameter, str):
name = parameter
else:
raise ValueError('Wrong parameter type, must be ParamSpec or str, '
f'received {type(parameter)}.')
c = atomic_transaction(conn, sql, name, run_id)
res = one(c, 'layout_id')
return res
def get_dependents(conn: ConnectionPlus,
run_id: int) -> List[int]:
"""
Get dependent layout_ids for a certain run_id, i.e. the layout_ids of all
the dependent variables
"""
sql = """
SELECT layout_id FROM layouts
WHERE run_id=? and layout_id in (SELECT dependent FROM dependencies)
"""
c = atomic_transaction(conn, sql, run_id)
res = [d[0] for d in many_many(c, 'layout_id')]
return res
def get_dependencies(conn: ConnectionPlus,
layout_id: int) -> List[List[int]]:
"""
Get the dependencies of a certain dependent variable (indexed by its
layout_id)
Args:
conn: connection to the database
layout_id: the layout_id of the dependent variable
"""
sql = """
SELECT independent, axis_num FROM dependencies WHERE dependent=?
"""
c = atomic_transaction(conn, sql, layout_id)
res = many_many(c, 'independent', 'axis_num')
return res
def get_non_dependencies(conn: ConnectionPlus,
run_id: int) -> List[str]:
"""
Return all parameters for a given run that are not dependencies of
other parameters.
Args:
conn: connection to the database
run_id: The run_id of the run in question
Returns:
A list of the parameter names.
"""
parameters = get_parameters(conn, run_id)
maybe_independent = []
dependent = []
dependencies: List[str] = []
for param in parameters:
if len(param.depends_on) == 0:
maybe_independent.append(param.name)
else:
dependent.append(param.name)
dependencies.extend(param.depends_on.split(', '))
independent_set = set(maybe_independent) - set(dependencies)
dependent_set = set(dependent)
result = independent_set.union(dependent_set)
return sorted(list(result))
# Higher level Wrappers
def get_parameter_dependencies(conn: ConnectionPlus, param: str,
run_id: int) -> List[ParamSpec]:
"""
Given a parameter name return a list of ParamSpecs where the first
element is the ParamSpec of the given parameter and the rest of the
elements are ParamSpecs of its dependencies.
Args:
conn: connection to the database
param: the name of the parameter to look up
run_id: The run_id of the run in question
Returns:
List of ParamSpecs of the parameter followed by its dependencies.
"""
layout_id = get_layout_id(conn, param, run_id)
deps = get_dependencies(conn, layout_id)
parameters = [get_paramspec(conn, run_id, param)]
for dep in deps:
depinfo = get_layout(conn, dep[0])
parameters.append(get_paramspec(conn, run_id, depinfo['name']))
return parameters
def new_experiment(conn: ConnectionPlus,
name: str,
sample_name: str,
format_string: Optional[str]="{}-{}-{}",
start_time: Optional[float]=None,
end_time: Optional[float]=None,
) -> int:
"""
Add new experiment to container.
Args:
conn: database connection
name: the name of the experiment
sample_name: the name of the current sample
format_string: basic format string for table-name
must contain 3 placeholders.
start_time: time when the experiment was started. Do not supply this
unless you have a very good reason to do so.
end_time: time when the experiment was completed. Do not supply this
unless you have a VERY good reason to do so
Returns:
id: row-id of the created experiment
"""
query = """
INSERT INTO experiments
(name, sample_name, format_string,
run_counter, start_time, end_time)
VALUES
(?,?,?,?,?,?)
"""
start_time = start_time or time.time()
values = (name, sample_name, format_string, 0, start_time, end_time)
curr = atomic_transaction(conn, query, *values)
return curr.lastrowid
# TODO(WilliamHPNielsen): we should remove the redundant
# is_completed
def mark_run_complete(conn: ConnectionPlus, run_id: int):
""" Mark run complete
Args:
conn: database connection
run_id: id of the run to mark complete
"""
query = """
UPDATE
runs
SET
completed_timestamp=?,
is_completed=?
WHERE run_id=?;
"""
atomic_transaction(conn, query, time.time(), True, run_id)
def completed(conn: ConnectionPlus, run_id)->bool:
""" Check if the run scomplete
Args:
conn: database connection
run_id: id of the run to check
"""
return bool(select_one_where(conn, "runs", "is_completed",
"run_id", run_id))
def get_completed_timestamp_from_run_id(
conn: ConnectionPlus, run_id: int) -> float:
"""
Retrieve the timestamp when the given measurement run was completed
If the measurement run has not been marked as completed, then the returned
value is None.
Args:
conn: database connection
run_id: id of the run
Returns:
timestamp in seconds since the Epoch, or None
"""
return select_one_where(conn, "runs", "completed_timestamp",
"run_id", run_id)
def get_guid_from_run_id(conn: ConnectionPlus, run_id: int) -> str:
"""
Get the guid of the given run
Args:
conn: database connection
run_id: id of the run
"""
return select_one_where(conn, "runs", "guid", "run_id", run_id)
def finish_experiment(conn: ConnectionPlus, exp_id: int):
""" Finish experiment
Args:
conn: database connection
exp_id: the id of the experiment to finish
"""
query = """
UPDATE experiments SET end_time=? WHERE exp_id=?;
"""
atomic_transaction(conn, query, time.time(), exp_id)
def get_run_counter(conn: ConnectionPlus, exp_id: int) -> int:
""" Get the experiment run counter
Args:
conn: the connection to the sqlite database
exp_id: experiment identifier
Returns:
the experiment run counter
"""
return select_one_where(conn, "experiments", "run_counter",
where_column="exp_id",
where_value=exp_id)
def get_experiments(conn: ConnectionPlus) -> List[sqlite3.Row]:
""" Get a list of experiments
Args:
conn: database connection
Returns:
list of rows
"""
sql = """
SELECT * FROM experiments
"""
c = atomic_transaction(conn, sql)
return c.fetchall()
def get_matching_exp_ids(conn: ConnectionPlus, **match_conditions) -> List:
"""
Get exp_ids for experiments matching the match_conditions
Raises:
ValueError if a match_condition other than "name", "sample_name",
"format_string", "run_counter", "start_time", or "end_time" is supplied
"""
valid_conditions = ["name", "sample_name", "start_time", "end_time",
"run_counter", "format_string"]
for mcond in match_conditions:
if mcond not in valid_conditions:
raise ValueError(f"{mcond} is not a valid match condition.")
end_time = match_conditions.get('end_time', None)
time_eq = "=" if end_time is not None else "IS"
sample_name = match_conditions.get('sample_name', None)
sample_name_eq = "=" if sample_name is not None else "IS"
query = "SELECT exp_id FROM experiments "
for n, mcond in enumerate(match_conditions):
if n == 0:
query += f"WHERE {mcond} = ? "
else:
query += f"AND {mcond} = ? "
# now some syntax clean-up
if "format_string" in match_conditions:
format_string = match_conditions["format_string"]
query = query.replace("format_string = ?",
f'format_string = "{format_string}"')
match_conditions.pop("format_string")
query = query.replace("end_time = ?", f"end_time {time_eq} ?")
query = query.replace("sample_name = ?", f"sample_name {sample_name_eq} ?")
cursor = conn.cursor()
cursor.execute(query, tuple(match_conditions.values()))
rows = cursor.fetchall()
return [row[0] for row in rows]
def get_exp_ids_from_run_ids(conn: ConnectionPlus,
run_ids: Sequence[int]) -> List[int]:
"""
Get the corresponding exp_id for a sequence of run_ids
Args:
conn: connection to the database
run_ids: a sequence of the run_ids to get the exp_id of
Returns:
A list of exp_ids matching the run_ids
"""
sql_placeholders = sql_placeholder_string(len(run_ids))
exp_id_query = f"""
SELECT exp_id
FROM runs
WHERE run_id IN {sql_placeholders}
"""
cursor = conn.cursor()
cursor.execute(exp_id_query, run_ids)
rows = cursor.fetchall()
return [exp_id for row in rows for exp_id in row]
def get_last_experiment(conn: ConnectionPlus) -> Optional[int]:
"""
Return last started experiment id
Returns None if there are no experiments in the database
"""
query = "SELECT MAX(exp_id) FROM experiments"
c = atomic_transaction(conn, query)
return c.fetchall()[0][0]
def get_runs(conn: ConnectionPlus,
exp_id: Optional[int] = None)->List[sqlite3.Row]:
""" Get a list of runs.
Args:
        conn: database connection
        exp_id: id of an experiment to restrict the returned runs to; if
            None, all runs are returned
Returns:
list of rows
"""
with atomic(conn) as conn:
if exp_id:
sql = """
SELECT * FROM runs
where exp_id = ?
"""
c = transaction(conn, sql, exp_id)
else:
sql = """
SELECT * FROM runs
"""
c = transaction(conn, sql)
return c.fetchall()
def get_last_run(conn: ConnectionPlus, exp_id: int) -> Optional[int]:
"""
Get run_id of the last run in experiment with exp_id
Args:
conn: connection to use for the query
exp_id: id of the experiment to look inside
Returns:
        the integer id of the last run or None if there are no runs in the
        experiment
"""
query = """
SELECT run_id, max(run_timestamp), exp_id
FROM runs
WHERE exp_id = ?;
"""
c = atomic_transaction(conn, query, exp_id)
return one(c, 'run_id')
def run_exists(conn: ConnectionPlus, run_id: int) -> bool:
# the following query always returns a single sqlite3.Row with an integer
# value of `1` or `0` for existing and non-existing run_id in the database
query = """
SELECT EXISTS(
SELECT 1
FROM runs
WHERE run_id = ?
LIMIT 1
);
"""
res: sqlite3.Row = atomic_transaction(conn, query, run_id).fetchone()
return bool(res[0])
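# Illustrative usage sketch (not part of the original module): combine the
# lookup helpers above to report when a run finished. ``conn`` is assumed to
# be an open ConnectionPlus and ``run_id`` an integer run id.
def _example_completion_time(conn: ConnectionPlus,
                             run_id: int) -> Optional[float]:
    if run_exists(conn, run_id) and completed(conn, run_id):
        return get_completed_timestamp_from_run_id(conn, run_id)
    return None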
def data_sets(conn: ConnectionPlus) -> List[sqlite3.Row]:
""" Get a list of datasets
Args:
conn: database connection
Returns:
list of rows
"""
sql = """
SELECT * FROM runs
"""
c = atomic_transaction(conn, sql)
return c.fetchall()
def format_table_name(fmt_str: str, name: str, exp_id: int,
run_counter: int) -> str:
"""
Format the format_string into a table name
Args:
fmt_str: a valid format string
name: the run name
exp_id: the experiment ID
        run_counter: the intra-experiment run number of this run
"""
table_name = fmt_str.format(name, exp_id, run_counter)
_validate_table_name(table_name) # raises if table_name not valid
return table_name
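# Illustrative usage sketch (not part of the original module): with a
# "{}-{}-{}"-style format string (an assumption here, mirroring the usual
# qcodes default) the table name is built from the run name, the exp_id and
# the run counter.
def _example_table_name() -> str:
    return format_table_name("{}-{}-{}", "results", 1, 3)  # -> "results-1-3"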
def _insert_run(conn: ConnectionPlus, exp_id: int, name: str,
guid: str,
parameters: Optional[List[ParamSpec]] = None,
):
# get run counter and formatter from experiments
run_counter, format_string = select_many_where(conn,
"experiments",
"run_counter",
"format_string",
where_column="exp_id",
where_value=exp_id)
run_counter += 1
formatted_name = format_table_name(format_string, name, exp_id,
run_counter)
table = "runs"
parameters = parameters or []
desc_str = RunDescriber(InterDependencies(*parameters)).to_json()
with atomic(conn) as conn:
if parameters:
query = f"""
INSERT INTO {table}
(name,
exp_id,
guid,
result_table_name,
result_counter,
run_timestamp,
parameters,
is_completed,
run_description)
VALUES
(?,?,?,?,?,?,?,?,?)
"""
curr = transaction(conn, query,
name,
exp_id,
guid,
formatted_name,
run_counter,
time.time(),
",".join([p.name for p in parameters]),
False,
desc_str)
_add_parameters_to_layout_and_deps(conn, formatted_name,
*parameters)
else:
query = f"""
INSERT INTO {table}
(name,
exp_id,
guid,
result_table_name,
result_counter,
run_timestamp,
is_completed,
run_description)
VALUES
(?,?,?,?,?,?,?,?)
"""
curr = transaction(conn, query,
name,
exp_id,
guid,
formatted_name,
run_counter,
time.time(),
False,
desc_str)
run_id = curr.lastrowid
return run_counter, formatted_name, run_id
def _update_experiment_run_counter(conn: ConnectionPlus, exp_id: int,
run_counter: int) -> None:
query = """
UPDATE experiments
SET run_counter = ?
WHERE exp_id = ?
"""
atomic_transaction(conn, query, run_counter, exp_id)
def get_parameters(conn: ConnectionPlus,
run_id: int) -> List[ParamSpec]:
"""
Get the list of param specs for run
Args:
conn: the connection to the sqlite database
run_id: The id of the run
Returns:
A list of param specs for this run
"""
sql = f"""
SELECT parameter FROM layouts WHERE run_id={run_id}
"""
c = conn.execute(sql)
param_names_temp = many_many(c, 'parameter')
param_names = [p[0] for p in param_names_temp]
param_names = cast(List[str], param_names)
parspecs = []
for param_name in param_names:
parspecs.append(get_paramspec(conn, run_id, param_name))
return parspecs
def get_paramspec(conn: ConnectionPlus,
run_id: int,
param_name: str) -> ParamSpec:
"""
Get the ParamSpec object for the given parameter name
in the given run
Args:
conn: Connection to the database
run_id: The run id
param_name: The name of the parameter
"""
# get table name
sql = f"""
SELECT result_table_name FROM runs WHERE run_id = {run_id}
"""
c = conn.execute(sql)
result_table_name = one(c, 'result_table_name')
# get the data type
sql = f"""
PRAGMA TABLE_INFO("{result_table_name}")
"""
c = conn.execute(sql)
for row in c.fetchall():
if row['name'] == param_name:
param_type = row['type']
break
# get everything else
sql = f"""
SELECT * FROM layouts
WHERE parameter="{param_name}" and run_id={run_id}
"""
c = conn.execute(sql)
resp = many(c, 'layout_id', 'run_id', 'parameter', 'label', 'unit',
'inferred_from')
(layout_id, _, _, label, unit, inferred_from_string) = resp
if inferred_from_string:
inferred_from = inferred_from_string.split(', ')
else:
inferred_from = []
deps = get_dependencies(conn, layout_id)
depends_on: Optional[List[str]]
if len(deps) == 0:
depends_on = None
else:
dps: List[int] = [dp[0] for dp in deps]
ax_nums: List[int] = [dp[1] for dp in deps]
depends_on = []
for _, dp in sorted(zip(ax_nums, dps)):
sql = f"""
SELECT parameter FROM layouts WHERE layout_id = {dp}
"""
c = conn.execute(sql)
depends_on.append(one(c, 'parameter'))
parspec = ParamSpec(param_name, param_type, label, unit,
inferred_from,
depends_on)
return parspec
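# Illustrative usage sketch (not part of the original module): map each
# parameter of a run to its unit using the helpers above. ``conn`` and
# ``run_id`` are assumed to refer to an existing run.
def _example_parameter_units(conn: ConnectionPlus,
                             run_id: int) -> Dict[str, str]:
    return {ps.name: ps.unit for ps in get_parameters(conn, run_id)}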
def update_run_description(conn: ConnectionPlus, run_id: int,
description: str) -> None:
"""
Update the run_description field for the given run_id. The description
string must be a valid JSON string representation of a RunDescriber object
"""
try:
RunDescriber.from_json(description)
except Exception as e:
raise ValueError("Invalid description string. Must be a JSON string "
"representaion of a RunDescriber object.") from e
sql = """
UPDATE runs
SET run_description = ?
WHERE run_id = ?
"""
with atomic(conn) as conn:
conn.cursor().execute(sql, (description, run_id))
def add_parameter(conn: ConnectionPlus,
formatted_name: str,
*parameter: ParamSpec):
"""
Add parameters to the dataset
This will update the layouts and dependencies tables
NOTE: two parameters with the same name are not allowed
Args:
conn: the connection to the sqlite database
formatted_name: name of the table
parameter: the list of ParamSpecs for parameters to add
"""
with atomic(conn) as conn:
p_names = []
for p in parameter:
insert_column(conn, formatted_name, p.name, p.type)
p_names.append(p.name)
# get old parameters column from run table
sql = f"""
SELECT parameters FROM runs
WHERE result_table_name=?
"""
with atomic(conn) as conn:
c = transaction(conn, sql, formatted_name)
old_parameters = one(c, 'parameters')
if old_parameters:
new_parameters = ",".join([old_parameters] + p_names)
else:
new_parameters = ",".join(p_names)
sql = "UPDATE runs SET parameters=? WHERE result_table_name=?"
with atomic(conn) as conn:
transaction(conn, sql, new_parameters, formatted_name)
# Update the layouts table
c = _add_parameters_to_layout_and_deps(conn, formatted_name,
*parameter)
def _add_parameters_to_layout_and_deps(conn: ConnectionPlus,
formatted_name: str,
*parameter: ParamSpec) -> sqlite3.Cursor:
# get the run_id
sql = f"""
SELECT run_id FROM runs WHERE result_table_name="{formatted_name}";
"""
run_id = one(transaction(conn, sql), 'run_id')
layout_args = []
for p in parameter:
layout_args.append(run_id)
layout_args.append(p.name)
layout_args.append(p.label)
layout_args.append(p.unit)
layout_args.append(p.inferred_from)
rowplaceholder = '(?, ?, ?, ?, ?)'
placeholder = ','.join([rowplaceholder] * len(parameter))
sql = f"""
INSERT INTO layouts (run_id, parameter, label, unit, inferred_from)
VALUES {placeholder}
"""
with atomic(conn) as conn:
c = transaction(conn, sql, *layout_args)
for p in parameter:
if p.depends_on != '':
layout_id = get_layout_id(conn, p, run_id)
deps = p.depends_on.split(', ')
for ax_num, dp in enumerate(deps):
sql = """
SELECT layout_id FROM layouts
WHERE run_id=? and parameter=?;
"""
c = transaction(conn, sql, run_id, dp)
dep_ind = one(c, 'layout_id')
sql = """
INSERT INTO dependencies (dependent, independent, axis_num)
VALUES (?,?,?)
"""
c = transaction(conn, sql, layout_id, dep_ind, ax_num)
return c
def _validate_table_name(table_name: str) -> bool:
valid = True
for i in table_name:
if unicodedata.category(i) not in _unicode_categories:
valid = False
raise RuntimeError("Invalid table name "
"{} starting at {}".format(table_name, i))
return valid
def _create_run_table(conn: ConnectionPlus,
formatted_name: str,
parameters: Optional[List[ParamSpec]] = None,
values: Optional[VALUES] = None
) -> None:
"""Create run table with formatted_name as name
Args:
conn: database connection
formatted_name: the name of the table to create
"""
_validate_table_name(formatted_name)
with atomic(conn) as conn:
if parameters and values:
_parameters = ",".join([p.sql_repr() for p in parameters])
query = f"""
CREATE TABLE "{formatted_name}" (
id INTEGER PRIMARY KEY,
{_parameters}
);
"""
transaction(conn, query)
# now insert values
insert_values(conn, formatted_name,
[p.name for p in parameters], values)
elif parameters:
_parameters = ",".join([p.sql_repr() for p in parameters])
query = f"""
CREATE TABLE "{formatted_name}" (
id INTEGER PRIMARY KEY,
{_parameters}
);
"""
transaction(conn, query)
else:
query = f"""
CREATE TABLE "{formatted_name}" (
id INTEGER PRIMARY KEY
);
"""
transaction(conn, query)
def create_run(conn: ConnectionPlus, exp_id: int, name: str,
guid: str,
parameters: Optional[List[ParamSpec]]=None,
values: List[Any] = None,
metadata: Optional[Dict[str, Any]]=None)->Tuple[int, int, str]:
""" Create a single run for the experiment.
This will register the run in the runs table, the counter in the
experiments table and create a new table with the formatted name.
Args:
- conn: the connection to the sqlite database
- exp_id: the experiment id we want to create the run into
- name: a friendly name for this run
- guid: the guid adhering to our internal guid format
- parameters: optional list of parameters this run has
- values: optional list of values for the parameters
- metadata: optional metadata dictionary
Returns:
- run_counter: the id of the newly created run (not unique)
- run_id: the row id of the newly created run
- formatted_name: the name of the newly created table
"""
with atomic(conn):
run_counter, formatted_name, run_id = _insert_run(conn,
exp_id,
name,
guid,
parameters)
if metadata:
add_meta_data(conn, run_id, metadata)
_update_experiment_run_counter(conn, exp_id, run_counter)
_create_run_table(conn, formatted_name, parameters, values)
return run_counter, run_id, formatted_name
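# Illustrative usage sketch (not part of the original module): register a new
# run with a single numeric parameter. ``exp_id`` must refer to an existing
# experiment; the run name is arbitrary and generate_guid() is assumed to be
# available here, as it is used elsewhere in this module.
def _example_create_run(conn: ConnectionPlus, exp_id: int) -> int:
    x = ParamSpec('x', 'numeric', label='Voltage', unit='V')
    _, run_id, _ = create_run(conn, exp_id, name='demo_run',
                              guid=generate_guid(), parameters=[x])
    return run_id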
def get_metadata(conn: ConnectionPlus, tag: str, table_name: str):
""" Get metadata under the tag from table
"""
return select_one_where(conn, "runs", tag,
"result_table_name", table_name)
def get_metadata_from_run_id(conn: ConnectionPlus, run_id: int) -> Dict:
"""
Get all metadata associated with the specified run
"""
# TODO: promote snapshot to be present at creation time
non_metadata = RUNS_TABLE_COLUMNS + ['snapshot']
metadata = {}
possible_tags = []
# first fetch all columns of the runs table
query = "PRAGMA table_info(runs)"
cursor = conn.cursor()
for row in cursor.execute(query):
if row['name'] not in non_metadata:
possible_tags.append(row['name'])
# and then fetch whatever metadata the run might have
for tag in possible_tags:
query = f"""
SELECT "{tag}"
FROM runs
WHERE run_id = ?
AND "{tag}" IS NOT NULL
"""
cursor.execute(query, (run_id,))
row = cursor.fetchall()
if row != []:
metadata[tag] = row[0][tag]
return metadata
def insert_meta_data(conn: ConnectionPlus, row_id: int, table_name: str,
metadata: Dict[str, Any]) -> None:
"""
Insert new metadata column and add values. Note that None is not a valid
metadata value
Args:
- conn: the connection to the sqlite database
- row_id: the row to add the metadata at
- table_name: the table to add to, defaults to runs
- metadata: the metadata to add
"""
for tag, val in metadata.items():
if val is None:
raise ValueError(f'Tag {tag} has value None. '
' That is not a valid metadata value!')
for key in metadata.keys():
insert_column(conn, table_name, key)
update_meta_data(conn, row_id, table_name, metadata)
def update_meta_data(conn: ConnectionPlus, row_id: int, table_name: str,
metadata: Dict[str, Any]) -> None:
"""
Updates metadata (they must exist already)
Args:
- conn: the connection to the sqlite database
- row_id: the row to add the metadata at
- table_name: the table to add to, defaults to runs
- metadata: the metadata to add
"""
update_where(conn, table_name, 'rowid', row_id, **metadata)
def add_meta_data(conn: ConnectionPlus,
row_id: int,
metadata: Dict[str, Any],
table_name: str = "runs") -> None:
"""
Add metadata data (updates if exists, create otherwise).
Note that None is not a valid metadata value.
Args:
- conn: the connection to the sqlite database
- row_id: the row to add the metadata at
- metadata: the metadata to add
- table_name: the table to add to, defaults to runs
"""
try:
insert_meta_data(conn, row_id, table_name, metadata)
except sqlite3.OperationalError as e:
# this means that the column already exists
# so just insert the new value
if str(e).startswith("duplicate"):
update_meta_data(conn, row_id, table_name, metadata)
else:
raise e
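# Illustrative usage sketch (not part of the original module): attach a
# free-form metadata tag to a run; the tag name and value are arbitrary
# examples.
def _example_tag_run(conn: ConnectionPlus, run_id: int) -> None:
    add_meta_data(conn, run_id, {"operator": "A. N. Other"})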
def get_user_version(conn: ConnectionPlus) -> int:
curr = atomic_transaction(conn, 'PRAGMA user_version')
res = one(curr, 0)
return res
def set_user_version(conn: ConnectionPlus, version: int) -> None:
atomic_transaction(conn, 'PRAGMA user_version({})'.format(version))
def get_experiment_name_from_experiment_id(
conn: ConnectionPlus, exp_id: int) -> str:
return select_one_where(
conn, "experiments", "name", "exp_id", exp_id)
def get_sample_name_from_experiment_id(
conn: ConnectionPlus, exp_id: int) -> str:
return select_one_where(
conn, "experiments", "sample_name", "exp_id", exp_id)
def get_run_timestamp_from_run_id(conn: ConnectionPlus,
run_id: int) -> float:
return select_one_where(conn, "runs", "run_timestamp", "run_id", run_id)
def update_GUIDs(conn: ConnectionPlus) -> None:
"""
Update all GUIDs in this database where either the location code or the
work_station code is zero to use the location and work_station code from
the qcodesrc.json file in home. Runs where it is not true that both codes
are zero are skipped.
"""
log.info('Commencing update of all GUIDs in database')
cfg = qc.Config()
location = cfg['GUID_components']['location']
work_station = cfg['GUID_components']['work_station']
if location == 0:
log.warning('The location is still set to the default (0). Can not '
'proceed. Please configure the location before updating '
'the GUIDs.')
return
if work_station == 0:
log.warning('The work_station is still set to the default (0). Can not'
                    ' proceed. Please configure the work_station before '
                    'updating the GUIDs.')
return
query = f"select MAX(run_id) from runs"
c = atomic_transaction(conn, query)
no_of_runs = c.fetchall()[0][0]
# now, there are four actions we can take
def _both_nonzero(run_id: int, *args) -> None:
log.info(f'Run number {run_id} already has a valid GUID, skipping.')
def _location_only_zero(run_id: int, *args) -> None:
log.warning(f'Run number {run_id} has a zero (default) location '
'code, but a non-zero work station code. Please manually '
'resolve this, skipping the run now.')
def _workstation_only_zero(run_id: int, *args) -> None:
log.warning(f'Run number {run_id} has a zero (default) work station'
' code, but a non-zero location code. Please manually '
'resolve this, skipping the run now.')
def _both_zero(run_id: int, conn, guid_comps) -> None:
guid_str = generate_guid(timeint=guid_comps['time'],
sampleint=guid_comps['sample'])
with atomic(conn) as conn:
sql = f"""
UPDATE runs
SET guid = ?
where run_id == {run_id}
"""
cur = conn.cursor()
cur.execute(sql, (guid_str,))
        log.info(f'Successfully updated run number {run_id}.')
actions: Dict[Tuple[bool, bool], Callable]
actions = {(True, True): _both_zero,
(False, True): _workstation_only_zero,
(True, False): _location_only_zero,
(False, False): _both_nonzero}
for run_id in range(1, no_of_runs+1):
guid_str = get_guid_from_run_id(conn, run_id)
guid_comps = parse_guid(guid_str)
loc = guid_comps['location']
ws = guid_comps['work_station']
log.info(f'Updating run number {run_id}...')
actions[(loc == 0, ws == 0)](run_id, conn, guid_comps)
def remove_trigger(conn: ConnectionPlus, trigger_id: str) -> None:
"""
Removes a trigger with a given id if it exists.
Note that this transaction is not atomic!
Args:
conn: database connection object
        trigger_id: id of the trigger
"""
transaction(conn, f"DROP TRIGGER IF EXISTS {trigger_id};")
def _fix_wrong_run_descriptions(conn: ConnectionPlus,
run_ids: Sequence[int]) -> None:
"""
NB: This is a FIX function. Do not use it unless your database has been
diagnosed with the problem that this function fixes.
Overwrite faulty run_descriptions by using information from the layouts and
dependencies tables. If a correct description is found for a run, that
run is left untouched.
Args:
conn: The connection to the database
run_ids: The runs to (potentially) fix
"""
log.info('[*] Fixing run descriptions...')
for run_id in run_ids:
trusted_paramspecs = get_parameters(conn, run_id)
trusted_desc = RunDescriber(
interdeps=InterDependencies(*trusted_paramspecs))
actual_desc_str = select_one_where(conn, "runs",
"run_description",
"run_id", run_id)
if actual_desc_str == trusted_desc.to_json():
log.info(f'[+] Run id: {run_id} had an OK description')
else:
log.info(f'[-] Run id: {run_id} had a broken description. '
f'Description found: {actual_desc_str}')
update_run_description(conn, run_id, trusted_desc.to_json())
log.info(f' Run id: {run_id} has been updated.')
| 31.977769
| 127
| 0.592553
|
2f47c4ba6b2f50994b9774948c47f978d9ef15c7
| 419
|
py
|
Python
|
Tagger_unit_test.py
|
akikowork/DeepDanbooru
|
e4b3e6eb60769d1257c826d842ce387812ad1ab5
|
[
"MIT"
] | 4
|
2020-04-01T09:55:31.000Z
|
2022-01-07T08:40:52.000Z
|
Tagger_unit_test.py
|
Hecate2/DeepDanbooru
|
5b6bbb1f9d99d7adc5dd7e45c0d9004966d20b9b
|
[
"MIT"
] | null | null | null |
Tagger_unit_test.py
|
Hecate2/DeepDanbooru
|
5b6bbb1f9d99d7adc5dd7e45c0d9004966d20b9b
|
[
"MIT"
] | 1
|
2020-04-02T00:31:35.000Z
|
2020-04-02T00:31:35.000Z
|
def format_url(fileurl):
    """Return the part of the last path segment that precedes the first '_'."""
    tmpchr = ""
    tmpchrs = ""
    # Keep only the characters after the last '/'.
    for chars in fileurl:
        if chars == '/':
            tmpchr = ""
        else:
            tmpchr += chars
    # Copy characters up to (but not including) the first '_'.
    for chars in tmpchr:
        if chars != '_':
            tmpchrs += chars
        else:
            break
    return tmpchrs
print(format_url("a/b_c"))
print(format_url("a/a/b_c"))
| 23.277778
| 35
| 0.417661
|
6c848fe316b9b8e462259d1f614536c09b2fd837
| 711
|
py
|
Python
|
apps/sgx/prepare_test_libs.py
|
TaoLv/tvm
|
11318966571f654f4e8bc550bfd9a293303e3000
|
[
"Apache-2.0"
] | 1
|
2020-01-01T06:59:43.000Z
|
2020-01-01T06:59:43.000Z
|
apps/sgx/prepare_test_libs.py
|
TaoLv/tvm
|
11318966571f654f4e8bc550bfd9a293303e3000
|
[
"Apache-2.0"
] | null | null | null |
apps/sgx/prepare_test_libs.py
|
TaoLv/tvm
|
11318966571f654f4e8bc550bfd9a293303e3000
|
[
"Apache-2.0"
] | 2
|
2019-04-07T07:28:19.000Z
|
2019-11-18T08:09:06.000Z
|
"""Script to prepare test_addone_sys.o"""
from os import path as osp
import tvm
CWD = osp.dirname(osp.abspath(osp.expanduser(__file__)))
def prepare_test_libs(base_path):
n = tvm.var('n')
A = tvm.placeholder((n,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1, name='B')
s = tvm.create_schedule(B.op)
s[B].parallel(s[B].op.axis[0])
print(tvm.lower(s, [A, B], simple_mode=True))
# Compile library in system library mode
fadd_syslib = tvm.build(s, [A, B], 'llvm --system-lib')
syslib_path = osp.join(base_path, 'test_addone_sys.o')
fadd_syslib.save(syslib_path)
def main():
prepare_test_libs(osp.join(CWD, 'lib'))
if __name__ == '__main__':
main()
| 26.333333
| 60
| 0.651195
|
cf901633d1e0453c1dedd91c20e944483338c159
| 992
|
py
|
Python
|
python/phonenumbers/shortdata/region_AT.py
|
vishnuku/python-phonenumbers
|
6ac2cdd06b7ccf709a8efb21629cf2c5f030e627
|
[
"Apache-2.0"
] | 3
|
2018-12-02T23:09:00.000Z
|
2018-12-02T23:16:59.000Z
|
python/phonenumbers/shortdata/region_AT.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/shortdata/region_AT.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
"""Auto-generated file, do not edit by hand. AT metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AT = PhoneMetadata(id='AT', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}', possible_length=(3, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|1(?:17|23))', possible_number_pattern='\\d{6}', example_number='116000', possible_length=(6,)),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='1(?:[12]2|33|44)', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6(?:00[06]|1(?:17|23)))|22|33|44)', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3, 6)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
| 76.307692
| 184
| 0.742944
|
e51684c6fd8896b3ad857f588edaf938e72bcdc3
| 29,508
|
py
|
Python
|
aries-cloudagent-python/aries_cloudagent/protocols/endorse_transaction/v1_0/tests/test_manager.py
|
sed-szeged/IndyPerf
|
68bb48c7c651cae113cf30696110825ff3267ccc
|
[
"CC0-1.0"
] | null | null | null |
aries-cloudagent-python/aries_cloudagent/protocols/endorse_transaction/v1_0/tests/test_manager.py
|
sed-szeged/IndyPerf
|
68bb48c7c651cae113cf30696110825ff3267ccc
|
[
"CC0-1.0"
] | null | null | null |
aries-cloudagent-python/aries_cloudagent/protocols/endorse_transaction/v1_0/tests/test_manager.py
|
sed-szeged/IndyPerf
|
68bb48c7c651cae113cf30696110825ff3267ccc
|
[
"CC0-1.0"
] | 1
|
2022-02-02T17:05:27.000Z
|
2022-02-02T17:05:27.000Z
|
import json
import uuid
from aiohttp import web
from asynctest import mock as async_mock
from asynctest import TestCase as AsyncTestCase
from .....cache.base import BaseCache
from .....cache.in_memory import InMemoryCache
from .....connections.models.conn_record import ConnRecord
from .....core.in_memory import InMemoryProfile
from .....ledger.base import BaseLedger
from .....storage.error import StorageNotFoundError
from ..manager import TransactionManager, TransactionManagerError
from ..messages.messages_attach import MessagesAttach
from ..messages.transaction_acknowledgement import TransactionAcknowledgement
from ..messages.transaction_request import TransactionRequest
from ..models.transaction_record import TransactionRecord
from ..transaction_jobs import TransactionJob
TEST_DID = "LjgpST2rjsoxYegQDRm7EL"
SCHEMA_NAME = "bc-reg"
SCHEMA_TXN = 12
SCHEMA_ID = f"{TEST_DID}:2:{SCHEMA_NAME}:1.0"
CRED_DEF_ID = f"{TEST_DID}:3:CL:12:tag1"
class TestTransactionManager(AsyncTestCase):
async def setUp(self):
self.session = InMemoryProfile.test_session()
sigs = [
(
"2iNTeFy44WK9zpsPfcwfu489aHWroYh3v8mme9tPyNKn"
"crk1tVbWKNU4zFvLAbSBwHWxShQSJrhRgoxwaehCaz2j"
),
(
"3hPr2WgAixcXQRQfCZKnmpY7SkQyQW4cegX7QZMPv6Fv"
"sNRFV7yW21VaFC5CA3Aze264dkHjX4iZ1495am8fe1qZ"
),
]
self.test_messages_attach = f"""{{
"endorser": "DJGEjaMunDtFtBVrn1qJMT",
"identifier": "C3nJhruVc7feyB6ckJwhi2",
"operation": {{
"data": {{
"attr_names": ["score"],
"name": "prefs",
"version": "1.0"
}},
"type": "101"
}},
"protocolVersion": 2,
"reqId": 1613463373859595201,
"signatures": {{
"C3nJhruVc7feyB6ckJwhi2": {sigs[0]}
}}
}}"""
self.test_expires_time = "2021-03-29T05:22:19Z"
self.test_connection_id = "3fa85f64-5717-4562-b3fc-2c963f66afa6"
self.test_receivers_connection_id = "3fa85f64-5717-4562-b3fc-2c963f66afa7"
self.test_author_transaction_id = "3fa85f64-5717-4562-b3fc-2c963f66afa7"
self.test_endorser_transaction_id = "3fa85f64-5717-4562-b3fc-2c963f66afa8"
self.test_endorsed_message = f"""{{
"endorser": "DJGEjaMunDtFtBVrn1qJMT",
"identifier": "C3nJhruVc7feyB6ckJwhi2",
"operation": {{
"data": {{
"attr_names": ["score"],
"name": "prefs",
"version": "1.0"
}},
"type": "101"
}},
"protocolVersion": 2,
"reqId": 1613463373859595201,
"signatures": {{
"C3nJhruVc7feyB6ckJwhi2": {sigs[0]},
"DJGEjaMunDtFtBVrn1qJMT": {sigs[1]}
}}
}}"""
self.test_signature = f"""{{
"endorser": "DJGEjaMunDtFtBVrn1qJMT",
"identifier": "C3nJhruVc7feyB6ckJwhi2",
"operation": {{
"data": {{
"attr_names": ["score"],
"name": "prefs",
"version": "1.0"
}},
"type": "101"
}},
"protocolVersion": 2,
"reqId": 1613463373859595201,
"signatures": {{
"C3nJhruVc7feyB6ckJwhi2": {sigs[0]},
"DJGEjaMunDtFtBVrn1qJMT": {sigs[1]}
}}
}}"""
self.test_endorser_did = "DJGEjaMunDtFtBVrn1qJMT"
self.test_endorser_verkey = "3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx"
self.test_refuser_did = "AGDEjaMunDtFtBVrn1qPKQ"
self.ledger = async_mock.create_autospec(BaseLedger)
self.session.context.injector.bind_instance(BaseLedger, self.ledger)
self.manager = TransactionManager(self.session)
assert self.manager.session
async def test_transaction_jobs(self):
author = TransactionJob.TRANSACTION_AUTHOR
endorser = TransactionJob.TRANSACTION_ENDORSER
assert author != endorser
async def test_create_record(self):
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
save_record.assert_called_once()
assert (
transaction_record.formats[0]["attach_id"]
== transaction_record.messages_attach[0]["@id"]
)
assert (
transaction_record.formats[0]["format"]
== TransactionRecord.FORMAT_VERSION
)
assert (
transaction_record.messages_attach[0]["data"]["json"]
== self.test_messages_attach
)
assert (
transaction_record.state == TransactionRecord.STATE_TRANSACTION_CREATED
)
async def test_txn_rec_retrieve_by_connection_and_thread_caching(self):
async with self.session.profile.session() as sesn:
sesn.context.injector.bind_instance(BaseCache, InMemoryCache())
txn_rec = TransactionRecord(
connection_id="123",
thread_id="456",
)
await txn_rec.save(self.session)
await TransactionRecord.retrieve_by_connection_and_thread(
session=sesn,
connection_id="123",
thread_id="456",
) # set in cache
await TransactionRecord.retrieve_by_connection_and_thread(
session=sesn,
connection_id="123",
thread_id="456",
) # get from cache
async def test_create_request_bad_state(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
with self.assertRaises(TransactionManagerError):
await self.manager.create_request(transaction=transaction_record)
async def test_create_request(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
(
transaction_record,
transaction_request,
) = await self.manager.create_request(
transaction_record,
expires_time=self.test_expires_time,
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_REQUEST
assert transaction_record.signature_request[0] == {
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"author_goal_code": TransactionRecord.WRITE_TRANSACTION,
}
assert transaction_record.state == TransactionRecord.STATE_REQUEST_SENT
assert transaction_record.connection_id == self.test_connection_id
assert transaction_record.timing["expires_time"] == self.test_expires_time
assert transaction_request.transaction_id == transaction_record._id
assert (
transaction_request.signature_request
== transaction_record.signature_request[0]
)
assert transaction_request.timing == transaction_record.timing
assert (
transaction_request.messages_attach == transaction_record.messages_attach[0]
)
    async def test_receive_request(self):
mock_request = async_mock.MagicMock()
mock_request.transaction_id = self.test_author_transaction_id
mock_request.signature_request = {
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"author_goal_code": TransactionRecord.WRITE_TRANSACTION,
}
mock_request.messages_attach = {
"@id": str(uuid.uuid4()),
"mime-type": "application/json",
"data": {"json": self.test_messages_attach},
}
mock_request.timing = {"expires_time": self.test_expires_time}
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
transaction_record = await self.manager.receive_request(
mock_request, self.test_receivers_connection_id
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_REQUEST
assert transaction_record.signature_request[0] == mock_request.signature_request
assert transaction_record.timing == mock_request.timing
assert transaction_record.formats[0] == {
"attach_id": mock_request.messages_attach["@id"],
"format": TransactionRecord.FORMAT_VERSION,
}
assert transaction_record.messages_attach[0] == mock_request.messages_attach
assert transaction_record.thread_id == self.test_author_transaction_id
assert transaction_record.connection_id == self.test_receivers_connection_id
assert transaction_record.state == TransactionRecord.STATE_REQUEST_RECEIVED
async def test_create_endorse_response_bad_state(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
with self.assertRaises(TransactionManagerError):
await self.manager.create_endorse_response(
transaction=transaction_record,
state=TransactionRecord.STATE_TRANSACTION_ENDORSED,
endorser_did=self.test_endorser_did,
endorser_verkey=self.test_endorser_verkey,
endorsed_msg=self.test_endorsed_message,
signature=self.test_signature,
)
async def test_create_endorse_response(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_REQUEST_RECEIVED
transaction_record.thread_id = self.test_author_transaction_id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
(
transaction_record,
endorsed_transaction_response,
) = await self.manager.create_endorse_response(
transaction_record,
state=TransactionRecord.STATE_TRANSACTION_ENDORSED,
endorser_did=self.test_endorser_did,
endorser_verkey=self.test_endorser_verkey,
endorsed_msg=self.test_endorsed_message,
signature=self.test_signature,
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_RESPONSE
assert (
transaction_record.messages_attach[0]["data"]["json"]
== self.test_endorsed_message
)
assert transaction_record.signature_response[0] == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signature": {self.test_endorser_did: self.test_signature},
}
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_ENDORSED
assert (
endorsed_transaction_response.transaction_id
== self.test_author_transaction_id
)
assert endorsed_transaction_response.thread_id == transaction_record._id
assert endorsed_transaction_response.signature_response == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signature": {self.test_endorser_did: self.test_signature},
}
assert (
endorsed_transaction_response.state
== TransactionRecord.STATE_TRANSACTION_ENDORSED
)
assert endorsed_transaction_response.endorser_did == self.test_endorser_did
async def test_receive_endorse_response(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
self.test_author_transaction_id = transaction_record._id
mock_response = async_mock.MagicMock()
mock_response.transaction_id = self.test_author_transaction_id
mock_response.thread_id = self.test_endorser_transaction_id
mock_response.signature_response = {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signature": {self.test_endorser_did: self.test_signature},
}
mock_response.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
mock_response.endorser_did = self.test_endorser_did
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
transaction_record = await self.manager.receive_endorse_response(
mock_response
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_RESPONSE
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_ENDORSED
assert transaction_record.signature_response[0] == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.ENDORSE_TRANSACTION,
"signature_type": TransactionRecord.SIGNATURE_TYPE,
"signature": {self.test_endorser_did: self.test_signature},
}
assert transaction_record.thread_id == self.test_endorser_transaction_id
assert (
transaction_record.messages_attach[0]["data"]["json"] == self.test_signature
)
async def test_complete_transaction(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
self.ledger.get_indy_storage = async_mock.MagicMock(
return_value=async_mock.MagicMock(add_record=async_mock.CoroutineMock())
)
self.ledger.txn_submit = async_mock.CoroutineMock(
return_value=json.dumps(
{
"result_4": {
"txn": {"type": "101", "metadata": {"from": TEST_DID}},
"txnMetadata": {"txnId": SCHEMA_ID},
}
}
)
)
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record, async_mock.patch.object(
ConnRecord, "retrieve_by_id"
) as mock_conn_rec_retrieve:
mock_conn_rec_retrieve.return_value = async_mock.MagicMock(
metadata_get=async_mock.CoroutineMock(
return_value={
"transaction_their_job": (
TransactionJob.TRANSACTION_ENDORSER.name
),
"transaction_my_job": (TransactionJob.TRANSACTION_AUTHOR.name),
}
)
)
(
transaction_record,
transaction_acknowledgement_message,
) = await self.manager.complete_transaction(transaction_record)
save_record.assert_called_once()
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_ACKED
async def test_create_refuse_response_bad_state(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
with self.assertRaises(TransactionManagerError):
await self.manager.create_refuse_response(
transaction=transaction_record,
state=TransactionRecord.STATE_TRANSACTION_REFUSED,
refuser_did=self.test_refuser_did,
)
async def test_create_refuse_response(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_REQUEST_RECEIVED
transaction_record.thread_id = self.test_author_transaction_id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
(
transaction_record,
refused_transaction_response,
) = await self.manager.create_refuse_response(
transaction_record,
state=TransactionRecord.STATE_TRANSACTION_REFUSED,
refuser_did=self.test_refuser_did,
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_RESPONSE
assert transaction_record.signature_response[0] == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.REFUSE_TRANSACTION,
}
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_REFUSED
assert (
refused_transaction_response.transaction_id
== self.test_author_transaction_id
)
assert refused_transaction_response.thread_id == transaction_record._id
assert refused_transaction_response.signature_response == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.REFUSE_TRANSACTION,
}
assert (
refused_transaction_response.state
== TransactionRecord.STATE_TRANSACTION_REFUSED
)
assert refused_transaction_response.endorser_did == self.test_refuser_did
async def test_receive_refuse_response(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
self.test_author_transaction_id = transaction_record._id
mock_response = async_mock.MagicMock()
mock_response.transaction_id = self.test_author_transaction_id
mock_response.thread_id = self.test_endorser_transaction_id
mock_response.signature_response = {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.REFUSE_TRANSACTION,
}
mock_response.state = TransactionRecord.STATE_TRANSACTION_REFUSED
mock_response.endorser_did = self.test_refuser_did
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
transaction_record = await self.manager.receive_refuse_response(
mock_response
)
save_record.assert_called_once()
assert transaction_record._type == TransactionRecord.SIGNATURE_RESPONSE
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_REFUSED
assert transaction_record.signature_response[0] == {
"message_id": transaction_record.messages_attach[0]["@id"],
"context": TransactionRecord.SIGNATURE_CONTEXT,
"method": TransactionRecord.ADD_SIGNATURE,
"signer_goal_code": TransactionRecord.REFUSE_TRANSACTION,
}
assert transaction_record.thread_id == self.test_endorser_transaction_id
async def test_cancel_transaction_bad_state(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
with self.assertRaises(TransactionManagerError):
await self.manager.cancel_transaction(
transaction=transaction_record,
state=TransactionRecord.STATE_TRANSACTION_CANCELLED,
)
async def test_cancel_transaction(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_REQUEST_SENT
transaction_record.thread_id = self.test_endorser_transaction_id
transaction_record._id = self.test_author_transaction_id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
(
transaction_record,
cancelled_transaction_response,
) = await self.manager.cancel_transaction(
transaction_record, state=TransactionRecord.STATE_TRANSACTION_CANCELLED
)
save_record.assert_called_once()
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_CANCELLED
assert (
cancelled_transaction_response.thread_id == self.test_author_transaction_id
)
assert (
cancelled_transaction_response.state
== TransactionRecord.STATE_TRANSACTION_CANCELLED
)
async def test_receive_cancel_transaction(self):
author_transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
(
author_transaction_record,
author_transaction_request,
) = await self.manager.create_request(author_transaction_record)
endorser_transaction_record = await self.manager.receive_request(
author_transaction_request, self.test_receivers_connection_id
)
mock_response = async_mock.MagicMock()
mock_response.state = TransactionRecord.STATE_TRANSACTION_CANCELLED
mock_response.thread_id = author_transaction_record._id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
endorser_transaction_record = await self.manager.receive_cancel_transaction(
mock_response, self.test_receivers_connection_id
)
save_record.assert_called_once()
assert (
endorser_transaction_record.state
== TransactionRecord.STATE_TRANSACTION_CANCELLED
)
async def test_transaction_resend_bad_state(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_ENDORSED
with self.assertRaises(TransactionManagerError):
await self.manager.transaction_resend(
transaction=transaction_record,
state=TransactionRecord.STATE_TRANSACTION_RESENT,
)
async def test_transaction_resend(self):
transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
transaction_record.state = TransactionRecord.STATE_TRANSACTION_REFUSED
transaction_record.thread_id = self.test_endorser_transaction_id
transaction_record._id = self.test_author_transaction_id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
(
transaction_record,
resend_transaction_response,
) = await self.manager.transaction_resend(
transaction_record, state=TransactionRecord.STATE_TRANSACTION_RESENT
)
save_record.assert_called_once()
assert transaction_record.state == TransactionRecord.STATE_TRANSACTION_RESENT
assert resend_transaction_response.thread_id == self.test_author_transaction_id
assert (
resend_transaction_response.state
== TransactionRecord.STATE_TRANSACTION_RESENT_RECEIEVED
)
async def test_receive_transaction_resend(self):
author_transaction_record = await self.manager.create_record(
messages_attach=self.test_messages_attach,
connection_id=self.test_connection_id,
)
(
author_transaction_record,
author_transaction_request,
) = await self.manager.create_request(author_transaction_record)
endorser_transaction_record = await self.manager.receive_request(
author_transaction_request, self.test_receivers_connection_id
)
mock_response = async_mock.MagicMock()
mock_response.state = TransactionRecord.STATE_TRANSACTION_RESENT_RECEIEVED
mock_response.thread_id = author_transaction_record._id
with async_mock.patch.object(
TransactionRecord, "save", autospec=True
) as save_record:
endorser_transaction_record = await self.manager.receive_transaction_resend(
mock_response, self.test_receivers_connection_id
)
save_record.assert_called_once()
assert (
endorser_transaction_record.state
== TransactionRecord.STATE_TRANSACTION_RESENT_RECEIEVED
)
async def test_set_transaction_my_job(self):
conn_record = async_mock.MagicMock(
metadata_get=async_mock.CoroutineMock(
side_effect=[
None,
{"meta": "data"},
]
),
metadata_set=async_mock.CoroutineMock(),
)
for i in range(2):
await self.manager.set_transaction_my_job(conn_record, "Hello")
async def test_set_transaction_their_job(self):
mock_job = async_mock.MagicMock()
mock_receipt = async_mock.MagicMock()
with async_mock.patch.object(
ConnRecord, "retrieve_by_did", async_mock.CoroutineMock()
) as mock_retrieve:
mock_retrieve.return_value = async_mock.MagicMock(
metadata_get=async_mock.CoroutineMock(
side_effect=[
None,
{"meta": "data"},
]
),
metadata_set=async_mock.CoroutineMock(),
)
for i in range(2):
await self.manager.set_transaction_their_job(mock_job, mock_receipt)
async def test_set_transaction_their_job_conn_not_found(self):
mock_job = async_mock.MagicMock()
mock_receipt = async_mock.MagicMock()
with async_mock.patch.object(
ConnRecord, "retrieve_by_did", async_mock.CoroutineMock()
) as mock_retrieve:
mock_retrieve.side_effect = StorageNotFoundError()
with self.assertRaises(TransactionManagerError):
await self.manager.set_transaction_their_job(mock_job, mock_receipt)
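# Illustrative addition (not part of the original file): the asynctest-based
# test case above is unittest-compatible, so the module can also be run
# directly with the standard unittest runner.
if __name__ == "__main__":
    import unittest
    unittest.main()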
| 41.677966
| 88
| 0.649654
|
7a28e004e32a21ec644fa49111aa7697d49f26ac
| 1,643
|
py
|
Python
|
bud_get/bud_get.py
|
doggan/bud-get
|
9b678493783705f416ea73ba174f083f0b733d03
|
[
"Unlicense"
] | null | null | null |
bud_get/bud_get.py
|
doggan/bud-get
|
9b678493783705f416ea73ba174f083f0b733d03
|
[
"Unlicense"
] | null | null | null |
bud_get/bud_get.py
|
doggan/bud-get
|
9b678493783705f416ea73ba174f083f0b733d03
|
[
"Unlicense"
] | null | null | null |
""" Budget utilities.
"""
import csv
def _reader_filter(entries, *keys):
""" Perform filtering of dictionary keys, returning an interator.
"""
for entry in entries:
yield dict((k, entry[k]) for k in keys)
FILTER_COLUMNS = ('Type', 'Trans Date', 'Description', 'Amount')
def filter_csv(in_file):
""" Perform filter / sorting operations on CSV data.
"""
results = []
with open(in_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in _reader_filter(reader, *FILTER_COLUMNS):
# Skip payments.
if row['Type'] == 'Payment':
continue
            # Drop the Type column; it is no longer needed after filtering.
row.pop('Type', None)
# Strip the negative sign.
amount = row['Amount']
if len(amount) > 0 and amount[0] == '-':
row['Amount'] = amount[1:]
results.append(row)
# Sort by transaction date and amount (ascending).
results = sorted(results, key=lambda x:\
# Sort by year.
(x['Trans Date'][-4:],\
# Sort by month/day.
x['Trans Date'][:-4],\
# Sort by amount.
float(x['Amount'])))
return results
def write_csv(csv_data, out_file):
""" Write CSV data (rows of dicts) to an output file.
"""
    if len(csv_data) <= 0:
        print("No data to write.")
        return
    print("Writing %s" % out_file)
with open(out_file, 'w') as csvfile:
fieldnames = csv_data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in csv_data:
writer.writerow(row)
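# Illustrative usage sketch (not part of the original module): chain the two
# helpers above; the file names are hypothetical.
def _example_filter_and_write():
    rows = filter_csv('statement.csv')
    write_csv(rows, 'statement_filtered.csv')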
| 26.934426
| 69
| 0.561169
|
aa2cd3da2e69287cd5685b8bc4c3a1b05327fed2
| 908
|
py
|
Python
|
setup.py
|
goes-funky/tap-google-analytics
|
13e3fc81ad96dcf165eb18ef65b60c9b072c9360
|
[
"MIT"
] | null | null | null |
setup.py
|
goes-funky/tap-google-analytics
|
13e3fc81ad96dcf165eb18ef65b60c9b072c9360
|
[
"MIT"
] | null | null | null |
setup.py
|
goes-funky/tap-google-analytics
|
13e3fc81ad96dcf165eb18ef65b60c9b072c9360
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
setup(
name="tap-google-analytics",
version="0.1.1",
description="Singer.io tap for extracting data from the Google Analytics Reporting API",
author='Meltano Team & Contributors',
author_email="meltano@gitlab.com",
url="https://gitlab.com/meltano/tap-google-analytics",
classifiers=["Programming Language :: Python :: 3 :: Only"],
py_modules=["tap_google_analytics"],
install_requires=[
"singer-python==5.6.1",
"google-api-python-client==1.7.9",
"oauth2client==4.1.3",
"backoff==1.3.2"
],
entry_points="""
[console_scripts]
tap-google-analytics=tap_google_analytics:main
""",
packages=["tap_google_analytics"],
package_data = {
'tap_google_analytics/defaults': [
"default_report_definition.json",
],
},
include_package_data=True,
)
| 29.290323
| 92
| 0.64978
|
9d4686fd7bfd962414e0686df3ff28f8967303a7
| 30,014
|
py
|
Python
|
b2share/modules/records/views.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | null | null | null |
b2share/modules/records/views.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | null | null | null |
b2share/modules/records/views.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | 1
|
2020-09-29T10:56:03.000Z
|
2020-09-29T10:56:03.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import uuid
import re
from functools import partial, wraps
from sqlalchemy import and_
from sqlalchemy.orm import aliased
from flask import Blueprint, abort, request, url_for, make_response
from flask import jsonify, Flask, current_app
from flask_mail import Message
from jsonschema.exceptions import ValidationError
from invenio_db import db
from invenio_pidstore import current_pidstore
from invenio_pidstore.resolver import Resolver
from invenio_pidstore.errors import PIDDoesNotExistError, PIDRedirectedError
from invenio_pidstore.models import PersistentIdentifier
from invenio_pidrelations.contrib.versioning import PIDNodeVersioning
from invenio_pidrelations.models import PIDRelation
from invenio_records_files.api import Record
from invenio_rest.errors import RESTValidationError
from invenio_search import RecordsSearch
from invenio_records.models import RecordMetadata
from invenio_records_files.api import RecordsBuckets
from invenio_records_rest.views import (pass_record,
RecordsListResource, RecordResource,
RecordsListOptionsResource,
SuggestResource)
from invenio_records_rest.links import default_links_factory
from invenio_records_rest.query import default_search_factory
from invenio_records_rest.utils import obj_or_import_string
from invenio_mail import InvenioMail
from invenio_mail.tasks import send_email
from invenio_rest import ContentNegotiatedMethodView
from invenio_accounts.models import User
from .providers import RecordUUIDProvider
from .permissions import DeleteRecordPermission
from .proxies import current_records_rest
# duplicated from invenio-records-rest because we need
# to pass the previous version record data
def verify_record_permission(permission_factory, record, **kwargs):
"""Check that the current user has the required permissions on record.
    In case the permission check fails, a Flask abort is raised:
    if the user is authenticated, an HTTP 403 error is returned;
    otherwise, an HTTP 401 error is returned.
:param permission_factory: permission factory used to check permissions.
:param record: record whose access is limited.
"""
    # Note: this cannot be done in one line due to the overloading of boolean
    # operations on the permission object.
if not permission_factory(record=record, **kwargs).can():
from flask_login import current_user
if not current_user.is_authenticated:
abort(401)
abort(403)
"""Create Invenio-Records-REST blueprint."""
blueprint = Blueprint(
'b2share_records_rest',
__name__,
url_prefix='',
)
def create_blueprint(endpoints):
for endpoint, options in (endpoints or {}).items():
#print("Endpoint: {}".format(endpoint))
#print("- options: {}".format(options))
for rule in create_url_rules(endpoint, **options):
#print("- rule: {}".format(rule))
blueprint.add_url_rule(**rule)
# catch record validation errors
@blueprint.errorhandler(ValidationError)
def validation_error(error):
"""Catch validation errors."""
return RESTValidationError().get_response()
return blueprint
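# Illustrative usage sketch (not part of the original module): the blueprint
# returned by create_blueprint() is registered on a Flask app like any other
# blueprint. The empty endpoint dict is a placeholder; in B2Share the
# endpoint configuration comes from the records REST settings.
def _example_register(app):
    app.register_blueprint(create_blueprint({}))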
def create_url_rules(endpoint, list_route=None, item_route=None,
pid_type=None, pid_minter=None, pid_fetcher=None,
read_permission_factory_imp=None,
create_permission_factory_imp=None,
update_permission_factory_imp=None,
delete_permission_factory_imp=None,
record_class=None,
record_serializers=None,
record_loaders=None,
search_class=None,
search_serializers=None,
search_index=None, search_type=None,
default_media_type=None,
max_result_window=None, use_options_view=True,
search_factory_imp=None, links_factory_imp=None,
suggesters=None):
"""Create Werkzeug URL rules.
:param endpoint: Name of endpoint.
:param list_route: record listing URL route . Required.
:param item_route: record URL route (must include ``<pid_value>`` pattern).
Required.
:param pid_type: Persistent identifier type for endpoint. Required.
:param template: Template to render. Defaults to
``invenio_records_ui/detail.html``.
:param read_permission_factory_imp: Import path to factory that creates a
read permission object for a given record.
:param create_permission_factory_imp: Import path to factory that creates a
create permission object for a given record.
    :param update_permission_factory_imp: Import path to factory that creates
        an update permission object for a given record.
:param delete_permission_factory_imp: Import path to factory that creates a
delete permission object for a given record.
:param search_index: Name of the search index used when searching records.
:param search_type: Name of the search type used when searching records.
:param record_class: Name of the record API class.
:param record_serializers: serializers used for records.
:param search_serializers: serializers used for search results.
:param default_media_type: default media type for both records and search.
:param max_result_window: maximum number of results that Elasticsearch can
provide for the given search index without use of scroll. This value
should correspond to Elasticsearch ``index.max_result_window`` value
for the index.
:param use_options_view: Determines if a special option view should be
installed.
    :returns: a list of dictionaries which can each be passed as keyword
        arguments to ``Blueprint.add_url_rule``.
"""
read_permission_factory = obj_or_import_string(
read_permission_factory_imp
)
create_permission_factory = obj_or_import_string(
create_permission_factory_imp
)
update_permission_factory = obj_or_import_string(
update_permission_factory_imp
)
delete_permission_factory = obj_or_import_string(
delete_permission_factory_imp
)
links_factory = obj_or_import_string(
links_factory_imp, default=default_links_factory
)
record_class = obj_or_import_string(
record_class, default=Record
)
search_class = obj_or_import_string(
search_class, default=RecordsSearch
)
search_class_kwargs = {}
if search_index:
search_class_kwargs['index'] = search_index
else:
search_index = search_class.Meta.index
if search_type:
search_class_kwargs['doc_type'] = search_type
else:
search_type = search_class.Meta.doc_types
if search_class_kwargs:
search_class = partial(search_class, **search_class_kwargs)
if record_loaders:
record_loaders = {mime: obj_or_import_string(func)
for mime, func in record_loaders.items()}
record_serializers = {mime: obj_or_import_string(func)
for mime, func in record_serializers.items()}
search_serializers = {mime: obj_or_import_string(func)
for mime, func in search_serializers.items()}
resolver = Resolver(pid_type=pid_type, object_type='rec',
getter=partial(record_class.get_record,
with_deleted=True))
# import deposit here in order to avoid dependency loop
from b2share.modules.deposit.api import Deposit
from b2share.modules.deposit.serializers import json_v1_response as deposit_serializer
list_view = B2ShareRecordsListResource.as_view(
RecordsListResource.view_name.format(endpoint),
resolver=resolver,
minter_name=pid_minter,
pid_type=pid_type,
pid_fetcher=pid_fetcher,
read_permission_factory=read_permission_factory,
create_permission_factory=create_permission_factory,
# replace the record serializer with deposit serializer as it
# is used only when the deposit is created.
record_serializers={
'application/json': deposit_serializer
},
record_loaders=record_loaders,
search_serializers=search_serializers,
search_class=search_class,
default_media_type=default_media_type,
max_result_window=max_result_window,
search_factory=(obj_or_import_string(
search_factory_imp, default=default_search_factory
)),
item_links_factory=links_factory,
record_class=Deposit,
)
item_view = B2ShareRecordResource.as_view(
B2ShareRecordResource.view_name.format(endpoint),
resolver=resolver,
read_permission_factory=read_permission_factory,
update_permission_factory=update_permission_factory,
delete_permission_factory=delete_permission_factory,
serializers=record_serializers,
loaders=record_loaders,
search_class=search_class,
links_factory=links_factory,
default_media_type=default_media_type)
versions_view = RecordsVersionsResource.as_view(
RecordsVersionsResource.view_name.format(endpoint),
resolver=resolver)
abuse_view = RecordsAbuseResource.as_view(
RecordsAbuseResource.view_name.format(endpoint),
resolver=resolver)
access_view = RequestAccessResource.as_view(
RequestAccessResource.view_name.format(endpoint),
resolver=resolver)
views = [
dict(rule=list_route, view_func=list_view),
dict(rule=item_route, view_func=item_view),
dict(rule=item_route + '/abuse', view_func=abuse_view),
dict(rule=item_route + '/accessrequests', view_func=access_view),
# Special case for versioning as the parent PID is redirected.
dict(rule='/api/records/<pid_value>/versions', view_func=versions_view),
]
if suggesters:
suggest_view = SuggestResource.as_view(
SuggestResource.view_name.format(endpoint),
suggesters=suggesters,
search_class=search_class,
)
views.append(dict(
rule=list_route + '_suggest',
view_func=suggest_view
))
if use_options_view:
options_view = RecordsListOptionsResource.as_view(
RecordsListOptionsResource.view_name.format(endpoint),
search_index=search_index,
max_result_window=max_result_window,
default_media_type=default_media_type,
search_media_types=search_serializers.keys(),
item_media_types=record_serializers.keys(),
)
return [
dict(rule="{0}_options".format(list_route), view_func=options_view)
] + views
return views
class MyContentNegotiatedMethodView(ContentNegotiatedMethodView):
"""MethodView with content negotiation.
Dispatch HTTP requests as MethodView does and build responses using the
registered serializers. It chooses the right serializer using the request's
accept type. It also provides a helper method for handling ETags.
"""
def __init__(self, serializers=None, method_serializers=None,
serializers_query_aliases=None, default_media_type=None,
default_method_media_type=None, *args, **kwargs):
"""Register the serializing functions.
        Serializing functions will receive all positional and keyword arguments
        provided to ``make_response`` or returned by request handling methods.
Recommended prototype is: ``serializer(data, code=200, headers=None)``
and it should return :class:`flask.Response` instances.
Serializing functions can also be overridden by setting
``self.serializers``.
:param serializers: A mapping from mediatype to a serializer function.
:param method_serializers: A mapping of HTTP method name (GET, PUT,
PATCH, POST, DELETE) -> dict(mediatype -> serializer function). If
set, it overrides the serializers dict.
:param serializers_query_aliases: A mapping of values of the defined
query arg (see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid
mimetypes: dict(alias -> mimetype).
:param default_media_type: Default media type used if no accept type
has been provided and global serializers are used for the request.
            Can be ``None`` if there is only one global serializer or none at
            all. This media type is used for method serializers too if
            ``default_method_media_type`` is not set.
:param default_method_media_type: Default media type used if no accept
type has been provided and a specific method serializers are used
            for the request. Can be ``None`` if the method has only one
            serializer or none at all.
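
        A minimal illustration (the serializer below is hypothetical, not part
        of this module) of a ``serializers`` mapping that follows the
        recommended prototype above::

            from flask import jsonify

            def json_serializer(data, code=200, headers=None):
                response = jsonify(data)
                response.status_code = code
                if headers is not None:
                    response.headers.extend(headers)
                return response

            view = MyContentNegotiatedMethodView.as_view(
                'example_view',
                serializers={'application/json': json_serializer},
                default_media_type='application/json')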
"""
super(MyContentNegotiatedMethodView, self).__init__()
self.serializers = serializers or None
self.default_media_type = default_media_type
self.default_method_media_type = default_method_media_type or {}
# set default default media_types if none has been given
if self.serializers and not self.default_media_type:
if len(self.serializers) == 1:
self.default_media_type = next(iter(self.serializers.keys()))
elif len(self.serializers) > 1:
raise ValueError('Multiple serializers with no default media'
' type')
# set method serializers
self.method_serializers = ({key.upper(): func for key, func in
method_serializers.items()} if
method_serializers else {})
# set serializer aliases
self.serializers_query_aliases = serializers_query_aliases or {}
# create default method media_types dict if none has been given
if self.method_serializers and not self.default_method_media_type:
self.default_method_media_type = {}
for http_method, meth_serial in self.method_serializers.items():
if len(self.method_serializers[http_method]) == 1:
self.default_method_media_type[http_method] = \
next(iter(self.method_serializers[http_method].keys()))
elif len(self.method_serializers[http_method]) > 1:
# try to use global default media type
if default_media_type in \
self.method_serializers[http_method]:
self.default_method_media_type[http_method] = \
default_media_type
else:
                        raise ValueError(
                            'Multiple serializers for method {0} '
                            'with no default media type'.format(http_method))
class B2ShareRecordsListResource(MyContentNegotiatedMethodView):
"""Resource for records listing."""
view_name = '{0}_list'
def __init__(self, minter_name=None, pid_type=None,
pid_fetcher=None, read_permission_factory=None,
create_permission_factory=None,
list_permission_factory=None,
search_class=None,
record_serializers=None,
record_loaders=None,
search_serializers=None, default_media_type=None,
max_result_window=None, search_factory=None,
item_links_factory=None, record_class=None,
indexer_class=None, **kwargs):
"""Constructor."""
super(B2ShareRecordsListResource, self).__init__(
method_serializers={
'GET': search_serializers,
'POST': record_serializers,
},
default_method_media_type={
'GET': default_media_type,
'POST': default_media_type,
},
default_media_type=default_media_type,
**kwargs)
self.pid_type = pid_type
self.minter = current_pidstore.minters[minter_name]
self.pid_fetcher = current_pidstore.fetchers[pid_fetcher]
self.read_permission_factory = read_permission_factory
self.create_permission_factory = create_permission_factory or \
current_records_rest.create_permission_factory
self.list_permission_factory = list_permission_factory or \
current_records_rest.list_permission_factory
self.search_class = search_class
self.max_result_window = max_result_window or 10000
self.search_factory = partial(search_factory, self)
self.item_links_factory = item_links_factory
self.loaders = record_loaders or \
current_records_rest.loaders
self.record_class = record_class or Record
self.indexer_class = indexer_class
# @need_record_permission('list_permission_factory')
# @use_paginate_args(
# default_size=lambda self: current_app.config.get(
# 'RECORDS_REST_DEFAULT_RESULTS_SIZE', 10),
# max_results=lambda self: self.max_result_window,
# )
def get(self, pagination=None, **kwargs):
"""Search records.
Permissions: the `list_permission_factory` permissions are
checked.
:returns: Search result containing hits and aggregations as
returned by invenio-search.
"""
# Arguments that must be added in prev/next links
return self.make_response(
pid_fetcher=self.pid_fetcher,
            search_result=kwargs
)
def post(self, **kwargs):
"""Create a record.
:returns: The created record.
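
        A hedged client-side sketch (the base URL and PID value are
        placeholders): sending an empty body together with the ``version_of``
        query argument makes this view copy the metadata from the previous
        version, as implemented below::

            import requests

            resp = requests.post(
                'https://b2share.example.org/api/records/',
                params={'version_of': '<previous-record-pid>'},
                data=b'',
                headers={'Content-Type': 'application/json'})
            assert resp.status_code == 201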
"""
# import deposit dependencies here in order to avoid recursive imports
from b2share.modules.deposit.links import deposit_links_factory
from b2share.modules.deposit.api import copy_data_from_previous
from b2share.modules.deposit.errors import RecordNotFoundVersioningError, IncorrectRecordVersioningError
from b2share.modules.records.api import B2ShareRecord
if request.content_type not in self.loaders:
abort(415)
version_of = request.args.get('version_of')
previous_record = None
data = None
if version_of:
try:
_, previous_record = Resolver(
pid_type='b2rec',
object_type='rec',
getter=B2ShareRecord.get_record,
).resolve(version_of)
# if the pid doesn't exist
except PIDDoesNotExistError as e:
raise RecordNotFoundVersioningError()
# if it is the parent pid
except PIDRedirectedError as e:
raise IncorrectRecordVersioningError(version_of)
# Copy the metadata from a previous version if this version is
# specified and no data was provided.
if request.content_length == 0:
data = copy_data_from_previous(previous_record.model.json)
if data is None:
data = self.loaders[request.content_type]()
if data is None:
abort(400)
# Check permissions
permission_factory = self.create_permission_factory
if permission_factory:
verify_record_permission(permission_factory, data,
previous_record=previous_record)
# Create uuid for record
record_uuid = uuid.uuid4()
# Create persistent identifier
pid = self.minter(record_uuid, data=data)
# Create record
record = self.record_class.create(data, id_=record_uuid,
version_of=version_of)
db.session.commit()
response = self.make_response(
pid, record, 201, links_factory=deposit_links_factory)
# Add location headers
endpoint = 'b2share_deposit_rest.{0}_item'.format(pid.pid_type)
location = url_for(endpoint, pid_value=pid.pid_value, _external=True)
response.headers.extend(dict(location=location))
return response
class B2ShareRecordResource(RecordResource):
"""B2Share resource for records."""
    def put(self, *args, **kwargs):
"""Disable PUT."""
abort(405)
@pass_record
def delete(self, pid, record, *args, **kwargs):
"""Delete a record."""
self.check_etag(str(record.model.version_id))
pid_value = request.view_args['pid_value']
pid, record = pid_value.data
# Check permissions.
permission_factory = self.delete_permission_factory
if permission_factory:
verify_record_permission(permission_factory, record)
record.delete()
db.session.commit()
return '', 204
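

# A hedged client-side sketch (base URL and PID are placeholders): deleting a
# record requires the current record version id in an ``If-Match`` header,
# because ``delete`` above calls ``check_etag`` before removing the record.
#
#     import requests
#     requests.delete(
#         'https://b2share.example.org/api/records/<pid>',
#         headers={'If-Match': '"<record-version-id>"'})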
class RecordsVersionsResource(ContentNegotiatedMethodView):
view_name = '{0}_versions'
def __init__(self, resolver=None, **kwargs):
"""Constructor.
:param resolver: Persistent identifier resolver instance.
"""
default_media_type = 'application/json'
super(RecordsVersionsResource, self).__init__(
serializers={
'application/json': lambda response: jsonify(response)
},
default_method_media_type={
'GET': default_media_type,
},
default_media_type=default_media_type,
**kwargs)
self.resolver = resolver
def get(self, pid=None, **kwargs):
"""GET a list of record's versions."""
record_endpoint = 'b2share_records_rest.{0}_item'.format(
RecordUUIDProvider.pid_type)
pid_value = request.view_args['pid_value']
pid = RecordUUIDProvider.get(pid_value).pid
pid_versioning = PIDNodeVersioning(child=pid)
if pid_versioning.is_child:
# This is a record PID. Retrieve the parent versioning PID.
version_parent_pid_value = pid_versioning.parent.pid_value
else:
# This is a parent versioning PID
version_parent_pid_value = pid_value
records = []
child_pid_table = aliased(PersistentIdentifier)
parent_pid_table = aliased(PersistentIdentifier)
pids_and_meta = db.session.query(
child_pid_table, RecordMetadata
).join(
PIDRelation,
PIDRelation.child_id == child_pid_table.id,
).join(
parent_pid_table,
PIDRelation.parent_id == parent_pid_table.id
).filter(
parent_pid_table.pid_value == version_parent_pid_value,
RecordMetadata.id == child_pid_table.object_uuid,
).order_by(RecordMetadata.created).all()
for version_number, rec_pid_and_rec_meta in enumerate(pids_and_meta):
rec_pid, rec_meta = rec_pid_and_rec_meta
records.append({
'version': version_number + 1,
'id': str(rec_pid.pid_value),
'url': url_for(record_endpoint,
pid_value=str(rec_pid.pid_value),
_external=True),
'created': rec_meta.created,
'updated': rec_meta.updated,
})
return {'versions': records}
class RecordsAbuseResource(ContentNegotiatedMethodView):
view_name = '{0}_abuse'
def __init__(self, resolver=None, **kwargs):
"""Constructor.
:param resolver: Persistent identifier resolver instance.
"""
default_media_type = 'application/json'
super(RecordsAbuseResource, self).__init__(
serializers={
'application/json': lambda response: jsonify(response)
},
default_method_media_type={
'POST': default_media_type,
},
default_media_type=default_media_type,
**kwargs)
self.resolver = resolver
def post(self, **kwargs):
for v in ['abusecontent', 'message', 'email', 'copyright', 'zipcode',
'phone', 'illegalcontent', 'city', 'noresearch', 'name',
'affiliation', 'address', 'country']:
if v not in request.json:
response = jsonify({'Error': '{} is required'.format(v)})
response.status_code = 400
return response
reason_list = ['noresearch', 'abusecontent', 'copyright', 'illegalcontent']
count = 0
for ii in reason_list:
if request.json[ii]:
count += 1
if count != 1:
response = jsonify({
'Error': 'From \'noresearch\', \'abusecontent\', \'copyright\','
' \'illegalcontent\' (only) one should be True'
})
response.status_code = 400
return response
friendly = {'abusecontent': 'Abuse or Inappropriate content',
'copyright': 'Copyrighted material',
'noresearch': 'No research data',
'illegalcontent': 'Illegal content'}
reason = [friendly[ii] for ii in reason_list if request.json[ii]][0]
msg_content = """
We have received a new abuse report!
Link: """ + re.sub(r'/abuse\?$', '', request.full_path) + """
Subject: " Abuse Report for a Record "
Reason: """ + reason + """
Message: """ + str(request.json['message']) + """
Full Name: """ + str(request.json['name']) + """
Affiliation: """ + str(request.json['affiliation']) + """
Email: """ + str(request.json['email']) + """
Address: """ + str(request.json['address']) + """
City: """ + str(request.json['city']) + """
Country: """ + str(request.json['country']) + """
Postal Code: """ + str(request.json['zipcode']) + """
Phone: """ + str(request.json['phone']) + """
"""
support = str(current_app.config.get('SUPPORT_EMAIL'))
send_email(dict(
subject="Abuse Report for a Record",
sender=str(request.json['email']),
recipients=[support],
body=msg_content,
))
return self.make_response({
            'message': 'The record has been reported.'
})
class RequestAccessResource(ContentNegotiatedMethodView):
view_name = '{0}_accessrequests'
def __init__(self, resolver=None, **kwargs):
"""Constructor.
:param resolver: Persistent identifier resolver instance.
"""
default_media_type = 'application/json'
super(RequestAccessResource, self).__init__(
serializers={
'application/json': lambda response: jsonify(response)
},
default_method_media_type={
'POST': default_media_type,
},
default_media_type=default_media_type,
**kwargs)
self.resolver = resolver
@pass_record
def post(self, pid, record, **kwargs):
for v in ['message', 'email', 'zipcode', 'phone', 'city', 'name',
'affiliation', 'address', 'country']:
if v not in request.json:
response = jsonify({'Error': v + ' is required'})
response.status_code = 400
return response
msg_content = """
You have a request for your data!
Link: """ + re.sub(r'/abuserecords\?$', '', request.full_path) + """
Subject: " Request Access to Data Files "
Message: """ + str(request.json['message']) + """
Full Name: """ + str(request.json['name']) + """
Affiliation: """ + str(request.json['affiliation']) + """
Email: """ + str(request.json['email']) + """
Address: """ + str(request.json['address']) + """
City: """ + str(request.json['city']) + """
Country: """ + str(request.json['country']) + """
Postal Code: """ + str(request.json['zipcode']) + """
Phone: """ + str(request.json['phone']) + """
"""
if 'contact_email' in record:
recipients = [record['contact_email']]
else:
owners = User.query.filter(
User.id.in_(record['_deposit']['owners'])).all()
recipients = [owner.email for owner in owners]
send_email(dict(
subject="Request Access to Data Files",
sender=str(request.json['email']),
recipients=recipients,
body=msg_content,
))
return self.make_response({
'message': 'An email was sent to the record owner.'
})
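

# A hedged client-side sketch (base URL and field values are placeholders) of
# an access request; every field checked at the top of ``post`` above must be
# present in the JSON body, otherwise the view answers 400:
#
#     import requests
#     requests.post(
#         'https://b2share.example.org/api/records/<pid>/accessrequests',
#         json={'message': 'Please grant access to the data files.',
#               'name': 'Jane Doe', 'email': 'jane@example.org',
#               'affiliation': 'Example University', 'address': '1 Example St',
#               'city': 'Exampleville', 'country': 'Exampleland',
#               'zipcode': '12345', 'phone': '+1 555 0100'})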
avg_line_length: 41.455801 | max_line_length: 112 | alphanum_fraction: 0.639735

hexsha: 23357343c9c6501b46aa6e8b005c3e39c6eed43f | size: 15,796 | ext: py | lang: Python
max_stars_repo_path: python/plugins/processing/algs/qgis/ServiceAreaFromPoint.py
max_stars_repo_name: dyna-mis/Hilabeling
max_stars_repo_head_hexsha: cb7d5d4be29624a20c8a367162dbc6fd779b2b52
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python/plugins/processing/algs/qgis/ServiceAreaFromPoint.py
max_issues_repo_name: dyna-mis/Hilabeling
max_issues_repo_head_hexsha: cb7d5d4be29624a20c8a367162dbc6fd779b2b52
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/plugins/processing/algs/qgis/ServiceAreaFromPoint.py
max_forks_repo_name: dyna-mis/Hilabeling
max_forks_repo_head_hexsha: cb7d5d4be29624a20c8a367162dbc6fd779b2b52
max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-12-25T08:40:30.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-25T08:40:30.000Z

# -*- coding: utf-8 -*-
"""
***************************************************************************
ServiceAreaFromPoint.py
---------------------
Date : December 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
from collections import OrderedDict
from qgis.PyQt.QtCore import QVariant, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsWkbTypes,
QgsUnitTypes,
QgsFeature,
QgsFeatureSink,
QgsGeometry,
QgsGeometryUtils,
QgsFields,
QgsField,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterBoolean,
QgsProcessingParameterDistance,
QgsProcessingParameterEnum,
QgsProcessingParameterPoint,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterDefinition)
from qgis.analysis import (QgsVectorLayerDirector,
QgsNetworkDistanceStrategy,
QgsNetworkSpeedStrategy,
QgsGraphBuilder,
QgsGraphAnalyzer
)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ServiceAreaFromPoint(QgisAlgorithm):
INPUT = 'INPUT'
START_POINT = 'START_POINT'
STRATEGY = 'STRATEGY'
TRAVEL_COST = 'TRAVEL_COST'
DIRECTION_FIELD = 'DIRECTION_FIELD'
VALUE_FORWARD = 'VALUE_FORWARD'
VALUE_BACKWARD = 'VALUE_BACKWARD'
VALUE_BOTH = 'VALUE_BOTH'
DEFAULT_DIRECTION = 'DEFAULT_DIRECTION'
SPEED_FIELD = 'SPEED_FIELD'
DEFAULT_SPEED = 'DEFAULT_SPEED'
TOLERANCE = 'TOLERANCE'
INCLUDE_BOUNDS = 'INCLUDE_BOUNDS'
OUTPUT = 'OUTPUT'
OUTPUT_LINES = 'OUTPUT_LINES'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'networkanalysis.svg'))
def group(self):
return self.tr('Network analysis')
def groupId(self):
return 'networkanalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.DIRECTIONS = OrderedDict([
(self.tr('Forward direction'), QgsVectorLayerDirector.DirectionForward),
(self.tr('Backward direction'), QgsVectorLayerDirector.DirectionBackward),
(self.tr('Both directions'), QgsVectorLayerDirector.DirectionBoth)])
self.STRATEGIES = [self.tr('Shortest'),
self.tr('Fastest')
]
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Vector layer representing network'),
[QgsProcessing.TypeVectorLine]))
self.addParameter(QgsProcessingParameterPoint(self.START_POINT,
self.tr('Start point')))
self.addParameter(QgsProcessingParameterEnum(self.STRATEGY,
self.tr('Path type to calculate'),
self.STRATEGIES,
defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.TRAVEL_COST,
self.tr('Travel cost (distance for "Shortest", time for "Fastest")'),
QgsProcessingParameterNumber.Double,
0.0, False, 0))
params = []
params.append(QgsProcessingParameterField(self.DIRECTION_FIELD,
self.tr('Direction field'),
None,
self.INPUT,
optional=True))
params.append(QgsProcessingParameterString(self.VALUE_FORWARD,
self.tr('Value for forward direction'),
optional=True))
params.append(QgsProcessingParameterString(self.VALUE_BACKWARD,
self.tr('Value for backward direction'),
optional=True))
params.append(QgsProcessingParameterString(self.VALUE_BOTH,
self.tr('Value for both directions'),
optional=True))
params.append(QgsProcessingParameterEnum(self.DEFAULT_DIRECTION,
self.tr('Default direction'),
list(self.DIRECTIONS.keys()),
defaultValue=2))
params.append(QgsProcessingParameterField(self.SPEED_FIELD,
self.tr('Speed field'),
None,
self.INPUT,
optional=True))
params.append(QgsProcessingParameterNumber(self.DEFAULT_SPEED,
self.tr('Default speed (km/h)'),
QgsProcessingParameterNumber.Double,
5.0, False, 0))
params.append(QgsProcessingParameterDistance(self.TOLERANCE,
self.tr('Topology tolerance'),
0.0, self.INPUT, False, 0))
params.append(QgsProcessingParameterBoolean(self.INCLUDE_BOUNDS,
self.tr('Include upper/lower bound points'),
defaultValue=False))
for p in params:
p.setFlags(p.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(p)
lines_output = QgsProcessingParameterFeatureSink(self.OUTPUT_LINES,
self.tr('Service area (lines)'),
QgsProcessing.TypeVectorLine, optional=True)
lines_output.setCreateByDefault(True)
self.addParameter(lines_output)
nodes_output = QgsProcessingParameterFeatureSink(self.OUTPUT,
self.tr('Service area (boundary nodes)'),
QgsProcessing.TypeVectorPoint, optional=True)
nodes_output.setCreateByDefault(False)
self.addParameter(nodes_output)
def name(self):
return 'serviceareafrompoint'
def displayName(self):
return self.tr('Service area (from point)')
def processAlgorithm(self, parameters, context, feedback):
network = self.parameterAsSource(parameters, self.INPUT, context)
if network is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
startPoint = self.parameterAsPoint(parameters, self.START_POINT, context, network.sourceCrs())
strategy = self.parameterAsEnum(parameters, self.STRATEGY, context)
travelCost = self.parameterAsDouble(parameters, self.TRAVEL_COST, context)
directionFieldName = self.parameterAsString(parameters, self.DIRECTION_FIELD, context)
forwardValue = self.parameterAsString(parameters, self.VALUE_FORWARD, context)
backwardValue = self.parameterAsString(parameters, self.VALUE_BACKWARD, context)
bothValue = self.parameterAsString(parameters, self.VALUE_BOTH, context)
defaultDirection = self.parameterAsEnum(parameters, self.DEFAULT_DIRECTION, context)
speedFieldName = self.parameterAsString(parameters, self.SPEED_FIELD, context)
defaultSpeed = self.parameterAsDouble(parameters, self.DEFAULT_SPEED, context)
tolerance = self.parameterAsDouble(parameters, self.TOLERANCE, context)
include_bounds = True # default to true to maintain 3.0 API
if self.INCLUDE_BOUNDS in parameters:
include_bounds = self.parameterAsBoolean(parameters, self.INCLUDE_BOUNDS, context)
directionField = -1
if directionFieldName:
directionField = network.fields().lookupField(directionFieldName)
speedField = -1
if speedFieldName:
speedField = network.fields().lookupField(speedFieldName)
director = QgsVectorLayerDirector(network,
directionField,
forwardValue,
backwardValue,
bothValue,
defaultDirection)
distUnit = context.project().crs().mapUnits()
multiplier = QgsUnitTypes.fromUnitToUnitFactor(distUnit, QgsUnitTypes.DistanceMeters)
if strategy == 0:
strategy = QgsNetworkDistanceStrategy()
else:
strategy = QgsNetworkSpeedStrategy(speedField,
defaultSpeed,
multiplier * 1000.0 / 3600.0)
director.addStrategy(strategy)
builder = QgsGraphBuilder(network.sourceCrs(),
True,
tolerance)
feedback.pushInfo(QCoreApplication.translate('ServiceAreaFromPoint', 'Building graph…'))
snappedPoints = director.makeGraph(builder, [startPoint], feedback)
feedback.pushInfo(QCoreApplication.translate('ServiceAreaFromPoint', 'Calculating service area…'))
graph = builder.graph()
idxStart = graph.findVertex(snappedPoints[0])
tree, cost = QgsGraphAnalyzer.dijkstra(graph, idxStart, 0)
vertices = set()
points = []
lines = []
for vertex, start_vertex_cost in enumerate(cost):
inbound_edge_index = tree[vertex]
if inbound_edge_index == -1 and vertex != idxStart:
# unreachable vertex
continue
if start_vertex_cost > travelCost:
# vertex is too expensive, discard
continue
vertices.add(vertex)
start_point = graph.vertex(vertex).point()
# find all edges coming from this vertex
for edge_id in graph.vertex(vertex).outgoingEdges():
edge = graph.edge(edge_id)
end_vertex_cost = start_vertex_cost + edge.cost(0)
end_point = graph.vertex(edge.toVertex()).point()
if end_vertex_cost <= travelCost:
# end vertex is cheap enough to include
vertices.add(edge.toVertex())
lines.append([start_point, end_point])
else:
# travelCost sits somewhere on this edge, interpolate position
interpolated_end_point = QgsGeometryUtils.interpolatePointOnLineByValue(start_point.x(), start_point.y(), start_vertex_cost,
end_point.x(), end_point.y(), end_vertex_cost, travelCost)
points.append(interpolated_end_point)
lines.append([start_point, interpolated_end_point])
for i in vertices:
points.append(graph.vertex(i).point())
feedback.pushInfo(QCoreApplication.translate('ServiceAreaFromPoint', 'Writing results…'))
fields = QgsFields()
fields.append(QgsField('type', QVariant.String, '', 254, 0))
fields.append(QgsField('start', QVariant.String, '', 254, 0))
feat = QgsFeature()
feat.setFields(fields)
(point_sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.MultiPoint, network.sourceCrs())
results = {}
if point_sink is not None:
results[self.OUTPUT] = dest_id
geomPoints = QgsGeometry.fromMultiPointXY(points)
feat.setGeometry(geomPoints)
feat['type'] = 'within'
feat['start'] = startPoint.toString()
point_sink.addFeature(feat, QgsFeatureSink.FastInsert)
if include_bounds:
upperBoundary = []
lowerBoundary = []
vertices = []
for i, v in enumerate(cost):
if v > travelCost and tree[i] != -1:
vertexId = graph.edge(tree[i]).fromVertex()
if cost[vertexId] <= travelCost:
vertices.append(i)
for i in vertices:
upperBoundary.append(graph.vertex(graph.edge(tree[i]).toVertex()).point())
lowerBoundary.append(graph.vertex(graph.edge(tree[i]).fromVertex()).point())
geomUpper = QgsGeometry.fromMultiPointXY(upperBoundary)
geomLower = QgsGeometry.fromMultiPointXY(lowerBoundary)
feat.setGeometry(geomUpper)
feat['type'] = 'upper'
feat['start'] = startPoint.toString()
point_sink.addFeature(feat, QgsFeatureSink.FastInsert)
feat.setGeometry(geomLower)
feat['type'] = 'lower'
feat['start'] = startPoint.toString()
point_sink.addFeature(feat, QgsFeatureSink.FastInsert)
(line_sink, line_dest_id) = self.parameterAsSink(parameters, self.OUTPUT_LINES, context,
fields, QgsWkbTypes.MultiLineString, network.sourceCrs())
if line_sink is not None:
results[self.OUTPUT_LINES] = line_dest_id
geom_lines = QgsGeometry.fromMultiPolylineXY(lines)
feat.setGeometry(geom_lines)
feat['type'] = 'lines'
feat['start'] = startPoint.toString()
line_sink.addFeature(feat, QgsFeatureSink.FastInsert)
return results
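

# A hedged usage sketch (layer path, start point and travel cost are
# placeholders): the algorithm id combines the ``qgis`` provider prefix with
# the value returned by ``name()`` above, so from the QGIS Python console it
# can be run roughly as follows:
#
#     import processing
#     result = processing.run('qgis:serviceareafrompoint', {
#         'INPUT': '/path/to/roads.shp',
#         'START_POINT': '1958560,5228790 [EPSG:2193]',
#         'STRATEGY': 0,           # 0 = Shortest, 1 = Fastest
#         'TRAVEL_COST': 500.0,    # distance for Shortest, time for Fastest
#         'OUTPUT_LINES': 'memory:service_area_lines',
#     })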
avg_line_length: 47.722054 | max_line_length: 150 | alphanum_fraction: 0.528488

hexsha: af3f75b6a610a014a8b5a89ddce61a0c346c78c6 | size: 5,331 | ext: py | lang: Python
max_stars_repo_path: research/delf/delf/python/feature_extractor_test.py
max_stars_repo_name: Dzinushi/models_1_4
max_stars_repo_head_hexsha: d7e72793a68c1667d403b1542c205d1cd9b1d17c
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: research/delf/delf/python/feature_extractor_test.py
max_issues_repo_name: Dzinushi/models_1_4
max_issues_repo_head_hexsha: d7e72793a68c1667d403b1542c205d1cd9b1d17c
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: research/delf/delf/python/feature_extractor_test.py
max_forks_repo_name: Dzinushi/models_1_4
max_forks_repo_head_hexsha: d7e72793a68c1667d403b1542c205d1cd9b1d17c
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from delf import feature_extractor
import numpy as np
import tensorflow as tf
class FeatureExtractorTest(tf.test.TestCase):
def testNormalizePixelValues(self):
image = tf.constant(
[[[3, 255, 0], [34, 12, 5]], [[45, 5, 65], [56, 77, 89]]],
dtype=tf.uint8)
normalized_image = feature_extractor.NormalizePixelValues(
image, pixel_value_offset=5.0, pixel_value_scale=2.0)
exp_normalized_image = [[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]],
[[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]]
with self.test_session() as sess:
normalized_image_out = sess.run(normalized_image)
self.assertAllEqual(normalized_image_out, exp_normalized_image)
def testCalculateReceptiveBoxes(self):
boxes = feature_extractor.CalculateReceptiveBoxes(
height=1, width=2, rf=291, stride=32, padding=145)
exp_boxes = [[-145., -145., 145., 145.], [-145., -113., 145., 177.]]
with self.test_session() as sess:
boxes_out = sess.run(boxes)
self.assertAllEqual(exp_boxes, boxes_out)
def testCalculateKeypointCenters(self):
boxes = [[-10.0, 0.0, 11.0, 21.0], [-2.5, 5.0, 18.5, 26.0],
[45.0, -2.5, 66.0, 18.5]]
centers = feature_extractor.CalculateKeypointCenters(boxes)
with self.test_session() as sess:
centers_out = sess.run(centers)
exp_centers = [[0.5, 10.5], [8.0, 15.5], [55.5, 8.0]]
self.assertAllEqual(exp_centers, centers_out)
def testExtractKeypointDescriptor(self):
image = tf.constant(
[[[0, 255, 255], [128, 64, 196]], [[0, 0, 32], [32, 128, 16]]],
dtype=tf.uint8)
# Arbitrary model function used to test ExtractKeypointDescriptor. The
# generated feature_map is a replicated version of the image, concatenated
# with zeros to achieve the required dimensionality. The attention is simply
# the norm of the input image pixels.
def _test_model_fn(image, normalized_image, reuse):
del normalized_image, reuse # Unused variables in the test.
image_shape = tf.shape(image)
attention = tf.squeeze(tf.norm(image, axis=3))
feature_map = tf.concat(
[
tf.tile(image, [1, 1, 1, 341]),
tf.zeros([1, image_shape[1], image_shape[2], 1])
],
axis=3)
return attention, feature_map
boxes, feature_scales, features, scores = feature_extractor.ExtractKeypointDescriptor(
image,
layer_name='resnet_v1_50/block3',
image_scales=tf.constant([1.0]),
iou=1.0,
max_feature_num=10,
abs_thres=1.5,
model_fn=_test_model_fn)
exp_boxes = [[-145.0, -145.0, 145.0, 145.0], [-113.0, -145.0, 177.0, 145.0]]
exp_feature_scales = [1.0, 1.0]
exp_features = np.array(
np.concatenate(
(np.tile([[-1.0, 127.0 / 128.0, 127.0 / 128.0], [-1.0, -1.0, -0.75]
], [1, 341]), np.zeros([2, 1])),
axis=1))
exp_scores = [[1.723042], [1.600781]]
with self.test_session() as sess:
boxes_out, feature_scales_out, features_out, scores_out = sess.run(
[boxes, feature_scales, features, scores])
self.assertAllEqual(exp_boxes, boxes_out)
self.assertAllEqual(exp_feature_scales, feature_scales_out)
self.assertAllClose(exp_features, features_out)
self.assertAllClose(exp_scores, scores_out)
def testPcaWhitening(self):
data = tf.constant([[1.0, 2.0, -2.0], [-5.0, 0.0, 3.0], [-1.0, 2.0, 0.0],
[0.0, 4.0, -1.0]])
pca_matrix = tf.constant([[2.0, 0.0, -1.0], [0.0, 1.0, 1.0],
[-1.0, 1.0, 3.0]])
pca_mean = tf.constant([1.0, 2.0, 3.0])
output_dim = 2
use_whitening = True
pca_variances = tf.constant([4.0, 1.0])
output = feature_extractor.ApplyPcaAndWhitening(
data, pca_matrix, pca_mean, output_dim, use_whitening, pca_variances)
exp_output = [[2.5, -5.0], [-6.0, -2.0], [-0.5, -3.0], [1.0, -2.0]]
with self.test_session() as sess:
output_out = sess.run(output)
self.assertAllEqual(exp_output, output_out)
if __name__ == '__main__':
tf.test.main()
avg_line_length: 40.082707 | max_line_length: 94 | alphanum_fraction: 0.587882

hexsha: 09d359d0bf8170d1130254afed75670ea07789a7 | size: 2,646 | ext: py | lang: Python
max_stars_repo_path: utils/makebad.py
max_stars_repo_name: emit-sds/emit-sds-l1b
max_stars_repo_head_hexsha: be5307fe6821a043971becdd33609b4cf89b1974
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: utils/makebad.py
max_issues_repo_name: emit-sds/emit-sds-l1b
max_issues_repo_head_hexsha: be5307fe6821a043971becdd33609b4cf89b1974
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: utils/makebad.py
max_forks_repo_name: emit-sds/emit-sds-l1b
max_forks_repo_head_hexsha: be5307fe6821a043971becdd33609b4cf89b1974
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

# David R Thompson
import argparse, sys, os
import numpy as np
import pylab as plt
from glob import glob
from spectral.io import envi
from scipy.stats import norm
from scipy.linalg import solve, inv
from astropy import modeling
from scipy.interpolate import interp1d
from sklearn.linear_model import RANSACRegressor
from sklearn.decomposition import PCA
from numpy import nanmedian
import json
from fpa import FPA
from lowess import lowess
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.split('.')[:-1])+'.hdr'):
return '.'.join(infile.split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def main():
description = "Calculate Bad Pixels from average frame"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('input')
parser.add_argument('--threshold',type=float,default=40)
parser.add_argument('--config',type=str,default=None)
parser.add_argument('output')
args = parser.parse_args()
fpa = FPA(args.config)
use = np.arange(fpa.first_illuminated_column,fpa.last_illuminated_column+1)
infile = envi.open(find_header(args.input))
frame = infile.load()
std = np.squeeze(frame[:,:,1])
frame = np.squeeze(frame[:,:,0])
rows, columns = frame.shape
mask = np.zeros((rows,columns))
frame = (frame.T - frame[:,use].mean(axis=1)).T
for col in use:
spectrum = frame[:,col]
spectrum[175:184] = interp1d([175,183],[spectrum[175],spectrum[183]])(np.arange(175,184))
chans = np.arange(rows)
sm = lowess(spectrum, chans, frac=0.2, return_sorted=False)
spectrum = spectrum - sm
#plt.plot(chans,spectrum)
bad = abs(spectrum)>args.threshold
#plt.plot(chans[bad],spectrum[bad],'ko')
#plt.show()
mask[bad,col] = 1
print(sum(bad),' bad pixels in column ',col)
bads = 0
bad_map = mask.copy()
bad_map = np.array(bad_map,dtype=np.int16)
for column in range(bad_map.shape[1]):
state_machine = 0
for row in range(bad_map.shape[0]):
if mask[row,column]:
state_machine = state_machine + 1
bad_map[row,column] = -state_machine
print(row,column,state_machine)
bads = bads + 1
else:
state_machine = 0
print('total bads:',bads)
bad_map = bad_map.reshape((rows,columns,1))
envi.save_image(args.output+'.hdr',
bad_map, interleave='bsq', ext='', force=True)
if __name__ == '__main__':
main()
avg_line_length: 30.068182 | max_line_length: 97 | alphanum_fraction: 0.642857