hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace4d03e48b885afbfe794e69e21982e942fcf47 | 25,988 | py | Python | qa/rpc-tests/test_framework/script.py | kazucoin/kazugold | 7b0e034b08eceacf907b18913be7043c9cdcf5f4 | [
"MIT"
] | 1 | 2019-06-02T17:21:29.000Z | 2019-06-02T17:21:29.000Z | qa/rpc-tests/test_framework/script.py | kazucoin/kazugold | 7b0e034b08eceacf907b18913be7043c9cdcf5f4 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/script.py | kazucoin/kazugold | 7b0e034b08eceacf907b18913be7043c9cdcf5f4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# script.py
#
# This file is modified from python-kazugoldlib.
#
"""Scripts
Functionality to build scripts, as well as SignatureHash().
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
# Python 2/3 byte-handling shims: on Python 3 a single byte value must be
# wrapped via bytes([..]) and indexing a bytes object already yields an int.
bchr = chr
bord = ord
# NOTE(review): lexicographic comparison of sys.version is fragile for future
# major versions; it is adequate for the 2-vs-3 split this file targets.
if sys.version > '3':
    long = int
    bchr = lambda x: bytes([x])
    bord = lambda x: x
import struct
from .bignum import bn2vch
# Script-level size limits used by the tests below.
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
# Opcode value -> human-readable name; populated after the opcode constants.
OPCODE_NAMES = {}
def hash160(s):
    """Return RIPEMD-160 of the SHA-256 of *s* (Bitcoin's HASH160)."""
    sha = sha256(s)
    ripe = hashlib.new('ripemd160', sha)
    return ripe.digest()
# Cache of CScriptOp singletons indexed by opcode value; filled via
# CScriptOp.__new__ by the population loop below the class.
_opcode_instances = []

class CScriptOp(int):
    """A single script opcode"""
    __slots__ = []

    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op, returning bytes"""
        if len(d) < 0x4c:
            return b'' + bchr(len(d)) + d # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")

    @staticmethod
    def encode_op_n(n):
        """Encode a small integer op, returning an opcode"""
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n-1)

    def decode_op_n(self):
        """Decode a small integer opcode, returning an integer"""
        if self == OP_0:
            return 0
        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)
        return int(self - OP_1+1)

    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        # 0x51..0x60 are OP_1..OP_16; 0 is OP_0.
        if 0x51 <= self <= 0x60 or self == 0:
            return True
        else:
            return False

    def __str__(self):
        return repr(self)

    def __repr__(self):
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self

    def __new__(cls, n):
        # Return the cached singleton for n; the assert guarantees the cache
        # is only ever extended in ascending opcode order.
        try:
            return _opcode_instances[n]
        except IndexError:
            assert len(_opcode_instances) == n
            _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
            return _opcode_instances[n]

# Populate opcode instance table
for n in range(0xff+1):
    CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY,
'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
    """Base class for CScript exceptions (raised for malformed scripts)."""
class CScriptTruncatedPushDataError(CScriptInvalidError):
    """Invalid pushdata due to truncation.

    The partially-read payload is kept on ``self.data`` so callers can show
    how much of the push was actually present.
    """

    def __init__(self, msg, data):
        super(CScriptTruncatedPushDataError, self).__init__(msg)
        self.data = data
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum(object):
    """Minimally-encoded script integer (mirrors CScriptNum in the C++ code)."""

    def __init__(self, d=0):
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize obj.value as a length-prefixed little-endian
        sign-and-magnitude byte string; zero encodes as the empty string."""
        if obj.value == 0:
            return b''
        negative = obj.value < 0
        magnitude = abs(obj.value)
        payload = bytearray()
        while magnitude:
            payload.append(magnitude & 0xff)
            magnitude >>= 8
        if payload[-1] & 0x80:
            # Top bit already used by the magnitude: add an explicit sign byte.
            payload.append(0x80 if negative else 0)
        elif negative:
            # Top bit free: fold the sign into it.
            payload[-1] |= 0x80
        return bytes(bchr(len(payload)) + payload)
class CScript(bytes):
    """Serialized script
    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.
    iter(script) however does iterate by opcode.
    """

    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes
        if isinstance(other, CScriptOp):
            # An opcode serializes as its single byte value.
            other = bchr(other)
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                # Zero is encoded as the OP_0 opcode, not as a data push.
                other = bchr(CScriptOp(OP_0))
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # Small ints map onto the OP_0 / OP_1..OP_16 opcodes.
                other = bytes(bchr(CScriptOp.encode_op_n(other)))
            elif other == -1:
                other = bytes(bchr(OP_1NEGATE))
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            # Raw bytes become a minimal PUSHDATA of that payload.
            other = CScriptOp.encode_op_pushdata(other)
        return other

    def __add__(self, other):
        # Do the coercion outside of the try block so that errors in it are
        # noticed.
        other = self.__coerce_instance(other)
        try:
            # bytes.__add__ always returns bytes instances unfortunately
            return CScript(super(CScript, self).__add__(other))
        except TypeError:
            raise TypeError('Can not add a %r instance to a CScript' % other.__class__)

    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError

    def __new__(cls, value=b''):
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super(CScript, cls).__new__(cls, value)
        else:
            # Any other iterable: coerce each element to bytes and concatenate.
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))

    def raw_iter(self):
        """Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = bord(self[i])
            i += 1
            if opcode > OP_PUSHDATA4:
                # Plain opcode: no inline data payload.
                yield (opcode, None, sop_idx)
            else:
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Opcodes 0x01..0x4b push that many bytes directly.
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = bord(self[i])
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    # Little-endian 16-bit length.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    # Little-endian 32-bit length.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
                    i += 4
                else:
                    assert False # shouldn't happen
                data = bytes(self[i:i+datasize])
                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize
                yield (opcode, data, sop_idx)

    def __iter__(self):
        """'Cooked' iteration
        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.
        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)
                if opcode.is_small_int():
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)

    def __repr__(self):
        # For Python3 compatibility add b before strings so testcases don't
        # need to change
        def _repr(o):
            if isinstance(o, bytes):
                # NOTE(review): this returns a *bytes* object, which the
                # str-based ', '.join below cannot mix with on Python 3 --
                # confirm whether scripts containing pushdata are ever repr'd.
                return b"x('%s')" % hexlify(o).decode('ascii')
            else:
                return repr(o)
        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                # Runs even on the 'break' paths above, so the error
                # placeholder strings are appended as well.
                if op is not None:
                    ops.append(op)
        return "CScript([%s])" % ', '.join(ops)

    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.
        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    # NOTE(review): raw_iter yields plain ints, and the C++
                    # code decodes lastOpcode here, not opcode -- confirm this
                    # branch is exercised / behaves as intended.
                    n += opcode.decode_op_n()
                else:
                    n += 20
            lastOpcode = opcode
        return n
# Signature hash type flags; ANYONECANPAY is a modifier bit OR-ed with one of
# the three base types.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase

    Returns *script* with occurrences of the serialized bytes *sig* that
    start on an opcode boundary removed (sig is expected to be a single
    serialized push, as in the Satoshi implementation).
    """
    r = b''
    last_sop_idx = sop_idx = 0
    # 'skip' is decided one opcode late: each iteration emits the *previous*
    # opcode's bytes unless they matched sig.  Starting with skip=True means
    # nothing is emitted before the first opcode has been examined.
    skip = True
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    # Flush the tail (the last opcode through end-of-script) if it survived.
    if not skip:
        r += script[last_sop_idx:]
    return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
    """Consensus-correct SignatureHash
    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
    """
    # uint256 "1": the value legacy signing yields for out-of-range indexes.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    # Work on a copy so the caller's transaction is left untouched.
    txtmp = CTransaction(txTo)
    # Legacy sighash blanks every input script, then substitutes the signed
    # input's scriptCode (with OP_CODESEPARATORs removed).
    for txin in txtmp.vin:
        txin.scriptSig = b''
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # NONE: commit to no outputs; other inputs' sequences are zeroed.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            # The infamous SIGHASH_SINGLE bug: signing succeeds with hash "1"
            # instead of failing -- must be reproduced exactly.
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        # SINGLE: keep only the matching output, padding the earlier slots
        # with empty outputs.
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        for i in range(outIdx):
            txtmp.vout.append(CTxOut())
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # ANYONECANPAY: commit only to the input being signed.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    s = txtmp.serialize()
    s += struct.pack(b"<I", hashtype)
    hash = hash256(s)
    return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
    """BIP143 signature hash for version-0 witness programs.

    script   -- scriptCode committed to by the signature.
    txTo     -- transaction being signed.
    inIdx    -- index of the input being signed (assumed valid).
    hashtype -- SIGHASH_* flags.
    amount   -- value of the output being spent (committed to, unlike legacy).
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    if not (hashtype & SIGHASH_ANYONECANPAY):
        # Commit to all prevouts unless ANYONECANPAY restricts to one input.
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        # SINGLE commits only to the matching output; an out-of-range index
        # simply leaves hashOutputs at zero (no legacy "hash 1" bug here).
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    # BIP143 commits to the spent amount, unlike the legacy sighash.
    ss += struct.pack("<q", amount)
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
| 27.413502 | 146 | 0.61209 |
ace4d13ddaa759cfde3f5d18e0d2f93f74de0371 | 1,713 | py | Python | musikui.py | stakiran/musikui | ff5ea1d9b1a864cc82afe83e7e28f868af75e4d5 | [
"MIT"
] | null | null | null | musikui.py | stakiran/musikui | ff5ea1d9b1a864cc82afe83e7e28f868af75e4d5 | [
"MIT"
] | null | null | null | musikui.py | stakiran/musikui | ff5ea1d9b1a864cc82afe83e7e28f868af75e4d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import sys
def abort(msg):
    """Print an error banner for *msg* and terminate with exit code 1."""
    banner = 'Error!: {0}'.format(msg)
    print(banner)
    exit(1)
def file2list(filepath):
    """Read *filepath* (UTF-8) and return its lines without trailing '\\n'."""
    with open(filepath, encoding='utf8', mode='r') as f:
        return [line.rstrip('\n') for line in f.readlines()]
def list2file(filepath, ls):
    """Write every element of *ls* to *filepath* (UTF-8), one per line."""
    with open(filepath, encoding='utf8', mode='w') as f:
        for line in ls:
            f.write('{:}\n'.format(line))
def file2str(filepath):
    """Return the entire contents of *filepath* decoded as UTF-8."""
    with open(filepath, encoding='utf8', mode='r') as f:
        return f.read()
def str2file(filepath, s):
    """Write the string *s* to *filepath* encoded as UTF-8."""
    with open(filepath, encoding='utf8', mode='w') as f:
        f.write(s)
def musikui_string(s):
    """Replace alternating character pairs with '@@', musikui-puzzle style.

    Each match is two consecutive (allowed-char, any-char) pairs; the first
    pair is replaced by '@@' and the second pair is kept.  Characters in the
    exclusion class (whitespace and Markdown punctuation) may not start a
    pair, so markup survives untouched.
    """
    # Fix: the redundant function-local `import re` shadowed the module-level
    # import at the top of the file and has been removed.
    pattern = r'([^\s#\-\[\]\(\)!`\+\|\.\,\:].){1}([^\s#\-\[\]\(\)!`\+\|\.\,\:].){1}'
    after = r'@@\2'
    new = re.sub(pattern, after, s)
    return new
def test():
    # Placeholder for unit tests (invoked via the --test flag); currently a no-op.
    pass
def parse_arguments(argv=None):
    """Parse command-line options for the musikui converter.

    Args:
        argv: Optional list of argument strings.  Defaults to None, in which
            case argparse reads sys.argv[1:] (the previous behavior), so
            existing callers are unaffected.

    Returns:
        argparse.Namespace with ``input``, ``output`` and ``test`` attributes.
    """
    import argparse

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument('-i', '--input', default=None,
        help='An input filename.')
    parser.add_argument('-o', '--output', default=None,
        help='An output filename.')

    parser.add_argument('--test', default=False, action='store_true',
        help='[DEBUG] Do unittest.')

    args = parser.parse_args(argv)
    return args
def ____main____():
    # Dead code: never called anywhere; the real entry point is the
    # `if __name__ == "__main__"` guard below.
    pass
if __name__ == "__main__":
    args = parse_arguments()

    # --test short-circuits normal processing and exits after the unit tests.
    if args.test:
        test()
        exit(0)

    infilepath = args.input
    outfilepath = args.output

    # Read the whole input file, convert it, and write the result.
    contents = file2str(infilepath)
    new_contents = musikui_string(contents)
    str2file(outfilepath, new_contents)
| 21.683544 | 86 | 0.591944 |
ace4d1420f9a94d838852f994add13e35c4a78ad | 393 | py | Python | Exeplore/Exeplore/wsgi.py | Pierre-siddall/exeplore | 2a27f2ec6bf763efbb9748b1bc9b3bbe23030eec | [
"MIT"
] | null | null | null | Exeplore/Exeplore/wsgi.py | Pierre-siddall/exeplore | 2a27f2ec6bf763efbb9748b1bc9b3bbe23030eec | [
"MIT"
] | 3 | 2022-03-17T13:05:58.000Z | 2022-03-19T21:55:21.000Z | Exeplore/Exeplore/wsgi.py | Pierre-siddall/exeplore | 2a27f2ec6bf763efbb9748b1bc9b3bbe23030eec | [
"MIT"
] | null | null | null | """
WSGI config for Exeplore project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Exeplore.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
ace4d19c5251483772cba906460cc59f97ad4700 | 3,575 | py | Python | parlai/agents/tfidf_retriever/tfidf_doc_ranker.py | convei-lab/ParlAI | 8d5fc018fec8db0a9b5e5ae055f98059db8eac3f | [
"MIT"
] | 1 | 2021-07-09T06:14:06.000Z | 2021-07-09T06:14:06.000Z | parlai/agents/tfidf_retriever/tfidf_doc_ranker.py | convei-lab/ParlAI | 8d5fc018fec8db0a9b5e5ae055f98059db8eac3f | [
"MIT"
] | 4 | 2021-07-22T04:20:26.000Z | 2021-07-29T13:37:12.000Z | parlai/agents/tfidf_retriever/tfidf_doc_ranker.py | convei-lab/ParlAI | 8d5fc018fec8db0a9b5e5ae055f98059db8eac3f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Rank documents with TF-IDF scores.
Adapted from Adam Fisch's work at github.com/facebookresearch/DrQA/
"""
import numpy as np
import scipy.sparse as sp
from . import utils
from . import tokenizers
from parlai.utils.logging import logger
class TfidfDocRanker(object):
    """
    Loads a pre-weighted inverted index of token/document terms.
    Scores new queries by taking sparse dot products.
    """

    def __init__(self, tfidf_path=None, strict=True):
        """
        Args:
            tfidf_path: path to saved model file
            strict: fail on empty queries or continue (and return empty result)
        """
        # Load from disk
        logger.info('Loading %s' % tfidf_path)
        matrix, metadata = utils.load_sparse_csr(tfidf_path)
        self.doc_mat = matrix
        # Model hyper-parameters stored alongside the matrix at build time.
        self.ngrams = metadata['ngram']
        self.hash_size = metadata['hash_size']
        self.tokenizer = tokenizers.get_class(metadata['tokenizer'])()
        self.doc_freqs = metadata['doc_freqs'].squeeze()
        self.doc_dict = metadata.get('doc_dict', None)
        # NOTE(review): assumes the matrix carries one extra column beyond
        # the real documents -- confirm against the index builder.
        self.num_docs = self.doc_mat.shape[1] - 1
        self.strict = strict

    def get_doc_id(self, doc_index):
        """
        Convert doc_index --> doc_id.
        """
        # Falls back to the raw column index when no doc_dict was saved.
        return self.doc_dict[1][doc_index] if self.doc_dict else doc_index

    def closest_docs(self, query, k=1, matrix=None):
        """
        Closest docs by dot product between query and documents in tfidf weighted word
        vector space.
        matrix arg can be provided to be used instead of internal doc matrix.
        """
        spvec = self.text2spvec(query)
        res = spvec * matrix if matrix is not None else spvec * self.doc_mat
        if len(res.data) <= k:
            # Fewer nonzero hits than requested: just sort them all.
            o_sort = np.argsort(-res.data)
        else:
            # argpartition selects the top-k in O(n); only those are sorted.
            o = np.argpartition(-res.data, k)[0:k]
            o_sort = o[np.argsort(-res.data[o])]
        doc_scores = res.data[o_sort]
        doc_ids = res.indices[o_sort]
        return doc_ids, doc_scores

    def parse(self, query):
        """
        Parse the query into tokens (either ngrams or tokens).
        """
        tokens = self.tokenizer.tokenize(query)
        return tokens.ngrams(n=self.ngrams, uncased=True, filter_fn=utils.filter_ngram)

    def text2spvec(self, query):
        """
        Create a sparse tfidf-weighted word vector from query.
        tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
        """
        # Get hashed ngrams
        words = self.parse(utils.normalize(query))
        wids = [utils.hash(w, self.hash_size) for w in words]
        if len(wids) == 0:
            if self.strict:
                raise RuntimeError('No valid word in: %s' % query)
            else:
                # logger.warning('No valid word in: %s' % query)
                return sp.csr_matrix((1, self.hash_size))
        # Count TF
        wids_unique, wids_counts = np.unique(wids, return_counts=True)
        tfs = np.log1p(wids_counts)
        # Count IDF
        Ns = self.doc_freqs[wids_unique]
        idfs = np.log((self.num_docs - Ns + 0.5) / (Ns + 0.5))
        # Clamp negative IDF (very common terms) to zero.
        idfs[idfs < 0] = 0
        # TF-IDF
        data = np.multiply(tfs, idfs)
        # One row, sparse csr matrix
        indptr = np.array([0, len(wids_unique)])
        spvec = sp.csr_matrix((data, wids_unique, indptr), shape=(1, self.hash_size))
        return spvec
| 31.919643 | 87 | 0.604755 |
ace4d1dce7077bed5d6f9c16a44b6cc03281de78 | 1,897 | py | Python | tensorflow_addons/optimizers/__init__.py | vrince/tensorflow_addons | 12c3de4d78fb799ff229fe413356d06b8dfc9beb | [
"Apache-2.0"
] | 2 | 2020-03-27T16:10:19.000Z | 2021-04-30T07:39:49.000Z | tensorflow_addons/optimizers/__init__.py | vrince/tensorflow_addons | 12c3de4d78fb799ff229fe413356d06b8dfc9beb | [
"Apache-2.0"
] | null | null | null | tensorflow_addons/optimizers/__init__.py | vrince/tensorflow_addons | 12c3de4d78fb799ff229fe413356d06b8dfc9beb | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Additional optimizers that conform to Keras API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_addons.optimizers.conditional_gradient import ConditionalGradient
from tensorflow_addons.optimizers.cyclical_learning_rate import (
CyclicalLearningRate)
from tensorflow_addons.optimizers.cyclical_learning_rate import (
TriangularCyclicalLearningRate)
from tensorflow_addons.optimizers.cyclical_learning_rate import (
Triangular2CyclicalLearningRate)
from tensorflow_addons.optimizers.cyclical_learning_rate import (
ExponentialCyclicalLearningRate)
from tensorflow_addons.optimizers.lamb import LAMB
from tensorflow_addons.optimizers.lazy_adam import LazyAdam
from tensorflow_addons.optimizers.lookahead import Lookahead
from tensorflow_addons.optimizers.moving_average import MovingAverage
from tensorflow_addons.optimizers.rectified_adam import RectifiedAdam
from tensorflow_addons.optimizers.weight_decay_optimizers import AdamW
from tensorflow_addons.optimizers.weight_decay_optimizers import SGDW
from tensorflow_addons.optimizers.weight_decay_optimizers import (
extend_with_decoupled_weight_decay)
| 48.641026 | 81 | 0.808118 |
ace4d26203aae77064bcaf42cad45477acb7b17b | 1,255 | py | Python | data/defence_levels.py | RishavMz/strategic_siege | 70eb1e004e673ad407072bf6c0e51856a691ce0d | [
"MIT"
] | null | null | null | data/defence_levels.py | RishavMz/strategic_siege | 70eb1e004e673ad407072bf6c0e51856a691ce0d | [
"MIT"
] | null | null | null | data/defence_levels.py | RishavMz/strategic_siege | 70eb1e004e673ad407072bf6c0e51856a691ce0d | [
"MIT"
] | null | null | null | class Level:
def __init__(self,cards,defence):
self.cards = [[50,1],[150,2],[250,3],[350,4]]
self.defence = defence
for i in range(len(cards)):
self.cards[i].append(cards[i])
def showcards(self):
return self.cards
def showdefenses(self):
return self.defence
# Per-level card counts: one count for each of the four base card slots
# declared in Level (cheapest to most expensive).
cards0=[10,10,10,0]
# Defence placements for each level. Each entry is a 3-item list —
# presumably [defence_type, x, y] screen coordinates; TODO(review):
# confirm against the code that renders/uses these defences.
defence0 = [[1,150,200],[2,300,400],[1,250,210],[2,210,491],[1,350,260],[2,190,460]]
level0 = Level(cards0,defence0)
cards1 = [5,5,2,0]
defence1 = [[1,350,150],[2,220,300],[3,220,450],[4,350,450]]
level1 = Level(cards1,defence1)
cards2 = [5,5,2,0]
defence2 = [[1,350,150],[2,220,300],[3,220,450],[4,350,450],[4,300,150],[3,170,300],[2,170,450],[1,300,450]]
level2 = Level(cards2,defence2)
cards3 = [8,8,2,0]
defence3 = [[4,400,200],[4,400,400],[3,100,220],[3,100,420],[3,100,180],[3,100,180],[2,150,220],[2,150,420],[2,150,180],[2,150,180]]
level3 = Level(cards3,defence3)
cards4 = [6,6,2,0]
defence4 = [[4,400,200],[4,400,400],[3,100,220],[3,100,420],[3,100,180],[3,100,180],[2,150,220],[2,150,420],[2,150,180],[2,150,180],[1,180,220],[1,180,420],[1,180,180],[1,180,180]]
level4 = Level(cards4,defence4)
# Ordered list of all playable levels, indexed by difficulty.
levels = [level0,level1,level2,level3,level4]
ace4d28bcdf6d5791ad44c24c3a1f65066f54446 | 1,155 | py | Python | py/py_0086_cuboid_route.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0086_cuboid_route.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0086_cuboid_route.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 86: Cuboid route
# https://projecteuler.net/problem=86
#
# A spider, S, sits in one corner of a cuboid room, measuring 6 by 5 by 3, and
# a fly, F, sits in the opposite corner. By travelling on the surfaces of the
# room the shortest "straight line" distance from S to F is 10 and the path is
# shown on the diagram. However, there are up to three "shortest" path
# candidates for any given cuboid and the shortest route doesn't always have
# integer length. It can be shown that there are exactly 2060 distinct
# cuboids, ignoring rotations, with integer dimensions, up to a maximum size
# of M by M by M, for which the shortest route has integer length when M =
# 100. This is the least value of M for which the number of solutions first
# exceeds two thousand; the number of solutions when M = 99 is 1975. Find the
# least value of M such that the number of solutions first exceeds one
# million.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver for Project Euler 86; no solution implemented yet."""
    pass
if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 86
    # timed.caller benchmarks `dummy` — presumably (func, arg, iterations,
    # problem id); confirm against the local `timed` helper module.
    timed.caller(dummy, n, i, prob_id)
| 37.258065 | 79 | 0.722078 |
ace4d2ce9773ad77241f169af97ffa8d197f3980 | 22,903 | py | Python | mars/dataframe/reduction/tests/test_reduction_execute.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | 1 | 2020-06-25T13:51:16.000Z | 2020-06-25T13:51:16.000Z | mars/dataframe/reduction/tests/test_reduction_execute.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | null | null | null | mars/dataframe/reduction/tests/test_reduction_execute.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
import numpy as np
from mars.tests.core import TestBase, parameterized, ExecutorForTest
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
# Reduction ops under test, keyed by pandas method name.  ``has_min_count``
# marks the ops that accept pandas' ``min_count`` keyword (sum and prod).
reduction_functions = {
    name: {'func_name': name, 'has_min_count': name in ('sum', 'prod')}
    for name in ('sum', 'prod', 'min', 'max', 'mean', 'var', 'std')
}
@parameterized(**reduction_functions)
class TestReduction(TestBase):
    """Check chunked Mars reductions against plain pandas results.

    ``@parameterized`` stamps out one test class per entry of
    ``reduction_functions``; each injects ``self.func_name`` (the pandas
    reduction to call) and ``self.has_min_count`` (whether the op accepts
    pandas' ``min_count`` keyword).
    """

    def setUp(self):
        self.executor = ExecutorForTest()

    def compute(self, data, **kwargs):
        """Apply the parameterized reduction to *data* (pandas or Mars object)."""
        return getattr(data, self.func_name)(**kwargs)

    def testSeriesReduction(self):
        """Series reductions must match pandas across several chunk sizes."""
        data = pd.Series(np.random.randint(0, 8, (10,)), index=[str(i) for i in range(10)], name='a')
        reduction_df1 = self.compute(from_pandas_series(data))
        self.assertEqual(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_series(data, chunk_size=6))
        self.assertAlmostEqual(
            self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3))
        self.assertAlmostEqual(
            self.compute(data), self.executor.execute_dataframe(reduction_df3, concat=True)[0])
        reduction_df4 = self.compute(from_pandas_series(data, chunk_size=4), axis='index')
        self.assertAlmostEqual(
            self.compute(data, axis='index'), self.executor.execute_dataframe(reduction_df4, concat=True)[0])
        # NaN handling: keep at least one non-NaN element so skipna paths differ.
        data = pd.Series(np.random.rand(20), name='a')
        data[0] = 0.1  # make sure not all elements are NaN
        data[data > 0.5] = np.nan
        reduction_df1 = self.compute(from_pandas_series(data, chunk_size=3))
        self.assertAlmostEqual(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False)
        self.assertTrue(
            np.isnan(self.executor.execute_dataframe(reduction_df2, concat=True)[0]))
        if self.has_min_count:
            # min_count semantics (sum/prod only): result is NaN when fewer
            # than min_count valid values are present.
            reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False, min_count=2)
            self.assertTrue(
                np.isnan(self.executor.execute_dataframe(reduction_df3, concat=True)[0]))
            reduction_df4 = self.compute(from_pandas_series(data, chunk_size=3), min_count=1)
            self.assertAlmostEqual(
                self.compute(data, min_count=1),
                self.executor.execute_dataframe(reduction_df4, concat=True)[0])
            reduction_df5 = self.compute(from_pandas_series(data, chunk_size=3), min_count=21)
            self.assertTrue(
                np.isnan(self.executor.execute_dataframe(reduction_df5, concat=True)[0]))

    def testDataFrameReduction(self):
        """DataFrame reductions must match pandas across chunk sizes/axes."""
        data = pd.DataFrame(np.random.rand(20, 10))
        reduction_df1 = self.compute(from_pandas_df(data))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        reduction_df3 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
        pd.testing.assert_series_equal(
            self.compute(data, axis='index', numeric_only=True),
            self.executor.execute_dataframe(reduction_df3, concat=True)[0])
        reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), axis=1)
        pd.testing.assert_series_equal(
            self.compute(data, axis=1),
            self.executor.execute_dataframe(reduction_df4, concat=True)[0])
        # test null
        np_data = np.random.rand(20, 10)
        np_data[np_data > 0.6] = np.nan
        data = pd.DataFrame(np_data)
        reduction_df1 = self.compute(from_pandas_df(data, chunk_size=3))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        # NOTE: the original repeated this skipna=False check twice verbatim;
        # the duplicate was removed.
        reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
        pd.testing.assert_series_equal(
            self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        if self.has_min_count:
            reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), min_count=15)
            pd.testing.assert_series_equal(
                self.compute(data, min_count=15),
                self.executor.execute_dataframe(reduction_df3, concat=True)[0])
            reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), min_count=3)
            pd.testing.assert_series_equal(
                self.compute(data, min_count=3),
                self.executor.execute_dataframe(reduction_df4, concat=True)[0])
            reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=3)
            pd.testing.assert_series_equal(
                self.compute(data, axis=1, min_count=3),
                self.executor.execute_dataframe(reduction_df5, concat=True)[0])
            reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=8)
            pd.testing.assert_series_equal(
                self.compute(data, axis=1, min_count=8),
                self.executor.execute_dataframe(reduction_df5, concat=True)[0])
        # test numeric_only
        data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
                            columns=[np.random.bytes(10) for _ in range(10)])
        reduction_df1 = self.compute(from_pandas_df(data, chunk_size=2))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
        pd.testing.assert_series_equal(
            self.compute(data, axis='index', numeric_only=True),
            self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), axis='columns')
        pd.testing.assert_series_equal(
            self.compute(data, axis='columns'),
            self.executor.execute_dataframe(reduction_df3, concat=True)[0])
        # Mixed dtypes: numeric_only must drop the string and bool columns.
        data_dict = dict((str(i), np.random.rand(10)) for i in range(10))
        data_dict['string'] = [str(i) for i in range(10)]
        data_dict['bool'] = np.random.choice([True, False], (10,))
        data = pd.DataFrame(data_dict)
        reduction_df = self.compute(from_pandas_df(data, chunk_size=3), axis='index', numeric_only=True)
        pd.testing.assert_series_equal(
            self.compute(data, axis='index', numeric_only=True),
            self.executor.execute_dataframe(reduction_df, concat=True)[0])
class TestCount(TestBase):
    """Tests for count / nunique / unique on Mars Series and DataFrames."""

    def setUp(self):
        self.executor = ExecutorForTest()

    def testSeriesCount(self):
        """Series.count must ignore NaN and match pandas at every chunk size."""
        array = np.random.rand(10)
        array[[2, 7, 9]] = np.nan
        data = pd.Series(array)
        series = from_pandas_series(data)
        result = self.executor.execute_dataframe(series.count(), concat=True)[0]
        expected = data.count()
        self.assertEqual(result, expected)
        series2 = from_pandas_series(data, chunk_size=1)
        result = self.executor.execute_dataframe(series2.count(), concat=True)[0]
        expected = data.count()
        self.assertEqual(result, expected)
        series2 = from_pandas_series(data, chunk_size=3)
        result = self.executor.execute_dataframe(series2.count(), concat=True)[0]
        expected = data.count()
        self.assertEqual(result, expected)

    def testDataFrameCount(self):
        """DataFrame.count across axes, chunk sizes and numeric_only."""
        data = pd.DataFrame({
            "Person": ["John", "Myla", "Lewis", "John", "Myla"],
            "Age": [24., np.nan, 21., 33, 26],
            "Single": [False, True, True, True, False]})
        df = from_pandas_df(data)
        result = self.executor.execute_dataframe(df.count(), concat=True)[0]
        expected = data.count()
        pd.testing.assert_series_equal(result, expected)
        result = self.executor.execute_dataframe(df.count(axis='columns'), concat=True)[0]
        expected = data.count(axis='columns')
        pd.testing.assert_series_equal(result, expected)
        df2 = from_pandas_df(data, chunk_size=2)
        result = self.executor.execute_dataframe(df2.count(), concat=True)[0]
        expected = data.count()
        pd.testing.assert_series_equal(result, expected)
        result = self.executor.execute_dataframe(df2.count(axis='columns'), concat=True)[0]
        expected = data.count(axis='columns')
        pd.testing.assert_series_equal(result, expected)
        df3 = from_pandas_df(data, chunk_size=3)
        result = self.executor.execute_dataframe(df3.count(numeric_only=True), concat=True)[0]
        expected = data.count(numeric_only=True)
        pd.testing.assert_series_equal(result, expected)
        result = self.executor.execute_dataframe(df3.count(axis='columns', numeric_only=True), concat=True)[0]
        expected = data.count(axis='columns', numeric_only=True)
        pd.testing.assert_series_equal(result, expected)

    def testNunique(self):
        """nunique on Series/DataFrame, with and without dropna."""
        data1 = pd.Series(np.random.randint(0, 5, size=(20,)))
        series = from_pandas_series(data1)
        result = self.executor.execute_dataframe(series.nunique(), concat=True)[0]
        expected = data1.nunique()
        self.assertEqual(result, expected)
        series = from_pandas_series(data1, chunk_size=6)
        result = self.executor.execute_dataframe(series.nunique(), concat=True)[0]
        expected = data1.nunique()
        self.assertEqual(result, expected)
        # test dropna
        data2 = data1.copy()
        data2[[2, 9, 18]] = np.nan
        series = from_pandas_series(data2)
        result = self.executor.execute_dataframe(series.nunique(), concat=True)[0]
        expected = data2.nunique()
        self.assertEqual(result, expected)
        series = from_pandas_series(data2, chunk_size=3)
        result = self.executor.execute_dataframe(series.nunique(dropna=False), concat=True)[0]
        expected = data2.nunique(dropna=False)
        self.assertEqual(result, expected)
        # test dataframe
        data1 = pd.DataFrame(np.random.randint(0, 6, size=(20, 20)),
                             columns=['c' + str(i) for i in range(20)])
        df = from_pandas_df(data1)
        result = self.executor.execute_dataframe(df.nunique(), concat=True)[0]
        expected = data1.nunique()
        pd.testing.assert_series_equal(result, expected)
        df = from_pandas_df(data1, chunk_size=6)
        result = self.executor.execute_dataframe(df.nunique(), concat=True)[0]
        expected = data1.nunique()
        pd.testing.assert_series_equal(result, expected)
        df = from_pandas_df(data1)
        result = self.executor.execute_dataframe(df.nunique(axis=1), concat=True)[0]
        expected = data1.nunique(axis=1)
        pd.testing.assert_series_equal(result, expected)
        df = from_pandas_df(data1, chunk_size=3)
        result = self.executor.execute_dataframe(df.nunique(axis=1), concat=True)[0]
        expected = data1.nunique(axis=1)
        pd.testing.assert_series_equal(result, expected)
        # test dropna
        data2 = data1.copy()
        data2.iloc[[2, 9, 18], [2, 9, 18]] = np.nan
        df = from_pandas_df(data2)
        result = self.executor.execute_dataframe(df.nunique(), concat=True)[0]
        expected = data2.nunique()
        pd.testing.assert_series_equal(result, expected)
        df = from_pandas_df(data2, chunk_size=3)
        result = self.executor.execute_dataframe(df.nunique(dropna=False), concat=True)[0]
        expected = data2.nunique(dropna=False)
        pd.testing.assert_series_equal(result, expected)
        # NOTE: the original repeated the chunk_size=3/axis=1 check on data1
        # here verbatim; the duplicate was removed.

    def testUnique(self):
        """Series.unique preserves first-seen order, incl. Timestamp dtype."""
        data1 = pd.Series(np.random.randint(0, 5, size=(20,)))
        series = from_pandas_series(data1)
        result = self.executor.execute_dataframe(series.unique(), concat=True)[0]
        expected = data1.unique()
        np.testing.assert_array_equal(result, expected)
        series = from_pandas_series(data1, chunk_size=6)
        result = self.executor.execute_dataframe(series.unique(), concat=True)[0]
        expected = data1.unique()
        np.testing.assert_array_equal(result, expected)
        data2 = pd.Series([pd.Timestamp('20200101'), ] * 5 +
                          [pd.Timestamp('20200202')] +
                          [pd.Timestamp('20020101')] * 9)
        series = from_pandas_series(data2)
        result = self.executor.execute_dataframe(series.unique(), concat=True)[0]
        expected = data2.unique()
        np.testing.assert_array_equal(result, expected)
        series = from_pandas_series(data2, chunk_size=6)
        result = self.executor.execute_dataframe(series.unique(), concat=True)[0]
        expected = data2.unique()
        np.testing.assert_array_equal(result, expected)
# Cumulative reduction ops under test, keyed by pandas method name.
cum_reduction_functions = {
    name: {'func_name': name}
    for name in ('cummax', 'cummin', 'cumprod', 'cumsum')
}
@parameterized(**cum_reduction_functions)
class TestCumReduction(TestBase):
    """Check chunked Mars cumulative reductions against plain pandas.

    ``@parameterized`` stamps out one class per entry of
    ``cum_reduction_functions``; each injects ``self.func_name``.
    Cumulative ops return an object shaped like their input, so results
    are compared with assert_series_equal / assert_frame_equal.
    """

    def setUp(self):
        self.executor = ExecutorForTest()

    def compute(self, data, **kwargs):
        """Apply the parameterized cumulative op to *data* (pandas or Mars)."""
        return getattr(data, self.func_name)(**kwargs)

    def testSeriesCumReduction(self):
        """Series cumulative ops across chunk sizes, incl. NaN handling."""
        data = pd.Series(np.random.rand(20), index=[str(i) for i in range(20)], name='a')
        reduction_df1 = self.compute(from_pandas_series(data))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_series(data, chunk_size=6))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df3, concat=True)[0])
        reduction_df4 = self.compute(from_pandas_series(data, chunk_size=4), axis='index')
        pd.testing.assert_series_equal(
            self.compute(data, axis='index'), self.executor.execute_dataframe(reduction_df4, concat=True)[0])
        data = pd.Series(np.random.rand(20), name='a')
        data[0] = 0.1  # make sure not all elements are NaN
        data[data > 0.5] = np.nan
        reduction_df1 = self.compute(from_pandas_series(data, chunk_size=3))
        pd.testing.assert_series_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False)
        pd.testing.assert_series_equal(
            self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])

    def testDataFrameCumReduction(self):
        """DataFrame cumulative ops across chunk sizes and axes."""
        data = pd.DataFrame(np.random.rand(20, 10))
        reduction_df1 = self.compute(from_pandas_df(data))
        pd.testing.assert_frame_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3))
        pd.testing.assert_frame_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), axis=1)
        pd.testing.assert_frame_equal(
            self.compute(data, axis=1),
            self.executor.execute_dataframe(reduction_df4, concat=True)[0])
        # test null
        np_data = np.random.rand(20, 10)
        np_data[np_data > 0.6] = np.nan
        data = pd.DataFrame(np_data)
        reduction_df1 = self.compute(from_pandas_df(data, chunk_size=3))
        pd.testing.assert_frame_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        # NOTE: the original repeated this skipna=False check twice verbatim;
        # the duplicate was removed.
        reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
        pd.testing.assert_frame_equal(
            self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
        # test numeric_only
        data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
                            columns=[np.random.bytes(10) for _ in range(10)])
        reduction_df1 = self.compute(from_pandas_df(data, chunk_size=2))
        pd.testing.assert_frame_equal(
            self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
        reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), axis='columns')
        pd.testing.assert_frame_equal(
            self.compute(data, axis='columns'),
            self.executor.execute_dataframe(reduction_df3, concat=True)[0])
class TestAggregate(TestBase):
    """Tests for DataFrame.agg / Series.agg on Mars vs. plain pandas."""
    def setUp(self):
        self.executor = ExecutorForTest()
    def testDataFrameAggregate(self):
        """agg with single funcs, func lists, per-column dicts, and both axes."""
        all_aggs = ['sum', 'prod', 'min', 'max', 'count', 'size', 'mean', 'var', 'std']
        data = pd.DataFrame(np.random.rand(20, 20))
        df = from_pandas_df(data)
        result = df.agg(all_aggs)
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg(all_aggs))
        # 'size' returns a scalar, so it is checked separately from the loop below.
        result = df.agg('size')
        self.assertEqual(self.executor.execute_dataframe(result)[0], data.agg('size'))
        for func in (a for a in all_aggs if a != 'size'):
            result = df.agg(func)
            pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                           data.agg(func))
            result = df.agg(func, axis=1)
            pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                           data.agg(func, axis=1))
        # Repeat everything on a chunked frame.
        df = from_pandas_df(data, chunk_size=3)
        # will redirect to transform
        result = df.agg(['cumsum', 'cummax'])
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg(['cumsum', 'cummax']))
        result = df.agg('size')
        self.assertEqual(self.executor.execute_dataframe(result)[0], data.agg('size'))
        for func in (a for a in all_aggs if a != 'size'):
            result = df.agg(func)
            pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                           data.agg(func))
            result = df.agg(func, axis=1)
            pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                           data.agg(func, axis=1))
        result = df.agg(['sum'])
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg(['sum']))
        result = df.agg(all_aggs)
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg(all_aggs))
        result = df.agg(all_aggs, axis=1)
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg(all_aggs, axis=1))
        # Per-column dict form: different agg lists for columns 0 and 9.
        result = df.agg({0: ['sum', 'min', 'var'], 9: ['mean', 'var', 'std']})
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      data.agg({0: ['sum', 'min', 'var'], 9: ['mean', 'var', 'std']}))
    def testSeriesAggregate(self):
        """Series.agg with single funcs and func lists, chunked and unchunked."""
        all_aggs = ['sum', 'prod', 'min', 'max', 'count', 'size', 'mean', 'var', 'std']
        data = pd.Series(np.random.rand(20), index=[str(i) for i in range(20)], name='a')
        series = from_pandas_series(data)
        result = series.agg(all_aggs)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                       data.agg(all_aggs))
        for func in all_aggs:
            result = series.agg(func)
            self.assertAlmostEqual(self.executor.execute_dataframe(result, concat=True)[0],
                                   data.agg(func))
        series = from_pandas_series(data, chunk_size=3)
        for func in all_aggs:
            result = series.agg(func)
            self.assertAlmostEqual(self.executor.execute_dataframe(result, concat=True)[0],
                                   data.agg(func))
        result = series.agg(all_aggs)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                       data.agg(all_aggs))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__': # pragma: no cover
    unittest.main()
| 45.17357 | 110 | 0.650744 |
ace4d2fabdcaf66939acf41790fdd33eea7e54df | 290 | py | Python | internship_3/conftest.py | Zamy97/internship_3 | 9c9db252b6818316e9864839075bb1d23714f7e4 | [
"MIT"
] | null | null | null | internship_3/conftest.py | Zamy97/internship_3 | 9c9db252b6818316e9864839075bb1d23714f7e4 | [
"MIT"
] | null | null | null | internship_3/conftest.py | Zamy97/internship_3 | 9c9db252b6818316e9864839075bb1d23714f7e4 | [
"MIT"
] | null | null | null | import pytest
from internship_3.users.models import User
from internship_3.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    """Point Django's MEDIA_ROOT at a per-test temp dir (applied to every test)."""
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """Return a freshly built User instance via the factory."""
    return UserFactory()
| 19.333333 | 58 | 0.782759 |
ace4d3b7b8d2630570bfec99ecbfab82a09fbd53 | 590 | py | Python | Desafios/MODULO 2/Desafio 36.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | Desafios/MODULO 2/Desafio 36.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | Desafios/MODULO 2/Desafio 36.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | print('='*10, 'Desafio 36', '='*10)
valor_da_casa = float(input('O valor do imovel: R$ '))
salario_comprador = float(input('O salário do comprador: R$ '))
anos_financiamento = int(input('Anos de financiamento: '))
parcela = valor_da_casa / (anos_financiamento * 12)
print('Para pagar uma casa de R$ {:.2f} em {} anos, a prestação será de R$ {:.2f}.'.format(valor_da_casa, anos_financiamento, parcela))
if parcela <= 0.3 * salario_comprador:
print('\033[1;32mEmpréstimo APROVADO!\033[0;0m')
else:
print('\033[1;31mEmpréstimo REPROVADO!\033[0;0m')
print('='*10, 'Desafio 36', '='*10)
| 49.166667 | 135 | 0.688136 |
ace4d414c9021ec7884463b5dbe26602dbf1733c | 5,758 | py | Python | conans/test/integration/multi_remote_test.py | ytimenkov/conan | 89eb275b9696b308aaaa1fbfaa0f8cdab284a764 | [
"MIT"
] | null | null | null | conans/test/integration/multi_remote_test.py | ytimenkov/conan | 89eb275b9696b308aaaa1fbfaa0f8cdab284a764 | [
"MIT"
] | null | null | null | conans/test/integration/multi_remote_test.py | ytimenkov/conan | 89eb275b9696b308aaaa1fbfaa0f8cdab284a764 | [
"MIT"
] | null | null | null | import unittest
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from collections import OrderedDict
class MultiRemoteTest(unittest.TestCase):
    """Conan client behaviour against several configured remotes."""
    def setUp(self):
        # Three fake servers named remote0..remote2, all sharing one user.
        self.servers = OrderedDict()
        self.users = {}
        for i in range(3):
            test_server = TestServer()
            self.servers["remote%d" % i] = test_server
            self.users["remote%d" % i] = [("lasote", "mypass")]
        self.client = TestClient(servers=self.servers, users=self.users)
    def predefine_remote_test(self):
        """A reference pinned via `remote add_ref` must be fetched from that remote."""
        files = cpp_hello_conan_files("Hello0", "0.1", build=False)
        self.client.save(files)
        self.client.run("export . lasote/stable")
        self.client.run("upload Hello0/0.1@lasote/stable -r=remote0")
        self.client.run("upload Hello0/0.1@lasote/stable -r=remote1")
        self.client.run("upload Hello0/0.1@lasote/stable -r=remote2")
        # Wipe the local cache so install is forced to hit a remote.
        self.client.run('remove "*" -f')
        self.client.run("remote add_ref Hello0/0.1@lasote/stable remote1")
        self.client.run("install Hello0/0.1@lasote/stable --build=missing")
        self.assertIn("Hello0/0.1@lasote/stable: Retrieving from predefined remote 'remote1'",
                      self.client.user_io.out)
        self.client.run("remote list_ref")
        self.assertIn("Hello0/0.1@lasote/stable: remote1", self.client.user_io.out)
    def upload_test(self):
        """The remote a reference is first associated with stays fixed."""
        conan_reference = ConanFileReference.loads("Hello0/0.1@lasote/stable")
        files = cpp_hello_conan_files("Hello0", "0.1", build=False)
        self.client.save(files)
        self.client.run("export . lasote/stable")
        self.client.run("upload %s" % str(conan_reference))
        self.client.run("info %s" % str(conan_reference))
        self.assertIn("remote0=http://", self.client.user_io.out)
        # The remote, once fixed, does not change even after uploading elsewhere.
        self.client.run("upload %s -r=remote1" % str(conan_reference))
        self.client.run("info %s" % str(conan_reference))
        self.assertIn("remote0=http://", self.client.user_io.out)
        # Now install it in other machine from remote 0
        client2 = TestClient(servers=self.servers, users=self.users)
        client2.run("install %s --build=missing" % str(conan_reference))
        client2.run("info %s" % str(conan_reference))
        self.assertIn("remote0=http://", client2.user_io.out)
        # Now install it in other machine from remote 1 (remote0 removed).
        servers = self.servers.copy()
        servers.pop("remote0")
        client3 = TestClient(servers=servers, users=self.users)
        client3.run("install %s --build=missing" % str(conan_reference))
        client3.run("info %s" % str(conan_reference))
        self.assertIn("remote1=http://", client3.user_io.out)
    def fail_when_not_notfound_test(self):
        """
        If a remote fails with a 404 it has to keep looking in the next remote, but if it fails by
        any other reason it has to stop
        """
        servers = OrderedDict()
        servers["s0"] = TestServer()
        servers["s1"] = TestServer()
        servers["s2"] = TestServer()
        client = TestClient(servers=servers, users=self.users)
        files = cpp_hello_conan_files("MyLib", "0.1", build=False)
        client.save(files)
        client.run("create . lasote/testing")
        client.run("user lasote -p mypass -r s1")
        client.run("upload MyLib* -r s1 -c")
        # Break s1 with an unreachable URL: a connection error, not a 404.
        servers["s1"].fake_url = "http://asdlhaljksdhlajkshdljakhsd" # Do not exist
        client2 = TestClient(servers=servers, users=self.users)
        err = client2.run("install MyLib/0.1@conan/testing --build=missing", ignore_error=True)
        self.assertTrue(err)
        self.assertIn("MyLib/0.1@conan/testing: Trying with 's0'...", client2.out)
        self.assertIn("MyLib/0.1@conan/testing: Trying with 's1'...", client2.out)
        self.assertIn("Unable to connect to s1=http://asdlhaljksdhlajkshdljakhsd", client2.out)
        # s2 is not even tried
        self.assertNotIn("MyLib/0.1@conan/testing: Trying with 's2'...", client2.out)
    def install_from_remotes_test(self):
        """Each dependency resolves from the remote it was uploaded to."""
        for i in range(3):
            conan_reference = ConanFileReference.loads("Hello%d/0.1@lasote/stable" % i)
            files = cpp_hello_conan_files("Hello%d" % i, "0.1", build=False)
            self.client.save(files)
            self.client.run("export . lasote/stable")
            self.client.run("upload %s -r=remote%d" % (str(conan_reference), i))
            self.client.run("info %s" % str(conan_reference))
            self.assertIn("remote%d=http://" % i, self.client.user_io.out)
        # Now install it in other machine from remote 0
        client2 = TestClient(servers=self.servers, users=self.users)
        files = cpp_hello_conan_files("HelloX", "0.1", deps=["Hello0/0.1@lasote/stable",
                                                            "Hello1/0.1@lasote/stable",
                                                            "Hello2/0.1@lasote/stable"])
        # Rename build() so nothing is actually compiled during install.
        files["conanfile.py"] = files["conanfile.py"].replace("def build(", "def build2(")
        client2.save(files)
        client2.run("install . --build=missing")
        self.assertIn("Hello0/0.1@lasote/stable from 'remote0'", client2.user_io.out)
        self.assertIn("Hello1/0.1@lasote/stable from 'remote1'", client2.user_io.out)
        self.assertIn("Hello2/0.1@lasote/stable from 'remote2'", client2.user_io.out)
        client2.run("info .")
        self.assertIn("Remote: remote0=http://", client2.user_io.out)
        self.assertIn("Remote: remote1=http://", client2.user_io.out)
        self.assertIn("Remote: remote2=http://", client2.user_io.out)
ace4d49e6608b5c72887d68d7ae3139f7f4f68e1 | 235 | py | Python | src/bromine/_version.py | Etiqa/bromine | cabf0931f5a06796c26fdc7fb9f7ecf147554fd5 | [
"BSD-2-Clause"
] | 2 | 2018-09-20T12:37:01.000Z | 2021-08-30T14:44:25.000Z | src/bromine/_version.py | Etiqa/bromine | cabf0931f5a06796c26fdc7fb9f7ecf147554fd5 | [
"BSD-2-Clause"
] | null | null | null | src/bromine/_version.py | Etiqa/bromine | cabf0931f5a06796c26fdc7fb9f7ecf147554fd5 | [
"BSD-2-Clause"
] | null | null | null | """
https://semver.org/spec/v2.0.0.html
https://www.python.org/dev/peps/pep-0440/
https://packaging.python.org/guides/single-sourcing-package-version/#single-sourcing-the-version
"""
# Version metadata. The `_BUILD` and `COMMIT` tokens are placeholders —
# presumably substituted by the release pipeline; confirm against the
# project's build scripts.
__version__ = '0.4.0_BUILD'
__commit__ = 'COMMIT'
| 23.5 | 96 | 0.740426 |
ace4d621172fb8a2a3ce41a3ecb0982ecbebb12e | 4,790 | py | Python | python/nano/example/pytorch/quantization/inc/resnet18_cifar.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | [
"Apache-2.0"
] | null | null | null | python/nano/example/pytorch/quantization/inc/resnet18_cifar.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | [
"Apache-2.0"
] | null | null | null | python/nano/example/pytorch/quantization/inc/resnet18_cifar.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is adapted from PyTorch Lightning Tutorial.
# https://github.com/PyTorchLightning/lightning-tutorials/blob/main/
# lightning_examples/cifar10-baseline/baseline.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import OneCycleLR
from torchmetrics.functional import accuracy
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
# Fix all RNG seeds (python/numpy/torch) for reproducible runs.
seed_everything(7)
# Dataset location; defaults to the current directory unless overridden.
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
BATCH_SIZE = 64
# Use half of the available cores for the DataLoader workers.
NUM_WORKERS = int(os.cpu_count() / 2)
# Training-time augmentation: random crop with padding + horizontal flip,
# followed by the standard CIFAR-10 channel normalization.
train_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# Eval-time transform: no augmentation, just tensor conversion + normalization.
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# LightningDataModule wrapping CIFAR-10 download/split/loader creation.
# The same (un-augmented) transform is used for both val and test.
cifar10_dm = CIFAR10DataModule(
    data_dir=PATH_DATASETS,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    train_transforms=train_transforms,
    test_transforms=test_transforms,
    val_transforms=test_transforms,
    pin_memory=False
)
def create_model():
    """Build a ResNet-18 adapted for 32x32 CIFAR-10 inputs.

    The ImageNet stem (7x7 stride-2 conv followed by max-pooling) would
    downsample 32x32 images too aggressively, so it is replaced with a
    3x3 stride-1 convolution and the max-pool is turned into a no-op.
    """
    net = torchvision.models.resnet18(pretrained=False, num_classes=10)
    net.conv1 = nn.Conv2d(
        3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    net.maxpool = nn.Identity()
    return net
class LitResnet(LightningModule):
    """LightningModule wrapping the CIFAR-adapted ResNet-18.

    Trains with SGD + OneCycleLR and logs loss/accuracy through the
    standard Lightning hooks.
    """

    def __init__(self, lr=0.05):
        super().__init__()
        # Records ``lr`` into self.hparams (read back in
        # configure_optimizers) and into checkpoints.
        self.save_hyperparameters()
        self.model = create_model()

    def forward(self, x):
        # Returns log-probabilities (log_softmax), matched with nll_loss below.
        out = self.model(x)
        return F.log_softmax(out, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        # Shared val/test evaluation; ``stage`` prefixes the logged metrics.
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.lr,
            momentum=0.9,
            weight_decay=5e-4,
        )
        # NOTE(review): 45000 presumably matches the CIFAR-10 train split
        # used by CIFAR10DataModule (50k minus the validation split) —
        # confirm against the datamodule's val_split.
        steps_per_epoch = 45000 // BATCH_SIZE
        scheduler_dict = {
            # One-cycle schedule stepped every batch (interval="step"),
            # peaking at lr 0.1.
            "scheduler": OneCycleLR(
                optimizer,
                0.1,
                epochs=self.trainer.max_epochs,
                steps_per_epoch=steps_per_epoch,
            ),
            "interval": "step",
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
# Instantiate the model and run the full train/test cycle.
model = LitResnet(lr=0.05)
model.datamodule = cifar10_dm
trainer = Trainer(
    progress_bar_refresh_rate=10,
    max_epochs=30,
    # TensorBoard logs land under lightning_logs/resnet/version_*/.
    logger=TensorBoardLogger("lightning_logs/", name="resnet"),
    # Logs the learning rate at every optimizer step (OneCycleLR varies it).
    callbacks=[LearningRateMonitor(logging_interval="step")],
)
trainer.fit(model, cifar10_dm)
trainer.test(model, datamodule=cifar10_dm)
| 29.9375 | 76 | 0.690814 |
ace4d6af454c8c8641a230f9b0746ca9431bd6d7 | 1,096 | py | Python | conexao_twitter.py | rsoaresp/open_data_campinas | 3b342202409ae71a4cd94b71787fa08a43c6628b | [
"MIT"
] | null | null | null | conexao_twitter.py | rsoaresp/open_data_campinas | 3b342202409ae71a4cd94b71787fa08a43c6628b | [
"MIT"
] | null | null | null | conexao_twitter.py | rsoaresp/open_data_campinas | 3b342202409ae71a4cd94b71787fa08a43c6628b | [
"MIT"
] | null | null | null | import uuid
import tweepy
from mensagens import Mensagens
def publicar_tuites():
    """Publish expense summaries from ``Mensagens`` as tweets/threads.

    Posts one standalone tweet per ``total_despesas`` message, then two
    threaded sequences (``top_gastos`` and ``top_gastos_credor``) where
    each tweet replies to the previous one in its own chain.
    """
    # SECURITY: API credentials are hardcoded here. They are exposed to
    # anyone with repo access and should be revoked and moved to
    # environment variables / a secrets store.
    auth = tweepy.OAuthHandler('VyzupUYulaeOR3xTZHcOF6bJ6', 'Y7fBjYGhpPpas1NMhg7RhsN56olZa6B5KCG1517L3jN9VPYfzl')
    auth.set_access_token('1252698169131110401-xRgChKOyA1DGI9e2qCx4R5nNcscUE1', 'JPqzIgSNLn19UMeHX6HNia7GsAUvPZThcraRaH2ElJInK')
    api = tweepy.API(auth)
    message_api = Mensagens()
    # Standalone tweets; only the last status is kept in ``status`` and
    # it is never used afterwards.
    for msg in message_api.total_despesas():
        status = api.update_status(msg)
    # Threading trick: on the first iteration ``status_top_gastos`` is not
    # yet bound, so reading it raises UnboundLocalError and the except
    # branch posts the thread's root tweet; subsequent iterations reply to
    # the previous status, forming a chain.
    for msg in message_api.top_gastos():
        try:
            status_top_gastos = api.update_status(msg, id=str(uuid.uuid4()), in_reply_to_status_id=status_top_gastos.id)
        except UnboundLocalError:
            status_top_gastos = api.update_status(msg, id=str(uuid.uuid4()))
    # Same chained-reply pattern for the per-creditor ranking.
    for msg in message_api.top_gastos_credor():
        try:
            status_top_gastos_credor = api.update_status(msg, id=str(uuid.uuid4()), in_reply_to_status_id=status_top_gastos_credor.id)
        except UnboundLocalError:
            status_top_gastos_credor = api.update_status(msg, id=str(uuid.uuid4()))
    return None
| 30.444444 | 134 | 0.730839 |
ace4d8304792642f863ecfb507d5717e3c204f68 | 547 | py | Python | natas/natas27-2.py | nxvl/wargames | d8726a0200800adc9b3df82211d0af73dbf8834b | [
"MIT"
] | null | null | null | natas/natas27-2.py | nxvl/wargames | d8726a0200800adc9b3df82211d0af73dbf8834b | [
"MIT"
] | null | null | null | natas/natas27-2.py | nxvl/wargames | d8726a0200800adc9b3df82211d0af73dbf8834b | [
"MIT"
] | null | null | null | import requests
from requests.auth import HTTPBasicAuth
# NOTE: Python 2 script (uses the ``print`` statement).
# OverTheWire "natas27" wargame solver: registers a username consisting of
# 'natas28' padded with 666 spaces plus a trailing 'Z' — presumably to
# exploit column truncation on the server so the stored name collides with
# 'natas28' (confirm against the level's source) — then logs in as the
# plain 'natas28' with an empty password.
HOST = 'http://natas27.natas.labs.overthewire.org/'
auth = HTTPBasicAuth('natas27', '55TBjpPZUUJgVP5b3BnbG6ON9uDPVzCJ')
cookies = dict()
# Step 1: create the padded/truncated user.
data = dict(username='natas28' + ' '*666 + 'Z', password='')
r = requests.post(HOST, cookies=cookies, auth=auth, data=data)
if 'Wrong password' not in r.content:
    print r.content
# Step 2: log in as the colliding plain username; the response should
# contain the next level's password.
data = dict(username='natas28', password='')
r = requests.post(HOST, cookies=cookies, auth=auth, data=data)
if 'Wrong password' not in r.content:
    print r.content
| 27.35 | 67 | 0.723949 |
ace4d8a59f16cc9976273fd4eb9ff95eac9ed4ee | 2,305 | py | Python | samples/network/watcher/manage_network_watcher.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 47 | 2020-05-29T18:25:57.000Z | 2022-03-30T06:04:56.000Z | samples/network/watcher/manage_network_watcher.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 27 | 2020-05-13T06:37:24.000Z | 2022-03-01T07:58:34.000Z | samples/network/watcher/manage_network_watcher.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 67 | 2020-05-09T06:09:19.000Z | 2022-03-22T23:18:06.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
    """Demonstrate the Azure Network Watcher CRUD lifecycle.

    Creates a resource group and a network watcher, gets and tag-updates
    it, then deletes both the watcher and the resource group.
    Requires SUBSCRIPTION_ID in the environment and credentials usable by
    DefaultAzureCredential.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    NETWORK_WATCHER = "network_watcherxxyyzz"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": "eastus"}
    )

    # Create network watcher
    network_watcher = network_client.network_watchers.create_or_update(
        GROUP_NAME,
        NETWORK_WATCHER,
        {
            "location": "eastus"
        }
    )
    print("Create network watcher:\n{}".format(network_watcher))

    # Get network watcher
    network_watcher = network_client.network_watchers.get(
        GROUP_NAME,
        NETWORK_WATCHER
    )
    print("Get network watcher:\n{}".format(network_watcher))

    # Update network watcher
    network_watcher = network_client.network_watchers.update_tags(
        GROUP_NAME,
        NETWORK_WATCHER,
        {
            "tags": {
                "tag1": "value1",
                "tag2": "value2"
            }
        }
    )
    print("Update network watcher:\n{}".format(network_watcher))

    # Delete network watcher
    # begin_delete returns an LRO poller; .result() blocks until done
    # (and returns None for delete, so ``network_watcher`` is cleared).
    network_watcher = network_client.network_watchers.begin_delete(
        GROUP_NAME,
        NETWORK_WATCHER
    ).result()
    print("Delete network watcher.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()
if __name__ == "__main__":
    main()
| 28.45679 | 95 | 0.632104 |
ace4d8eb30f82c34c8ea0e53ecb020b88babe757 | 6,515 | py | Python | circuit_training/learning/train_ppo_lib.py | Ray-Hao-Rui/circuit_training | 8f3a660132bbeaa6779d5bd62db3809a3055016b | [
"Apache-2.0"
] | 1 | 2022-03-10T08:12:48.000Z | 2022-03-10T08:12:48.000Z | circuit_training/learning/train_ppo_lib.py | Ray-Hao-Rui/circuit_training | 8f3a660132bbeaa6779d5bd62db3809a3055016b | [
"Apache-2.0"
] | null | null | null | circuit_training/learning/train_ppo_lib.py | Ray-Hao-Rui/circuit_training | 8f3a660132bbeaa6779d5bd62db3809a3055016b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample training with distributed collection using a variable container."""
import os
import time
from absl import logging
from circuit_training.learning import agent
from circuit_training.learning import learner as learner_lib
import reverb
import tensorflow as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.train import learner as actor_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
def train(
    root_dir,
    strategy,
    replay_buffer_server_address,
    variable_container_server_address,
    create_env_fn,
    sequence_length,
    # Training params
    # This is the per replica batch size. The global batch size can be computed
    # by this number multiplied by the number of replicas (8 in the case of 2x2
    # TPUs).
    per_replica_batch_size=32,
    num_epochs=4,
    num_iterations=10000,
    # This is the number of episodes we train on in each iteration.
    # num_episodes_per_iteration * epsisode_length * num_epochs =
    # global_step (number of gradient updates) * per_replica_batch_size *
    # num_replicas.
    num_episodes_per_iteration=1024,
    use_model_tpu=False):
  """Trains a PPO agent.

  Wires together the GRL PPO agent, a Reverb replay buffer, a Reverb
  variable container (which broadcasts policy weights + model_id to the
  collect jobs), and the circuit-training PPO learner, then runs
  ``num_iterations`` train/push/clear cycles.

  Args:
    root_dir: Directory for saved policies and learner summaries.
    strategy: tf.distribute strategy; variable creation happens under its
      scope and it is handed to the learner.
    replay_buffer_server_address: Address of the Reverb replay server
      hosting 'training_table'.
    variable_container_server_address: Address of the Reverb server used
      as the variable container.
    create_env_fn: Zero-arg callable returning the (wrapped) environment;
      only used here to derive tensor specs and static features.
    sequence_length: Trajectory sequence length used for the shuffle
      buffer sizing and passed to the learner.
    per_replica_batch_size: Minibatch size per replica.
    num_epochs: PPO epochs over each iteration's data.
    num_iterations: Number of outer train iterations.
    num_episodes_per_iteration: Episodes consumed per iteration.
    use_model_tpu: Whether the agent builds its model for TPU execution.
  """
  # Get the specs from the environment.
  env = create_env_fn()
  observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
      spec_utils.get_tensor_specs(env))
  # Create the agent.
  with strategy.scope():
    train_step = train_utils.create_train_step()
    # model_id versions the policy weights pushed to the collect jobs.
    model_id = common.create_variable('model_id')
    logging.info('Using GRL agent networks.')
    static_features = env.wrapped_env().get_static_obs()
    tf_agent = agent.create_circuit_ppo_grl_agent(
        train_step,
        observation_tensor_spec,
        action_tensor_spec,
        time_step_tensor_spec,
        strategy,
        static_features=static_features,
        use_model_tpu=use_model_tpu)
    tf_agent.initialize()
  # Create the policy saver which saves the initial model now, then it
  # periodically checkpoints the policy weights.
  saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
  save_model_trigger = triggers.PolicySavedModelTrigger(
      saved_model_dir,
      tf_agent,
      train_step,
      start=-num_episodes_per_iteration,
      interval=num_episodes_per_iteration)
  # Create the variable container.
  variables = {
      reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),
      reverb_variable_container.TRAIN_STEP_KEY: train_step,
      'model_id': model_id,
  }
  variable_container = reverb_variable_container.ReverbVariableContainer(
      variable_container_server_address,
      table_names=[reverb_variable_container.DEFAULT_TABLE])
  variable_container.push(variables)
  # Create the replay buffer.
  reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
      tf_agent.collect_data_spec,
      sequence_length=None,
      table_name='training_table',
      server_address=replay_buffer_server_address)
  # Initialize the dataset.
  def experience_dataset_fn():
    # Streams trajectories from the Reverb 'training_table' with the
    # agent's collect spec shapes/dtypes (leading time dim unknown).
    get_dtype = lambda x: x.dtype
    get_shape = lambda x: (None,) + x.shape
    shapes = tf.nest.map_structure(get_shape, tf_agent.collect_data_spec)
    dtypes = tf.nest.map_structure(get_dtype, tf_agent.collect_data_spec)
    dataset = reverb.TrajectoryDataset(
        server_address=replay_buffer_server_address,
        table='training_table',
        dtypes=dtypes,
        shapes=shapes,
        # Menger uses learner_iterations_per_call (256). Using 8 here instead
        # because we do not need that much data in the buffer (they have to be
        # filtered out for the next iteration anyways). The rule of thumb is
        # 2-3x batch_size.
        max_in_flight_samples_per_worker=8,
        num_workers_per_iterator=-1,
        max_samples_per_stream=-1,
        rate_limiter_timeout_ms=-1,
    )
    def broadcast_info(info_traj):
      # Assumes that the first element of traj is shaped
      # (sequence_length, ...); and we extract this length.
      info, traj = info_traj
      first_elem = tf.nest.flatten(traj)[0]
      length = first_elem.shape[0] or tf.shape(first_elem)[0]
      info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]), info)
      return reverb.ReplaySample(info, traj)
    dataset = dataset.map(broadcast_info)
    return dataset
  # Create the learner.
  learning_triggers = [
      save_model_trigger,
      triggers.StepPerSecondLogTrigger(train_step, interval=1000),
  ]
  def per_sequence_fn(sample):
    # At this point, each sample data contains a sequence of trajectories.
    data, info = sample.data, sample.info
    data = tf_agent.preprocess_sequence(data)
    return data, info
  learner = learner_lib.CircuittrainingPPOLearner(
      root_dir,
      train_step,
      model_id,
      tf_agent,
      experience_dataset_fn,
      sequence_length,
      num_episodes_per_iteration=num_episodes_per_iteration,
      minibatch_size=per_replica_batch_size,
      shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),
      triggers=learning_triggers,
      summary_interval=1000,
      strategy=strategy,
      num_epochs=num_epochs,
      per_sequence_fn=per_sequence_fn,
  )
  # Run the training loop.
  for i in range(num_iterations):
    step_val = train_step.numpy()
    logging.info('Training. Iteration: %d', i)
    start_time = time.time()
    learner.run()
    num_steps = train_step.numpy() - step_val
    run_time = time.time() - start_time
    logging.info('Steps per sec: %s', num_steps / run_time)
    logging.info('Pushing variables at model_id: %d', model_id.numpy())
    # Broadcast the freshly trained weights, then drop stale experience so
    # the next iteration trains only on data from the new policy.
    variable_container.push(variables)
    logging.info('clearing replay buffer')
    reverb_replay_train.clear()
| 35.026882 | 80 | 0.736761 |
ace4da7c4d501bcb66bd9f2bf7737077f7697bb4 | 3,460 | py | Python | tests/test_objects.py | WillemRvX/ethelease | 65100c13a509f0df59c87175efd57c54946f05d0 | [
"MIT"
] | null | null | null | tests/test_objects.py | WillemRvX/ethelease | 65100c13a509f0df59c87175efd57c54946f05d0 | [
"MIT"
] | null | null | null | tests/test_objects.py | WillemRvX/ethelease | 65100c13a509f0df59c87175efd57c54946f05d0 | [
"MIT"
] | 1 | 2021-08-29T11:29:54.000Z | 2021-08-29T11:29:54.000Z | #!/usr/bin/env python
from ethelease.commons.etl_tools import PathBuilder
from ethelease.k8s.ops import K8sPodConf
def test_PathBuilder() -> None:
target = 's3://test/env=dv/source=a-test/subsrc=part-of/yr=1970/mo=01/dy=01/kind=raw/data.txt'
_test = (
PathBuilder()
.bucket(cloud='aws', name='test')
.env('dv')
.source('a-test')
.subsource('part-of')
.ds('1970-01-01')
.kind('raw')
.file_name('data.txt')
.full_path()
)
assert _test == target
def test_K8sPodConf() -> None:
target = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': 'test-test-dv',
'namespace': 'default'
},
'spec': {
'affinity': {
'nodeAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': {
'nodeSelectorTerms': [
{
'matchExpressions': [
{
'key': 'test',
'operator': 'In',
'values': ['testjobs']
}
]
}
]
}
}
},
'containers': [
{
'command': [
'python',
'test/pipelines/test',
'--Args', '{"test": 0}'
],
'image': 'test/test.dv:latest',
'imagePullPolicy': 'Always',
'name': 'base',
'resources': {
'limits': {
'cpu': '2000m',
'memory': '1G'
},
'requests': {
'cpu': '1000m',
'memory': '500M'
}
},
'volumeMounts': [
{
'mountPath': '/etc/secrets',
'name': 'testdarkarts',
'readOnly': True
}
]
}
],
'maxRetries': 3,
'restartPolicy': 'Never',
'terminationGracePeriodSeconds': 30,
'volumes': [
{
'name': 'testdarkarts',
'secret': {
'defaultMode': 420,
'secretName': 'chamberofsecrets'
}
}
]
}
}
_test = (
K8sPodConf()
.env('dv')
.integration_name('test')
.container_registry('test')
.metadata(name='test', namespace='default')
.pipeline_script('test')
.script_args('{"test": 0}')
.which_nodepoolorgroup(name='testjobs', matchexpkey='test')
.pick_secret(name='chamberofsecrets')
.cpu_usage(req='1000m', lim='2000m')
.mem_usage(req='500M', lim='1G')
.restart_policy('Never')
.assemble()
)
assert _test == target
| 32.037037 | 98 | 0.339884 |
ace4da9f048bf27a93ef95bec6ec3caa76eeac87 | 6,812 | py | Python | src/oidcservice/oidc/provider_info_discovery.py | IdentityPython/oiccli | 98b9b1e4926936cdec5a5d4d00da278ba673c741 | [
"Apache-2.0"
] | 3 | 2019-01-22T17:57:34.000Z | 2019-09-12T02:53:59.000Z | src/oidcservice/oidc/provider_info_discovery.py | IdentityPython/oidcservice | 98b9b1e4926936cdec5a5d4d00da278ba673c741 | [
"Apache-2.0"
] | 5 | 2019-08-08T12:17:19.000Z | 2019-12-02T07:08:32.000Z | src/oidcservice/oidc/provider_info_discovery.py | IdentityPython/oidcservice | 98b9b1e4926936cdec5a5d4d00da278ba673c741 | [
"Apache-2.0"
] | 4 | 2018-06-22T07:09:56.000Z | 2020-01-10T03:49:26.000Z | import logging
from oidcmsg import oidc
from oidcmsg.oauth2 import ResponseMessage
from oidcservice.exception import ConfigurationError
from oidcservice.oauth2 import provider_info_discovery
__author__ = 'Roland Hedberg'
logger = logging.getLogger(__name__)
PREFERENCE2PROVIDER = {
# "require_signed_request_object": "request_object_algs_supported",
"request_object_signing_alg": "request_object_signing_alg_values_supported",
"request_object_encryption_alg":
"request_object_encryption_alg_values_supported",
"request_object_encryption_enc":
"request_object_encryption_enc_values_supported",
"userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
"userinfo_encrypted_response_alg":
"userinfo_encryption_alg_values_supported",
"userinfo_encrypted_response_enc":
"userinfo_encryption_enc_values_supported",
"id_token_signed_response_alg": "id_token_signing_alg_values_supported",
"id_token_encrypted_response_alg":
"id_token_encryption_alg_values_supported",
"id_token_encrypted_response_enc":
"id_token_encryption_enc_values_supported",
"default_acr_values": "acr_values_supported",
"subject_type": "subject_types_supported",
"token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
"token_endpoint_auth_signing_alg":
"token_endpoint_auth_signing_alg_values_supported",
"response_types": "response_types_supported",
'grant_types': 'grant_types_supported',
'scope': 'scopes_supported'
}
PROVIDER2PREFERENCE = dict([(v, k) for k, v in PREFERENCE2PROVIDER.items()])
PROVIDER_DEFAULT = {
"token_endpoint_auth_method": "client_secret_basic",
"id_token_signed_response_alg": "RS256",
}
def add_redirect_uris(request_args, service=None, **kwargs):
    """Ensure ``redirect_uris`` is present in the request arguments.

    If the caller did not supply ``redirect_uris``, they are taken from the
    service context: preferably from the registered callbacks, otherwise
    from the context's own ``redirect_uris``.

    :param request_args: Incoming request arguments
    :param service: A link to the service
    :param kwargs: Possible extra keyword arguments
    :return: Tuple of (possibly augmented request arguments, empty dict)
    """
    context = service.service_context
    if "redirect_uris" not in request_args:
        # 'callback' maps a callback type ('code', 'implicit', 'form_post')
        # to a URI; keys starting with '__' are local additions and are
        # filtered out.
        callbacks = context.get('callback')
        if callbacks:
            request_args['redirect_uris'] = [
                uri for kind, uri in callbacks.items()
                if not kind.startswith('__')
            ]
        else:
            request_args['redirect_uris'] = context.get('redirect_uris')
    return request_args, {}
class ProviderInfoDiscovery(provider_info_discovery.ProviderInfoDiscovery):
    """OIDC variant of OAuth2 provider-info discovery.

    Extends the OAuth2 service with OIDC message classes and with
    matching of client preferences against the discovered provider
    capabilities.
    """
    msg_type = oidc.Message
    response_cls = oidc.ProviderConfigurationResponse
    error_msg = ResponseMessage

    def __init__(self, service_context, client_authn_factory=None, conf=None):
        provider_info_discovery.ProviderInfoDiscovery.__init__(
            self, service_context, client_authn_factory=client_authn_factory,
            conf=conf)

    def update_service_context(self, resp, **kwargs):
        """Store the provider info, match preferences and optionally pre-load keys.

        :param resp: The provider configuration response
        """
        self._update_service_context(resp)
        self.match_preferences(resp, self.service_context.get('issuer'))
        # Optionally fetch and cache the issuer's JWKS now instead of at
        # first use.
        if 'pre_load_keys' in self.conf and self.conf['pre_load_keys']:
            _jwks = self.service_context.keyjar.export_jwks_as_json(
                issuer=resp['issuer'])
            logger.info(
                'Preloaded keys for {}: {}'.format(resp['issuer'], _jwks))

    def match_preferences(self, pcr=None, issuer=None):
        """
        Match the clients preferences against what the provider can do.
        This is to prepare for later client registration and or what
        functionality the client actually will use.
        In the client configuration the client preferences are expressed.
        These are then compared with the Provider Configuration information.
        If the Provider has left some claims out, defaults specified in the
        standard will be used.

        The result is stored in the service context's 'behaviour'.

        :param pcr: Provider configuration response if available
        :param issuer: The issuer identifier
        """
        if not pcr:
            pcr = self.service_context.get('provider_info')
        regreq = oidc.RegistrationRequest
        _behaviour = self.service_context.get('behaviour')
        for _pref, _prov in PREFERENCE2PROVIDER.items():
            try:
                vals = self.service_context.client_preferences[_pref]
            except KeyError:
                continue
            try:
                _pvals = pcr[_prov]
            except KeyError:
                try:
                    # If the provider has not specified anything, use what
                    # the standard says is mandatory, if anything.
                    _pvals = PROVIDER_DEFAULT[_pref]
                except KeyError:
                    # No default either: accept the client's own values.
                    logger.info(
                        'No info from provider on {} and no default'.format(
                            _pref))
                    _pvals = vals
            if isinstance(vals, str):
                if vals in _pvals:
                    _behaviour[_pref] = vals
            else:
                try:
                    vtyp = regreq.c_param[_pref]
                except KeyError:
                    # Allow non standard claims
                    if isinstance(vals, list):
                        _behaviour[_pref] = [v for v in vals if v in _pvals]
                    elif vals in _pvals:
                        _behaviour[_pref] = vals
                else:
                    # List-valued parameter: keep every supported value;
                    # otherwise keep only the first supported value.
                    if isinstance(vtyp[0], list):
                        _behaviour[_pref] = []
                        for val in vals:
                            if val in _pvals:
                                _behaviour[_pref].append(
                                    val)
                    else:
                        for val in vals:
                            if val in _pvals:
                                _behaviour[_pref] = val
                                break
            if _pref not in _behaviour:
                raise ConfigurationError("OP couldn't match preference:%s" % _pref, pcr)
        # Carry over preferences that have no provider-metadata
        # counterpart unchanged (collapsing single-item lists for
        # non-list-typed parameters).
        for key, val in self.service_context.client_preferences.items():
            if key in _behaviour:
                continue
            try:
                vtyp = regreq.c_param[key]
                if isinstance(vtyp[0], list):
                    pass
                elif isinstance(val, list) and not isinstance(val, str):
                    val = val[0]
            except KeyError:
                pass
            if key not in PREFERENCE2PROVIDER:
                _behaviour[key] = val
        self.service_context.set('behaviour', _behaviour)
        logger.debug('service_context behaviour: {}'.format(_behaviour))
| 38.704545 | 88 | 0.618467 |
ace4db43d3c54f2199e62a78406d481f5e370375 | 10,274 | py | Python | lib/trade/position.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | 1 | 2018-06-28T09:49:08.000Z | 2018-06-28T09:49:08.000Z | lib/trade/position.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | null | null | null | lib/trade/position.py | myron0330/metatrade | b0358ad3dce6ba50e4801b6af557d7883d8a5d9a | [
"MIT"
] | null | null | null | """
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File: position file.
# Author: Myron
# **********************************************************************************#
"""
from __future__ import division
from utils.exceptions import *
from . base import SecuritiesType
def choose_position(security_type):
    """
    Choose the position class for a security type.

    Args:
        security_type(string): security type.

    Returns:
        obj: Position class matching the security type
            (currently only futures is supported).
    """
    # Guard clause: anything other than futures is rejected up front.
    if security_type != SecuritiesType.futures:
        raise ExceptionsFormat.INVALID_SECURITY_TYPE.format(security_type)
    return FuturesPosition
class LongShortPosition(object):
    """
    Long short position.

    Tracks long/short amounts, average costs, margins and running P&L of
    a single symbol. Instances are slotted, so they carry no ``__dict__``.
    """
    __slots__ = [
        'symbol',
        'price',
        'long_amount',
        'long_cost',
        'short_amount',
        'short_cost',
        'long_margin',
        'short_margin',
        'value',
        'profit',
        'today_profit',
        'offset_profit'
    ]

    def __init__(self, symbol=0, price=0., long_amount=0, short_amount=0, long_margin=0,
                 short_margin=0, long_cost=0, short_cost=0, value=0, profit=0, today_profit=0,
                 offset_profit=0):
        self.symbol = symbol
        self.price = price
        self.long_amount = long_amount
        self.short_amount = short_amount
        self.long_margin = long_margin
        self.short_margin = short_margin
        self.long_cost = long_cost
        self.short_cost = short_cost
        self.value = value
        self.profit = profit
        self.today_profit = today_profit
        self.offset_profit = offset_profit

    def evaluate(self, reference_price, multiplier=1., margin_rate=1.):
        """
        Evaluate position by reference price, multiplier and margin rate.

        Args:
            reference_price(float): price
            multiplier(float): multiplier of futures
            margin_rate(float): margin rate of futures

        Returns:
            tuple(LongShortPosition, float): (self, float P&L added since
                the previous evaluation)
        """
        long_mv = reference_price * self.long_amount * multiplier
        short_mv = reference_price * self.short_amount * multiplier
        # Seed `value` with the cost basis on the first evaluation so the
        # added P&L is measured relative to the open cost.
        if not self.value:
            self.value = multiplier * (self.long_cost * self.long_amount - self.short_cost * self.short_amount)
        float_pnl_added = long_mv - short_mv - self.value
        self.price = reference_price
        self.long_margin = long_mv * margin_rate
        self.short_margin = short_mv * margin_rate
        self.profit = long_mv - short_mv - multiplier * (
            self.long_cost * self.long_amount - self.short_cost * self.short_amount)
        self.value = long_mv - short_mv
        return self, float_pnl_added

    @classmethod
    def from_request(cls, request):
        """
        Generate new position from request.

        Args:
            request(dict): request data
        """
        return cls(**request)

    @classmethod
    def from_query(cls, query_data):
        """
        Recover an existing position from query data.

        Args:
            query_data(dict): query data
        """
        position = cls(**query_data)
        return position

    def to_database_item(self):
        """
        Serialize the position to a flat dict suitable for storage.
        """
        redis_item = {
            'symbol': self.symbol,
            'price': self.price,
            'long_amount': self.long_amount,
            'short_amount': self.short_amount,
            'long_margin': self.long_margin,
            'short_margin': self.short_margin,
            'long_cost': self.long_cost,
            'short_cost': self.short_cost,
            'value': self.value,
            'profit': self.profit,
            'today_profit': self.today_profit,
            'offset_profit': self.offset_profit
        }
        return redis_item

    def to_dict(self):
        """
        Serialize the position to a plain dict.
        """
        return {
            'symbol': self.symbol,
            'price': self.price,
            'long_amount': self.long_amount,
            'long_cost': self.long_cost,
            'long_margin': self.long_margin,
            'short_amount': self.short_amount,
            'short_cost': self.short_cost,
            'short_margin': self.short_margin,
            'value': self.value,
            'profit': self.profit,
            'today_profit': self.today_profit,
            'offset_profit': self.offset_profit
        }

    def get(self, key, default=None):
        """
        Get the value of an attribute with a default to be appointed.

        BUGFIX: the class declares ``__slots__``, so instances have no
        ``__dict__``; the previous ``self.__dict__.get(key, default)``
        raised AttributeError on every call (and on subclasses without
        slots it only saw the non-slotted attributes). ``getattr`` covers
        both slotted and dict-backed attributes.

        Args:
            key(obj): the attribute name
            default(obj): the default value

        Returns:
            obj: the value
        """
        return getattr(self, key, default)

    def __repr__(self):
        return "{}(symbol: {}, price: {}, long_amount: {}, short_amount: {}, " \
               "long_margin: {}, short_margin: {}," \
               "long_cost: {}, short_cost: {}, profit: {})".format(self.__class__.__name__,
                                                                   self.symbol,
                                                                   self.price,
                                                                   self.long_amount,
                                                                   self.short_amount,
                                                                   self.long_margin,
                                                                   self.short_margin,
                                                                   self.long_cost,
                                                                   self.short_cost,
                                                                   self.profit)
class FuturesPosition(LongShortPosition):
    """
    Futures position.

    Extends the long/short position with futures-specific state: today's
    opened amounts, settlement prices and the margin rate.
    """
    def __init__(self, symbol=None, price=None, long_amount=0, short_amount=0, long_margin=0, short_margin=0,
                 long_cost=0, short_cost=0, value=0, profit=0, today_long_open=0, today_short_open=0,
                 today_profit=0, offset_profit=0, pre_settlement_price=0, settlement_price=0,
                 margin_rate=0):
        super(FuturesPosition, self).__init__(symbol, price=price, long_amount=long_amount,
                                              short_amount=short_amount,
                                              long_margin=long_margin,
                                              short_margin=short_margin,
                                              long_cost=long_cost,
                                              short_cost=short_cost,
                                              value=value,
                                              profit=profit,
                                              today_profit=today_profit,
                                              offset_profit=offset_profit)
        self.today_long_open = today_long_open
        self.today_short_open = today_short_open
        self.pre_settlement_price = pre_settlement_price
        self.settlement_price = settlement_price
        self.margin_rate = margin_rate

    def calc_close_pnl(self, trade, multiplier):
        """
        Compute and return the closing (offset) P&L only; price, amount
        and value are NOT updated here.

        Args:
            trade(PMSTrade): trade record
            multiplier(float): contract multiplier

        Returns(float): realized P&L of the closing trade
        """
        # direction == 1 closes the long side; otherwise the short side.
        amount = self.long_amount if trade.direction == 1 else self.short_amount
        if amount < trade.filled_amount:
            raise ExceptionsFormat.INVALID_FILLED_AMOUNT.format(trade.filled_amount)
        cost = self.long_cost if trade.direction == 1 else self.short_cost
        close_pnl = trade.direction * (trade.transact_price - cost) * trade.filled_amount * multiplier
        return close_pnl

    @classmethod
    def from_request(cls, request):
        """
        Generate new FuturesPosition from request

        Args:
            request(dict): request data
        """
        return cls(**request)

    @classmethod
    def from_query(cls, query_data):
        """
        Recover existed FuturesPosition from query data

        Args:
            query_data(dict): query data
        """
        position = cls(**query_data)
        return position

    @classmethod
    def from_ctp(cls, position_response):
        """
        Build a position from a CTP position response.

        Args:
            position_response(obj): position response
        """
        item = {
            'symbol': position_response.instrument_id,
            'price': position_response.settlement_price,
            'profit': position_response.position_profit,
            'pre_settlement_price': position_response.pre_settlement_price,
            'settlement_price': position_response.settlement_price,
            'margin_rate': position_response.margin_rate_by_money,
        }
        # CTP direction codes: '2' is mapped to the long side, '3' to the
        # short side; the average cost is derived from the used margin.
        if position_response.position_direction == '2':
            item['long_amount'] = position_response.position
            item['long_margin'] = position_response.use_margin
            item['long_cost'] = position_response.use_margin / position_response.position
        elif position_response.position_direction == '3':
            item['short_amount'] = position_response.position
            item['short_margin'] = position_response.use_margin
            item['short_cost'] = position_response.use_margin / position_response.position
        return cls(**item)

    def to_database_item(self):
        """
        Serialize the position to a flat dict suitable for storage.
        """
        redis_item = {
            'symbol': self.symbol,
            'price': self.price,
            'long_amount': self.long_amount,
            'short_amount': self.short_amount,
            'long_margin': self.long_margin,
            'short_margin': self.short_margin,
            'long_cost': self.long_cost,
            'short_cost': self.short_cost,
            'value': self.value,
            'profit': self.profit,
            'today_long_open': self.today_long_open,
            'today_short_open': self.today_short_open,
            'today_profit': self.today_profit,
            'offset_profit': self.offset_profit
        }
        return redis_item
| 36.049123 | 111 | 0.539128 |
ace4dc4b249c7f995a7a5e1f554eade418f171a9 | 1,869 | py | Python | corehq/apps/api/resources/meta.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/api/resources/meta.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/api/resources/meta.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf import settings
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.throttle import CacheDBThrottle
from corehq.apps.api.resources.auth import LoginAndDomainAuthentication
from corehq.apps.api.serializers import CustomXMLSerializer
from corehq.toggles import API_THROTTLE_WHITELIST
class HQThrottle(CacheDBThrottle):
    """Tastypie throttle that exempts whitelisted identifiers.

    Identifiers enabled in the API_THROTTLE_WHITELIST toggle bypass
    throttling entirely; everyone else gets the standard cache-backed
    throttle with DB access logging (with the URL truncated).
    """

    def should_be_throttled(self, identifier, **kwargs):
        # Whitelisted identifiers are never throttled.
        if API_THROTTLE_WHITELIST.enabled(identifier):
            return False
        return super(HQThrottle, self).should_be_throttled(identifier, **kwargs)

    def accessed(self, identifier, **kwargs):
        """
        Handles recording the user's access.
        Does everything the ``CacheThrottle`` class does, plus logs the
        access within the database using the ``ApiAccess`` model.
        """
        # Do the import here, instead of top-level, so that the model is
        # only required when using this throttling mechanism.
        from tastypie.models import ApiAccess
        # NOTE: super(CacheDBThrottle, self) intentionally skips
        # CacheDBThrottle.accessed and calls the grandparent
        # (CacheThrottle) instead, so the ApiAccess row below replaces -
        # rather than duplicates - the stock DB logging.
        super(CacheDBThrottle, self).accessed(identifier, **kwargs)
        # Write out the access to the DB for logging purposes.
        url = kwargs.get('url', '')
        if len(url) > 255:
            # Truncate long URLs, presumably to fit a 255-char DB column;
            # 251 chars + '...' keeps the total at 254.
            url = url[:251] + '...'
        ApiAccess.objects.create(
            identifier=identifier,
            url=url,
            request_method=kwargs.get('request_method', '')
        )
class CustomResourceMeta(object):
    # Shared tastypie ``Meta`` defaults for API resources: read-only
    # authorization, login+domain authentication, custom XML serialization
    # (JSON by default), and DB-logged throttling.
    authorization = ReadOnlyAuthorization()
    authentication = LoginAndDomainAuthentication()
    serializer = CustomXMLSerializer()
    default_format = 'application/json'
    throttle = HQThrottle(
        # Limits are overridable via Django settings; defaults: 25 requests
        # per 15-second window.
        throttle_at=getattr(settings, 'CCHQ_API_THROTTLE_REQUESTS', 25),
        timeframe=getattr(settings, 'CCHQ_API_THROTTLE_TIMEFRAME', 15)
    )
ace4dc5b8ab6af0475b5cf2ce188b99c398d69f2 | 1,198 | py | Python | clients/kratos/python/test/test_ui_node_image_attributes.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 77 | 2020-02-14T17:27:36.000Z | 2022-03-25T08:44:52.000Z | clients/kratos/python/test/test_ui_node_image_attributes.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 125 | 2020-02-07T21:45:52.000Z | 2022-03-31T12:54:24.000Z | clients/kratos/python/test/test_ui_node_image_attributes.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 44 | 2020-01-31T22:05:47.000Z | 2022-03-09T14:41:22.000Z | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.8.2-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.ui_node_image_attributes import UiNodeImageAttributes
class TestUiNodeImageAttributes(unittest.TestCase):
    """Unit-test stubs for the UiNodeImageAttributes model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testUiNodeImageAttributes(self):
        """Test UiNodeImageAttributes"""
        # FIXME: construct object with mandatory attributes with example values
        # model = UiNodeImageAttributes() # noqa: E501
if __name__ == '__main__':
unittest.main()
| 32.378378 | 446 | 0.746244 |
ace4dc77c3254854fcdea4250fc95f8c28b78f85 | 989 | py | Python | StarNavi/users/forms.py | vlsh1n/StarNaviTestApp | 57662644ede3d3bb395354035ce7e2e5582d5cb5 | [
"MIT"
] | null | null | null | StarNavi/users/forms.py | vlsh1n/StarNaviTestApp | 57662644ede3d3bb395354035ce7e2e5582d5cb5 | [
"MIT"
] | null | null | null | StarNavi/users/forms.py | vlsh1n/StarNaviTestApp | 57662644ede3d3bb395354035ce7e2e5582d5cb5 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
class UserRegisterForm(UserCreationForm):
    # Registration form: overrides the default widgets so every input
    # carries the Bootstrap "form-control" CSS class.
    username = forms.CharField(label='Username', widget=forms.TextInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(label='Email', widget=forms.EmailInput(attrs={'class': 'form-control'}))
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    password2 = forms.CharField(label='Confirm password', widget=forms.PasswordInput(attrs={'class': 'form-control'}))

    class Meta:
        # Bind to the stock Django User model; field order controls
        # rendering order.
        model = User
        fields = ('username', 'email', 'password1', 'password2')
class UserLoginForm(AuthenticationForm):
    # Login form: same fields as AuthenticationForm, restyled with the
    # Bootstrap "form-control" CSS class.
    username = forms.CharField(label='Username', widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class': 'form-control'}))
| 47.095238 | 118 | 0.733064 |
ace4dc8627b742fdffe061219b81f50724fba217 | 8,750 | py | Python | src/idom/html.py | acivitillo/idom | 6af125ca340790fd2ff6ccce705e25802f0ee564 | [
"MIT"
] | null | null | null | src/idom/html.py | acivitillo/idom | 6af125ca340790fd2ff6ccce705e25802f0ee564 | [
"MIT"
] | null | null | null | src/idom/html.py | acivitillo/idom | 6af125ca340790fd2ff6ccce705e25802f0ee564 | [
"MIT"
] | null | null | null | """
**Fragment**
- :func:`_`
**Dcument metadata**
- :func:`base`
- :func:`head`
- :func:`link`
- :func:`meta`
- :func:`style`
- :func:`title`
**Content sectioning**
- :func:`body`
- :func:`address`
- :func:`article`
- :func:`aside`
- :func:`footer`
- :func:`header`
- :func:`h1`
- :func:`h2`
- :func:`h3`
- :func:`h4`
- :func:`h5`
- :func:`h6`
- :func:`main`
- :func:`nav`
- :func:`section`
**Text content**
- :func:`blockquote`
- :func:`dd`
- :func:`div`
- :func:`dl`
- :func:`dt`
- :func:`figcaption`
- :func:`figure`
- :func:`hr`
- :func:`li`
- :func:`ol`
- :func:`p`
- :func:`pre`
- :func:`ul`
**Inline text semantics**
- :func:`a`
- :func:`abbr`
- :func:`b`
- :func:`bdi`
- :func:`bdo`
- :func:`br`
- :func:`cite`
- :func:`code`
- :func:`data`
- :func:`em`
- :func:`i`
- :func:`kbd`
- :func:`mark`
- :func:`q`
- :func:`rp`
- :func:`rt`
- :func:`ruby`
- :func:`s`
- :func:`samp`
- :func:`small`
- :func:`span`
- :func:`strong`
- :func:`sub`
- :func:`sup`
- :func:`time`
- :func:`u`
- :func:`var`
- :func:`wbr`
**Image and video**
- :func:`area`
- :func:`audio`
- :func:`img`
- :func:`map`
- :func:`track`
- :func:`video`
**Embedded content**
- :func:`embed`
- :func:`iframe`
- :func:`object`
- :func:`param`
- :func:`picture`
- :func:`portal`
- :func:`source`
**SVG and MathML**
- :func:`svg`
- :func:`math`
**Scripting**
- :func:`canvas`
- :func:`noscript`
- :func:`script`
**Demarcating edits**
- :func:`del_`
- :func:`ins`
**Table content**
- :func:`caption`
- :func:`col`
- :func:`colgroup`
- :func:`table`
- :func:`tbody`
- :func:`td`
- :func:`tfoot`
- :func:`th`
- :func:`thead`
- :func:`tr`
**Forms**
- :func:`button`
- :func:`fieldset`
- :func:`form`
- :func:`input`
- :func:`label`
- :func:`legend`
- :func:`meter`
- :func:`option`
- :func:`output`
- :func:`progress`
- :func:`select`
- :func:`textarea`
**Interactive elements**
- :func:`details`
- :func:`dialog`
- :func:`menu`
- :func:`menuitem`
- :func:`summary`
**Web components**
- :func:`slot`
- :func:`template`
.. autofunction:: _
"""
from __future__ import annotations
from typing import Any, Mapping
from .core.proto import VdomDict
from .core.vdom import coalesce_attributes_and_children, make_vdom_constructor
def _(*children: Any) -> VdomDict:
    """An HTML fragment; the element itself never appears in the DOM."""
    attrs, kids = coalesce_attributes_and_children(children)
    if attrs:
        raise TypeError("Fragments cannot have attributes")
    return {"tagName": "", "children": kids}
# Dcument metadata
base = make_vdom_constructor("base")
head = make_vdom_constructor("head")
link = make_vdom_constructor("link")
meta = make_vdom_constructor("meta")
style = make_vdom_constructor("style")
title = make_vdom_constructor("title")
# Content sectioning
body = make_vdom_constructor("body")
address = make_vdom_constructor("address")
article = make_vdom_constructor("article")
aside = make_vdom_constructor("aside")
footer = make_vdom_constructor("footer")
header = make_vdom_constructor("header")
h1 = make_vdom_constructor("h1")
h2 = make_vdom_constructor("h2")
h3 = make_vdom_constructor("h3")
h4 = make_vdom_constructor("h4")
h5 = make_vdom_constructor("h5")
h6 = make_vdom_constructor("h6")
main = make_vdom_constructor("main")
nav = make_vdom_constructor("nav")
section = make_vdom_constructor("section")
# Text content
blockquote = make_vdom_constructor("blockquote")
dd = make_vdom_constructor("dd")
div = make_vdom_constructor("div")
dl = make_vdom_constructor("dl")
dt = make_vdom_constructor("dt")
figcaption = make_vdom_constructor("figcaption")
figure = make_vdom_constructor("figure")
hr = make_vdom_constructor("hr", allow_children=False)
li = make_vdom_constructor("li")
ol = make_vdom_constructor("ol")
p = make_vdom_constructor("p")
pre = make_vdom_constructor("pre")
ul = make_vdom_constructor("ul")
# Inline text semantics
a = make_vdom_constructor("a")
abbr = make_vdom_constructor("abbr")
b = make_vdom_constructor("b")
bdi = make_vdom_constructor("bdi")
bdo = make_vdom_constructor("bdo")
br = make_vdom_constructor("br", allow_children=False)
cite = make_vdom_constructor("cite")
code = make_vdom_constructor("code")
data = make_vdom_constructor("data")
em = make_vdom_constructor("em")
i = make_vdom_constructor("i")
kbd = make_vdom_constructor("kbd")
mark = make_vdom_constructor("mark")
q = make_vdom_constructor("q")
rp = make_vdom_constructor("rp")
rt = make_vdom_constructor("rt")
ruby = make_vdom_constructor("ruby")
s = make_vdom_constructor("s")
samp = make_vdom_constructor("samp")
small = make_vdom_constructor("small")
span = make_vdom_constructor("span")
strong = make_vdom_constructor("strong")
sub = make_vdom_constructor("sub")
sup = make_vdom_constructor("sup")
time = make_vdom_constructor("time")
u = make_vdom_constructor("u")
var = make_vdom_constructor("var")
wbr = make_vdom_constructor("wbr")
# Image and video
area = make_vdom_constructor("area", allow_children=False)
audio = make_vdom_constructor("audio")
img = make_vdom_constructor("img", allow_children=False)
map = make_vdom_constructor("map")
track = make_vdom_constructor("track")
video = make_vdom_constructor("video")
# Embedded content
embed = make_vdom_constructor("embed", allow_children=False)
iframe = make_vdom_constructor("iframe", allow_children=False)
object = make_vdom_constructor("object")
param = make_vdom_constructor("param")
picture = make_vdom_constructor("picture")
portal = make_vdom_constructor("portal", allow_children=False)
source = make_vdom_constructor("source", allow_children=False)
# SVG and MathML
svg = make_vdom_constructor("svg")
math = make_vdom_constructor("math")
# Scripting
canvas = make_vdom_constructor("canvas")
noscript = make_vdom_constructor("noscript")
def script(
    *attributes_and_children: Mapping[str, Any] | str,
    key: str | int | None = None,
) -> VdomDict:
    """Create a `script <https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script>`__ element.

    Unlike an ordinary script tag this element may run again when its key
    changes (exact behavior is browser-dependent). If ``key`` is omitted it
    is inferred from the script's text content or, lacking that, from its
    ``src`` attribute.

    When no attributes are given, the script content may evaluate to a
    function that is called whenever the script is created or its content
    changes; that function may itself return a teardown callback invoked
    when the element is removed from the tree or the content changes again.
    """
    attributes, children = coalesce_attributes_and_children(attributes_and_children)

    model: VdomDict = {"tagName": "script"}

    if children:
        # At most one child is allowed, and it must be the script source text.
        if len(children) > 1:
            raise ValueError("'script' nodes may have, at most, one child.")
        if not isinstance(children[0], str):
            raise ValueError("The child of a 'script' must be a string.")
        model["children"] = children
        key = children[0] if key is None else key

    if attributes:
        model["attributes"] = attributes
        # Fall back to 'src' as the key only when there was no content.
        if key is None and not children and "src" in attributes:
            key = attributes["src"]

    if key is not None:
        model["key"] = key

    return model
# Demarcating edits
del_ = make_vdom_constructor("del")
ins = make_vdom_constructor("ins")
# Table content
caption = make_vdom_constructor("caption")
col = make_vdom_constructor("col")
colgroup = make_vdom_constructor("colgroup")
table = make_vdom_constructor("table")
tbody = make_vdom_constructor("tbody")
td = make_vdom_constructor("td")
tfoot = make_vdom_constructor("tfoot")
th = make_vdom_constructor("th")
thead = make_vdom_constructor("thead")
tr = make_vdom_constructor("tr")
# Forms
button = make_vdom_constructor("button")
fieldset = make_vdom_constructor("fieldset")
form = make_vdom_constructor("form")
input = make_vdom_constructor("input", allow_children=False)
label = make_vdom_constructor("label")
legend = make_vdom_constructor("legend")
meter = make_vdom_constructor("meter")
option = make_vdom_constructor("option")
output = make_vdom_constructor("output")
progress = make_vdom_constructor("progress")
select = make_vdom_constructor("select")
textarea = make_vdom_constructor("textarea")
# Interactive elements
details = make_vdom_constructor("details")
dialog = make_vdom_constructor("dialog")
menu = make_vdom_constructor("menu")
menuitem = make_vdom_constructor("menuitem")
summary = make_vdom_constructor("summary")
# Web components
slot = make_vdom_constructor("slot")
template = make_vdom_constructor("template")
| 24.787535 | 110 | 0.707771 |
ace4dc92b9335452a1117cdd4736227359f652a1 | 1,307 | py | Python | tests/1_local/test_shell.py | brianfunk3/cloudmesh-cloud | 1fa03625ddbc97d631440349f6eaac8cd02a5716 | [
"Apache-2.0"
] | null | null | null | tests/1_local/test_shell.py | brianfunk3/cloudmesh-cloud | 1fa03625ddbc97d631440349f6eaac8cd02a5716 | [
"Apache-2.0"
] | null | null | null | tests/1_local/test_shell.py | brianfunk3/cloudmesh-cloud | 1fa03625ddbc97d631440349f6eaac8cd02a5716 | [
"Apache-2.0"
] | null | null | null | ###############################################################
# pytest -v --capture=no tests/1_local/test_shell.py
# pytest -v tests/1_local/test_shell.py
# pytest -v --capture=no tests/1_local/test_shell.py:Test_name.<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.Shell import Shell
from cloudmesh.common.util import HEADING
Benchmark.debug()
shell = Shell()
@pytest.mark.incremental
class TestName:
    """Smoke tests for the cloudmesh Shell helpers, timed via Benchmark.

    Methods run in declaration order (incremental marker); each prints its
    result so failures are easy to inspect.
    """

    def test_terminal_type(self):
        HEADING()
        print(shell.terminal_type())

    def test_pwd(self):
        HEADING()
        Benchmark.Start()
        output = Shell.execute('pwd')
        Benchmark.Stop()
        print(output)

    def test_ls_la_list(self):
        HEADING()
        Benchmark.Start()
        output = Shell.execute('ls', ["-l", "-a"])
        Benchmark.Stop()
        print(output)

    def test_ls_la_string(self):
        HEADING()
        Benchmark.Start()
        output = Shell.execute('ls', "-l -a")
        Benchmark.Stop()
        print(output)

    def test_ls(self):
        HEADING()
        Benchmark.Start()
        output = Shell.ls(".", "*")
        Benchmark.Stop()
        print(output)

    def test_benchmark(self):
        HEADING()
        Benchmark.print(csv=True)
| 24.203704 | 76 | 0.543994 |
ace4dd37a3048d47703b8d366ed1903380b68f94 | 5,043 | py | Python | src/python/pants/backend/jvm/tasks/coursier/coursier_subsystem.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/coursier/coursier_subsystem.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/coursier/coursier_subsystem.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import logging
import os
from pants.base.build_environment import get_buildroot, get_pants_cachedir
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.net.http.fetcher import Fetcher
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_concurrent_creation
logger = logging.getLogger(__name__)
class CoursierSubsystem(Subsystem):
  """Common configuration items for coursier tasks.

  :API: public
  """
  options_scope = 'coursier'

  class Error(Exception):
    """Indicates an error bootstrapping coursier."""

  @classmethod
  def register_options(cls, register):
    super(CoursierSubsystem, cls).register_options(register)
    # BUGFIX: the help text here was a copy-paste of --version's help and
    # wrongly described this option; it now documents the cache dir itself.
    register('--cache-dir', type=str, fingerprint=True,
             default=os.path.join(get_pants_cachedir(), 'coursier'),
             help='Location of the directory used by coursier to cache fetched artifacts.')
    register('--repos', type=list, fingerprint=True,
             help='Maven style repos', default=['https://repo1.maven.org/maven2'])
    register('--fetch-options', type=list, fingerprint=True,
             default=[
               # Quiet mode, so coursier does not show resolve progress,
               # but still prints results if --report is specified.
               '-q',
               # Do not use default public maven repo.
               '--no-default',
               # Concurrent workers
               '-n', '8',
             ],
             help='Additional options to pass to coursier fetch. See `coursier fetch --help`')
    register('--artifact-types', type=list, fingerprint=True,
             default=['jar', 'bundle', 'test-jar', 'maven-plugin', 'src', 'doc', 'aar'],
             help='Specify the type of artifacts to fetch. See `packaging` at https://maven.apache.org/pom.html#Maven_Coordinates, '
                  'except `src` and `doc` being coursier specific terms for sources and javadoc.')
    register('--bootstrap-jar-url', fingerprint=True,
             default='https://dl.dropboxusercontent.com/s/zwh074l9kxhqlwp/coursier-cli-1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417.jar?dl=0',
             help='Location to download a bootstrap version of Coursier.')
    # TODO(wisechengyi): currently using a custom url for fast iteration.
    # Once the coursier builds are stable, move the logic to binary_util. https://github.com/pantsbuild/pants/issues/5381
    # Ths sha in the version corresponds to the sha in the PR https://github.com/coursier/coursier/pull/774
    # The jar is built by following https://github.com/coursier/coursier/blob/master/DEVELOPMENT.md#build-with-pants
    register('--version', type=str, fingerprint=True,
             default='1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417',
             help='Version paired with --bootstrap-jar-url, in order to invalidate and fetch the new version.')
    register('--bootstrap-fetch-timeout-secs', type=int, advanced=True, default=10,
             help='Timeout the fetch if the connection is idle for longer than this value.')

  @classmethod
  def subsystem_dependencies(cls):
    # A JVM is needed to actually run the fetched coursier jar.
    return super(CoursierSubsystem, cls).subsystem_dependencies() + (DistributionLocator,)

  def bootstrap_coursier(self, workunit_factory):
    """Download the coursier bootstrap jar if it is not already cached.

    :param workunit_factory: callable producing a workunit context manager
      used to report the outcome of the download.
    :returns: the path to the (possibly freshly downloaded) coursier jar.
    :raises CoursierSubsystem.Error: if fetching the jar fails.
    """
    opts = self.get_options()
    bootstrap_url = opts.bootstrap_jar_url

    # The jar is cached under a per-version directory so bumping --version
    # invalidates the old bootstrap jar.
    coursier_bootstrap_dir = os.path.join(opts.pants_bootstrapdir,
                                          'tools', 'jvm', 'coursier',
                                          opts.version)

    bootstrap_jar_path = os.path.join(coursier_bootstrap_dir, 'coursier.jar')

    with workunit_factory(name='bootstrap-coursier', labels=[WorkUnitLabel.TOOL]) as workunit:

      if not os.path.exists(bootstrap_jar_path):
        # safe_concurrent_creation gives us a temp path and atomically moves
        # it into place, so concurrent pants runs cannot corrupt the jar.
        with safe_concurrent_creation(bootstrap_jar_path) as temp_path:
          fetcher = Fetcher(get_buildroot())
          checksummer = fetcher.ChecksumListener(digest=hashlib.sha1())
          try:
            logger.info('\nDownloading {}'.format(bootstrap_url))
            # TODO: Capture the stdout of the fetcher, instead of letting it output
            # to the console directly.
            fetcher.download(bootstrap_url,
                             listener=fetcher.ProgressListener().wrap(checksummer),
                             path_or_fd=temp_path,
                             timeout_secs=opts.bootstrap_fetch_timeout_secs)
            logger.info('sha1: {}'.format(checksummer.checksum))
          except fetcher.Error as e:
            workunit.set_outcome(WorkUnit.FAILURE)
            raise self.Error('Problem fetching the coursier bootstrap jar! {}'.format(e))
          else:
            workunit.set_outcome(WorkUnit.SUCCESS)

      return bootstrap_jar_path
| 48.028571 | 144 | 0.675987 |
ace4dd3d81a34e4ee8702f47a516f73d7d1bd025 | 141,681 | py | Python | vm-data/vm4-client/scapy-2.3.2/scapy/contrib/openflow3.py | cjlovering/Aminon | e32f9dfa9649ed9520ecabcb4379a0f409983b8c | [
"MIT"
] | null | null | null | vm-data/vm4-client/scapy-2.3.2/scapy/contrib/openflow3.py | cjlovering/Aminon | e32f9dfa9649ed9520ecabcb4379a0f409983b8c | [
"MIT"
] | null | null | null | vm-data/vm4-client/scapy-2.3.2/scapy/contrib/openflow3.py | cjlovering/Aminon | e32f9dfa9649ed9520ecabcb4379a0f409983b8c | [
"MIT"
] | null | null | null | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
## Copyright (C) 2014 Maxence Tury <maxence.tury@ssi.gouv.fr>
## OpenFlow is an open standard used in SDN deployments.
## Based on OpenFlow v1.3.4
## Specifications can be retrieved from https://www.opennetworking.org/
# scapy.contrib.description = openflow v1.3
# scapy.contrib.status = loads
import struct
from scapy.all import *
### If prereq_autocomplete is True then match prerequisites will be
### automatically handled. See OFPMatch class.
prereq_autocomplete = False
#####################################################
################# Predefined values #################
#####################################################
ofp_port_no = { 0xfffffff8: "IN_PORT",
0xfffffff9: "TABLE",
0xfffffffa: "NORMAL",
0xfffffffb: "FLOOD",
0xfffffffc: "ALL",
0xfffffffd: "CONTROLLER",
0xfffffffe: "LOCAL",
0xffffffff: "ANY" }
ofp_group = { 0xffffff00: "MAX",
0xfffffffc: "ALL",
0xffffffff: "ANY" }
ofp_table = { 0xfe: "MAX",
0xff: "ALL" }
ofp_queue = { 0xffffffff: "ALL" }
ofp_meter = { 0xffff0000: "MAX",
0xfffffffd: "SLOWPATH",
0xfffffffe: "CONTROLLER",
0xffffffff: "ALL" }
ofp_buffer = { 0xffffffff: "NO_BUFFER" }
ofp_max_len = { 0xffff: "NO_BUFFER" }
#####################################################
################# Common structures #################
#####################################################
### The following structures will be used in different types
### of OpenFlow messages: ports, matches/OXMs, actions,
### instructions, buckets, queues, meter bands.
################## Hello elements ###################
class _ofp_hello_elem_header(Packet):
    """Base class for hello elements: fills in the ``len`` field at build time."""
    name = "Dummy OpenFlow Hello Elem Header"

    def post_build(self, p, pay):
        # When len was left unset, patch bytes 2-3 with the total element
        # length (header + payload), big-endian.
        if self.len is None:
            total_len = len(p) + len(pay)
            p = p[:2] + struct.pack("!H", total_len) + p[4:]
        return p + pay
ofp_hello_elem_types = { 1: "OFPHET_VERSIONBITMAP" }
class OFPHETVersionBitmap(_ofp_hello_elem_header):
    # OFPHET_VERSIONBITMAP hello element: a 32-bit bitmap of supported
    # OpenFlow wire versions (bit positions per the flag names below).
    name = "OFPHET_VERSIONBITMAP"
    fields_desc = [ ShortEnumField("type", 1, ofp_hello_elem_types),
                    ShortField("len", 8),
                    FlagsField("bitmap", 0, 32, [ "Type 0",
                                                  "OFv1.0",
                                                  "OFv1.1",
                                                  "OFv1.2",
                                                  "OFv1.3",
                                                  "OFv1.4" ]) ]
ofp_hello_elem_cls = { 1: OFPHETVersionBitmap }
class HelloElemPacketListField(PacketListField):
    """PacketListField that dispatches hello elements on their 16-bit type."""

    def m2i(self, pkt, s):
        # Bytes 0-1 hold the element type; unknown types fall back to Raw.
        t = struct.unpack("!H", s[:2])[0]
        return ofp_hello_elem_cls.get(t, Raw)(s)

    @staticmethod
    def _get_hello_elem_length(s):
        # Bytes 2-3 hold the element length (header included).
        return struct.unpack("!H", s[2:4])[0]

    def getfield(self, pkt, s):
        lst = []
        remain = s
        # Require at least the 4-byte element header before reading a length
        # (the original unpacked blindly and crashed on a truncated tail).
        while remain and len(remain) >= 4:
            l = HelloElemPacketListField._get_hello_elem_length(remain)
            if l < 4 or len(remain) < l:
                # BUGFIX: a malformed length (notably 0) previously made this
                # loop spin forever without consuming bytes; bail out instead,
                # mirroring the guard in OXMPacketListField.getfield.
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
####################### Ports #######################
ofp_port_config = [ "PORT_DOWN",
"NO_STP", # undefined in v1.3
"NO_RECV",
"NO_RECV_STP", # undefined in v1.3
"NO_FLOOD", # undefined in v1.3
"NO_FWD",
"NO_PACKET_IN" ]
ofp_port_state = [ "LINK_DOWN",
"BLOCKED",
"LIVE" ]
ofp_port_features = [ "10MB_HD",
"10MB_FD",
"100MB_HD",
"100MB_FD",
"1GB_HD",
"1GB_FD",
"10GB_FD",
"40GB_FD",
"100GB_FD",
"1TB_FD",
"OTHER",
"COPPER",
"FIBER",
"AUTONEG",
"PAUSE",
"PAUSE_ASYM" ]
class OFPPort(Packet):
    # Description of one switch port: number, hardware address, name,
    # config/state flags and the four feature bitmaps plus speeds.
    name = "OFP_PHY_PORT"
    fields_desc = [ IntEnumField("port_no", 0, ofp_port_no),
                    XIntField("pad1", 0),
                    MACField("hw_addr", "0"),
                    XShortField("pad2", 0),
                    StrFixedLenField("port_name", "", 16),
                    FlagsField("config", 0, 32, ofp_port_config),
                    FlagsField("state", 0, 32, ofp_port_state),
                    FlagsField("curr", 0, 32, ofp_port_features),
                    FlagsField("advertised", 0, 32, ofp_port_features),
                    FlagsField("supported", 0, 32, ofp_port_features),
                    FlagsField("peer", 0, 32, ofp_port_features),
                    IntField("curr_speed", 0),
                    IntField("max_speed", 0) ]

    def extract_padding(self, s):
        # Return all trailing bytes as "payload outside this layer".
        return "", s
    # extract_padding is overridden in order for s not to be considered
    # as belonging to the same layer (s usually contains other OFPPorts)
################### Matches & OXMs ##################
ofp_oxm_classes = { 0: "OFPXMC_NXM_0",
1: "OFPXMC_NXM_1",
0x8000: "OFPXMC_OPENFLOW_BASIC",
0xffff: "OFPXMC_EXPERIMENTER" }
ofp_oxm_names = { 0: "OFB_IN_PORT",
1: "OFB_IN_PHY_PORT",
2: "OFB_METADATA",
3: "OFB_ETH_DST",
4: "OFB_ETH_SRC",
5: "OFB_ETH_TYPE",
6: "OFB_VLAN_VID",
7: "OFB_VLAN_PCP",
8: "OFB_IP_DSCP",
9: "OFB_IP_ECN",
10: "OFB_IP_PROTO",
11: "OFB_IPV4_SRC",
12: "OFB_IPV4_DST",
13: "OFB_TCP_SRC",
14: "OFB_TCP_DST",
15: "OFB_UDP_SRC",
16: "OFB_UDP_DST",
17: "OFB_SCTP_SRC",
18: "OFB_SCTP_DST",
19: "OFB_ICMPV4_TYPE",
20: "OFB_ICMPV4_CODE",
21: "OFB_ARP_OP",
22: "OFB_ARP_SPA",
23: "OFB_ARP_TPA",
24: "OFB_ARP_SHA",
25: "OFB_ARP_THA",
26: "OFB_IPV6_SRC",
27: "OFB_IPV6_DST",
28: "OFB_IPV6_FLABEL",
29: "OFB_ICMPV6_TYPE",
30: "OFB_ICMPV6_CODE",
31: "OFB_IPV6_ND_TARGET",
32: "OFB_IPV6_ND_SLL",
33: "OFB_IPV6_ND_TLL",
34: "OFB_MPLS_LABEL",
35: "OFB_MPLS_TC",
36: "OFB_MPLS_BOS",
37: "OFB_PBB_ISID",
38: "OFB_TUNNEL_ID",
39: "OFB_IPV6_EXTHDR" }
ofp_oxm_constr = { 0: ["OFBInPort", "in_port", 4],
1: ["OFBInPhyPort", "in_phy_port", 4],
2: ["OFBMetadata", "metadata", 8],
3: ["OFBEthDst", "eth_dst", 6],
4: ["OFBEthSrc", "eth_src", 6],
5: ["OFBEthType", "eth_type", 2],
6: ["OFBVLANVID", "vlan_vid", 2],
7: ["OFBVLANPCP", "vlan_pcp", 1],
8: ["OFBIPDSCP", "ip_dscp", 1],
9: ["OFBIPECN", "ip_ecn", 1],
10: ["OFBIPProto", "ip_proto", 1],
11: ["OFBIPv4Src", "ipv4_src", 4],
12: ["OFBIPv4Dst", "ipv4_dst", 4],
13: ["OFBTCPSrc", "tcp_src", 2],
14: ["OFBTCPDst", "tcp_dst", 2],
15: ["OFBUDPSrc", "udp_src", 2],
16: ["OFBUDPDst", "udp_dst", 2],
17: ["OFBSCTPSrc", "sctp_src", 2],
18: ["OFBSCTPDst", "sctp_dst", 2],
19: ["OFBICMPv4Type", "icmpv4_type", 1],
20: ["OFBICMPv4Code", "icmpv4_code", 1],
21: ["OFBARPOP", "arp_op", 2],
22: ["OFBARPSPA", "arp_spa", 4],
23: ["OFBARPTPA", "arp_tpa", 4],
24: ["OFBARPSHA", "arp_sha", 6],
25: ["OFBARPTHA", "arp_tha", 6],
26: ["OFBIPv6Src", "ipv6_src", 16],
27: ["OFBIPv6Dst", "ipv6_dst", 16],
28: ["OFBIPv6FLabel", "ipv6_flabel", 4],
29: ["OFBICMPv6Type", "icmpv6_type", 1],
30: ["OFBICMPv6Code", "icmpv6_code", 1],
31: ["OFBIPv6NDTarget", "ipv6_nd_target", 16],
32: ["OFBIPv6NDSLL", "ipv6_sll", 6],
33: ["OFBIPv6NDTLL", "ipv6_tll", 6],
34: ["OFBMPLSLabel", "mpls_label", 4],
35: ["OFBMPLSTC", "mpls_tc", 1],
36: ["OFBMPLSBoS", "mpls_bos", 1],
37: ["OFBPBBISID", "pbb_isid", 3],
38: ["OFBTunnelID", "tunnel_id", 8],
39: ["OFBIPv6ExtHdr", "ipv6_ext_hdr_flags", 2] }
# the ipv6flags array is useful only to the OFBIPv6ExtHdr class
ipv6flags = [ "NONEXT",
"ESP",
"AUTH",
"DEST",
"FRAG",
"ROUTER",
"HOP",
"UNREP",
"UNSEQ" ]
### here we fill ofp_oxm_fields with the fields that will be used
### to generate the various OXM classes
### e.g. the call to add_ofp_oxm_fields(0, ["OFBInPort", "in_port", 4])
### will add {0: [ShortEnumField("class",..), BitEnumField("field",..),..]}
# maps doubled OXM codes (2*i for plain, 2*i+1 for hasmask) to fields_desc lists
ofp_oxm_fields = {}
def add_ofp_oxm_fields(i, org):
    """Register the fields_desc list for OXM code ``i``.

    ``org`` is an entry of ofp_oxm_constr: [class name, payload field name,
    payload length in bytes]. Odd ``i`` produces the hasmask variant.
    NOTE: this module targets Python 2 — ``i/2`` is integer division here.
    """
    ofp_oxm_fields[i] = [ ShortEnumField("class", "OFPXMC_OPENFLOW_BASIC", ofp_oxm_classes),
                          BitEnumField("field", i/2, 7, ofp_oxm_names),
                          BitField("hasmask", i%2, 1) ]
    # hasmask variants carry twice the payload (value + mask)
    ofp_oxm_fields[i].append(ByteField("length", org[2]+org[2]*(i%2)))
    if i/2 == 0: # OFBInPort
        ofp_oxm_fields[i].append(IntEnumField(org[1], 0, ofp_port_no))
    elif i/2 == 3 or i/2 == 4: # OFBEthSrc & OFBEthDst
        ofp_oxm_fields[i].append(MACField(org[1], None))
    elif i/2 == 11 or i/2 == 12: # OFBIPv4Src & OFBIPv4Dst
        ofp_oxm_fields[i].append(IPField(org[1], "0"))
    elif i/2 == 39: # OFBIPv6ExtHdr
        ofp_oxm_fields[i].append(FlagsField(org[1], 0, 8*org[2], ipv6flags))
    else:
        ofp_oxm_fields[i].append(BitField(org[1], 0, 8*org[2]))
    if i%2:
        ofp_oxm_fields[i].append(BitField(org[1]+"_mask", 0, 8*org[2]))
# some HM classes are not supported par OFv1.3 but we will create them anyway
for i,cls in ofp_oxm_constr.items():
add_ofp_oxm_fields(2*i, cls)
add_ofp_oxm_fields(2*i+1, cls)
### now we create every OXM class with the same call,
### (except that static variable create_oxm_class.i is each time different)
### and we fill ofp_oxm_cls with them
ofp_oxm_cls = {}     # OXM code (mask bit included) -> Packet subclass
ofp_oxm_id_cls = {}  # same, but for the payload-less ID variants
def create_oxm_cls():
    """Create and register the next OXM Packet subclass.

    Each call builds one of the four variants (plain, ID, hasmask,
    hasmask-ID) in order, driven by the internal counter
    ``create_oxm_cls.i``, and stores the class in ofp_oxm_cls or
    ofp_oxm_id_cls. Relies on Python 2 integer division for ``index/4``
    and ``index/2``.
    """
    # static variable initialization
    if not hasattr(create_oxm_cls, "i"):
        create_oxm_cls.i = 0
    index = create_oxm_cls.i
    cls_name = ofp_oxm_constr[index/4][0]
    # we create standard OXM then OXM ID then OXM with mask then OXM-hasmask ID
    if index % 4 == 2:
        cls_name += "HM"
    if index % 2:
        cls_name += "ID"
    oxm_name = ofp_oxm_names[index/4]
    oxm_fields = ofp_oxm_fields[index/2]
    # for ID classes we just want the first 4 fields (no payload)
    if index % 2:
        oxm_fields = oxm_fields[:4]
    cls = type(cls_name, (Packet,), { "name": oxm_name, "fields_desc": oxm_fields })
    ### the first call to special function type will create the same class as in
    ### class OFBInPort(Packet):
    ###     def __init__(self):
    ###         self.name = "OFB_IN_PORT"
    ###         self.fields_desc = [ ShortEnumField("class", 0x8000, ofp_oxm_classes),
    ###                              BitEnumField("field", 0, 7, ofp_oxm_names),
    ###                              BitField("hasmask", 0, 1),
    ###                              ByteField("length", 4),
    ###                              IntEnumField("in_port", 0, ofp_port_no) ]
    if index % 2 == 0:
        ofp_oxm_cls[index/2] = cls
    else:
        ofp_oxm_id_cls[index/2] = cls
    create_oxm_cls.i += 1
    return cls
OFBInPort = create_oxm_cls()
OFBInPortID = create_oxm_cls()
OFBInPortHM = create_oxm_cls()
OFBInPortHMID = create_oxm_cls()
OFBInPhyPort = create_oxm_cls()
OFBInPhyPortID = create_oxm_cls()
OFBInPhyPortHM = create_oxm_cls()
OFBInPhyPortHMID = create_oxm_cls()
OFBMetadata = create_oxm_cls()
OFBMetadataID = create_oxm_cls()
OFBMetadataHM = create_oxm_cls()
OFBMetadataHMID = create_oxm_cls()
OFBEthDst = create_oxm_cls()
OFBEthDstID = create_oxm_cls()
OFBEthDstHM = create_oxm_cls()
OFBEthDstHMID = create_oxm_cls()
OFBEthSrc = create_oxm_cls()
OFBEthSrcID = create_oxm_cls()
OFBEthSrcHM = create_oxm_cls()
OFBEthSrcHMID = create_oxm_cls()
OFBEthType = create_oxm_cls()
OFBEthTypeID = create_oxm_cls()
OFBEthTypeHM = create_oxm_cls()
OFBEthTypeHMID = create_oxm_cls()
OFBVLANVID = create_oxm_cls()
OFBVLANVIDID = create_oxm_cls()
OFBVLANVIDHM = create_oxm_cls()
OFBVLANVIDHMID = create_oxm_cls()
OFBVLANPCP = create_oxm_cls()
OFBVLANPCPID = create_oxm_cls()
OFBVLANPCPHM = create_oxm_cls()
OFBVLANPCPHMID = create_oxm_cls()
OFBIPDSCP = create_oxm_cls()
OFBIPDSCPID = create_oxm_cls()
OFBIPDSCPHM = create_oxm_cls()
OFBIPDSCPHMID = create_oxm_cls()
OFBIPECN = create_oxm_cls()
OFBIPECNID = create_oxm_cls()
OFBIPECNHM = create_oxm_cls()
OFBIPECNHMID = create_oxm_cls()
OFBIPProto = create_oxm_cls()
OFBIPProtoID = create_oxm_cls()
OFBIPProtoHM = create_oxm_cls()
OFBIPProtoHMID = create_oxm_cls()
OFBIPv4Src = create_oxm_cls()
OFBIPv4SrcID = create_oxm_cls()
OFBIPv4SrcHM = create_oxm_cls()
OFBIPv4SrcHMID = create_oxm_cls()
OFBIPv4Dst = create_oxm_cls()
OFBIPv4DstID = create_oxm_cls()
OFBIPv4DstHM = create_oxm_cls()
OFBIPv4DstHMID = create_oxm_cls()
OFBTCPSrc = create_oxm_cls()
OFBTCPSrcID = create_oxm_cls()
OFBTCPSrcHM = create_oxm_cls()
OFBTCPSrcHMID = create_oxm_cls()
OFBTCPDst = create_oxm_cls()
OFBTCPDstID = create_oxm_cls()
OFBTCPDstHM = create_oxm_cls()
OFBTCPDstHMID = create_oxm_cls()
OFBUDPSrc = create_oxm_cls()
OFBUDPSrcID = create_oxm_cls()
OFBUDPSrcHM = create_oxm_cls()
OFBUDPSrcHMID = create_oxm_cls()
OFBUDPDst = create_oxm_cls()
OFBUDPDstID = create_oxm_cls()
OFBUDPDstHM = create_oxm_cls()
OFBUDPDstHMID = create_oxm_cls()
OFBSCTPSrc = create_oxm_cls()
OFBSCTPSrcID = create_oxm_cls()
OFBSCTPSrcHM = create_oxm_cls()
OFBSCTPSrcHMID = create_oxm_cls()
OFBSCTPDst = create_oxm_cls()
OFBSCTPDstID = create_oxm_cls()
OFBSCTPDstHM = create_oxm_cls()
OFBSCTPDstHMID = create_oxm_cls()
OFBICMPv4Type = create_oxm_cls()
OFBICMPv4TypeID = create_oxm_cls()
OFBICMPv4TypeHM = create_oxm_cls()
OFBICMPv4TypeHMID = create_oxm_cls()
OFBICMPv4Code = create_oxm_cls()
OFBICMPv4CodeID = create_oxm_cls()
OFBICMPv4CodeHM = create_oxm_cls()
OFBICMPv4CodeHMID = create_oxm_cls()
OFBARPOP = create_oxm_cls()
OFBARPOPID = create_oxm_cls()
OFBARPOPHM = create_oxm_cls()
OFBARPOPHMID = create_oxm_cls()
OFBARPSPA = create_oxm_cls()
OFBARPSPAID = create_oxm_cls()
OFBARPSPAHM = create_oxm_cls()
OFBARPSPAHMID = create_oxm_cls()
OFBARPTPA = create_oxm_cls()
OFBARPTPAID = create_oxm_cls()
OFBARPTPAHM = create_oxm_cls()
OFBARPTPAHMID = create_oxm_cls()
OFBARPSHA = create_oxm_cls()
OFBARPSHAID = create_oxm_cls()
OFBARPSHAHM = create_oxm_cls()
OFBARPSHAHMID = create_oxm_cls()
OFBARPTHA = create_oxm_cls()
OFBARPTHAID = create_oxm_cls()
OFBARPTHAHM = create_oxm_cls()
OFBARPTHAHMID = create_oxm_cls()
OFBIPv6Src = create_oxm_cls()
OFBIPv6SrcID = create_oxm_cls()
OFBIPv6SrcHM = create_oxm_cls()
OFBIPv6SrcHMID = create_oxm_cls()
OFBIPv6Dst = create_oxm_cls()
OFBIPv6DstID = create_oxm_cls()
OFBIPv6DstHM = create_oxm_cls()
OFBIPv6DstHMID = create_oxm_cls()
OFBIPv6FLabel = create_oxm_cls()
OFBIPv6FLabelID = create_oxm_cls()
OFBIPv6FLabelHM = create_oxm_cls()
OFBIPv6FLabelHMID = create_oxm_cls()
OFBICMPv6Type = create_oxm_cls()
OFBICMPv6TypeID = create_oxm_cls()
OFBICMPv6TypeHM = create_oxm_cls()
OFBICMPv6TypeHMID = create_oxm_cls()
OFBICMPv6Code = create_oxm_cls()
OFBICMPv6CodeID = create_oxm_cls()
OFBICMPv6CodeHM = create_oxm_cls()
OFBICMPv6CodeHMID = create_oxm_cls()
OFBIPv6NDTarget = create_oxm_cls()
OFBIPv6NDTargetID = create_oxm_cls()
OFBIPv6NDTargetHM = create_oxm_cls()
OFBIPv6NDTargetHMID = create_oxm_cls()
OFBIPv6NDSLL = create_oxm_cls()
OFBIPv6NDSLLID = create_oxm_cls()
OFBIPv6NDSLLHM = create_oxm_cls()
OFBIPv6NDSLLHMID = create_oxm_cls()
OFBIPv6NDTLL = create_oxm_cls()
OFBIPv6NDTLLID = create_oxm_cls()
OFBIPv6NDTLLHM = create_oxm_cls()
OFBIPv6NDTLLHMID = create_oxm_cls()
OFBMPLSLabel = create_oxm_cls()
OFBMPLSLabelID = create_oxm_cls()
OFBMPLSLabelHM = create_oxm_cls()
OFBMPLSLabelHMID = create_oxm_cls()
OFBMPLSTC = create_oxm_cls()
OFBMPLSTCID = create_oxm_cls()
OFBMPLSTCHM = create_oxm_cls()
OFBMPLSTCHMID = create_oxm_cls()
OFBMPLSBoS = create_oxm_cls()
OFBMPLSBoSID = create_oxm_cls()
OFBMPLSBoSHM = create_oxm_cls()
OFBMPLSBoSHMID = create_oxm_cls()
OFBPBBISID = create_oxm_cls()
OFBPBBISIDID = create_oxm_cls()
OFBPBBISIDHM = create_oxm_cls()
OFBPBBISIDHMID = create_oxm_cls()
OFBTunnelID = create_oxm_cls()
OFBTunnelIDID = create_oxm_cls()
OFBTunnelIDHM = create_oxm_cls()
OFBTunnelIDHMID = create_oxm_cls()
OFBIPv6ExtHdr = create_oxm_cls()
OFBIPv6ExtHdrID = create_oxm_cls()
OFBIPv6ExtHdrHM = create_oxm_cls()
OFBIPv6ExtHdrHMID = create_oxm_cls()
### need_prereq holds a list of prerequisites defined in 7.2.3.8 of the specifications
### e.g. if you want to use an OFBTCPSrc instance (code 26)
### you first need to declare an OFBIPProto instance (code 20) with value 6,
### and if you want to use an OFBIPProto instance (still code 20)
### you first need to declare an OFBEthType instance (code 10) with value 0x0800
### (0x0800 means IPv4 by default, but you might want to use 0x86dd with IPv6)
### need_prereq codes are two times higher than previous oxm classes codes,
### except for 21 which is sort of a proxy for IPv6 (see below)
need_prereq = { 14: [12, 0x1000],
16: [10, 0x0800], # could be 0x86dd
18: [10, 0x0800], # could be 0x86dd
20: [10, 0x0800], # could be 0x86dd
21: [10, 0x86dd],
22: [10, 0x0800],
24: [10, 0x0800],
26: [20, 6],
28: [20, 6],
30: [20, 17],
32: [20, 17],
34: [20, 132],
36: [20, 132],
38: [20, 1],
40: [20, 1],
42: [10, 0x0806],
44: [10, 0x0806],
46: [10, 0x0806],
48: [10, 0x0806],
50: [10, 0x0806],
52: [10, 0x86dd],
54: [10, 0x86dd],
56: [10, 0x86dd],
58: [21, 58], ### small trick here, we refer to normally non-
60: [21, 58], ### existent field 21 to distinguish ipv6
62: [58, 135], # could be 136
64: [58, 135],
66: [58, 136],
68: [10, 0x8847], # could be 0x8848
70: [10, 0x8847], # could be 0x8848
72: [10, 0x8847], # could be 0x8848
74: [10, 0x88e7],
78: [10, 0x86dd] }
class OXMPacketListField(PacketListField):
    """PacketListField for OXM TLVs that can automatically prepend the
    prerequisite OXMs required by the OpenFlow 1.3 spec (section 7.2.3.8)
    when building, using the need_prereq table above.

    NOTE(review): byte-string indexing (s[2] as a 1-char str) and str()
    concatenation indicate this code targets Python 2 -- confirm before
    running under Python 3.
    """
    def __init__(self, name, default, cls, length_from=None, autocomplete=prereq_autocomplete):
        # autocomplete defaults to the module-level prereq_autocomplete flag
        # (defined earlier in this file, outside this view)
        PacketListField.__init__(self, name, default, cls, length_from=length_from)
        self.autocomplete = autocomplete
        # self.index records which prerequisite OXM codes are already present
        self.index = []
    def i2m(self, pkt, val):
        ### this part makes for a faster writing of specs-compliant matches
        ### expect some unwanted behaviour if you try incoherent associations
        ### you might want to set autocomplete=False in __init__ method
        if self.autocomplete:
            # val might be modified during the loop so we need a fixed copy
            fix_val = copy.deepcopy(val)
            for oxm in fix_val:
                # need_prereq keys are 2*field (HM bit stripped)
                f = 2*oxm.field
                fix_index = list(self.index)
                while f in need_prereq.keys():
                    # this loop enables a small recursion
                    # e.g. ipv6_nd<--icmpv6<--ip_proto<--eth_type
                    prereq = need_prereq[f]
                    f = prereq[0]
                    f2 = 20 if f == 21 else f # ipv6 trick...
                    if f2 not in fix_index:
                        self.index.insert(0, f2)
                        prrq = ofp_oxm_cls[f2]() # never HM
                        # NOTE(review): f2/2 relies on Python 2 integer
                        # division; under Python 3 this would be a float key
                        setattr(prrq, ofp_oxm_constr[f2/2][1], prereq[1])
                        val.insert(0, prrq)
                    # we could do more complicated stuff to
                    # make sure prerequisite order is correct
                    # but it works well when presented with any coherent input
                    # e.g. you should not mix OFBTCPSrc with OFBICMPv6Code
                    # and expect to get coherent results...
                    # you can still go manual by setting prereq_autocomplete=False
        return val
    def m2i(self, pkt, s):
        # oxm_field (with HM bit) is the third byte of the OXM header
        t = struct.unpack("!B", s[2])[0]
        nrm_t = t - t%2  # strip the has-mask bit
        if nrm_t not in self.index:
            self.index.append(nrm_t)
        return ofp_oxm_cls.get(t, Raw)(s)
    @staticmethod
    def _get_oxm_length(s):
        # OXM payload length is the fourth byte of the header
        return struct.unpack("!B", s[3])[0]
    def addfield(self, pkt, s, val):
        return s + "".join(map(str,self.i2m(pkt, val)))
    def getfield(self, pkt, s):
        lst = []
        lim = self.length_from(pkt)
        ret = s[lim:]
        remain = s[:lim]
        while remain and len(remain) > 4:
            l = OXMPacketListField._get_oxm_length(remain) + 4
            # this could also be done by parsing oxm_fields (fixed lengths)
            if l <= 4 or len(remain) < l:
                # no incoherent length
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        self.index = []
        ### since OXMPacketListField is called only twice (when OFPMatch and OFPSetField
        ### classes are created) and not when you want to instantiate an OFPMatch,
        ### index needs to be reinitialized, otherwise there will be some conflicts
        ### e.g. if you create OFPMatch with OFBTCPSrc and then change to OFBTCPDst,
        ### index will already be filled with ethertype and nwproto codes,
        ### thus the corresponding fields will not be added to the packet
        return remain + ret, lst
class OXMIDPacketListField(PacketListField):
    """PacketListField that dissects a sequence of fixed-size (4-byte)
    OXM ID structures, dispatching on the oxm_field byte."""
    def m2i(self, pkt, s):
        # the oxm_field code is the third byte of the OXM header
        oxm_code = struct.unpack("!B", s[2])[0]
        cls = ofp_oxm_id_cls.get(oxm_code, Raw)
        return cls(s)
    def getfield(self, pkt, s):
        results = []
        limit = self.length_from(pkt)
        leftover = s[limit:]
        data = s[:limit]
        # all OXM ID are 32-bit long (no experimenter OXM support here)
        while data and len(data) >= 4:
            chunk, data = data[:4], data[4:]
            results.append(self.m2i(pkt, chunk))
        return data + leftover, results
class OFPMatch(Packet):
    """OpenFlow 1.3 ofp_match structure: a list of OXM TLVs padded to a
    64-bit boundary (padding bytes are not counted in self.length)."""
    def post_build(self, p, pay):
        l = self.length
        if l is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
            # pad the whole structure to an 8-byte boundary
            zero_bytes = (8 - l%8) % 8
            p += "\x00" * zero_bytes
        # message with user-defined length will not be automatically padded
        return p + pay
    def extract_padding(self, s):
        # NOTE(review): assumes self.length is set (i.e. a dissected packet);
        # a hand-built OFPMatch with length=None would fail here -- confirm
        l = self.length
        zero_bytes = (8 - l%8) % 8
        return s[zero_bytes:], s[:zero_bytes]
    name = "OFP_MATCH"
    fields_desc= [ ShortEnumField("type", 1, { 0: "OFPMT_STANDARD",
                                               1: "OFPMT_OXM" }),
                   ShortField("length", None),
                   OXMPacketListField("oxm_fields", [], Packet,
                                      length_from=lambda pkt:pkt.length-4) ]
### ofp_match is no longer a fixed-length structure in v1.3
### furthermore it may include variable padding
### we introduce to that end a subclass of PacketField
class MatchField(PacketField):
    """PacketField wrapper around OFPMatch that splits off trailing
    Raw payload while keeping the match's own Padding attached."""
    def __init__(self, name):
        PacketField.__init__(self, name, OFPMatch(), OFPMatch)
    def getfield(self, pkt, s):
        i = self.m2i(pkt, s)
        ### i can be <OFPMatch> or <OFPMatch <Padding>>
        ### or <OFPMatch <Raw>> or <OFPMatch <Raw <Padding>>>
        ### and we want to return "", <OFPMatch> or "", <OFPMatch <Padding>>
        ### or str(<Raw>), <OFPMatch> or str(<Raw>), <OFPMatch <Padding>>
        if Raw in i:
            r = i[Raw]
            if Padding in r:
                # reattach the padding to the match, drop it from the Raw
                p = r[Padding]
                i.payload = p
                del(r.payload)
            return r.load, i
        else:
            return "", i
###################### Actions ######################
class _ofp_action_header(Packet):
    """Base class for OpenFlow actions: fills the 16-bit len field
    (bytes 2-3) at build time when the subclass left it as None."""
    name = "Dummy OpenFlow Action Header"
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
# OpenFlow action type codes; 1-10 (and the commented-out ENQUEUE) are
# OpenFlow 1.0 legacy values kept for reference, 11+ follow the 1.3 spec.
ofp_action_types = { 0: "OFPAT_OUTPUT",
                     1: "OFPAT_SET_VLAN_VID",
                     2: "OFPAT_SET_VLAN_PCP",
                     3: "OFPAT_STRIP_VLAN",
                     4: "OFPAT_SET_DL_SRC",
                     5: "OFPAT_SET_DL_DST",
                     6: "OFPAT_SET_NW_SRC",
                     7: "OFPAT_SET_NW_DST",
                     8: "OFPAT_SET_NW_TOS",
                     9: "OFPAT_SET_TP_SRC",
                     10: "OFPAT_SET_TP_DST",
                     #11: "OFPAT_ENQUEUE",
                     11: "OFPAT_COPY_TTL_OUT",
                     12: "OFPAT_COPY_TTL_IN",
                     13: "OFPAT_SET_MPLS_LABEL",
                     14: "OFPAT_DEC_MPLS_TC",
                     15: "OFPAT_SET_MPLS_TTL",
                     16: "OFPAT_DEC_MPLS_TTL",
                     17: "OFPAT_PUSH_VLAN",
                     18: "OFPAT_POP_VLAN",
                     19: "OFPAT_PUSH_MPLS",
                     20: "OFPAT_POP_MPLS",
                     21: "OFPAT_SET_QUEUE",
                     22: "OFPAT_GROUP",
                     23: "OFPAT_SET_NW_TTL",
                     24: "OFPAT_DEC_NW_TTL",
                     25: "OFPAT_SET_FIELD",
                     26: "OFPAT_PUSH_PBB",
                     27: "OFPAT_POP_PBB",
                     65535: "OFPAT_EXPERIMENTER" }
# OFPAT_OUTPUT: forward the packet out of a given port
# (max_len caps the amount of data sent to the controller on packet-in)
class OFPATOutput(_ofp_action_header):
    name = "OFPAT_OUTPUT"
    fields_desc = [ ShortEnumField("type", 0, ofp_action_types),
                    ShortField("len", 16),
                    IntEnumField("port", 0, ofp_port_no),
                    ShortEnumField("max_len", "NO_BUFFER", ofp_max_len),
                    XBitField("pad", 0, 48) ]
# the following actions are not supported by OFv1.3
class OFPATSetVLANVID(_ofp_action_header):
    name = "OFPAT_SET_VLAN_VID"
    fields_desc = [ ShortEnumField("type", 1, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("vlan_vid", 0),
                    XShortField("pad", 0) ]
class OFPATSetVLANPCP(_ofp_action_header):
    name = "OFPAT_SET_VLAN_PCP"
    fields_desc = [ ShortEnumField("type", 2, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("vlan_pcp", 0),
                    X3BytesField("pad", 0) ]
class OFPATStripVLAN(_ofp_action_header):
    name = "OFPAT_STRIP_VLAN"
    fields_desc = [ ShortEnumField("type", 3, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATSetDlSrc(_ofp_action_header):
    name = "OFPAT_SET_DL_SRC"
    fields_desc = [ ShortEnumField("type", 4, ofp_action_types),
                    ShortField("len", 16),
                    MACField("dl_addr", "0"),
                    XBitField("pad", 0, 48) ]
class OFPATSetDlDst(_ofp_action_header):
    name = "OFPAT_SET_DL_DST"
    fields_desc = [ ShortEnumField("type", 5, ofp_action_types),
                    ShortField("len", 16),
                    MACField("dl_addr", "0"),
                    XBitField("pad", 0, 48) ]
class OFPATSetNwSrc(_ofp_action_header):
    name = "OFPAT_SET_NW_SRC"
    fields_desc = [ ShortEnumField("type", 6, ofp_action_types),
                    ShortField("len", 8),
                    IPField("nw_addr", "0") ]
class OFPATSetNwDst(_ofp_action_header):
    name = "OFPAT_SET_NW_DST"
    fields_desc = [ ShortEnumField("type", 7, ofp_action_types),
                    ShortField("len", 8),
                    IPField("nw_addr", "0") ]
class OFPATSetNwToS(_ofp_action_header):
    # Fixed display name: action type 8 is OFPAT_SET_NW_TOS in
    # ofp_action_types; "OFPAT_SET_TP_TOS" was a copy-paste slip.
    name = "OFPAT_SET_NW_TOS"
    fields_desc = [ ShortEnumField("type", 8, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("nw_tos", 0),
                    X3BytesField("pad", 0) ]
class OFPATSetTpSrc(_ofp_action_header):
    name = "OFPAT_SET_TP_SRC"
    fields_desc = [ ShortEnumField("type", 9, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("tp_port", 0),
                    XShortField("pad", 0) ]
class OFPATSetTpDst(_ofp_action_header):
    name = "OFPAT_SET_TP_DST"
    fields_desc = [ ShortEnumField("type", 10, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("tp_port", 0),
                    XShortField("pad", 0) ]
#class OFPATEnqueue(_ofp_action_header):
#       name = "OFPAT_ENQUEUE"
#       fields_desc = [ ShortEnumField("type", 11, ofp_action_types),
#                       ShortField("len", 16),
#                       ShortField("port", 0),
#                       XBitField("pad", 0, 48),
#                       IntEnumField("queue_id", 0, ofp_queue) ]
class OFPATSetMPLSLabel(_ofp_action_header):
    name = "OFPAT_SET_MPLS_LABEL"
    fields_desc = [ ShortEnumField("type", 13, ofp_action_types),
                    ShortField("len", 8),
                    IntField("mpls_label", 0) ]
class OFPATSetMPLSTC(_ofp_action_header):
    name = "OFPAT_SET_MPLS_TC"
    fields_desc = [ ShortEnumField("type", 14, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("mpls_tc", 0),
                    X3BytesField("pad", 0) ]
# end of unsupported actions
# OpenFlow 1.3 actions proper (types 11-24)
class OFPATCopyTTLOut(_ofp_action_header):
    name = "OFPAT_COPY_TTL_OUT"
    fields_desc = [ ShortEnumField("type", 11, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATCopyTTLIn(_ofp_action_header):
    name = "OFPAT_COPY_TTL_IN"
    fields_desc = [ ShortEnumField("type", 12, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATSetMPLSTTL(_ofp_action_header):
    name = "OFPAT_SET_MPLS_TTL"
    fields_desc = [ ShortEnumField("type", 15, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("mpls_ttl", 0),
                    X3BytesField("pad", 0) ]
class OFPATDecMPLSTTL(_ofp_action_header):
    name = "OFPAT_DEC_MPLS_TTL"
    fields_desc = [ ShortEnumField("type", 16, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATPushVLAN(_ofp_action_header):
    name = "OFPAT_PUSH_VLAN"
    fields_desc = [ ShortEnumField("type", 17, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("ethertype", 0x8100), # or 0x88a8
                    XShortField("pad", 0) ]
class OFPATPopVLAN(_ofp_action_header):
    name = "OFPAT_POP_VLAN"
    fields_desc = [ ShortEnumField("type", 18, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATPushMPLS(_ofp_action_header):
    name = "OFPAT_PUSH_MPLS"
    fields_desc = [ ShortEnumField("type", 19, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("ethertype", 0x8847), # or 0x8848
                    XShortField("pad", 0) ]
class OFPATPopMPLS(_ofp_action_header):
    name = "OFPAT_POP_MPLS"
    fields_desc = [ ShortEnumField("type", 20, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("ethertype", 0x8847), # or 0x8848
                    XShortField("pad", 0) ]
class OFPATSetQueue(_ofp_action_header):
    name = "OFPAT_SET_QUEUE"
    fields_desc = [ ShortEnumField("type", 21, ofp_action_types),
                    ShortField("len", 8),
                    IntEnumField("queue_id", 0, ofp_queue) ]
class OFPATGroup(_ofp_action_header):
    name = "OFPAT_GROUP"
    fields_desc = [ ShortEnumField("type", 22, ofp_action_types),
                    ShortField("len", 8),
                    IntEnumField("group_id", 0, ofp_group) ]
class OFPATSetNwTTL(_ofp_action_header):
    name = "OFPAT_SET_NW_TTL"
    fields_desc = [ ShortEnumField("type", 23, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("nw_ttl", 0),
                    X3BytesField("pad", 0) ]
class OFPATDecNwTTL(_ofp_action_header):
    name = "OFPAT_DEC_NW_TTL"
    fields_desc = [ ShortEnumField("type", 24, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATSetField(_ofp_action_header):
    """OFPAT_SET_FIELD action: carries exactly one OXM TLV and is padded
    to a 64-bit boundary (the padding IS counted in len)."""
    def post_build(self, p, pay):
        l = self.len
        zero_bytes = 0
        if l is None:
            l = len(p)+len(pay)
            zero_bytes = (8 - l%8) % 8
            l = l + zero_bytes # add padding length
            p = p[:2] + struct.pack("!H", l) + p[4:]
        else:
            zero_bytes = (8 - l%8) % 8
        # every message will be padded correctly
        p += "\x00" * zero_bytes
        return p + pay
    def extract_padding(self, s):
        # keep everything after the declared fields as payload of the parent
        return "", s
    name = "OFPAT_SET_FIELD"
    fields_desc = [ ShortEnumField("type", 25, ofp_action_types),
                    ShortField("len", None),
                    # there should not be more than one oxm tlv
                    OXMPacketListField("field", [], Packet,
                                       length_from=lambda pkt:pkt.len-4,
                                       # /!\ contains padding!
                                       autocomplete=False) ]
# PBB (Provider Backbone Bridging, 802.1ah) push/pop and experimenter actions
class OFPATPushPBB(_ofp_action_header):
    name = "OFPAT_PUSH_PBB"
    fields_desc = [ ShortEnumField("type", 26, ofp_action_types),
                    ShortField("len", 8),
                    ShortField("ethertype", 0x88e7),
                    XShortField("pad", 0) ]
class OFPATPopPBB(_ofp_action_header):
    name = "OFPAT_POP_PBB"
    fields_desc = [ ShortEnumField("type", 27, ofp_action_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPATExperimenter(_ofp_action_header):
    name = "OFPAT_EXPERIMENTER"
    fields_desc = [ ShortEnumField("type", 65535, ofp_action_types),
                    ShortField("len", 8),
                    IntField("experimenter", 0) ]
# Dispatch table: action type code -> action class (used by
# ActionPacketListField.m2i when dissecting)
ofp_action_cls = { 0: OFPATOutput,
                   1: OFPATSetVLANVID,
                   2: OFPATSetVLANPCP,
                   3: OFPATStripVLAN,
                   4: OFPATSetDlSrc,
                   5: OFPATSetDlDst,
                   6: OFPATSetNwSrc,
                   7: OFPATSetNwDst,
                   8: OFPATSetNwToS,
                   9: OFPATSetTpSrc,
                   10: OFPATSetTpDst,
                   #11: OFPATEnqueue,
                   11: OFPATCopyTTLOut,
                   12: OFPATCopyTTLIn,
                   13: OFPATSetMPLSLabel,
                   14: OFPATSetMPLSTC,
                   15: OFPATSetMPLSTTL,
                   16: OFPATDecMPLSTTL,
                   17: OFPATPushVLAN,
                   18: OFPATPopVLAN,
                   19: OFPATPushMPLS,
                   20: OFPATPopMPLS,
                   21: OFPATSetQueue,
                   22: OFPATGroup,
                   23: OFPATSetNwTTL,
                   24: OFPATDecNwTTL,
                   25: OFPATSetField,
                   26: OFPATPushPBB,
                   27: OFPATPopPBB,
                   65535: OFPATExperimenter }
class ActionPacketListField(PacketListField):
    """Dissects a sequence of OpenFlow action structures, dispatching
    on the 16-bit action type code."""
    def m2i(self, pkt, s):
        code = struct.unpack("!H", s[:2])[0]
        cls = ofp_action_cls.get(code, Raw)
        return cls(s)
    @staticmethod
    def _get_action_length(s):
        # the action length field sits in header bytes 2-3
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        actions = []
        buf = s
        while buf and len(buf) >= 4:
            alen = ActionPacketListField._get_action_length(buf)
            # length should be at least 8 (non-zero, 64-bit aligned),
            # and no incoherent length
            if alen < 8 or len(buf) < alen:
                break
            chunk, buf = buf[:alen], buf[alen:]
            actions.append(self.m2i(pkt, chunk))
        return buf, actions
##################### Action IDs ####################
# length is computed as in action structures,
# so we reuse _ofp_action_header
# Action ID structures: 4-byte type+len headers only (no body), used in
# OFPT_MULTIPART table features to advertise supported actions
class OFPATOutputID(_ofp_action_header):
    name = "OFPAT_OUTPUT"
    fields_desc = [ ShortEnumField("type", 0, ofp_action_types),
                    ShortField("len", 4) ]
# the following actions are not supported by OFv1.3
class OFPATSetVLANVIDID(_ofp_action_header):
    name = "OFPAT_SET_VLAN_VID"
    fields_desc = [ ShortEnumField("type", 1, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetVLANPCPID(_ofp_action_header):
    name = "OFPAT_SET_VLAN_PCP"
    fields_desc = [ ShortEnumField("type", 2, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATStripVLANID(_ofp_action_header):
    name = "OFPAT_STRIP_VLAN"
    fields_desc = [ ShortEnumField("type", 3, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetDlSrcID(_ofp_action_header):
    name = "OFPAT_SET_DL_SRC"
    fields_desc = [ ShortEnumField("type", 4, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetDlDstID(_ofp_action_header):
    name = "OFPAT_SET_DL_DST"
    fields_desc = [ ShortEnumField("type", 5, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetNwSrcID(_ofp_action_header):
    name = "OFPAT_SET_NW_SRC"
    fields_desc = [ ShortEnumField("type", 6, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetNwDstID(_ofp_action_header):
    name = "OFPAT_SET_NW_DST"
    fields_desc = [ ShortEnumField("type", 7, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetNwToSID(_ofp_action_header):
    # Fixed display name: action type 8 is OFPAT_SET_NW_TOS in
    # ofp_action_types; "OFPAT_SET_TP_TOS" was a copy-paste slip.
    name = "OFPAT_SET_NW_TOS"
    fields_desc = [ ShortEnumField("type", 8, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetTpSrcID(_ofp_action_header):
    name = "OFPAT_SET_TP_SRC"
    fields_desc = [ ShortEnumField("type", 9, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetTpDstID(_ofp_action_header):
    name = "OFPAT_SET_TP_DST"
    fields_desc = [ ShortEnumField("type", 10, ofp_action_types),
                    ShortField("len", 4) ]
#class OFPATEnqueueID(_ofp_action_header):
#       name = "OFPAT_ENQUEUE"
#       fields_desc = [ ShortEnumField("type", 11, ofp_action_types),
#                       ShortField("len", 4) ]
class OFPATSetMPLSLabelID(_ofp_action_header):
    name = "OFPAT_SET_MPLS_LABEL"
    fields_desc = [ ShortEnumField("type", 13, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetMPLSTCID(_ofp_action_header):
    name = "OFPAT_SET_MPLS_TC"
    fields_desc = [ ShortEnumField("type", 14, ofp_action_types),
                    ShortField("len", 4) ]
# end of unsupported actions
class OFPATCopyTTLOutID(_ofp_action_header):
    name = "OFPAT_COPY_TTL_OUT"
    fields_desc = [ ShortEnumField("type", 11, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATCopyTTLInID(_ofp_action_header):
    name = "OFPAT_COPY_TTL_IN"
    fields_desc = [ ShortEnumField("type", 12, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetMPLSTTLID(_ofp_action_header):
    name = "OFPAT_SET_MPLS_TTL"
    fields_desc = [ ShortEnumField("type", 15, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATDecMPLSTTLID(_ofp_action_header):
    name = "OFPAT_DEC_MPLS_TTL"
    fields_desc = [ ShortEnumField("type", 16, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPushVLANID(_ofp_action_header):
    name = "OFPAT_PUSH_VLAN"
    fields_desc = [ ShortEnumField("type", 17, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPopVLANID(_ofp_action_header):
    name = "OFPAT_POP_VLAN"
    fields_desc = [ ShortEnumField("type", 18, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPushMPLSID(_ofp_action_header):
    name = "OFPAT_PUSH_MPLS"
    fields_desc = [ ShortEnumField("type", 19, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPopMPLSID(_ofp_action_header):
    name = "OFPAT_POP_MPLS"
    fields_desc = [ ShortEnumField("type", 20, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetQueueID(_ofp_action_header):
    name = "OFPAT_SET_QUEUE"
    fields_desc = [ ShortEnumField("type", 21, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATGroupID(_ofp_action_header):
    name = "OFPAT_GROUP"
    fields_desc = [ ShortEnumField("type", 22, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetNwTTLID(_ofp_action_header):
    name = "OFPAT_SET_NW_TTL"
    fields_desc = [ ShortEnumField("type", 23, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATDecNwTTLID(_ofp_action_header):
    name = "OFPAT_DEC_NW_TTL"
    fields_desc = [ ShortEnumField("type", 24, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATSetFieldID(_ofp_action_header):
    name = "OFPAT_SET_FIELD"
    fields_desc = [ ShortEnumField("type", 25, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPushPBBID(_ofp_action_header):
    name = "OFPAT_PUSH_PBB"
    fields_desc = [ ShortEnumField("type", 26, ofp_action_types),
                    ShortField("len", 4) ]
class OFPATPopPBBID(_ofp_action_header):
    name = "OFPAT_POP_PBB"
    fields_desc = [ ShortEnumField("type", 27, ofp_action_types),
                    ShortField("len", 4) ]
# experimenter IDs may carry extra data, so len is computed at build time
class OFPATExperimenterID(_ofp_action_header):
    name = "OFPAT_EXPERIMENTER"
    fields_desc = [ ShortEnumField("type", 65535, ofp_action_types),
                    ShortField("len", None) ]
# Dispatch table: action type code -> action ID class (used by
# ActionIDPacketListField.m2i when dissecting table features)
ofp_action_id_cls = { 0: OFPATOutputID,
                      1: OFPATSetVLANVIDID,
                      2: OFPATSetVLANPCPID,
                      3: OFPATStripVLANID,
                      4: OFPATSetDlSrcID,
                      5: OFPATSetDlDstID,
                      6: OFPATSetNwSrcID,
                      7: OFPATSetNwDstID,
                      8: OFPATSetNwToSID,
                      9: OFPATSetTpSrcID,
                      10: OFPATSetTpDstID,
                      #11: OFPATEnqueueID,
                      11: OFPATCopyTTLOutID,
                      12: OFPATCopyTTLInID,
                      13: OFPATSetMPLSLabelID,
                      14: OFPATSetMPLSTCID,
                      15: OFPATSetMPLSTTLID,
                      16: OFPATDecMPLSTTLID,
                      17: OFPATPushVLANID,
                      18: OFPATPopVLANID,
                      19: OFPATPushMPLSID,
                      20: OFPATPopMPLSID,
                      21: OFPATSetQueueID,
                      22: OFPATGroupID,
                      23: OFPATSetNwTTLID,
                      24: OFPATDecNwTTLID,
                      25: OFPATSetFieldID,
                      26: OFPATPushPBBID,
                      27: OFPATPopPBBID,
                      65535: OFPATExperimenterID }
class ActionIDPacketListField(PacketListField):
    """Dissects a sequence of OpenFlow action ID structures."""
    def m2i(self, pkt, s):
        code = struct.unpack("!H", s[:2])[0]
        cls = ofp_action_id_cls.get(code, Raw)
        return cls(s)
    @staticmethod
    def _get_action_id_length(s):
        # the length field sits in header bytes 2-3
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        ids = []
        buf = s
        while buf and len(buf) >= 4:
            alen = ActionIDPacketListField._get_action_id_length(buf)
            # length is 4 (may be more for experimenter messages),
            # and no incoherent length
            if alen < 4 or len(buf) < alen:
                break
            chunk, buf = buf[:alen], buf[alen:]
            ids.append(self.m2i(pkt, chunk))
        return buf, ids
#################### Instructions ###################
class _ofp_instruction_header(Packet):
    """Base class for OpenFlow instructions: fills the 16-bit len field
    (bytes 2-3) at build time when the subclass left it as None."""
    name = "Dummy OpenFlow Instruction Header"
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
# OpenFlow 1.3 instruction type codes (spec section 7.2.4)
ofp_instruction_types = { 1: "OFPIT_GOTO_TABLE",
                          2: "OFPIT_WRITE_METADATA",
                          3: "OFPIT_WRITE_ACTIONS",
                          4: "OFPIT_APPLY_ACTIONS",
                          5: "OFPIT_CLEAR_ACTIONS",
                          6: "OFPIT_METER",
                          65535: "OFPIT_EXPERIMENTER" }
# One class per instruction type; WRITE/APPLY_ACTIONS embed an action list
class OFPITGotoTable(_ofp_instruction_header):
    name = "OFPIT_GOTO_TABLE"
    fields_desc = [ ShortEnumField("type", 1, ofp_instruction_types),
                    ShortField("len", 8),
                    ByteEnumField("table_id", 0, ofp_table),
                    X3BytesField("pad", 0) ]
class OFPITWriteMetadata(_ofp_instruction_header):
    name = "OFPIT_WRITE_METADATA"
    fields_desc = [ ShortEnumField("type", 2, ofp_instruction_types),
                    ShortField("len", 24),
                    XIntField("pad", 0),
                    LongField("metadata", 0),
                    LongField("metadata_mask", 0) ]
class OFPITWriteActions(_ofp_instruction_header):
    name = "OFPIT_WRITE_ACTIONS"
    fields_desc = [ ShortEnumField("type", 3, ofp_instruction_types),
                    ShortField("len", None),
                    XIntField("pad", 0),
                    ActionPacketListField("actions", [], Packet,
                                          length_from=lambda pkt:pkt.len-8) ]
class OFPITApplyActions(_ofp_instruction_header):
    name = "OFPIT_APPLY_ACTIONS"
    fields_desc = [ ShortEnumField("type", 4, ofp_instruction_types),
                    ShortField("len", None),
                    XIntField("pad", 0),
                    ActionPacketListField("actions", [], Packet,
                                          length_from=lambda pkt:pkt.len-8) ]
class OFPITClearActions(_ofp_instruction_header):
    name = "OFPIT_CLEAR_ACTIONS"
    fields_desc = [ ShortEnumField("type", 5, ofp_instruction_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPITMeter(_ofp_instruction_header):
    name = "OFPIT_METER"
    fields_desc = [ ShortEnumField("type", 6, ofp_instruction_types),
                    ShortField("len", 8),
                    IntEnumField("meter_id", 1, ofp_meter) ]
class OFPITExperimenter(_ofp_instruction_header):
    name = "OFPIT_EXPERIMENTER"
    fields_desc = [ ShortEnumField("type", 65535, ofp_instruction_types),
                    ShortField("len", None),
                    IntField("experimenter", 0) ]
# Dispatch table: instruction type code -> instruction class
ofp_instruction_cls = { 1: OFPITGotoTable,
                        2: OFPITWriteMetadata,
                        3: OFPITWriteActions,
                        4: OFPITApplyActions,
                        5: OFPITClearActions,
                        6: OFPITMeter,
                        65535: OFPITExperimenter }
class InstructionPacketListField(PacketListField):
    """Dissects a sequence of OpenFlow instruction structures."""
    def m2i(self, pkt, s):
        code = struct.unpack("!H", s[:2])[0]
        cls = ofp_instruction_cls.get(code, Raw)
        return cls(s)
    @staticmethod
    def _get_instruction_length(s):
        # the instruction length field sits in header bytes 2-3
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        instructions = []
        buf = s
        while buf and len(buf) > 4:
            ilen = InstructionPacketListField._get_instruction_length(buf)
            # length should be at least 8 (non-zero, 64-bit aligned),
            # and no incoherent length
            if ilen < 8 or len(buf) < ilen:
                break
            chunk, buf = buf[:ilen], buf[ilen:]
            instructions.append(self.m2i(pkt, chunk))
        return buf, instructions
################## Instruction IDs ##################
# length is computed as in instruction structures,
# so we reuse _ofp_instruction_header
# 4-byte type+len headers only, advertised in table features replies
class OFPITGotoTableID(_ofp_instruction_header):
    name = "OFPIT_GOTO_TABLE"
    fields_desc = [ ShortEnumField("type", 1, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITWriteMetadataID(_ofp_instruction_header):
    name = "OFPIT_WRITE_METADATA"
    fields_desc = [ ShortEnumField("type", 2, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITWriteActionsID(_ofp_instruction_header):
    name = "OFPIT_WRITE_ACTIONS"
    fields_desc = [ ShortEnumField("type", 3, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITApplyActionsID(_ofp_instruction_header):
    name = "OFPIT_APPLY_ACTIONS"
    fields_desc = [ ShortEnumField("type", 4, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITClearActionsID(_ofp_instruction_header):
    name = "OFPIT_CLEAR_ACTIONS"
    fields_desc = [ ShortEnumField("type", 5, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITMeterID(_ofp_instruction_header):
    name = "OFPIT_METER"
    fields_desc = [ ShortEnumField("type", 6, ofp_instruction_types),
                    ShortField("len", 4) ]
class OFPITExperimenterID(_ofp_instruction_header):
    name = "OFPIT_EXPERIMENTER"
    fields_desc = [ ShortEnumField("type", 65535, ofp_instruction_types),
                    ShortField("len", None) ]
# Dispatch table: instruction type code -> instruction ID class
ofp_instruction_id_cls = { 1: OFPITGotoTableID,
                           2: OFPITWriteMetadataID,
                           3: OFPITWriteActionsID,
                           4: OFPITApplyActionsID,
                           5: OFPITClearActionsID,
                           6: OFPITMeterID,
                           65535: OFPITExperimenterID }
class InstructionIDPacketListField(PacketListField):
    """Dissects a sequence of OpenFlow instruction ID structures
    (4-byte type+len headers, as found in table features replies)."""
    def m2i(self, pkt, s):
        t = struct.unpack("!H", s[:2])[0]
        # Bug fix: dispatch on ofp_instruction_id_cls, not the full
        # instruction table (ofp_instruction_cls) -- otherwise 4-byte ID
        # structures would be parsed as full instruction bodies.
        return ofp_instruction_id_cls.get(t, Raw)(s)
    @staticmethod
    def _get_instruction_id_length(s):
        # the length field sits in header bytes 2-3
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain and len(remain) >= 4:
            l = InstructionIDPacketListField._get_instruction_id_length(remain)
            if l < 4 or len(remain) < l:
                # length is 4 (may be more for experimenter messages),
                # and no incoherent length
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
###################### Buckets ######################
class OFPBucket(Packet):
    """OpenFlow 1.3 ofp_bucket: a weighted action set inside a group
    (16-byte header followed by an action list)."""
    def extract_padding(self, s):
        # all trailing bytes belong to the parent packet
        return "", s
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            # len is the FIRST field of the bucket (bytes 0-1)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_BUCKET"
    fields_desc = [ ShortField("len", None),
                    ShortField("weight", 0),
                    IntEnumField("watch_port", 0, ofp_port_no),
                    IntEnumField("watch_group", 0, ofp_group),
                    XIntField("pad", 0),
                    ActionPacketListField("actions", [], Packet,
                                          length_from=lambda pkt:pkt.len-16) ]
class BucketPacketListField(PacketListField):
    """Dissects a list of OFPBucket structures (group mod / multipart)."""
    @staticmethod
    def _get_bucket_length(s):
        # bucket length is the first 16-bit field of the structure
        return struct.unpack("!H", s[:2])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            l = BucketPacketListField._get_bucket_length(remain)
            # Robustness fix: the ofp_bucket header is 16 bytes, so an
            # announced length below 16 (in particular 0) would make no
            # progress and loop forever; also reject truncated buckets.
            if l < 16 or len(remain) < l:
                break
            current = remain[:l]
            remain = remain[l:]
            p = OFPBucket(current)
            lst.append(p)
        return remain, lst
####################### Queues ######################
class _ofp_queue_property_header(Packet):
    """Base class for queue properties: fills the 16-bit len field
    (bytes 2-3) at build time when the subclass left it as None."""
    name = "Dummy OpenFlow Queue Property Header"
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
# Queue property types and their classes (OFPQT_NONE is a placeholder
# used when a queue advertises no properties)
ofp_queue_property_types = { 0: "OFPQT_NONE",
                             1: "OFPQT_MIN_RATE" }
class OFPQTNone(_ofp_queue_property_header):
    name = "OFPQT_NONE"
    fields_desc = [ ShortEnumField("type", 0, ofp_queue_property_types),
                    ShortField("len", 8),
                    XIntField("pad", 0) ]
class OFPQTMinRate(_ofp_queue_property_header):
    name = "OFPQT_MIN_RATE"
    fields_desc = [ ShortEnumField("type", 1, ofp_queue_property_types),
                    ShortField("len", 16),
                    XIntField("pad1", 0),
                    ShortField("rate", 0),
                    XBitField("pad2", 0, 48) ]
ofp_queue_property_cls = { 0: OFPQTNone,
                           1: OFPQTMinRate }
class QueuePropertyPacketListField(PacketListField):
    """Dissects a list of queue property structures."""
    def m2i(self, pkt, s):
        t = struct.unpack("!H", s[:2])[0]
        return ofp_queue_property_cls.get(t, Raw)(s)
    @staticmethod
    def _get_queue_property_length(s):
        # the length field sits in header bytes 2-3
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            l = QueuePropertyPacketListField._get_queue_property_length(remain)
            # Robustness fix: every queue property header is at least
            # 8 bytes; an announced length below that (in particular 0)
            # would make no progress and loop forever.
            if l < 8 or len(remain) < l:
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
class OFPPacketQueue(Packet):
    """OpenFlow packet queue description: 8-byte header plus a list of
    queue properties (an OFPQT_NONE is emitted when none are given)."""
    def extract_padding(self, s):
        return "", s
    def post_build(self, p, pay):
        # the spec requires at least one property per queue
        if self.properties == []:
            p += str(OFPQTNone())
        if self.len is None:
            l = len(p)+len(pay)
            # the queue len field sits at bytes 4-5 (after queue_id)
            p = p[:4] + struct.pack("!H", l) + p[6:]
        return p + pay
    name = "OFP_PACKET_QUEUE"
    fields_desc = [ IntEnumField("queue_id", 0, ofp_queue),
                    ShortField("len", None),
                    XShortField("pad", 0),
                    QueuePropertyPacketListField("properties", [], Packet,
                                                 length_from=lambda pkt:pkt.len-8) ]
class QueuePacketListField(PacketListField):
    """Dissects a list of OFPPacketQueue structures."""
    @staticmethod
    def _get_queue_length(s):
        # the queue len field sits at bytes 4-5 (after queue_id)
        return struct.unpack("!H", s[4:6])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            l = QueuePacketListField._get_queue_length(remain)
            # Robustness fix: the OFPPacketQueue header is 8 bytes; an
            # announced length below that (in particular 0) would make
            # no progress and loop forever.
            if l < 8 or len(remain) < l:
                break
            current = remain[:l]
            remain = remain[l:]
            p = OFPPacketQueue(current)
            lst.append(p)
        return remain, lst
#################### Meter bands ####################
# Meter band type codes (OF1.3 spec 7.3.4.4); experimenter is 0xffff
ofp_meter_band_types = { 0: "OFPMBT_DROP",
                         1: "OFPMBT_DSCP_REMARK",
                         65535: "OFPMBT_EXPERIMENTER" }
class OFPMBTDrop(Packet):
    name = "OFPMBT_DROP"
    # Fixed enum table: meter band types, not queue property types
    # (previous ofp_meter_band_types/ofp_queue_property_types mix-up).
    fields_desc = [ ShortEnumField("type", 0, ofp_meter_band_types),
                    ShortField("len", 16),
                    IntField("rate", 0),
                    IntField("burst_size", 0),
                    XIntField("pad", 0) ]
class OFPMBTDSCPRemark(Packet):
    name = "OFPMBT_DSCP_REMARK"
    # Fixed enum table: meter band types, not queue property types.
    fields_desc = [ ShortEnumField("type", 1, ofp_meter_band_types),
                    ShortField("len", 16),
                    IntField("rate", 0),
                    IntField("burst_size", 0),
                    ByteField("prec_level", 0),
                    X3BytesField("pad", 0) ]
class OFPMBTExperimenter(Packet):
    name = "OFPMBT_EXPERIMENTER"
    # Fixed enum table: meter band types, not queue property types.
    fields_desc = [ ShortEnumField("type", 65535, ofp_meter_band_types),
                    ShortField("len", 16),
                    IntField("rate", 0),
                    IntField("burst_size", 0),
                    IntField("experimenter", 0) ]
# Dispatch table: meter band wire type -> class. Fixed the experimenter
# key: on the wire OFPMBT_EXPERIMENTER is 65535 (see ofp_meter_band_types
# and OFPMBTExperimenter's type field), so key 2 could never match and
# experimenter bands always fell back to Raw.
ofp_meter_band_cls = { 0: OFPMBTDrop,
                       1: OFPMBTDSCPRemark,
                       65535: OFPMBTExperimenter }
class MeterBandPacketListField(PacketListField):
    """Dissects a list of meter band structures in fixed 16-byte slices.

    NOTE(review): this assumes every band is exactly 16 bytes; an
    experimenter band longer than 16 bytes would be mis-split -- confirm
    whether oversized experimenter bands need handling.
    """
    def m2i(self, pkt, s):
        t = struct.unpack("!H", s[:2])[0]
        return ofp_meter_band_cls.get(t, Raw)(s)
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            current = remain[:16]
            remain = remain[16:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
#####################################################
############## OpenFlow 1.3 Messages ################
#####################################################
# OpenFlow wire protocol version byte -> human-readable name
ofp_version = { 0x01: "OpenFlow 1.0",
                0x02: "OpenFlow 1.1",
                0x03: "OpenFlow 1.2",
                0x04: "OpenFlow 1.3",
                0x05: "OpenFlow 1.4" }
# OpenFlow 1.3 message type codes (ofp_header.type)
ofp_type = { 0: "OFPT_HELLO",
             1: "OFPT_ERROR",
             2: "OFPT_ECHO_REQUEST",
             3: "OFPT_ECHO_REPLY",
             4: "OFPT_EXPERIMENTER",
             5: "OFPT_FEATURES_REQUEST",
             6: "OFPT_FEATURES_REPLY",
             7: "OFPT_GET_CONFIG_REQUEST",
             8: "OFPT_GET_CONFIG_REPLY",
             9: "OFPT_SET_CONFIG",
             10: "OFPT_PACKET_IN",
             11: "OFPT_FLOW_REMOVED",
             12: "OFPT_PORT_STATUS",
             13: "OFPT_PACKET_OUT",
             14: "OFPT_FLOW_MOD",
             15: "OFPT_GROUP_MOD",
             16: "OFPT_PORT_MOD",
             17: "OFPT_TABLE_MOD",
             18: "OFPT_MULTIPART_REQUEST",
             19: "OFPT_MULTIPART_REPLY",
             20: "OFPT_BARRIER_REQUEST",
             21: "OFPT_BARRIER_REPLY",
             22: "OFPT_QUEUE_GET_CONFIG_REQUEST",
             23: "OFPT_QUEUE_GET_CONFIG_REPLY",
             24: "OFPT_ROLE_REQUEST",
             25: "OFPT_ROLE_REPLY",
             26: "OFPT_GET_ASYNC_REQUEST",
             27: "OFPT_GET_ASYNC_REPLY",
             28: "OFPT_SET_ASYNC",
             29: "OFPT_METER_MOD" }
class _ofp_header(Packet):
    """Base class for OF messages: fills the 16-bit total length field
    (bytes 2-3 of the 8-byte fixed header) at build time."""
    name = "Dummy OpenFlow Header"
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
class OFPTHello(_ofp_header):
    """OFPT_HELLO message: fixed 8-byte header followed by optional
    hello elements (e.g. version bitmaps)."""
    name = "OFPT_HELLO"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 0, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    # Fixed length computation: the fixed OF header is
                    # 8 bytes (version+type+len+xid), so the elements
                    # occupy len-8 bytes, not len-32.
                    HelloElemPacketListField("elements", [], Packet,
                                             length_from=lambda pkt:pkt.len-8) ]
    overload_fields = {TCP: {"sport": 6653}}
#####################################################
##################### OFPT_ERROR ####################
#####################################################
### this class will be used to display some messages
### sent back by the switch after an error
class OFPacketField(PacketField):
    """PacketField that re-dissects the offending OpenFlow message
    embedded in an error message's data field."""
    def getfield(self, pkt, s):
        try:
            # OpenFlow header: total message length sits in bytes 2-3
            l = s[2:4]
            l = struct.unpack("!H", l)[0]
            ofload = s[:l]
            remain = s[l:]
            return remain, OpenFlow(None, ofload)(ofload)
        except Exception:
            # best-effort fallback: keep undecodable payloads as Raw.
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return "", Raw(s)
# OFPT_ERROR high-level error type codes (each has its own errcode enum
# in the per-type classes below)
ofp_error_type = { 0: "OFPET_HELLO_FAILED",
                   1: "OFPET_BAD_REQUEST",
                   2: "OFPET_BAD_ACTION",
                   3: "OFPET_BAD_INSTRUCTION",
                   4: "OFPET_BAD_MATCH",
                   5: "OFPET_FLOW_MOD_FAILED",
                   6: "OFPET_GROUP_MOD_FAILED",
                   7: "OFPET_PORT_MOD_FAILED",
                   8: "OFPET_TABLE_MOD_FAILED",
                   9: "OFPET_QUEUE_OP_FAILED",
                   10: "OFPET_SWITCH_CONFIG_FAILED",
                   11: "OFPET_ROLE_REQUEST_FAILED",
                   12: "OFPET_METER_MOD_FAILED",
                   13: "OFPET_TABLE_FEATURES_FAILED",
                   65535: "OFPET_EXPERIMENTER" }
# each OFPET* class is one ofp_error_msg variant: common OpenFlow header,
# errtype fixed to one family, errcode refined per family, and the
# offending request echoed back in 'data'
# OFPET_HELLO_FAILED: version negotiation with the peer failed
class OFPETHelloFailed(_ofp_header):
    name = "OFPET_HELLO_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 0, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPHFC_INCOMPATIBLE",
                                                   1: "OFPHFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_BAD_REQUEST: the request itself was not understood
class OFPETBadRequest(_ofp_header):
    name = "OFPET_BAD_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 1, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPBRC_BAD_VERSION",
                                                   1: "OFPBRC_BAD_TYPE",
                                                   2: "OFPBRC_BAD_MULTIPART",
                                                   3: "OFPBRC_BAD_EXPERIMENTER",
                                                   4: "OFPBRC_BAD_EXP_TYPE",
                                                   5: "OFPBRC_EPERM",
                                                   6: "OFPBRC_BAD_LEN",
                                                   7: "OFPBRC_BUFFER_EMPTY",
                                                   8: "OFPBRC_BUFFER_UNKNOWN",
                                                   9: "OFPBRC_BAD_TABLE_ID",
                                                  10: "OFPBRC_IS_SLAVE",
                                                  11: "OFPBRC_BAD_PORT",
                                                  12: "OFPBRC_BAD_PACKET",
                                                  13: "OFPBRC_MULTIPART_BUFFER_OVERFLOW" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_BAD_ACTION: an action in the request was rejected
class OFPETBadAction(_ofp_header):
    name = "OFPET_BAD_ACTION"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 2, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPBAC_BAD_TYPE",
                                                   1: "OFPBAC_BAD_LEN",
                                                   2: "OFPBAC_BAD_EXPERIMENTER",
                                                   3: "OFPBAC_BAD_EXP_TYPE",
                                                   4: "OFPBAC_BAD_OUT_PORT",
                                                   5: "OFPBAC_BAD_ARGUMENT",
                                                   6: "OFPBAC_EPERM",
                                                   7: "OFPBAC_TOO_MANY",
                                                   8: "OFPBAC_BAD_QUEUE",
                                                   9: "OFPBAC_BAD_OUT_GROUP",
                                                  10: "OFPBAC_MATCH_INCONSISTENT",
                                                  11: "OFPBAC_UNSUPPORTED_ORDER",
                                                  12: "OFPBAC_BAD_TAG",
                                                  13: "OFPBAC_BAD_SET_TYPE",
                                                  14: "OFPBAC_BAD_SET_LEN",
                                                  15: "OFPBAC_BAD_SET_ARGUMENT" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_BAD_INSTRUCTION: an instruction in a flow-mod was rejected
class OFPETBadInstruction(_ofp_header):
    name = "OFPET_BAD_INSTRUCTION"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 3, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPBIC_UNKNOWN_INST",
                                                   1: "OFPBIC_UNSUP_INST",
                                                   2: "OFPBIC_BAD_TABLE_ID",
                                                   3: "OFPBIC_UNSUP_METADATA",
                                                   4: "OFPBIC_UNSUP_METADATA_MASK",
                                                   5: "OFPBIC_BAD_EXPERIMENTER",
                                                   6: "OFPBIC_BAD_EXP_TYPE",
                                                   7: "OFPBIC_BAD_LEN",
                                                   8: "OFPBIC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_BAD_MATCH: the match structure of a flow-mod was rejected
class OFPETBadMatch(_ofp_header):
    name = "OFPET_BAD_MATCH"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 4, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPBMC_BAD_TYPE",
                                                   1: "OFPBMC_BAD_LEN",
                                                   2: "OFPBMC_BAD_TAG",
                                                   3: "OFPBMC_BAD_DL_ADDR_MASK",
                                                   4: "OFPBMC_BAD_NW_ADDR_MASK",
                                                   5: "OFPBMC_BAD_WILDCARDS",
                                                   6: "OFPBMC_BAD_FIELD",
                                                   7: "OFPBMC_BAD_VALUE",
                                                   8: "OFPBMC_BAD_MASK",
                                                   9: "OFPBMC_BAD_PREREQ",
                                                  10: "OFPBMC_DUP_FIELD",
                                                  11: "OFPBMC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_FLOW_MOD_FAILED: flow table modification could not be applied
class OFPETFlowModFailed(_ofp_header):
    name = "OFPET_FLOW_MOD_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 5, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPFMFC_UNKNOWN",
                                                   1: "OFPFMFC_TABLE_FULL",
                                                   2: "OFPFMFC_BAD_TABLE_ID",
                                                   3: "OFPFMFC_OVERLAP",
                                                   4: "OFPFMFC_EPERM",
                                                   5: "OFPFMFC_BAD_TIMEOUT",
                                                   6: "OFPFMFC_BAD_COMMAND",
                                                   7: "OFPFMFC_BAD_FLAGS" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_GROUP_MOD_FAILED: group table modification could not be applied
class OFPETGroupModFailed(_ofp_header):
    name = "OFPET_GROUP_MOD_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 6, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPGMFC_GROUP_EXISTS",
                                                   1: "OFPGMFC_INVALID_GROUP",
                                                   2: "OFPGMFC_WEIGHT_UNSUPPORTED",
                                                   3: "OFPGMFC_OUT_OF_GROUPS",
                                                   4: "OFPGMFC_OUT_OF_BUCKETS",
                                                   5: "OFPGMFC_CHAINING_UNSUPPORTED",
                                                   6: "OFPGMFC_WATCH_UNSUPPORTED",
                                                   7: "OFPGMFC_LOOP",
                                                   8: "OFPGMFC_UNKNOWN_GROUP",
                                                   9: "OFPGMFC_CHAINED_GROUP",
                                                  10: "OFPGMFC_BAD_TYPE",
                                                  11: "OFPGMFC_BAD_COMMAND",
                                                  12: "OFPGMFC_BAD_BUCKET",
                                                  13: "OFPGMFC_BAD_WATCH",
                                                  14: "OFPFMFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_PORT_MOD_FAILED: port modification request failed
class OFPETPortModFailed(_ofp_header):
    name = "OFPET_PORT_MOD_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 7, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPPMFC_BAD_PORT",
                                                   1: "OFPPMFC_BAD_HW_ADDR",
                                                   2: "OFPPMFC_BAD_CONFIG",
                                                   3: "OFPPMFC_BAD_ADVERTISE",
                                                   4: "OFPPMFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_TABLE_MOD_FAILED: table modification request failed
class OFPETTableModFailed(_ofp_header):
    name = "OFPET_TABLE_MOD_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 8, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPTMFC_BAD_TABLE",
                                                   1: "OFPTMFC_BAD_CONFIG",
                                                   2: "OFPTMFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_QUEUE_OP_FAILED: queue operation failed
class OFPETQueueOpFailed(_ofp_header):
    name = "OFPET_QUEUE_OP_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 9, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPQOFC_BAD_PORT",
                                                   1: "OFPQOFC_BAD_QUEUE",
                                                   2: "OFPQOFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_SWITCH_CONFIG_FAILED: switch configuration request failed
class OFPETSwitchConfigFailed(_ofp_header):
    name = "OFPET_SWITCH_CONFIG_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 10, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPSCFC_BAD_FLAGS",
                                                   1: "OFPSCFC_BAD_LEN",
                                                   2: "OFPSCFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_ROLE_REQUEST_FAILED: controller role change request failed
class OFPETRoleRequestFailed(_ofp_header):
    name = "OFPET_ROLE_REQUEST_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 11, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPRRFC_STALE",
                                                   1: "OFPRRFC_UNSUP",
                                                   2: "OFPRRFC_BAD_ROLE" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_METER_MOD_FAILED: meter modification request failed
class OFPETMeterModFailed(_ofp_header):
    name = "OFPET_METER_MOD_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 12, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPMMFC_UNKNOWN",
                                                   1: "OFPMMFC_METER_EXISTS",
                                                   2: "OFPMMFC_INVALID_METER",
                                                   3: "OFPMMFC_UNKNOWN_METER",
                                                   4: "OFPMMFC_BAD_COMMAND",
                                                   5: "OFPMMFC_BAD_FLAGS",
                                                   6: "OFPMMFC_BAD_RATE",
                                                   7: "OFPMMFC_BAD_BURST",
                                                   8: "OFPMMFC_BAD_BAND",
                                                   9: "OFPMMFC_BAD_BAND_VALUE",
                                                  10: "OFPMMFC_OUT_OF_METERS",
                                                  11: "OFPMMFC_OUT_OF_BANDS" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_TABLE_FEATURES_FAILED: table features request failed
class OFPETTableFeaturesFailed(_ofp_header):
    name = "OFPET_TABLE_FEATURES_FAILED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", 13, ofp_error_type),
                    ShortEnumField("errcode", 0, { 0: "OFPTFFC_BAD_TABLE",
                                                   1: "OFPTFFC_BAD_METADATA",
                                                   2: "OFPTFFC_BAD_TYPE",
                                                   3: "OFPTFFC_BAD_LEN",
                                                   4: "OFPTFFC_BAD_ARGUMENT",
                                                   5: "OFPTFFC_EPERM" }),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPET_EXPERIMENTER: experimenter-defined error (carries vendor id/type
# instead of an errcode)
class OFPETExperimenter(_ofp_header):
    name = "OFPET_EXPERIMENTER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 1, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("errtype", "OFPET_EXPERIMENTER", ofp_error_type),
                    ShortField("exp_type", None),
                    IntField("experimenter", None),
                    OFPacketField("data", "", Raw) ]
    overload_fields = {TCP: {"dport": 6653}}

# ofp_error_cls allows generic method OpenFlow()
# to choose the right class for dissection
ofp_error_cls = {     0: OFPETHelloFailed,
                      1: OFPETBadRequest,
                      2: OFPETBadAction,
                      3: OFPETBadInstruction,
                      4: OFPETBadMatch,
                      5: OFPETFlowModFailed,
                      6: OFPETGroupModFailed,
                      7: OFPETPortModFailed,
                      8: OFPETTableModFailed,
                      9: OFPETQueueOpFailed,
                     10: OFPETSwitchConfigFailed,
                     11: OFPETRoleRequestFailed,
                     12: OFPETMeterModFailed,
                     13: OFPETTableFeaturesFailed,
                  65535: OFPETExperimenter }

################ end of OFPT_ERRORS #################
# OFPT_ECHO_REQUEST: liveness probe, usually sent by the switch
class OFPTEchoRequest(_ofp_header):
    name = "OFPT_ECHO_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 2, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_ECHO_REPLY: answer to an echo request (same xid)
class OFPTEchoReply(_ofp_header):
    name = "OFPT_ECHO_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 3, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_EXPERIMENTER: vendor/experimenter extension message
class OFPTExperimenter(_ofp_header):
    name = "OFPT_EXPERIMENTER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 4, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntField("experimenter", 0),
                    IntField("exp_type", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_FEATURES_REQUEST: controller asks for switch capabilities
class OFPTFeaturesRequest(_ofp_header):
    name = "OFPT_FEATURES_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 5, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_FEATURES_REPLY: datapath id, buffering and capability flags
class OFPTFeaturesReply(_ofp_header):
    name = "OFPT_FEATURES_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 6, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    LongField("datapath_id", 0),
                    IntField("n_buffers", 0),
                    ByteField("n_tables", 1),
                    ByteField("auxiliary_id", 0),
                    XShortField("pad", 0),
                    FlagsField("capabilities", 0, 32, [ "FLOW_STATS",
                                                        "TABLE_STATS",
                                                        "PORT_STATS",
                                                        "GROUP_STATS",
                                                        "RESERVED", #undefined
                                                        "IP_REASM",
                                                        "QUEUE_STATS",
                                                        "ARP_MATCH_IP", #undefined
                                                        "PORT_BLOCKED"]),
                    IntField("reserved", 0) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPT_GET_CONFIG_REQUEST: controller asks for switch configuration
class OFPTGetConfigRequest(_ofp_header):
    name = "OFPT_GET_CONFIG_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 7, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_GET_CONFIG_REPLY: fragmentation policy and miss_send_len
class OFPTGetConfigReply(_ofp_header):
    name = "OFPT_GET_CONFIG_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 8, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("flags", 0, { 0: "FRAG_NORMAL",
                                                 1: "FRAG_DROP",
                                                 2: "FRAG_REASM",
                                                 3: "FRAG_MASK" }),
                    ShortField("miss_send_len", 0) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPT_SET_CONFIG: controller pushes switch configuration
class OFPTSetConfig(_ofp_header):
    name = "OFPT_SET_CONFIG"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 9, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("flags", 0, { 0: "FRAG_NORMAL",
                                                 1: "FRAG_DROP",
                                                 2: "FRAG_REASM",
                                                 3: "FRAG_MASK" }),
                    ShortField("miss_send_len", 128) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_PACKET_IN: switch forwards a (possibly buffered) packet
# to the controller, with the match that triggered it
class OFPTPacketIn(_ofp_header):
    name = "OFPT_PACKET_IN"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 10, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
                    ShortField("total_len", 0),
                    ByteEnumField("reason", 0, { 0: "OFPR_NO_MATCH",
                                                 1: "OFPR_ACTION",
                                                 2: "OFPR_INVALID_TTL"}),
                    ByteEnumField("table_id", 0, ofp_table),
                    LongField("cookie", 0),
                    MatchField("match"),
                    XShortField("pad", 0),
                    PacketField("data", "", Ether) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPT_FLOW_REMOVED: notification that a flow entry expired/was deleted
class OFPTFlowRemoved(_ofp_header):
    name = "OFPT_FLOW_REMOVED"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 11, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    LongField("cookie", 0),
                    ShortField("priority", 0),
                    ByteEnumField("reason", 0, { 0: "OFPRR_IDLE_TIMEOUT",
                                                 1: "OFPRR_HARD_TIMEOUT",
                                                 2: "OFPRR_DELETE",
                                                 3: "OFPRR_GROUP_DELETE"}),
                    ByteEnumField("table_id", 0, ofp_table),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0),
                    ShortField("idle_timeout", 0),
                    ShortField("hard_timeout", 0),
                    LongField("packet_count", 0),
                    LongField("byte_count", 0),
                    MatchField("match") ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPT_PORT_STATUS: notification that a port was added/removed/modified
class OFPTPortStatus(_ofp_header):
    name = "OFPT_PORT_STATUS"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 12, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ByteEnumField("reason", 0, { 0: "OFPPR_ADD",
                                                 1: "OFPPR_DELETE",
                                                 2: "OFPPR_MODIFY"}),
                    XBitField("pad", 0, 56),
                    PacketField("desc", OFPPort(), OFPPort) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPT_PACKET_OUT: controller injects a packet (or a buffered one)
# through the given action list
class OFPTPacketOut(_ofp_header):
    name = "OFPT_PACKET_OUT"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 13, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
                    IntEnumField("in_port", "CONTROLLER", ofp_port_no),
                    FieldLenField("actions_len", None, fmt="H", length_of="actions"),
                    XBitField("pad", 0, 48),
                    ActionPacketListField("actions", [], Packet,
                                          length_from=lambda pkt:pkt.actions_len),
                    PacketField("data", "", Ether) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_FLOW_MOD: add/modify/delete a flow table entry
class OFPTFlowMod(_ofp_header):
    name = "OFPT_FLOW_MOD"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 14, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    LongField("cookie", 0),
                    LongField("cookie_mask", 0),
                    ByteEnumField("table_id", 0, ofp_table),
                    ByteEnumField("cmd", 0, { 0: "OFPFC_ADD",
                                              1: "OFPFC_MODIFY",
                                              2: "OFPFC_MODIFY_STRICT",
                                              3: "OFPFC_DELETE",
                                              4: "OFPFC_DELETE_STRICT" }),
                    ShortField("idle_timeout", 0),
                    ShortField("hard_timeout", 0),
                    ShortField("priority", 0),
                    IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
                    IntEnumField("out_port", "ANY", ofp_port_no),
                    IntEnumField("out_group", "ANY", ofp_group),
                    FlagsField("flags", 0, 16, [ "SEND_FLOW_REM",
                                                 "CHECK_OVERLAP",
                                                 "RESET_COUNTS",
                                                 "NO_PKT_COUNTS",
                                                 "NO_BYT_COUNTS" ]),
                    XShortField("pad", 0),
                    MatchField("match"),
                    InstructionPacketListField("instructions", [], Packet,
                                               length_from=lambda pkt:pkt.len-48-(pkt.match.length+(8-pkt.match.length%8)%8)) ]
                                               # include match padding to match.length
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_GROUP_MOD: add/modify/delete a group table entry
class OFPTGroupMod(_ofp_header):
    name = "OFPT_GROUP_MOD"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 15, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("cmd", 0, { 0: "OFPGC_ADD",
                                               1: "OFPGC_MODIFY",
                                               2: "OFPGC_DELETE" }),
                    ByteEnumField("group_type", 0, { 0: "OFPGT_ALL",
                                                     1: "OFPGT_SELECT",
                                                     2: "OFPGT_INDIRECT",
                                                     3: "OFPGT_FF" }),
                    XByteField("pad", 0),
                    IntEnumField("group_id", 0, ofp_group),
                    BucketPacketListField("buckets", [], Packet,
                                          length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_PORT_MOD: change the behaviour of a physical port
class OFPTPortMod(_ofp_header):
    name = "OFPT_PORT_MOD"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 16, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("port_no", 0, ofp_port_no),
                    XIntField("pad1", 0),
                    MACField("hw_addr", "0"),
                    XShortField("pad2", 0),
                    FlagsField("config", 0, 32, ofp_port_config),
                    FlagsField("mask", 0, 32, ofp_port_config),
                    FlagsField("advertise", 0, 32, ofp_port_features),
                    XIntField("pad3", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPT_TABLE_MOD: change flow table configuration
class OFPTTableMod(_ofp_header):
    name = "OFPT_TABLE_MOD"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 17, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ByteEnumField("table_id", 0, ofp_table),
                    X3BytesField("pad", 0),
                    IntEnumField("config", 0, { 3: "OFPTC_DEPRECATED_MASK"}) ]
    overload_fields = {TCP: {"sport": 6653}}
#####################################################
################## OFPT_MULTIPART ###################
#####################################################

# multipart (statistics) message subtypes; the mp_type field of every
# OFPMP* class below takes one of these values
ofp_multipart_types = { 0: "OFPMP_DESC",
                        1: "OFPMP_FLOW",
                        2: "OFPMP_AGGREGATE",
                        3: "OFPMP_TABLE",
                        4: "OFPMP_PORT_STATS",
                        5: "OFPMP_QUEUE",
                        6: "OFPMP_GROUP",
                        7: "OFPMP_GROUP_DESC",
                        8: "OFPMP_GROUP_FEATURES",
                        9: "OFPMP_METER",
                        10: "OFPMP_METER_CONFIG",
                        11: "OFPMP_METER_FEATURES",
                        12: "OFPMP_TABLE_FEATURES",
                        13: "OFPMP_PORT_DESC",
                        65535: "OFPST_VENDOR" }

# "more replies/requests follow" flag bits of the multipart header
ofpmp_request_flags = [ "REQ_MORE" ]

ofpmp_reply_flags = [ "REPLY_MORE" ]
# OFPMP_DESC request: ask for the switch description strings
class OFPMPRequestDesc(_ofp_header):
    name = "OFPMP_REQUEST_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 0, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPMP_DESC reply: fixed-size manufacturer/hardware/software strings
class OFPMPReplyDesc(_ofp_header):
    name = "OFPMP_REPLY_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 0, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad", 0),
                    StrFixedLenField("mfr_desc", "", 256),
                    StrFixedLenField("hw_desc", "", 256),
                    StrFixedLenField("sw_desc", "", 256),
                    StrFixedLenField("serial_num", "", 32),
                    StrFixedLenField("dp_desc", "", 256) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_FLOW request: ask for per-flow statistics matching the filter
class OFPMPRequestFlow(_ofp_header):
    name = "OFPMP_REQUEST_FLOW"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 1, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    ByteEnumField("table_id", "ALL", ofp_table),
                    X3BytesField("pad2", 0),
                    IntEnumField("out_port", "ANY", ofp_port_no),
                    IntEnumField("out_group", "ANY", ofp_group),
                    IntField("pad3", 0),
                    LongField("cookie", 0),
                    LongField("cookie_mask", 0),
                    MatchField("match") ]
    overload_fields = {TCP: {"sport": 6653}}
# OFP_FLOW_STATS: one per-flow statistics entry of an OFPMP_REPLY_FLOW;
# self-describing (its 16-bit length is the first field)
class OFPFlowStats(Packet):
    def post_build(self, p, pay):
        # fill in the 16-bit entry length if the user left it unset
        if self.length is None:
            l = len(p)+len(pay)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_FLOW_STATS"
    fields_desc = [ ShortField("length", None),
                    ByteEnumField("table_id", 0, ofp_table),
                    XByteField("pad1", 0),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0),
                    ShortField("priority", 0),
                    ShortField("idle_timeout", 0),
                    ShortField("hard_timeout", 0),
                    FlagsField("flags", 0, 16, [ "SEND_FLOW_REM",
                                                 "CHECK_OVERLAP",
                                                 "RESET_COUNTS",
                                                 "NO_PKT_COUNTS",
                                                 "NO_BYT_COUNTS" ]),
                    IntField("pad2", 0),
                    LongField("cookie", 0),
                    LongField("packet_count", 0),
                    LongField("byte_count", 0),
                    MatchField("match"),
                    InstructionPacketListField("instructions", [], Packet,
                                               length_from=lambda pkt:pkt.length-56-pkt.match.length) ]
class FlowStatsPacketListField(PacketListField):
    """List of variable-length OFPFlowStats entries.

    Each entry announces its own size in its first two bytes, so the
    buffer is consumed one length-prefixed chunk at a time.
    """
    @staticmethod
    def _get_flow_stats_length(s):
        # the 16-bit length field opens every flow stats entry
        return struct.unpack("!H", s[:2])[0]

    def getfield(self, pkt, s):
        entries = []
        buf = s
        while buf:
            size = self._get_flow_stats_length(buf)
            entries.append(OFPFlowStats(buf[:size]))
            buf = buf[size:]
        return buf, entries
# OFPMP_FLOW reply: list of OFPFlowStats entries
class OFPMPReplyFlow(_ofp_header):
    name = "OFPMP_REPLY_FLOW"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 1, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    FlowStatsPacketListField("flow_stats", [], Packet,
                                             length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_AGGREGATE request: aggregate statistics over matching flows
class OFPMPRequestAggregate(_ofp_header):
    name = "OFPMP_REQUEST_AGGREGATE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 2, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    ByteEnumField("table_id", "ALL", ofp_table),
                    X3BytesField("pad2", 0),
                    IntEnumField("out_port", "ANY", ofp_port_no),
                    IntEnumField("out_group", "ANY", ofp_group),
                    IntField("pad3", 0),
                    LongField("cookie", 0),
                    LongField("cookie_mask", 0),
                    MatchField("match") ]
    overload_fields = {TCP: {"sport": 6653}}

# OFPMP_AGGREGATE reply: totals over the matched flows
class OFPMPReplyAggregate(_ofp_header):
    name = "OFPMP_REPLY_AGGREGATE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 2, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    LongField("packet_count", 0),
                    LongField("byte_count", 0),
                    IntField("flow_count", 0),
                    XIntField("pad2", 0) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_TABLE request: per-table statistics (no body beyond the header)
class OFPMPRequestTable(_ofp_header):
    name = "OFPMP_REQUEST_TABLE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 3, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFP_TABLE_STATS: fixed-size per-table statistics entry
class OFPTableStats(Packet):
    def extract_padding(self, s):
        # entries are fixed-size; everything after is the next entry
        return "", s
    name = "OFP_TABLE_STATS"
    fields_desc = [ ByteEnumField("table_id", 0, ofp_table),
                    X3BytesField("pad1", 0),
                    IntField("active_count", 0),
                    LongField("lookup_count", 0),
                    LongField("matched_count", 0) ]

# OFPMP_TABLE reply: list of OFPTableStats entries
class OFPMPReplyTable(_ofp_header):
    name = "OFPMP_REPLY_TABLE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 3, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    PacketListField("table_stats", None, OFPTableStats,
                                    length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_PORT_STATS request: statistics for one port (or ANY for all)
class OFPMPRequestPortStats(_ofp_header):
    name = "OFPMP_REQUEST_PORT_STATS"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 4, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("port_no", "ANY", ofp_port_no),
                    XIntField("pad", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
# OFP_PORT_STATS: fixed-size per-port counters entry
class OFPPortStats(Packet):
    def extract_padding(self, s):
        # entries are fixed-size; everything after is the next entry
        return "", s
    name = "OFP_PORT_STATS"
    fields_desc = [ IntEnumField("port_no", 0, ofp_port_no),
                    XIntField("pad", 0),
                    LongField("rx_packets", 0),
                    LongField("tx_packets", 0),
                    LongField("rx_bytes", 0),
                    LongField("tx_bytes", 0),
                    LongField("rx_dropped", 0),
                    LongField("tx_dropped", 0),
                    LongField("rx_errors", 0),
                    LongField("tx_errors", 0),
                    LongField("rx_frame_err", 0),
                    LongField("rx_over_err", 0),
                    LongField("rx_crc_err", 0),
                    LongField("collisions", 0),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0) ]

# OFPMP_PORT_STATS reply: list of OFPPortStats entries
class OFPMPReplyPortStats(_ofp_header):
    name = "OFPMP_REPLY_PORT_STATS"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 4, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    PacketListField("port_stats", None, OFPPortStats,
                                    length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_QUEUE request: statistics for a port/queue pair (ANY/ALL wildcards)
class OFPMPRequestQueue(_ofp_header):
    name = "OFPMP_REQUEST_QUEUE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 5, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("port_no", "ANY", ofp_port_no),
                    IntEnumField("queue_id", "ALL", ofp_queue) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFP_QUEUE_STATS: fixed-size per-queue counters entry
class OFPQueueStats(Packet):
    def extract_padding(self, s):
        # entries are fixed-size; everything after is the next entry
        return "", s
    name = "OFP_QUEUE_STATS"
    fields_desc = [ IntEnumField("port_no", 0, ofp_port_no),
                    IntEnumField("queue_id", 0, ofp_queue),
                    LongField("tx_bytes", 0),
                    LongField("tx_packets", 0),
                    LongField("tx_errors", 0),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0) ]

# OFPMP_QUEUE reply: list of OFPQueueStats entries
class OFPMPReplyQueue(_ofp_header):
    name = "OFPMP_REPLY_QUEUE"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 5, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    PacketListField("queue_stats", None, OFPQueueStats,
                                    length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_GROUP request: statistics for one group (or ANY for all)
class OFPMPRequestGroup(_ofp_header):
    name = "OFPMP_REQUEST_GROUP"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 6, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("group_id", "ANY", ofp_group),
                    XIntField("pad2", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
# OFP_BUCKET_STATS: fixed-size per-bucket counters entry
class OFPBucketStats(Packet):
    def extract_padding(self, s):
        # entries are fixed-size; everything after is the next entry
        return "", s
    name = "OFP_BUCKET_STATS"
    fields_desc = [ LongField("packet_count", 0),
                    LongField("byte_count", 0) ]

# OFP_GROUP_STATS: per-group statistics entry, self-describing
# (its 16-bit length is the first field), followed by bucket counters
class OFPGroupStats(Packet):
    def post_build(self, p, pay):
        # fill in the 16-bit entry length if the user left it unset
        if self.length is None:
            l = len(p)+len(pay)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_GROUP_STATS"
    fields_desc = [ ShortField("length", None),
                    XShortField("pad1", 0),
                    IntEnumField("group_id", 0, ofp_group),
                    IntField("ref_count", 0),
                    IntField("pad2", 0),
                    LongField("packet_count", 0),
                    LongField("byte_count", 0),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0),
                    PacketListField("bucket_stats", None, OFPBucketStats,
                                    length_from=lambda pkt:pkt.length-40) ]
class GroupStatsPacketListField(PacketListField):
    """List of variable-length OFPGroupStats entries.

    Each entry announces its own size in its first two bytes, so the
    buffer is consumed one length-prefixed chunk at a time.
    """
    @staticmethod
    def _get_group_stats_length(s):
        # the 16-bit length field opens every group stats entry
        return struct.unpack("!H", s[:2])[0]

    def getfield(self, pkt, s):
        entries = []
        buf = s
        while buf:
            size = self._get_group_stats_length(buf)
            entries.append(OFPGroupStats(buf[:size]))
            buf = buf[size:]
        return buf, entries
# OFPMP_GROUP reply: list of OFPGroupStats entries
class OFPMPReplyGroup(_ofp_header):
    name = "OFPMP_REPLY_GROUP"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 6, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    GroupStatsPacketListField("group_stats", [], Packet,
                                              length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_GROUP_DESC request: ask for all group descriptions
class OFPMPRequestGroupDesc(_ofp_header):
    name = "OFPMP_REQUEST_GROUP_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 7, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFP_GROUP_DESC: one group description, self-describing
# (its 16-bit length is the first field), followed by its buckets
class OFPGroupDesc(Packet):
    def post_build(self, p, pay):
        # fill in the 16-bit entry length if the user left it unset
        if self.length is None:
            l = len(p)+len(pay)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_GROUP_DESC"
    fields_desc = [ ShortField("length", None),
                    ByteEnumField("type", 0, { 0: "OFPGT_ALL",
                                               1: "OFPGT_SELECT",
                                               2: "OFPGT_INDIRECT",
                                               3: "OFPGT_FF" }),
                    XByteField("pad", 0),
                    IntEnumField("group_id", 0, ofp_group),
                    BucketPacketListField("buckets", None, Packet,
                                          length_from=lambda pkt:pkt.length-8) ]
class GroupDescPacketListField(PacketListField):
    """List of variable-length OFPGroupDesc entries.

    Each entry carries its own 16-bit length in its first two bytes.
    """
    @staticmethod
    def _get_group_desc_length(s):
        # the 16-bit length field opens every group desc entry
        return struct.unpack("!H", s[:2])[0]

    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            # BUG FIX: the original referenced the misspelled class name
            # 'GroupsDescPacketListField', raising NameError at dissection
            l = GroupDescPacketListField._get_group_desc_length(remain)
            current = remain[:l]
            remain = remain[l:]
            p = OFPGroupDesc(current)
            lst.append(p)
        return remain, lst
# OFPMP_GROUP_DESC reply: list of OFPGroupDesc entries
class OFPMPReplyGroupDesc(_ofp_header):
    name = "OFPMP_REPLY_GROUP_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 7, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    GroupDescPacketListField("group_descs", [], Packet,
                                             length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_GROUP_FEATURES request: ask for group table capabilities
class OFPMPRequestGroupFeatures(_ofp_header):
    name = "OFPMP_REQUEST_GROUP_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 8, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# NOTE: Python 2 idiom -- dict.values() returns a sliceable list here;
# under Python 3 this would need list(ofp_action_types.values())[:-1]
ofp_action_types_flags = ofp_action_types.values()[:-1] # no ofpat_experimenter flag
# OFPMP_GROUP_FEATURES reply: supported group types, capabilities,
# per-type group limits and per-type supported action sets
class OFPMPReplyGroupFeatures(_ofp_header):
    name = "OFPMP_REPLY_GROUP_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 8, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    FlagsField("types", 0, 32, [ "ALL",
                                                 "SELECT",
                                                 "INDIRECT",
                                                 "FF" ]),
                    FlagsField("capabilities", 0, 32, [ "SELECT_WEIGHT",
                                                        "SELECT_LIVENESS",
                                                        "CHAINING",
                                                        "CHAINING_CHECKS" ]),
                    IntField("max_group_all", 0),
                    IntField("max_group_select", 0),
                    IntField("max_group_indirect", 0),
                    IntField("max_group_ff", 0),
                    # no ofpat_experimenter flag
                    FlagsField("actions_all", 0, 32, ofp_action_types_flags),
                    FlagsField("actions_select", 0, 32, ofp_action_types_flags),
                    FlagsField("actions_indirect", 0, 32, ofp_action_types_flags),
                    FlagsField("actions_ff", 0, 32, ofp_action_types_flags) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_METER request: statistics for one meter (or ALL)
class OFPMPRequestMeter(_ofp_header):
    name = "OFPMP_REQUEST_METER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 9, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("meter_id", "ALL", ofp_meter),
                    XIntField("pad2", 0) ]
    overload_fields = {TCP: {"sport": 6653}}

# OFP_METER_BAND_STATS: fixed-size per-band counters entry
class OFPMeterBandStats(Packet):
    def extract_padding(self, s):
        # entries are fixed-size; everything after is the next entry
        return "", s
    name = "OFP_METER_BAND_STATS"
    fields_desc = [ LongField("packet_band_count", 0),
                    LongField("byte_band_count", 0) ]
class OFPMeterStats(Packet):
    """Per-meter statistics entry used in OFPMP_REPLY_METER.

    Self-describing: its 16-bit 'len' field sits at offset 4, right
    after meter_id, and is filled in by post_build when left unset.
    """
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            # the length field lives at bytes 4-6 of the entry
            p = p[:4] + struct.pack("!H", l) + p[6:]
        return p + pay
    # BUG FIX: was "OFP_GROUP_STATS", a copy-paste leftover from the
    # group statistics class above
    name = "OFP_METER_STATS"
    fields_desc = [ IntEnumField("meter_id", 1, ofp_meter),
                    ShortField("len", None),
                    XBitField("pad", 0, 48),
                    IntField("flow_count", 0),
                    LongField("packet_in_count", 0),
                    LongField("byte_in_count", 0),
                    IntField("duration_sec", 0),
                    IntField("duration_nsec", 0),
                    PacketListField("band_stats", None, OFPMeterBandStats,
                                    length_from=lambda pkt:pkt.len-40) ]
class MeterStatsPacketListField(PacketListField):
    """List of variable-length OFPMeterStats entries.

    Each entry carries its own 16-bit length at offset 4 (the 'len'
    field of ofp_meter_stats).
    """
    @staticmethod
    def _get_meter_stats_length(s):
        # the 16-bit length field sits at bytes 4-6 of each entry
        return struct.unpack("!H", s[4:6])[0]

    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            l = MeterStatsPacketListField._get_meter_stats_length(remain)
            current = remain[:l]
            remain = remain[l:]
            p = OFPMeterStats(current)
            lst.append(p)
        # dropped the original's dead accumulators (l = 0 / ret = ""
        # and 'remain + ret'); behaviour is unchanged
        return remain, lst
# OFPMP_METER reply: list of OFPMeterStats entries
class OFPMPReplyMeter(_ofp_header):
    name = "OFPMP_REPLY_METER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 9, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    MeterStatsPacketListField("meter_stats", [], Packet,
                                              length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}

# OFPMP_METER_CONFIG request: configuration of one meter (or ALL)
class OFPMPRequestMeterConfig(_ofp_header):
    name = "OFPMP_REQUEST_METER_CONFIG"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 10, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("meter_id", "ALL", ofp_meter),
                    XIntField("pad2", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPMeterConfig(Packet):
    """Meter configuration entry used in OFPMP_REPLY_METER_CONFIG.

    Self-describing: its 16-bit length is the first field, filled in
    by post_build when left unset, and covers the meter bands too.
    """
    def post_build(self, p, pay):
        if self.length is None:
            l = len(p)+len(pay)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_METER_CONFIG"
    fields_desc = [ ShortField("length", None),
                    FlagsField("flags", 0, 16, [ "KBPS",
                                                 "PKTPS",
                                                 "BURST",
                                                 "STATS" ]),
                    IntEnumField("meter_id", 1, ofp_meter),
                    MeterBandPacketListField("bands", [], Packet,
                                             # BUG FIX: was pkt.len, but this
                                             # class's field is named 'length'
                                             length_from=lambda pkt:pkt.length-8) ]
class MeterConfigPacketListField(PacketListField):
    """List of variable-length OFPMeterConfig entries.

    Each entry announces its own size in its first two bytes, so the
    buffer is consumed one length-prefixed chunk at a time.
    """
    @staticmethod
    def _get_meter_config_length(s):
        # the 16-bit length field opens every meter config entry
        return struct.unpack("!H", s[:2])[0]

    def getfield(self, pkt, s):
        entries = []
        buf = s
        while buf:
            size = self._get_meter_config_length(buf)
            entries.append(OFPMeterConfig(buf[:size]))
            buf = buf[size:]
        return buf, entries
class OFPMPReplyMeterConfig(_ofp_header):
    """OFPMP_REPLY_METER_CONFIG (multipart reply, mp_type 10)."""
    name = "OFPMP_REPLY_METER_CONFIG"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 10, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    MeterConfigPacketListField("meter_configs", [], Packet,
                                               length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPMPRequestMeterFeatures(_ofp_header):
    """OFPMP_REQUEST_METER_FEATURES (multipart request, mp_type 11, empty body)."""
    name = "OFPMP_REQUEST_METER_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 11, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPMPReplyMeterFeatures(_ofp_header):
    """OFPMP_REPLY_METER_FEATURES (multipart reply, mp_type 11)."""
    name = "OFPMP_REPLY_METER_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 11, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    IntField("max_meter", 0),
                    FlagsField("band_types", 0, 32, [ "DROP",
                                                      "DSCP_REMARK",
                                                      "EXPERIMENTER" ]),
                    # "KBPS" was misspelled "KPBS" here; the same flag set is
                    # spelled KBPS in OFPMeterConfig and OFPTMeterMod
                    FlagsField("capabilities", 0, 32, [ "KBPS",
                                                        "PKTPS",
                                                        "BURST",
                                                        "STATS" ]),
                    ByteField("max_bands", 0),
                    ByteField("max_color", 0),
                    XShortField("pad2", 0) ]
    overload_fields = {TCP: {"dport": 6653}}
####### table features for multipart messages #######
class _ofp_table_features_prop_header(Packet):
    """Base class for OFPTFPT_* table-features properties.

    Handles the common length field and the zero-padding of every
    property to an 8-byte boundary on build.
    """
    name = "Dummy OpenFlow Table Features Properties Header"
    def post_build(self, p, pay):
        l = self.length
        if l is None:
            l = len(p)+len(pay)
        # the 16-bit length at offset 2 is always rewritten
        p = p[:2] + struct.pack("!H", l) + p[4:]
        # every message will be padded correctly
        zero_bytes = (8 - l%8) % 8
        p += "\x00" * zero_bytes
        return p + pay
    def extract_padding(self, s):
        # no payload: all remaining bytes belong to the next property.
        # (the original computed l/zero_bytes here but never used them)
        return "", s
# property type codes of the OpenFlow 1.3 ofp_table_feature_prop_type enum
ofp_table_features_prop_types = { 0: "OFPTFPT_INSTRUCTIONS",
                                  1: "OFPTFPT_INSTRUCTIONS_MISS",
                                  2: "OFPTFPT_NEXT_TABLES",
                                  3: "OFPTFPT_NEXT_TABLES_MISS",
                                  4: "OFPTFPT_WRITE_ACTIONS",
                                  5: "OFPTFPT_WRITE_ACTIONS_MISS",
                                  6: "OFPTFPT_APPLY_ACTIONS",
                                  7: "OFPTFPT_APPLY_ACTIONS_MISS",
                                  8: "OFPTFPT_MATCH",
                                  10: "OFPTFPT_WILDCARDS",
                                  12: "OFPTFPT_WRITE_SETFIELD",
                                  13: "OFPTFPT_WRITE_SETFIELD_MISS",
                                  14: "OFPTFPT_APPLY_SETFIELD",
                                  15: "OFPTFPT_APPLY_SETFIELD_MISS",
                                  65534: "OFPTFPT_EXPERIMENTER",
                                  65535: "OFPTFPT_EXPERIMENTER_MISS" }
class OFPTFPTInstructions(_ofp_table_features_prop_header):
    """Table-features property: supported instructions (type 0)."""
    name = "OFPTFPT_INSTRUCTIONS"
    fields_desc = [ ShortField("type", 0),
                    ShortField("length", None),
                    InstructionIDPacketListField("instruction_ids", [], Packet,
                                                 length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTInstructionsMiss(_ofp_table_features_prop_header):
    """Table-features property: instructions for table-miss flows (type 1)."""
    name = "OFPTFPT_INSTRUCTIONS_MISS"
    fields_desc = [ ShortField("type", 1),
                    ShortField("length", None),
                    InstructionIDPacketListField("instruction_ids", [], Packet,
                                                 length_from=lambda pkt:pkt.length-4) ]
class OFPTableID(Packet):
    """Single table-id entry used by the OFPTFPT_NEXT_TABLES(_MISS) lists."""
    def extract_padding(self, s):
        # no payload: remaining bytes belong to the next entry
        return "", s
    name = "OFP_TABLE_ID"
    fields_desc = [ ByteEnumField("table_id", 0, ofp_table) ]
class OFPTFPTNextTables(_ofp_table_features_prop_header):
    """Table-features property: reachable next tables (type 2)."""
    name = "OFPTFPT_NEXT_TABLES"
    fields_desc = [ ShortField("type", 2),
                    ShortField("length", None),
                    PacketListField("next_table_ids", None, OFPTableID,
                                    length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTNextTablesMiss(_ofp_table_features_prop_header):
    """Table-features property: next tables for table-miss flows (type 3)."""
    name = "OFPTFPT_NEXT_TABLES_MISS"
    fields_desc = [ ShortField("type", 3),
                    ShortField("length", None),
                    PacketListField("next_table_ids", None, OFPTableID,
                                    length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTWriteActions(_ofp_table_features_prop_header):
    """Table-features property: write-actions supported (type 4)."""
    name = "OFPTFPT_WRITE_ACTIONS"
    fields_desc = [ ShortField("type", 4),
                    ShortField("length", None),
                    ActionIDPacketListField("action_ids", [], Packet,
                                            length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTWriteActionsMiss(_ofp_table_features_prop_header):
    """Table-features property: write-actions for table-miss flows (type 5)."""
    name = "OFPTFPT_WRITE_ACTIONS_MISS"
    fields_desc = [ ShortField("type", 5),
                    ShortField("length", None),
                    ActionIDPacketListField("action_ids", [], Packet,
                                            length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTApplyActions(_ofp_table_features_prop_header):
    """Table-features property: apply-actions supported (type 6)."""
    name = "OFPTFPT_APPLY_ACTIONS"
    fields_desc = [ ShortField("type", 6),
                    ShortField("length", None),
                    ActionIDPacketListField("action_ids", [], Packet,
                                            length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTApplyActionsMiss(_ofp_table_features_prop_header):
    """Table-features property: apply-actions for table-miss flows (type 7)."""
    name = "OFPTFPT_APPLY_ACTIONS_MISS"
    fields_desc = [ ShortField("type", 7),
                    ShortField("length", None),
                    ActionIDPacketListField("action_ids", [], Packet,
                                            length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTMatch(_ofp_table_features_prop_header):
    """Table-features property: supported match fields, as OXM ids (type 8)."""
    name = "OFPTFPT_MATCH"
    fields_desc = [ ShortField("type", 8),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTWildcards(_ofp_table_features_prop_header):
    """Table-features property: fields that may be wildcarded (type 10)."""
    name = "OFPTFPT_WILDCARDS"
    fields_desc = [ ShortField("type", 10),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTWriteSetField(_ofp_table_features_prop_header):
    """Table-features property: set-field in write-actions (type 12)."""
    name = "OFPTFPT_WRITE_SETFIELD"
    fields_desc = [ ShortField("type", 12),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTWriteSetFieldMiss(_ofp_table_features_prop_header):
    """Table-features property: set-field in write-actions, table-miss (type 13)."""
    name = "OFPTFPT_WRITE_SETFIELD_MISS"
    fields_desc = [ ShortField("type", 13),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTApplySetField(_ofp_table_features_prop_header):
    """Table-features property: set-field in apply-actions (type 14)."""
    name = "OFPTFPT_APPLY_SETFIELD"
    fields_desc = [ ShortField("type", 14),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTApplySetFieldMiss(_ofp_table_features_prop_header):
    """Table-features property: set-field in apply-actions, table-miss (type 15)."""
    name = "OFPTFPT_APPLY_SETFIELD_MISS"
    fields_desc = [ ShortField("type", 15),
                    ShortField("length", None),
                    OXMIDPacketListField("oxm_ids", [], Packet,
                                         length_from=lambda pkt:pkt.length-4) ]
class OFPTFPTExperimenter(_ofp_table_features_prop_header):
    """Table-features property: experimenter-defined (type 65534)."""
    name = "OFPTFPT_EXPERIMENTER"
    fields_desc = [ ShortField("type", 65534),
                    ShortField("length", None),
                    IntField("experimenter", 0),
                    IntField("exp_type", 0),
                    PacketField("experimenter_data", None, Raw) ]
class OFPTFPTExperimenterMiss(_ofp_table_features_prop_header):
    """Table-features property: experimenter-defined, table-miss (type 65535)."""
    name = "OFPTFPT_EXPERIMENTER_MISS"
    fields_desc = [ ShortField("type", 65535),
                    ShortField("length", None),
                    IntField("experimenter", 0),
                    IntField("exp_type", 0),
                    PacketField("experimenter_data", None, Raw) ]
# maps a property type code to its dissection class; used by
# TableFeaturesPropPacketListField.m2i to pick the right layer
ofp_table_features_prop_cls = { 0: OFPTFPTInstructions,
                                1: OFPTFPTInstructionsMiss,
                                2: OFPTFPTNextTables,
                                3: OFPTFPTNextTablesMiss,
                                4: OFPTFPTWriteActions,
                                5: OFPTFPTWriteActionsMiss,
                                6: OFPTFPTApplyActions,
                                7: OFPTFPTApplyActionsMiss,
                                8: OFPTFPTMatch,
                                10: OFPTFPTWildcards,
                                12: OFPTFPTWriteSetField,
                                13: OFPTFPTWriteSetFieldMiss,
                                14: OFPTFPTApplySetField,
                                15: OFPTFPTApplySetFieldMiss,
                                65534: OFPTFPTExperimenter,
                                65535: OFPTFPTExperimenterMiss }
class TableFeaturesPropPacketListField(PacketListField):
    """Dissects a sequence of 8-byte-aligned OFPTFPT_* property entries."""
    @staticmethod
    def _get_table_features_prop_length(s):
        # the 16-bit length sits at offset 2 of every property header
        return struct.unpack("!H", s[2:4])[0]
    def m2i(self, pkt, s):
        # dispatch on the 16-bit property type, falling back to Raw
        prop_type = struct.unpack("!H", s[:2])[0]
        cls = ofp_table_features_prop_cls.get(prop_type, Raw)
        return cls(s)
    def getfield(self, pkt, s):
        properties = []
        buf = s
        while len(buf) >= 4:
            prop_len = TableFeaturesPropPacketListField._get_table_features_prop_length(buf)
            # properties are padded to the next 8-byte boundary
            padded_len = prop_len + (8 - prop_len % 8) % 8
            if prop_len < 4 or len(buf) < padded_len:
                # reject zero/short lengths and truncated entries
                break
            properties.append(self.m2i(pkt, buf[:padded_len]))
            buf = buf[padded_len:]
        return buf, properties
class OFPTableFeatures(Packet):
    """OFP_TABLE_FEATURES entry: a 64-byte header plus a property list."""
    def post_build(self, p, pay):
        # fill in the total entry length if it was left unset
        if self.length is None:
            l = len(p)+len(pay)
            p = struct.pack("!H", l) + p[2:]
        return p + pay
    name = "OFP_TABLE_FEATURES"
    fields_desc = [ ShortField("length", None),
                    ByteEnumField("table_id", 0, ofp_table),
                    XBitField("pad", 0, 40),
                    StrFixedLenField("table_name", "", 32),
                    LongField("metadata_match", 0),
                    LongField("metadata_write", 0),
                    IntEnumField("config", 0, { 0: "OFPTC_NO_MASK",
                                                3: "OFPTC_DEPRECATED_MASK" }),
                    IntField("max_entries", 0),
                    TableFeaturesPropPacketListField("properties", [], Packet,
                                                     length_from=lambda pkt:pkt.length-64) ]
class TableFeaturesPacketListField(PacketListField):
    """PacketListField splitting a byte string into OFPTableFeatures entries."""
    @staticmethod
    def _get_table_features_length(s):
        # each entry starts with its own 16-bit big-endian length
        return struct.unpack("!H", s[:2])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        while remain:
            l = TableFeaturesPacketListField._get_table_features_length(remain)
            if not l:
                # a zero-length entry would loop forever on a malformed packet
                break
            current = remain[:l]
            remain = remain[l:]
            lst.append(OFPTableFeatures(current))
        return remain, lst
class OFPMPRequestTableFeatures(_ofp_header):
    """OFPMP_REQUEST_TABLE_FEATURES (multipart request, mp_type 12)."""
    name = "OFPMP_REQUEST_TABLE_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 12, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    TableFeaturesPacketListField("table_features", [], Packet,
                                                 length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPMPReplyTableFeatures(_ofp_header):
    """OFPMP_REPLY_TABLE_FEATURES (multipart reply, mp_type 12)."""
    name = "OFPMP_REPLY_TABLE_FEATURES"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 12, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    TableFeaturesPacketListField("table_features", [], Packet,
                                                 length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}
############### end of table features ###############
class OFPMPRequestPortDesc(_ofp_header):
    """OFPMP_REQUEST_PORT_DESC (multipart request, mp_type 13).

    NOTE(review): this carries port_no/pad fields; in the OpenFlow 1.3
    spec the PORT_DESC request body is empty - confirm against the
    intended wire format before relying on these extra fields.
    """
    name = "OFPMP_REQUEST_PORT_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 13, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntEnumField("port_no", 0, ofp_port_no),
                    XIntField("pad", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPMPReplyPortDesc(_ofp_header):
    """OFPMP_REPLY_PORT_DESC (multipart reply, mp_type 13): list of ports."""
    name = "OFPMP_REPLY_PORT_DESC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 13, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    PacketListField("ports", None, OFPPort,
                                    length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPMPRequestExperimenter(_ofp_header):
    """OFPMP experimenter multipart request (mp_type 0xffff)."""
    name = "OFPST_REQUEST_EXPERIMENTER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 18, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 65535, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_request_flags),
                    XIntField("pad1", 0),
                    IntField("experimenter", 0),
                    IntField("exp_type", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPMPReplyExperimenter(_ofp_header):
    """OFPMP experimenter multipart reply (mp_type 0xffff)."""
    name = "OFPST_REPLY_EXPERIMENTER"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 19, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("mp_type", 65535, ofp_multipart_types),
                    FlagsField("flags", 0, 16, ofpmp_reply_flags),
                    XIntField("pad1", 0),
                    IntField("experimenter", 0),
                    IntField("exp_type", 0) ]
    overload_fields = {TCP: {"dport": 6653}}
# ofp_multipart_request/reply_cls allows generic method OpenFlow()
# to choose the right class for dissection
# maps an mp_type value to the multipart-request class for dissection
ofp_multipart_request_cls = { 0: OFPMPRequestDesc,
                              1: OFPMPRequestFlow,
                              2: OFPMPRequestAggregate,
                              3: OFPMPRequestTable,
                              4: OFPMPRequestPortStats,
                              5: OFPMPRequestQueue,
                              6: OFPMPRequestGroup,
                              7: OFPMPRequestGroupDesc,
                              8: OFPMPRequestGroupFeatures,
                              9: OFPMPRequestMeter,
                              10: OFPMPRequestMeterConfig,
                              11: OFPMPRequestMeterFeatures,
                              12: OFPMPRequestTableFeatures,
                              13: OFPMPRequestPortDesc,
                              65535: OFPMPRequestExperimenter }
# maps an mp_type value to the multipart-reply class for dissection
ofp_multipart_reply_cls = { 0: OFPMPReplyDesc,
                            1: OFPMPReplyFlow,
                            2: OFPMPReplyAggregate,
                            3: OFPMPReplyTable,
                            4: OFPMPReplyPortStats,
                            5: OFPMPReplyQueue,
                            6: OFPMPReplyGroup,
                            7: OFPMPReplyGroupDesc,
                            8: OFPMPReplyGroupFeatures,
                            9: OFPMPReplyMeter,
                            10: OFPMPReplyMeterConfig,
                            11: OFPMPReplyMeterFeatures,
                            12: OFPMPReplyTableFeatures,
                            13: OFPMPReplyPortDesc,
                            65535: OFPMPReplyExperimenter }
############## end of OFPT_MULTIPART ################
class OFPTBarrierRequest(_ofp_header):
    """OFPT_BARRIER_REQUEST (type 20): header only, no body."""
    name = "OFPT_BARRIER_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 20, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPTBarrierReply(_ofp_header):
    """OFPT_BARRIER_REPLY (type 21): header only, no body."""
    name = "OFPT_BARRIER_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 21, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPTQueueGetConfigRequest(_ofp_header):
    """OFPT_QUEUE_GET_CONFIG_REQUEST (type 22)."""
    name = "OFPT_QUEUE_GET_CONFIG_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 22, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("port_no", "ANY", ofp_port_no),
                    XIntField("pad", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPTQueueGetConfigReply(_ofp_header):
    """OFPT_QUEUE_GET_CONFIG_REPLY (type 23): queue list for one port."""
    name = "OFPT_QUEUE_GET_CONFIG_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 23, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("port", 0, ofp_port_no),
                    XIntField("pad", 0),
                    QueuePacketListField("queues", [], Packet,
                                         length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPTRoleRequest(_ofp_header):
    """OFPT_ROLE_REQUEST (type 24): controller role negotiation."""
    name = "OFPT_ROLE_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 24, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("role", 0, { 0: "OFPCR_ROLE_NOCHANGE",
                                              1: "OFPCR_ROLE_EQUAL",
                                              2: "OFPCR_ROLE_MASTER",
                                              3: "OFPCR_ROLE_SLAVE" }),
                    XIntField("pad", 0),
                    LongField("generation_id", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPTRoleReply(_ofp_header):
    """OFPT_ROLE_REPLY (type 25): controller role negotiation reply."""
    name = "OFPT_ROLE_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 25, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    IntEnumField("role", 0, { 0: "OFPCR_ROLE_NOCHANGE",
                                              1: "OFPCR_ROLE_EQUAL",
                                              2: "OFPCR_ROLE_MASTER",
                                              3: "OFPCR_ROLE_SLAVE" }),
                    XIntField("pad", 0),
                    LongField("generation_id", 0) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPTGetAsyncRequest(_ofp_header):
    """OFPT_GET_ASYNC_REQUEST (type 26): fixed 8-byte header only."""
    name = "OFPT_GET_ASYNC_REQUEST"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 26, ofp_type),
                    ShortField("len", 8),
                    IntField("xid", 0) ]
    overload_fields = {TCP: {"sport": 6653}}
# flag-name lists for the async-configuration bitmasks below
ofp_packet_in_reason = [ "NO_MATCH",
                         "ACTION",
                         "INVALID_TTL" ]
ofp_port_reason = [ "ADD",
                    "DELETE",
                    "MODIFY" ]
ofp_flow_removed_reason = [ "IDLE_TIMEOUT",
                            "HARD_TIMEOUT",
                            "DELETE",
                            "GROUP_DELETE" ]
class OFPTGetAsyncReply(_ofp_header):
    """OFPT_GET_ASYNC_REPLY (type 27): current async-message filters."""
    name = "OFPT_GET_ASYNC_REPLY"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 27, ofp_type),
                    ShortField("len", 32),
                    IntField("xid", 0),
                    FlagsField("packet_in_mask_master", 0, 32, ofp_packet_in_reason),
                    FlagsField("packet_in_mask_slave", 0, 32, ofp_packet_in_reason),
                    FlagsField("port_status_mask_master", 0, 32, ofp_port_reason),
                    FlagsField("port_status_mask_slave", 0, 32, ofp_port_reason),
                    FlagsField("flow_removed_mask_master", 0, 32, ofp_flow_removed_reason),
                    FlagsField("flow_removed_mask_slave", 0, 32, ofp_flow_removed_reason) ]
    overload_fields = {TCP: {"dport": 6653}}
class OFPTSetAsync(_ofp_header):
    """OFPT_SET_ASYNC (type 28): configure async-message filters."""
    name = "OFPT_SET_ASYNC"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 28, ofp_type),
                    ShortField("len", 32),
                    IntField("xid", 0),
                    FlagsField("packet_in_mask_master", 0, 32, ofp_packet_in_reason),
                    FlagsField("packet_in_mask_slave", 0, 32, ofp_packet_in_reason),
                    FlagsField("port_status_mask_master", 0, 32, ofp_port_reason),
                    FlagsField("port_status_mask_slave", 0, 32, ofp_port_reason),
                    FlagsField("flow_removed_mask_master", 0, 32, ofp_flow_removed_reason),
                    FlagsField("flow_removed_mask_slave", 0, 32, ofp_flow_removed_reason) ]
    overload_fields = {TCP: {"sport": 6653}}
class OFPTMeterMod(_ofp_header):
    """OFPT_METER_MOD (type 29): add/modify/delete a meter and its bands."""
    name = "OFPT_METER_MOD"
    fields_desc = [ ByteEnumField("version", 0x04, ofp_version),
                    ByteEnumField("type", 29, ofp_type),
                    ShortField("len", None),
                    IntField("xid", 0),
                    ShortEnumField("cmd", 0, { 0: "OFPMC_ADD",
                                               1: "OFPMC_MODIFY",
                                               2: "OFPMC_DELETE" }),
                    FlagsField("flags", 0, 16, [ "KBPS",
                                                 "PKTPS",
                                                 "BURST",
                                                 "STATS" ]),
                    IntEnumField("meter_id", 1, ofp_meter),
                    MeterBandPacketListField("bands", [], Packet,
                                             length_from=lambda pkt:pkt.len-16) ]
    overload_fields = {TCP: {"sport": 6653}}
# ofpt_cls allows generic method OpenFlow() to choose the right class for dissection
# maps an OpenFlow message type byte to its class; types 1 (error),
# 18 and 19 (multipart) are dispatched on a second field in OpenFlow()
ofpt_cls = { 0: OFPTHello,
             #1: OFPTError,
             2: OFPTEchoRequest,
             3: OFPTEchoReply,
             4: OFPTExperimenter,
             5: OFPTFeaturesRequest,
             6: OFPTFeaturesReply,
             7: OFPTGetConfigRequest,
             8: OFPTGetConfigReply,
             9: OFPTSetConfig,
             10: OFPTPacketIn,
             11: OFPTFlowRemoved,
             12: OFPTPortStatus,
             13: OFPTPacketOut,
             14: OFPTFlowMod,
             15: OFPTGroupMod,
             16: OFPTPortMod,
             17: OFPTTableMod,
             #18: OFPTMultipartRequest,
             #19: OFPTMultipartReply,
             20: OFPTBarrierRequest,
             21: OFPTBarrierReply,
             22: OFPTQueueGetConfigRequest,
             23: OFPTQueueGetConfigReply,
             24: OFPTRoleRequest,
             25: OFPTRoleReply,
             26: OFPTGetAsyncRequest,
             27: OFPTGetAsyncReply,
             28: OFPTSetAsync,
             29: OFPTMeterMod }
# keep the original guesser so non-OpenFlow traffic is still handled
TCP_guess_payload_class_copy = TCP.guess_payload_class
def OpenFlow(self, payload):
    """Guess the OpenFlow 1.3 message class of a TCP payload.

    Installed as TCP.guess_payload_class; falls back to the saved
    original implementation for non-OpenFlow ports.  May be called
    with self=None (from OFPPacketField), hence the short-circuit.
    """
    # BUG FIX: the last test was a duplicated "self.sport == 6653", so
    # OpenFlow on legacy source port 6633 was never recognized
    if self is None or self.dport == 6653 or self.dport == 6633 or self.sport == 6653 or self.sport == 6633:
        # port 6653 has been allocated by IANA, port 6633 should no longer be used
        # OpenFlow function may be called with None self in OFPPacketField
        of_type = ord(payload[1])
        if of_type == 1:
            # error messages are dispatched on their err_type field
            err_type = ord(payload[9])
            # err_type is a short int, but last byte is enough
            if err_type == 255: err_type = 65535
            return ofp_error_cls[err_type]
        elif of_type == 18:
            # multipart requests are dispatched on their mp_type field
            mp_type = ord(payload[9])
            if mp_type == 255: mp_type = 65535
            return ofp_multipart_request_cls[mp_type]
        elif of_type == 19:
            # multipart replies are dispatched on their mp_type field
            mp_type = ord(payload[9])
            if mp_type == 255: mp_type = 65535
            return ofp_multipart_reply_cls[mp_type]
        else:
            return ofpt_cls[of_type]
    else:
        return TCP_guess_payload_class_copy(self, payload)
TCP.guess_payload_class = OpenFlow
| 42.04184 | 127 | 0.506483 |
ace4dd9f088b57f01b4259e9979070862037e557 | 3,774 | py | Python | src/_cffi_src/openssl/crypto.py | aganders3/cryptography | dcf82c6c00bb006b6355c51d02c816769cb534a3 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/_cffi_src/openssl/crypto.py | aganders3/cryptography | dcf82c6c00bb006b6355c51d02c816769cb534a3 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/_cffi_src/openssl/crypto.py | aganders3/cryptography | dcf82c6c00bb006b6355c51d02c816769cb534a3 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
INCLUDES = """
#include <openssl/crypto.h>
"""
TYPES = """
static const long Cryptography_HAS_MEM_FUNCTIONS;
static const long Cryptography_HAS_OPENSSL_CLEANUP;
static const int SSLEAY_VERSION;
static const int SSLEAY_CFLAGS;
static const int SSLEAY_PLATFORM;
static const int SSLEAY_DIR;
static const int SSLEAY_BUILT_ON;
static const int OPENSSL_VERSION;
static const int OPENSSL_CFLAGS;
static const int OPENSSL_BUILT_ON;
static const int OPENSSL_PLATFORM;
static const int OPENSSL_DIR;
"""
FUNCTIONS = """
void OPENSSL_cleanup(void);
/* SSLeay was removed in 1.1.0 */
unsigned long SSLeay(void);
const char *SSLeay_version(int);
/* these functions were added to replace the SSLeay functions in 1.1.0 */
unsigned long OpenSSL_version_num(void);
const char *OpenSSL_version(int);
/* this is a macro in 1.1.0 */
void *OPENSSL_malloc(size_t);
void OPENSSL_free(void *);
/* Signature changed significantly in 1.1.0, only expose there for sanity */
int Cryptography_CRYPTO_set_mem_functions(
void *(*)(size_t, const char *, int),
void *(*)(void *, size_t, const char *, int),
void (*)(void *, const char *, int));
void *Cryptography_malloc_wrapper(size_t, const char *, int);
void *Cryptography_realloc_wrapper(void *, size_t, const char *, int);
void Cryptography_free_wrapper(void *, const char *, int);
"""
CUSTOMIZATIONS = """
/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the
values so you can use either one. This is so we can use the new function
names no matter what OpenSSL we're running on, but users on older pyOpenSSL
releases won't see issues if they're running OpenSSL 1.1.0 */
#if !defined(SSLEAY_VERSION)
# define SSLeay OpenSSL_version_num
# define SSLeay_version OpenSSL_version
# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER
# define SSLEAY_VERSION OPENSSL_VERSION
# define SSLEAY_CFLAGS OPENSSL_CFLAGS
# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON
# define SSLEAY_PLATFORM OPENSSL_PLATFORM
# define SSLEAY_DIR OPENSSL_DIR
#endif
#if !defined(OPENSSL_VERSION)
# define OpenSSL_version_num SSLeay
# define OpenSSL_version SSLeay_version
# define OPENSSL_VERSION SSLEAY_VERSION
# define OPENSSL_CFLAGS SSLEAY_CFLAGS
# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON
# define OPENSSL_PLATFORM SSLEAY_PLATFORM
# define OPENSSL_DIR SSLEAY_DIR
#endif
#if CRYPTOGRAPHY_IS_LIBRESSL
static const long Cryptography_HAS_OPENSSL_CLEANUP = 0;
void (*OPENSSL_cleanup)(void) = NULL;
#else
static const long Cryptography_HAS_OPENSSL_CLEANUP = 1;
#endif
#if CRYPTOGRAPHY_IS_LIBRESSL || CRYPTOGRAPHY_IS_BORINGSSL
static const long Cryptography_HAS_MEM_FUNCTIONS = 0;
int (*Cryptography_CRYPTO_set_mem_functions)(
void *(*)(size_t, const char *, int),
void *(*)(void *, size_t, const char *, int),
void (*)(void *, const char *, int)) = NULL;
#else
static const long Cryptography_HAS_MEM_FUNCTIONS = 1;
int Cryptography_CRYPTO_set_mem_functions(
void *(*m)(size_t, const char *, int),
void *(*r)(void *, size_t, const char *, int),
void (*f)(void *, const char *, int)
) {
return CRYPTO_set_mem_functions(m, r, f);
}
#endif
void *Cryptography_malloc_wrapper(size_t size, const char *path, int line) {
return malloc(size);
}
void *Cryptography_realloc_wrapper(void *ptr, size_t size, const char *path,
int line) {
return realloc(ptr, size);
}
void Cryptography_free_wrapper(void *ptr, const char *path, int line) {
free(ptr);
}
"""
| 32.534483 | 79 | 0.72655 |
ace4df016a18bdb5379c5f10e5b365f4c9c9a38f | 705 | py | Python | sympy/simplify/__init__.py | shipci/sympy | 4b59927bed992b980c9b3faac01becb36feef26b | [
"BSD-3-Clause"
] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | sympy/simplify/__init__.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | 9 | 2016-11-03T21:56:41.000Z | 2020-08-09T19:27:37.000Z | sympy/simplify/__init__.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | 27 | 2016-10-06T16:05:32.000Z | 2022-03-18T02:37:00.000Z | """The module helps converting SymPy expressions into shorter forms of them.
for example:
the expression E**(pi*I) will be converted into -1
the expression (x+x)**2 will be converted into 4*x**2
"""
from .simplify import (collect, rcollect, radsimp, ratsimp, fraction,
simplify, trigsimp, powsimp, combsimp, hypersimp, hypersimilar, nsimplify,
logcombine, separatevars, numer, denom, powdenest, posify, polarify,
unpolarify, collect_const, signsimp, besselsimp, ratsimpmodprime,
exptrigsimp)
from .fu import FU, fu
from .sqrtdenest import sqrtdenest
from .cse_main import cse
from .traversaltools import use
from .epathtools import epath, EPath
from .hyperexpand import hyperexpand
| 29.375 | 78 | 0.767376 |
ace4e059c86c884930b24b56fbce7700d18db78a | 6,227 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/models/__init__.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/models/__init__.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/models/__init__.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import DataDiskImageEncryption
from ._models_py3 import Disallowed
from ._models_py3 import DiskImageEncryption
from ._models_py3 import EncryptionImages
from ._models_py3 import Gallery
from ._models_py3 import GalleryApplication
from ._models_py3 import GalleryApplicationUpdate
from ._models_py3 import GalleryApplicationVersion
from ._models_py3 import GalleryApplicationVersionPublishingProfile
from ._models_py3 import GalleryApplicationVersionUpdate
from ._models_py3 import GalleryArtifactPublishingProfileBase
from ._models_py3 import GalleryArtifactSource
from ._models_py3 import GalleryArtifactVersionSource
from ._models_py3 import GalleryDataDiskImage
from ._models_py3 import GalleryDiskImage
from ._models_py3 import GalleryIdentifier
from ._models_py3 import GalleryImage
from ._models_py3 import GalleryImageIdentifier
from ._models_py3 import GalleryImageUpdate
from ._models_py3 import GalleryImageVersion
from ._models_py3 import GalleryImageVersionPublishingProfile
from ._models_py3 import GalleryImageVersionStorageProfile
from ._models_py3 import GalleryImageVersionUpdate
from ._models_py3 import GalleryOSDiskImage
from ._models_py3 import GalleryUpdate
from ._models_py3 import ImagePurchasePlan
from ._models_py3 import InnerError
from ._models_py3 import ManagedArtifact
from ._models_py3 import OSDiskImageEncryption
from ._models_py3 import RecommendedMachineConfiguration
from ._models_py3 import RegionalReplicationStatus
from ._models_py3 import ReplicationStatus
from ._models_py3 import Resource
from ._models_py3 import ResourceRange
from ._models_py3 import TargetRegion
from ._models_py3 import UpdateResourceDefinition
from ._models_py3 import UserArtifactSource
except (SyntaxError, ImportError):
from ._models import ApiError
from ._models import ApiErrorBase
from ._models import DataDiskImageEncryption
from ._models import Disallowed
from ._models import DiskImageEncryption
from ._models import EncryptionImages
from ._models import Gallery
from ._models import GalleryApplication
from ._models import GalleryApplicationUpdate
from ._models import GalleryApplicationVersion
from ._models import GalleryApplicationVersionPublishingProfile
from ._models import GalleryApplicationVersionUpdate
from ._models import GalleryArtifactPublishingProfileBase
from ._models import GalleryArtifactSource
from ._models import GalleryArtifactVersionSource
from ._models import GalleryDataDiskImage
from ._models import GalleryDiskImage
from ._models import GalleryIdentifier
from ._models import GalleryImage
from ._models import GalleryImageIdentifier
from ._models import GalleryImageUpdate
from ._models import GalleryImageVersion
from ._models import GalleryImageVersionPublishingProfile
from ._models import GalleryImageVersionStorageProfile
from ._models import GalleryImageVersionUpdate
from ._models import GalleryOSDiskImage
from ._models import GalleryUpdate
from ._models import ImagePurchasePlan
from ._models import InnerError
from ._models import ManagedArtifact
from ._models import OSDiskImageEncryption
from ._models import RecommendedMachineConfiguration
from ._models import RegionalReplicationStatus
from ._models import ReplicationStatus
from ._models import Resource
from ._models import ResourceRange
from ._models import TargetRegion
from ._models import UpdateResourceDefinition
from ._models import UserArtifactSource
from ._paged_models import GalleryApplicationPaged
from ._paged_models import GalleryApplicationVersionPaged
from ._paged_models import GalleryImagePaged
from ._paged_models import GalleryImageVersionPaged
from ._paged_models import GalleryPaged
from ._compute_management_client_enums import (
OperatingSystemTypes,
AggregatedReplicationState,
ReplicationState,
OperatingSystemStateTypes,
HyperVGeneration,
StorageAccountType,
HostCaching,
ReplicationStatusTypes,
)
__all__ = [
'ApiError',
'ApiErrorBase',
'DataDiskImageEncryption',
'Disallowed',
'DiskImageEncryption',
'EncryptionImages',
'Gallery',
'GalleryApplication',
'GalleryApplicationUpdate',
'GalleryApplicationVersion',
'GalleryApplicationVersionPublishingProfile',
'GalleryApplicationVersionUpdate',
'GalleryArtifactPublishingProfileBase',
'GalleryArtifactSource',
'GalleryArtifactVersionSource',
'GalleryDataDiskImage',
'GalleryDiskImage',
'GalleryIdentifier',
'GalleryImage',
'GalleryImageIdentifier',
'GalleryImageUpdate',
'GalleryImageVersion',
'GalleryImageVersionPublishingProfile',
'GalleryImageVersionStorageProfile',
'GalleryImageVersionUpdate',
'GalleryOSDiskImage',
'GalleryUpdate',
'ImagePurchasePlan',
'InnerError',
'ManagedArtifact',
'OSDiskImageEncryption',
'RecommendedMachineConfiguration',
'RegionalReplicationStatus',
'ReplicationStatus',
'Resource',
'ResourceRange',
'TargetRegion',
'UpdateResourceDefinition',
'UserArtifactSource',
'GalleryPaged',
'GalleryImagePaged',
'GalleryImageVersionPaged',
'GalleryApplicationPaged',
'GalleryApplicationVersionPaged',
'OperatingSystemTypes',
'AggregatedReplicationState',
'ReplicationState',
'OperatingSystemStateTypes',
'HyperVGeneration',
'StorageAccountType',
'HostCaching',
'ReplicationStatusTypes',
]
| 38.438272 | 76 | 0.768107 |
ace4e31c2616f3d8ab1d5b15ab150857fb8603b2 | 4,167 | py | Python | rand_param_envs/gym/scoreboard/client/http_client.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 24 | 2021-03-24T07:14:52.000Z | 2022-03-17T08:15:44.000Z | rand_param_envs/gym/scoreboard/client/http_client.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 12 | 2021-02-02T22:53:59.000Z | 2022-03-12T00:41:30.000Z | rand_param_envs/gym/scoreboard/client/http_client.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 6 | 2021-04-12T18:49:47.000Z | 2021-09-07T05:33:22.000Z | import logging
import requests
import textwrap
import six
from rand_param_envs.gym import error
from rand_param_envs.gym.scoreboard.client import util
logger = logging.getLogger(__name__)
warned = False
def render_post_data(post_data):
if hasattr(post_data, 'fileno'): # todo: is this the right way of checking if it's a file?
return '%r (%d bytes)' % (post_data, util.file_size(post_data))
elif isinstance(post_data, (six.string_types, six.binary_type)):
return '%r (%d bytes)' % (post_data, len(post_data))
else:
return None
class RequestsClient(object):
    """HTTP client backed by the ``requests`` library.

    A single ``requests.Session`` is reused across calls so that connections
    can be pooled. Fixes applied: the deprecated ``logger.warn`` alias is
    replaced by ``logger.warning``, and a missing space is restored in the
    requests-version-mismatch error message (it previously rendered as
    "Gym'susage thereof").
    """
    name = 'requests'

    def __init__(self, verify_ssl_certs=True):
        # verify_ssl_certs=False disables TLS verification for every request.
        self._verify_ssl_certs = verify_ssl_certs
        self.session = requests.Session()

    def request(self, method, url, headers, post_data=None, files=None):
        """Perform an HTTP request; return (content, status_code, headers).

        Raises error.APIConnectionError (via _handle_request_error) on any
        transport-level failure.
        """
        global warned
        kwargs = {}
        # Really, really only turn this off while debugging.
        if not self._verify_ssl_certs:
            if not warned:
                # Warn only once per process about disabled verification.
                logger.warning('You have disabled SSL cert verification in OpenAI Gym, so we will not verify SSL certs. This means an attacker with control of your network could snoop on or modify your data in transit.')
                warned = True
            kwargs['verify'] = False
        try:
            try:
                result = self.session.request(method,
                                              url,
                                              headers=headers,
                                              data=post_data,
                                              timeout=200,
                                              files=files,
                                              **kwargs)
            except TypeError as e:
                raise TypeError(
                    'Warning: It looks like your installed version of the '
                    '"requests" library is not compatible with OpenAI Gym\'s '
                    'usage thereof. (HINT: The most likely cause is that '
                    'your "requests" library is out of date. You can fix '
                    'that by running "pip install -U requests".) The '
                    'underlying error was: %s' % (e,))
            # This causes the content to actually be read, which could cause
            # e.g. a socket timeout. TODO: The other fetch methods probably
            # are susceptible to the same and should be updated.
            content = result.content
            status_code = result.status_code
        except Exception as e:
            # Would catch just requests.exceptions.RequestException, but can
            # also raise ValueError, RuntimeError, etc.
            self._handle_request_error(e, method, url)
        if logger.level <= logging.DEBUG:
            logger.debug(
                """API request to %s returned (response code, response body) of
(%d, %r)
Request body was: %s""", url, status_code, content, render_post_data(post_data))
        elif logger.level <= logging.INFO:
            logger.info('HTTP request: %s %s %d', method.upper(), url, status_code)
        return content, status_code, result.headers

    def _handle_request_error(self, e, method, url):
        """Translate a low-level failure into error.APIConnectionError (always raises)."""
        if isinstance(e, requests.exceptions.RequestException):
            msg = ("Unexpected error communicating with OpenAI Gym "
                   "(while calling {} {}). "
                   "If this problem persists, let us know at "
                   "gym@openai.com.".format(method, url))
            err = "%s: %s" % (type(e).__name__, str(e))
        else:
            msg = ("Unexpected error communicating with OpenAI Gym. "
                   "It looks like there's probably a configuration "
                   "issue locally. If this problem persists, let us "
                   "know at gym@openai.com.")
            err = "A %s was raised" % (type(e).__name__,)
            if str(e):
                err += " with error message %s" % (str(e),)
            else:
                err += " with no error message"
        msg = textwrap.fill(msg, width=140) + "\n\n(Network error: %s)" % (err,)
        raise error.APIConnectionError(msg)
| 43.863158 | 217 | 0.559875 |
ace4e3dd6754eec4aac74720a5ef16d02791adf0 | 4,591 | py | Python | 08-Python_1/applied_databases_exercises_week_08.py | andkoc001/DataBase_references | 466d3e9a28a25a422e3b4b52158f8e359a240e6d | [
"MIT"
] | null | null | null | 08-Python_1/applied_databases_exercises_week_08.py | andkoc001/DataBase_references | 466d3e9a28a25a422e3b4b52158f8e359a240e6d | [
"MIT"
] | null | null | null | 08-Python_1/applied_databases_exercises_week_08.py | andkoc001/DataBase_references | 466d3e9a28a25a422e3b4b52158f8e359a240e6d | [
"MIT"
] | null | null | null | # Applied Databases, GMIT 2020
# Exercises from week 08 lecture
# Author: Andrzej Kocielski
# ------------------------------
# -----
'''
Exercise 01
Write a Pyton program that has two arrays in the main function:
- one containing several elements which are numbers,
- the other is empty.
Write another function which accepts a number as a parameter
and returns the number doubled.
The main function should call this function for each element of the first array
and populate the second array with the doubled values.
When the second array is full, it should be printed out.
'''
arr_1 = [4, 7, 5, 1, 0]
arr_2 = []
# double_value(arr_1)
# print("Range:", range(len(arr_1))) # Range: range(0, 5)
# print("Len:", len(arr_1)) # Len: 5
# print("Array:", (arr_1)) # Array: [4, 7, 5, 1, 0]
# print("Array elem.1:", (arr_1[0])) # Array elem.1: 4
def double_array(array):
    """Return a new list containing every element of *array* doubled.

    Bug fix: the original iterated over the parameter's indices but read the
    values from the module-level ``arr_1`` and accumulated into the global
    ``arr_2`` (so repeated calls kept growing it). The parameter itself is
    now used and a fresh list is returned on every call.
    """
    return [item * 2 for item in array]
def double_number(num):
    """Return the given number doubled."""
    doubled = num + num
    return doubled
def main_01(array):
    """Double every element of *array* via double_number() and print the result.

    NOTE(review): the doubled values are appended to the module-level
    ``arr_2``, so calling this more than once keeps accumulating results —
    presumably acceptable for this one-shot exercise script, but confirm.
    """
    # print(double_array(arr_1))
    for i in range(len(array)):
        arr_2.append(double_number(array[i]))
    print(arr_2)
# main_01(arr_1)
# -----
'''
Exercise 02
From Q2.py, paste the code below and modifiy it to make it behave as follows.
When run a main menu is shown as follows:
Menu
====
1 - Fill Array
2 - Print Array
3 - Find > in Array
4 - Exit
Enter choice:
* If the user chooses 1:
He/She should be prompted to keep entering numbers until -1 is entered.
All numbers up to, but not including -1, should be sored in an array:
Enter choice: 1
Enter Number: 9
Enter Number: 23
Enter Number: -1
* If the user chooses 2:
The content of the array should be printed.
Enter choice: 2
[1, 9, 23]
* If the user chooses 3:
He/She should be prompted to enter a number.
Any number in the array greater than the number entered should be printed:
Enter choice: 3
Enter Number: 12
[23]
* If the user chooses 4:
The program should end.
* If the user enters anything else, the program menu should be displayed again.
The main_02() function should not be changed.
The definition of the functions fill_array() and find_gt_in_array() should not be changed.
The necessary code should be written in the two funtions mentioned above,
so that the program performs as described.
'''
# Main function
def main_02():
    """Run the interactive menu loop for Exercise 02.

    Choice 1 replaces the array from user input, 2 prints it, 3 prints the
    elements above a cut-off, 4 exits; anything else just redisplays the
    menu. Left unchanged per the exercise spec above ("main_02() should not
    be changed").
    """
    # Initialise array
    array = [4, 4, 25, 6, 1, 78, 0, 51]
    display_menu()
    while True:
        choice = input("Enter choice: ")
        if (choice == "1"):
            array = fill_array()
            display_menu()
        elif (choice == "2"):
            print(array)
            display_menu()
        elif (choice == "3"):
            find_gt_in_array(array)
            display_menu()
        elif (choice == "4"):
            break
        else:
            display_menu()
def fill_array():
    """Prompt for integers until the sentinel -1 is entered.

    Returns the numbers entered before the sentinel as a list; -1 itself is
    never stored.
    """
    numbers = []
    while True:
        entry = int(input("Enter next number (-1 to end): "))
        if entry == -1:
            return numbers
        numbers.append(entry)
def find_gt_in_array(array):
    """Ask the user for a cut-off number and print every element of *array*
    that is strictly greater than it."""
    threshold = int(input("Enter cut-off number: "))
    print([value for value in array if value > threshold])
    return
def display_menu():
    """Print the main menu (blank line, header, underline, four options)."""
    for line in ("", "MENU", "=" * 4,
                 "1 - Fill Array",
                 "2 - Print Array",
                 "3 - Find > in Array",
                 "4 - Exit"):
        print(line)
# Entry point: start the interactive menu when executed as a script.
if __name__ == "__main__":
    # execute only if run as a script
    main_02()
# -----
'''
Exercise 11
Write a Pyton program that takes a name and age in form the console.
If the age is less than 18, the program prints "Too young",
otherwise the program prints the name followed by "@gmit.ie"
and the age incremented by 2.
'''
def main_11():
    """Read a name and an age; print "Too young" for under-18s, otherwise the
    name as a GMIT e-mail address with the age incremented by 2.

    Fixes over the original: the bare ``except`` is narrowed to ValueError
    and now returns early — previously an invalid age fell through to
    ``if age < 18`` and crashed with NameError because ``age`` was never
    bound. The unreachable trailing ``print()`` (both branches return) was
    removed.
    """
    name = input("Enter your name: ")
    try:
        age = int(input("Enter your age: "))
    except ValueError:
        print("Invalid age")
        return
    if age < 18:
        return print("Too young")
    else:
        return print(name + "@gmit.ie, age:", age + 2)
# main_11()
| 21.453271 | 90 | 0.614245 |
ace4e50b2d05e99838ad98843b5621b79b7a76c5 | 47 | py | Python | MyTestProject/src/root/nested/example.py | jlmurphy3rd/code-outhouse | c18d1fc01299cb70545ebdc8b928023401c25078 | [
"MIT"
] | 2 | 2018-04-21T14:04:42.000Z | 2019-12-18T23:21:28.000Z | MyTestProject/src/root/nested/example.py | jlmurphy3rd/code-outhouse | c18d1fc01299cb70545ebdc8b928023401c25078 | [
"MIT"
] | null | null | null | MyTestProject/src/root/nested/example.py | jlmurphy3rd/code-outhouse | c18d1fc01299cb70545ebdc8b928023401c25078 | [
"MIT"
] | null | null | null | '''
Created on Feb 18, 2016
@author: John
'''
| 7.833333 | 23 | 0.595745 |
ace4e528cdd8959a7324a794b40edd66bbde4b5a | 4,933 | py | Python | conftest.py | copyit/archivy | 56d68f3562be5ad37add311c49e134a7f3732448 | [
"MIT"
] | null | null | null | conftest.py | copyit/archivy | 56d68f3562be5ad37add311c49e134a7f3732448 | [
"MIT"
] | 6 | 2020-11-01T17:55:42.000Z | 2020-12-06T11:03:40.000Z | conftest.py | copyit/archivy | 56d68f3562be5ad37add311c49e134a7f3732448 | [
"MIT"
] | null | null | null | import os
import shutil
import tempfile
import click
import pytest
import responses
from archivy import app, cli
from archivy.click_web import create_click_web_app, _flask_app
from archivy.helpers import get_db
from archivy.models import DataObj, User
_app = None
@pytest.fixture
def test_app():
    """Instantiate the app for each test with its own temporary data directory
    Each test using this fixture will use its own db.json and its own data
    directory, and then delete them.
    """
    # create a temporary file to isolate the database for each test
    # The Flask app itself is created once per process and cached in the
    # module-level _app; only its config/data dirs are re-pointed per test.
    global _app
    if _app is None:
        _app = create_click_web_app(cli, cli.cli, app)
    app_dir = tempfile.mkdtemp()
    _app.config["INTERNAL_DIR"] = app_dir
    _app.config["USER_DIR"] = app_dir
    data_dir = os.path.join(app_dir, "data")
    os.mkdir(data_dir)
    _app.config["TESTING"] = True
    _app.config["WTF_CSRF_ENABLED"] = False
    # This sets up a TinyDB instance, using the `app_dir` temporary
    # directory defined above
    # Required so that `flask.current_app` can be called in data.py and
    # models.py
    # See https://flask.palletsprojects.com/en/1.1.x/appcontext/ for more
    # information.
    with _app.app_context():
        _ = get_db()
        # Seed a known user so tests (e.g. the `client` fixture) can log in.
        user = {
            "username": "halcyon",
            "password": "password"
        }
        User(**user).insert()
    yield _app
    # close and remove the temporary database
    shutil.rmtree(app_dir)
@pytest.fixture
def client(test_app):
    """Logged-in HTTP test client bound to the temporary app instance."""
    with test_app.test_client() as http_client:
        # Authenticate up front with the user seeded by the test_app fixture.
        http_client.post("/login", data={"username": "halcyon", "password": "password"})
        yield http_client
@pytest.fixture
def mocked_responses():
    """Intercept HTTP calls made through `requests` via the `responses` package.

    Tests register fake endpoints on the yielded mock, e.g.::

        mocked_responses.add(responses.GET, "http://example.org",
                             json={'key': 'val'})

    after which ``requests.get("http://example.org").json()`` returns that
    payload instead of hitting the network.
    """
    with responses.RequestsMock() as request_mock:
        # Don't fail the test when some registered responses go unused.
        request_mock.assert_all_requests_are_fired = False
        yield request_mock
@pytest.fixture
def note_fixture(test_app):
    """Create and persist a sample note inside the test app's context."""
    with test_app.app_context():
        note = DataObj(
            type="note", title="Test Note",
            desc="A note to test model functionality",
            tags=["testing", "archivy"], path="",
        )
        note.insert()
    return note
@pytest.fixture
def bookmark_fixture(test_app, mocked_responses):
    """Create and persist a bookmark for https://example.com/.

    The mocked HTML body deliberately contains a <script> tag and relative
    URLs so tests can check sanitisation and URL resolution performed by
    DataObj.process_bookmark_url().
    """
    mocked_responses.add(responses.GET, "https://example.com/", body="""<html>
    <head><title>Example</title></head><body><p>
    Lorem ipsum dolor sit amet, consectetur adipiscing elit
    <script>console.log("this should be sanitized")</script>
    <img src="/images/image1.png">
    <a href="/testing-absolute-url">
    </p></body></html>
    """)
    datapoints = {
        "type": "bookmark", "title": "Test Bookmark",
        "desc": "",
        "tags": ["testing", "archivy"], "path": "",
        "url": "https://example.com/"
    }
    with test_app.app_context():
        bookmark = DataObj(**datapoints)
        # Fetches (the mocked) page content before saving the bookmark.
        bookmark.process_bookmark_url()
        bookmark.insert()
    return bookmark
@pytest.fixture()
def pocket_fixture(test_app, mocked_responses):
    """Sets up pocket key and mocked responses for testing pocket sync
    When using this fixture, all calls to https://getpocket.com/v3/get will
    succeed and return a single article whose url is https://example.com.
    """
    # NOTE(review): `db` is obtained inside the app context but used after the
    # `with` block exits — presumably fine for TinyDB; confirm.
    with test_app.app_context():
        db = get_db()
    # Fake OAuth exchange so sync code can obtain an access token.
    mocked_responses.add(
        responses.POST,
        "https://getpocket.com/v3/oauth/authorize",
        json={
            "access_token": "5678defg-5678-defg-5678-defg56",
            "username": "test_user"
        })
    # fake /get response from pocket API
    mocked_responses.add(responses.POST, "https://getpocket.com/v3/get", json={
        'status': 1, 'complete': 1, 'list': {
            '3088163616': {
                'given_url': 'https://example.com', 'status': '0',
                'resolved_url': 'https://example.com',
                'excerpt': 'Lorem ipsum', 'is_article': '1',
            },
        },
    })
    # Persist credentials the sync code looks up from the database.
    pocket_key = {
        "type": "pocket_key",
        "consumer_key": "1234-abcd1234abcd1234abcd1234",
        "code": "dcba4321-dcba-4321-dcba-4321dc",
    }
    db.insert(pocket_key)
    return pocket_key
@pytest.fixture()
def click_cli():
    # Expose the archivy click command group to tests.
    yield cli.cli
@pytest.fixture()
def ctx(click_cli):
    # NOTE(review): `info_name` is given the command object itself rather
    # than a string name — confirm this is intended for click.Context.
    with click.Context(click_cli, info_name=click_cli, parent=None) as ctx:
        yield ctx
@pytest.fixture()
def cli_runner():
    # Fresh CliRunner per test for invoking click commands in isolation.
    yield click.testing.CliRunner()
| 28.680233 | 83 | 0.628015 |
ace4e5695ae5b7b8351fada0faee358ce3edc954 | 3,094 | py | Python | amazon/amazon/settings.py | grv190/amazon_django | b9effe467cfeb7de9e47b8f5b733dfe61c33e675 | [
"Apache-2.0"
] | null | null | null | amazon/amazon/settings.py | grv190/amazon_django | b9effe467cfeb7de9e47b8f5b733dfe61c33e675 | [
"Apache-2.0"
] | null | null | null | amazon/amazon/settings.py | grv190/amazon_django | b9effe467cfeb7de9e47b8f5b733dfe61c33e675 | [
"Apache-2.0"
] | null | null | null | """
Django settings for amazon project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@b(csf9rdx)r5*ssik6en(t_885sl6tntjq(q1qgne_kpjz4d7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'amazon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'amazon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.570248 | 91 | 0.698772 |
ace4e5a3dce3988df2b726b969db28522b8b95c0 | 1,332 | py | Python | keepers/admin.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | keepers/admin.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | 13 | 2019-12-12T03:07:55.000Z | 2022-03-07T12:59:27.000Z | keepers/admin.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Keeper
class KeeperAdmin(admin.ModelAdmin):
    # Changelist shows every tracked field of a kept eBay listing.
    # The prefixes look Hungarian-style (i/c/b/t/d/l) — presumably encoding
    # the field type; confirm against keepers/models.py.
    list_display = (
        "iItemNumb",
        "cDescription",
        "bBestOfferable",
        "tTimeBeg",
        "tTimeEnd",
        "cEbayItemURL",
        "cListingType",
        "cLocation",
        "cPaymentMethods",
        "cGalleryURL",
        "cPictureURLs",
        "cPostalCode",
        "iCategoryID",
        "iQuantity",
        "cSellerID",
        "iFeedbackScore",
        "cFeedbackPercent",
        "iBidCount",
        "dConvertPrice",
        "cConvertCurrency",
        "lLocalPrice",
        "lLocalCurrency",
        "cHighBidder",
        "cListingStatus",
        "iQuantitySold",
        "cShipToLocations",
        "cSite",
        "cTitle",
        "iHitCount",
        "cCategoryIDs",
        "cCategoryNames",
        "cCountry",
        "cReturnPolicy",
        "dMinimumBid",
        "cBidCurrency",
        "iConditionID",
        "cCondition",
        "bGlobalShipping",
        "bBuyItNowable",
        "lBuyItNowPrice",
        "lBuyItNowCurrenc",
        "dBuyItNowPrice",
        "cBuyItNowConvert",
        "tCreate",
        "tModify",
        "bGotPictures",
        "tGotPictures",
        "iGotPictures" )
# Register the Keeper model with its admin options.
admin.site.register(Keeper, KeeperAdmin)
| 21.836066 | 42 | 0.531532 |
ace4e5da16d3276354a25ee503c5a50c2d55cbc4 | 3,265 | py | Python | bluebottle/segments/serializers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | bluebottle/segments/serializers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | bluebottle/segments/serializers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | from builtins import object
from rest_framework import serializers
from bluebottle.activities.models import Activity
from bluebottle.activities.utils import get_stats_for_activities
from bluebottle.bluebottle_drf2.serializers import SorlImageField
from bluebottle.initiatives.models import Initiative
from bluebottle.segments.models import Segment, SegmentType
from bluebottle.utils.fields import SafeField
class SegmentTypeSerializer(serializers.ModelSerializer):
    """JSON-API serializer for SegmentType; can side-load its segments."""
    name = serializers.CharField(required=False)
    included_serializers = {
        'segments': 'bluebottle.segments.serializers.SegmentListSerializer',
    }
    class Meta(object):
        model = SegmentType
        fields = (
            'id', 'name', 'slug', 'inherit', 'required',
            'enable_search', 'user_editable', 'segments'
        )
    class JSONAPIMeta(object):
        included_resources = ['segments', ]
        resource_name = 'segment-types'
class SegmentListSerializer(serializers.ModelSerializer):
    """List serializer for Segment with thumbnail-sized images.

    Images are rendered center-cropped via SorlImageField; `story` is an
    HTML-safe field.
    """
    name = serializers.CharField(required=False)
    logo = SorlImageField('180x180', crop='center')
    cover_image = SorlImageField('384x288', crop='center')
    story = SafeField(required=False, allow_blank=True, allow_null=True)
    included_serializers = {
        'segment_type': 'bluebottle.segments.serializers.SegmentTypeSerializer',
    }
    class Meta(object):
        model = Segment
        fields = (
            'id', 'name', 'segment_type', 'email_domains', 'slug', 'tag_line', 'background_color',
            'text_color', 'logo', 'cover_image', 'story', 'closed',
        )
    class JSONAPIMeta(object):
        included_resources = ['segment_type', ]
        resource_name = 'segments'
class SegmentDetailSerializer(SegmentListSerializer):
    """Detail serializer: list fields plus per-segment aggregate counts/stats.

    Performance fix: the counts previously materialised whole querysets just
    to call len() on them; QuerySet.count() lets the database do the
    counting and returns the same integer.
    """
    initiatives_count = serializers.SerializerMethodField()
    activities_count = serializers.SerializerMethodField()
    stats = serializers.SerializerMethodField()

    def get_initiatives_count(self, obj):
        # Approved initiatives having at least one activity in this segment;
        # distinct() avoids double-counting initiatives with several matches.
        return Initiative.objects.filter(
            status='approved', activities__segments=obj
        ).distinct().count()

    def get_activities_count(self, obj):
        # Activities in this segment, excluding non-live statuses.
        return Activity.objects.filter(
            segments=obj
        ).exclude(
            status__in=(
                'draft', 'needs_work', 'submitted', 'deleted',
                'closed', 'cancelled', 'rejected'
            )
        ).count()

    def get_stats(self, obj):
        return get_stats_for_activities(obj.activities.all())

    class Meta(SegmentListSerializer.Meta):
        fields = SegmentListSerializer.Meta.fields + (
            'initiatives_count', 'activities_count', 'stats'
        )
        meta_fields = ['initiatives_count', 'activities_count', 'stats']
class SegmentPublicDetailSerializer(serializers.ModelSerializer):
    """Public (unauthenticated) preview of a Segment with a large cover image."""
    name = serializers.CharField(required=False)
    logo = SorlImageField('180x180', crop='center')
    cover_image = SorlImageField('1200x900', crop='center')
    class Meta(object):
        model = Segment
        fields = (
            'id', 'name', 'logo', 'cover_image', 'email_domains', 'background_color', 'closed',
            'text_color'
        )
    class JSONAPIMeta(object):
        resource_name = 'segment-previews'
| 32.979798 | 101 | 0.6683 |
ace4e6304a296ede32c36aae2cafee0a3ca50f8a | 2,534 | py | Python | src/chapter5/chapter5_1.py | DuGuPeefy/CLRS_dugu_code-master | cc0b44f76c1306915e11c744f7f10aa20c98ac0d | [
"Apache-2.0"
] | null | null | null | src/chapter5/chapter5_1.py | DuGuPeefy/CLRS_dugu_code-master | cc0b44f76c1306915e11c744f7f10aa20c98ac0d | [
"Apache-2.0"
] | null | null | null | src/chapter5/chapter5_1.py | DuGuPeefy/CLRS_dugu_code-master | cc0b44f76c1306915e11c744f7f10aa20c98ac0d | [
"Apache-2.0"
] | null | null | null |
# python src/chapter5/chapter5_1.py
# python3 src/chapter5/chapter5_1.py
import sys
import math
from random import randint
from copy import copy
from copy import deepcopy
import numpy as np
from numpy import arange
from matplotlib.pyplot import plot
from matplotlib.pyplot import figure
from matplotlib.pyplot import show
class Chapter5_1:
    '''
    CLRS Chapter 5, Section 5.1: probabilistic-analysis helper routines and
    lecture notes (the note text itself is printed in Chinese).
    '''
    def myRandom(self, a = 0, b = 1):
        '''
        Return a random integer in the inclusive range [a, b].
        '''
        return randint(a, b)
    def myBiasedRandom(self):
        '''
        Placeholder for the BIASED-RANDOM routine (CLRS 5.1 exercises);
        not implemented yet.
        '''
        pass
    def note(self):
        '''
        Summary
        =
        Print chapter5.1 note
        Example
        =
        >>> Chapter5_1().note()
        '''
        # The printed strings are program output (Chinese lecture notes on
        # the hiring problem) and are intentionally left untouched.
        print('第五章 概率分析和随机算法')
        print('5.1 雇佣问题')
        print('假设需要雇佣一个一个新的办公室助理,之前的雇佣都失败了,所以决定找一个雇佣代理,雇佣代理每天推荐一个应聘者')
        print('每找到一个更好地应聘者,就辞掉之前的应聘者')
        print('当然雇佣,面试,开除,给代理中介费都需要一定的\"代价\"')
        print('HIRE-ASSISTANT(n)过程伪代码')
        print(' 1. best <- 0')
        print(' 2. for i <- 1 to n')
        print(' 3. interview candidate i')
        print(' 4. if candidate i is better than candidate best')
        print(' 5. then best <- i')
        print(' 6. hire candidate i')
        print('关心的重点不是HIRE-ASSISTANT的执行时间,而是面试和雇佣所花的费用')
        print('最坏情况分析')
        print('在最坏情况下,我们雇佣了每个面试的应聘者。当应聘者的资质逐渐递增时,就会出现这种情况,此时我们雇佣了n次,总的费用O(nc)')
        print('事实上既不能得知应聘者的出现次序,也不能控制这个次序。因此,通常我们预期的是一般或平均情况')
        print('概率分析是在问题的分析中应用概率技术,大多数情况下,使用概率分析来分析一个算法的运行时间')
        print('为了进行概率分析,必须使用关于输入分布的知识或对其假设,然后分析算法,计算出一个期望的运行时间')
        print('在所有应聘者的资格之间,存在一个全序关系。因此可以使用从1到n的唯一号码来讲应聘者排列名次')
        print('用rank(i)表示应聘者i的名次,并约定较高的名次对应较有资格的应聘者')
        print('这个有序序列rank(1),rank(2),...,rank(3)是序列1,2,...,n的一个排列')
        print('应聘者以随机的顺序出现,就等于说这个排名列表是数字1到n的n!(n的阶乘)')
        print('或者,也可以称这些排名构成一个均匀的随机排列;亦即在n!中可能的组合中,每一种都以相等的概率出现')
        print('随机算法:为了利用概率分析,需要了解关于输入分布的一些情况。在许多情况下,我们对输入分布知之甚少')
        print('一般的,如果一个算法的输入行为不只是由输入决定,同时也由随机数生成器所产生的数值决定,则称这个算法是随机的')
        print('练习5.1-1:每次HIRE应聘者是有一个顺序的,HIRE的时候同时把次序压如栈中,就得到了排名的总次序')
        # Demonstrate myRandom with five draws from [0, 1].
        random_list = [self.myRandom(), self.myRandom(), self.myRandom(), self.myRandom(), self.myRandom(),]
        print('产生5个[0,1]的随机整数', random_list)
        # python src/chapter5/chapter5_1.py
        # python3 src/chapter5/chapter5_1.py
        return self
# Module-level singleton so callers can `from chapter5_1 import note`.
_instance = Chapter5_1()
note = _instance.note
if __name__ == '__main__':
    print('Run main : single chapter five!')
    Chapter5_1().note()
else:
    pass
| 30.166667 | 108 | 0.63457 |
ace4e6ac90183dca7feaf0a9957b1d51706c4580 | 10,729 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/19-extending_bound_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/19-extending_bound_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/19-extending_bound_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term `arg0 < arg1` as NOT(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term `arg0 >= arg1` by flipping the primitive `<=`."""
    flipped = msat_make_leq(menv, arg1, arg0)
    return flipped
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term `arg0 > arg1` as NOT(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the implication `arg0 -> arg1` as (NOT arg0) OR arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
                                                        msat_term, msat_term]:
    """Build the benchmark's transition system and LTL property.

    Returns (curr2next map, init, trans, ltl) over real-valued i, r, l and
    boolean inc_i; the property is (G F inc_i) -> !(G F r > i).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    real_type = msat_get_rational_type(menv)
    # Declare current-state variables.
    i = msat_declare_function(menv, "i", real_type)
    i = msat_make_constant(menv, i)
    r = msat_declare_function(menv, "r", real_type)
    r = msat_make_constant(menv, r)
    l = msat_declare_function(menv, "l", real_type)
    l = msat_make_constant(menv, l)
    inc_i = msat_declare_function(menv, "inc_i", bool_type)
    inc_i = msat_make_constant(menv, inc_i)
    # Declare the primed (next-state) copies via name_next.
    x_i = msat_declare_function(menv, name_next("i"), real_type)
    x_i = msat_make_constant(menv, x_i)
    x_r = msat_declare_function(menv, name_next("r"), real_type)
    x_r = msat_make_constant(menv, x_r)
    x_l = msat_declare_function(menv, name_next("l"), real_type)
    x_l = msat_make_constant(menv, x_l)
    x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
    x_inc_i = msat_make_constant(menv, x_inc_i)
    curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
    zero = msat_make_number(menv, "0")
    one = msat_make_number(menv, "1")
    # init: 0 < r < l & i >= 0 & !inc_i & l > 0
    r_gt_0 = msat_make_gt(menv, r, zero)
    r_lt_l = msat_make_lt(menv, r, l)
    i_geq_0 = msat_make_geq(menv, i, zero)
    init = msat_make_and(menv, r_gt_0, r_lt_l)
    init = msat_make_and(menv, init,
                         msat_make_and(menv, i_geq_0,
                                       msat_make_not(menv, inc_i)))
    init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
    # r' = r
    trans = msat_make_equal(menv, x_r, r)
    # i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
    i_lt_l = msat_make_lt(menv, i, l)
    x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
                                 msat_make_equal(menv, x_i,
                                                 msat_make_plus(menv, i, one)))
    x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
                             msat_make_equal(menv, x_i, i))
    x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
    x_l_eq_l = msat_make_equal(menv, x_l, l)
    x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
                                                   x_l_eq_l)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_lt_l,
                                         x_i_eq_i_p_1_or_i_and_x_l_eq_l))
    # i >= l -> i' = 0 & l' = l + 1 & !inc_i'
    i_geq_l = msat_make_geq(menv, i, l)
    x_i_eq_0 = msat_make_equal(menv, x_i, zero)
    x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
    x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
                                              msat_make_and(menv, x_i_eq_0,
                                                            x_l_eq_l_p_1),
                                              msat_make_not(menv, x_inc_i))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_geq_l,
                                         x_i_eq_0_and_x_l_eq_l_p_1))
    # (G F inc_i) -> ! G F r > i
    # NOTE(review): the name G_F_x_i_gt_i is misleading — the term built here
    # is G F inc_i, matching the comment above.
    G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
    return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the set of candidate (deliberately imperfect) search hints.

    Each Hint is a small automaton over a subset of {i, r, l, inc_i}: every
    Location pairs a region predicate on the current state with a progress
    constraint on the primed (next-state) copy of its symbol.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    # Current-state symbols and their primed counterparts.
    i = mgr.Symbol("i", types.REAL)
    r = mgr.Symbol("r", types.REAL)
    l = mgr.Symbol("l", types.REAL)
    inc_i = mgr.Symbol("inc_i", types.BOOL)
    symbs = frozenset([i, r, l, inc_i])
    x_i = symb_to_next(mgr, i)
    x_r = symb_to_next(mgr, r)
    x_l = symb_to_next(mgr, l)
    x_inc_i = symb_to_next(mgr, inc_i)
    res = []
    n0 = mgr.Real(0)
    n1 = mgr.Real(1)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i2", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i)
    loc1.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1])
    res.append(h_inc)
    loc0 = Location(env, mgr.GE(r, n0))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
    loc2 = Location(env, mgr.GE(r, n0))
    loc2.set_progress(0, mgr.Equals(x_r, r))
    h_r = Hint("h_r4", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1, loc2])
    res.append(h_r)
    loc = Location(env, mgr.LE(r, n0))
    loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
    h_r = Hint("h_r1", env, frozenset([r]), symbs)
    h_r.set_locs([loc])
    res.append(h_r)
    loc = Location(env, mgr.Not(inc_i))
    loc.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc])
    res.append(h_inc)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i, stutterT=x_inc_i)
    loc1.set_progress(2, mgr.Not(x_inc_i))
    loc2 = Location(env, mgr.Not(inc_i))
    loc2.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1, loc2])
    res.append(h_inc)
    loc = Location(env, mgr.LE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
    h_l = Hint("h_l1", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
    h_i = Hint("h_i1", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i, stutterT=x_inc_i)
    loc1.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1])
    res.append(h_inc)
    loc0 = Location(env, mgr.GE(l, n0))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(2, mgr.Equals(x_l, l))
    loc2 = Location(env, mgr.GE(l, n0))
    loc2.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l4", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1, loc2])
    res.append(h_l)
    loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
                    stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i3", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc = Location(env, mgr.GE(r, n0))
    loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r0", env, frozenset([r]), symbs)
    h_r.set_locs([loc])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
                    stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r3", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
                    stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l3", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
    h_i = Hint("h_i0", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc = Location(env, mgr.GE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
    h_l = Hint("h_l0", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    loc0 = Location(env, mgr.GE(r, n0))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r2", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(l, n0))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l2", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(2, mgr.Equals(x_i, i))
    loc2 = Location(env, mgr.GE(i, n0))
    loc2.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i4", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1, loc2])
    res.append(h_i)
    return frozenset(res)
| 35.292763 | 89 | 0.623544 |
ace4e77849728a3c24e228ee7e79e69d348eae2d | 1,996 | py | Python | steps/02_data_container/benchmarks/test_tsf.py | karthikn72/enhancement-proposals | 8200d8d5ec409bf76a2f8af6d12b17fe201afa0a | [
"BSD-3-Clause"
] | 7 | 2020-10-16T14:56:54.000Z | 2022-02-17T11:50:09.000Z | steps/02_data_container/benchmarks/test_tsf.py | karthikn72/enhancement-proposals | 8200d8d5ec409bf76a2f8af6d12b17fe201afa0a | [
"BSD-3-Clause"
] | 11 | 2020-11-02T17:55:21.000Z | 2021-10-19T09:26:39.000Z | steps/02_data_container/benchmarks/test_tsf.py | karthikn72/enhancement-proposals | 8200d8d5ec409bf76a2f8af6d12b17fe201afa0a | [
"BSD-3-Clause"
] | 13 | 2020-11-26T23:04:06.000Z | 2022-02-09T11:22:17.000Z | #!/usr/bin/env python3 -u
# coding: utf-8
__author__ = ["Markus Löning"]
__all__ = []
import numpy as np
from sklearn.model_selection import train_test_split
from sktime.classification.interval_based import TimeSeriesForest
from sktime.utils._testing.series_as_features import \
make_classification_problem
from .tsf import TimeSeriesForest_3d_np
from .tsf import TimeSeriesForest_ak_3d
from .tsf import TimeSeriesForest_ak_record
from .utils import ak_3d_arr
from .utils import ak_record_arr
from .utils import np_3d_arr
def _fit_predict(estimator, X_train, y_train, X_test):
return estimator.fit(X_train, y_train).predict_proba(X_test)
# Shared benchmark fixtures: a single synthetic classification problem,
# one train/test split, and the reference probabilities produced by the
# baseline TimeSeriesForest. Every benchmarked variant below must
# reproduce `expected` exactly.
PARAMS = {"n_estimators": 100, "random_state": 1}
X, y = make_classification_problem(n_instances=100, n_timepoints=200)
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Reference output computed once, outside the timed benchmarks.
expected = _fit_predict(TimeSeriesForest(**PARAMS),
                        X_train, y_train, X_test)
def test_tsf_3_np(benchmark):
    """Benchmark the 3D-numpy TimeSeriesForest variant against the reference."""
    train_arr = np_3d_arr(X_train)
    test_arr = np_3d_arr(X_test)
    clf = TimeSeriesForest_3d_np(**PARAMS)
    probs = benchmark(_fit_predict, clf, train_arr, y_train, test_arr)
    np.testing.assert_array_equal(probs, expected)
def test_tsf_tabularize(benchmark):
    """Benchmark the baseline (tabularizing) TimeSeriesForest implementation."""
    clf = TimeSeriesForest(**PARAMS)
    probs = benchmark(_fit_predict, clf, X_train, y_train, X_test)
    np.testing.assert_array_equal(probs, expected)
def test_tsf_ak_record(benchmark):
    """Benchmark the awkward-array (record layout) TimeSeriesForest variant."""
    train_arr = ak_record_arr(X_train)
    test_arr = ak_record_arr(X_test)
    clf = TimeSeriesForest_ak_record(**PARAMS)
    probs = benchmark(_fit_predict, clf, train_arr, y_train, test_arr)
    np.testing.assert_array_equal(probs, expected)
def test_tsf_ak_3d(benchmark):
    """Benchmark the awkward-array (3D layout) TimeSeriesForest variant."""
    train_arr = ak_3d_arr(X_train)
    test_arr = ak_3d_arr(X_test)
    clf = TimeSeriesForest_ak_3d(**PARAMS)
    probs = benchmark(_fit_predict, clf, train_arr, y_train, test_arr)
    np.testing.assert_array_equal(probs, expected)
| 34.413793 | 79 | 0.778056 |
ace4e94a4d334ad8dd94b2cf0eabd97946fd1e17 | 5,326 | py | Python | oslo/db/sqlalchemy/test_base.py | redhat-openstack/oslo.db | 8e1c6666238fd562d8ae203b83c01f91233ca9bf | [
"Apache-2.0"
] | null | null | null | oslo/db/sqlalchemy/test_base.py | redhat-openstack/oslo.db | 8e1c6666238fd562d8ae203b83c01f91233ca9bf | [
"Apache-2.0"
] | null | null | null | oslo/db/sqlalchemy/test_base.py | redhat-openstack/oslo.db | 8e1c6666238fd562d8ae203b83c01f91233ca9bf | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import fixtures
try:
from oslotest import base as test_base
except ImportError:
raise NameError('Oslotest is not installed. Please add oslotest in your'
' test-requirements')
import six
import testtools
from oslo.db.sqlalchemy import provision
from oslo.db.sqlalchemy import session
from oslo.db.sqlalchemy import utils
class DbFixture(fixtures.Fixture):
    """Basic database fixture.

    Runs tests against a configurable backend such as SQLite, MySQL or
    PostgreSQL (SQLite by default). Override the backend by setting the
    OS_TEST_DBAPI_CONNECTION environment variable to a connection URI
    holding database admin credentials for the desired backend.
    """

    def __init__(self, test):
        super(DbFixture, self).__init__()
        self.test = test

    def _get_uri(self):
        # Fall back to an in-memory SQLite database when no explicit
        # connection string is configured in the environment.
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

    def setUp(self):
        super(DbFixture, self).setUp()
        engine = session.create_engine(self._get_uri())
        self.test.engine = engine
        self.addCleanup(engine.dispose)
        self.test.sessionmaker = session.get_maker(engine)
class DbTestCase(test_base.BaseTestCase):
    """Base class for DB tests, backed by `DbFixture`.

    Intended as the main database test case so all tests run on a single
    backend selected through a user-defined URI. Backend specific tests
    should be decorated with the `backend_specific` decorator.
    """

    FIXTURE = DbFixture

    def setUp(self):
        super(DbTestCase, self).setUp()
        fixture = self.FIXTURE(self)
        self.useFixture(fixture)
# Dialects a backend-specific test is allowed to request.
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']


def backend_specific(*dialects):
    """Decorator skipping backend specific tests on inappropriate engines.

    :param dialects: dialect names under which the decorated test may run.
    :raises ValueError: if a requested dialect is not in ALLOWED_DIALECTS.
    """
    def wrap(f):
        @functools.wraps(f)
        def ins_wrap(self):
            if not set(dialects).issubset(ALLOWED_DIALECTS):
                raise ValueError(
                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
            engine_name = self.engine.name
            if engine_name in dialects:
                return f(self)
            msg = ('The test "%s" can be run '
                   'only on %s. Current engine is %s.')
            args = (f.__name__, ' '.join(dialects), engine_name)
            self.skip(msg % args)
        return ins_wrap
    return wrap
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
    """Base fixture to use default CI databases.
    The databases exist in OpenStack CI infrastructure. But for the
    correct functioning in local environment the databases must be
    created manually.
    """
    # Concrete subclasses must set DRIVER to a dialect name ('mysql', ...).
    DRIVER = abc.abstractproperty(lambda: None)
    # Default credentials used by the OpenStack CI databases.
    DBNAME = PASSWORD = USERNAME = 'openstack_citest'
    # Cached provisioned-database URI; None until _get_uri() runs.
    _uri = None
    def _get_uri(self):
        # Reuse the already-provisioned database within a single fixture use.
        if self._uri is not None:
            return self._uri
        credentials = {
            'backend': self.DRIVER,
            'user': self.USERNAME,
            'passwd': self.PASSWORD,
            'database': self.DBNAME}
        # Skip (rather than fail) the test when the backend is unreachable.
        if self.DRIVER and not utils.is_backend_avail(**credentials):
            msg = '%s backend is not available.' % self.DRIVER
            raise testtools.testcase.TestSkipped(msg)
        self._provisioning_engine = provision.get_engine(
            utils.get_connect_string(backend=self.DRIVER,
                                     user=self.USERNAME,
                                     passwd=self.PASSWORD,
                                     database=self.DBNAME)
        )
        # Provision a throwaway database and register cleanups: drop it,
        # then reset the cached URI so the next use provisions afresh.
        self._uri = provision.create_database(self._provisioning_engine)
        self.addCleanup(
            provision.drop_database, self._provisioning_engine, self._uri)
        self.addCleanup(setattr, self, '_uri', None)
        return self._uri
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
    """Base test case to use default CI databases.
    The subclasses of the test case are running only when openstack_citest
    database is available otherwise tests will be skipped.
    """
    # Concrete subclasses must supply an OpportunisticFixture subclass.
    FIXTURE = abc.abstractproperty(lambda: None)
class MySQLOpportunisticFixture(OpportunisticFixture):
    """Opportunistic fixture targeting the MySQL CI database."""
    DRIVER = 'mysql'
    DBNAME = '' # connect to MySQL server, but not to the openstack_citest db
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
    """Opportunistic fixture targeting the PostgreSQL CI database."""
    DRIVER = 'postgresql'
    DBNAME = 'postgres' # PostgreSQL requires the db name here,use service one
class MySQLOpportunisticTestCase(OpportunisticTestCase):
    """Test case that runs only when the MySQL CI database is available."""
    FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
    """Test case that runs only when the PostgreSQL CI database is available."""
    FIXTURE = PostgreSQLOpportunisticFixture
| 31.892216 | 79 | 0.674803 |
ace4e9a3295c292893f4768dfab881daf0cf3084 | 8,801 | py | Python | DCFR_Khun.py | AirisFiorentini/CFR | 916be715685c4efc584867b0c726a8a1f03b7f17 | [
"Apache-2.0"
] | null | null | null | DCFR_Khun.py | AirisFiorentini/CFR | 916be715685c4efc584867b0c726a8a1f03b7f17 | [
"Apache-2.0"
] | null | null | null | DCFR_Khun.py | AirisFiorentini/CFR | 916be715685c4efc584867b0c726a8a1f03b7f17 | [
"Apache-2.0"
] | null | null | null | from pytreemap import TreeMap
# from typing import List
import numpy as np
import math # math.inf
# Kuhn Poker definitions
# PASS = 0
# BET = 1
# NUM_ACTIONS = 2
# NUM_HANDS = 2
class MNode:
    """Information-set node for matrix-form Kuhn poker (Discounted) CFR.

    All per-card quantities are (NUM_CARDS, NUM_ACTIONS) matrices, so a
    single node covers the same betting history for every possible private
    card. Positive and negative cumulative regrets are kept in separate
    matrices so Discounted CFR can discount them with different factors
    (alpha for positive, beta for negative).
    """

    def __init__(self,
                 NUM_ACTIONS: int,
                 NUM_CARDS: int,
                 NUM_HANDS: int = 2):
        # Cumulative regrets split by sign for the DCFR alpha/beta discounts.
        self.positiveRegretSum = np.zeros((NUM_CARDS, NUM_ACTIONS))
        self.negativeRegretSum = np.zeros((NUM_CARDS, NUM_ACTIONS))
        # Current regret-matching strategy and the (discounted) running sum
        # used to compute the average strategy.
        self.strategy = np.zeros((NUM_CARDS, NUM_ACTIONS))
        self.strategySum = np.zeros((NUM_CARDS, NUM_ACTIONS))
        self.infoSet = ""
        self.NUM_ACTIONS = NUM_ACTIONS
        self.NUM_CARDS = NUM_CARDS
        self.NUM_HANDS = NUM_HANDS

    # Get current information set mixed strategy through regret-matching
    def getStrategy(self,
                    t: int,
                    gamma: float,
                    realizationWeight: np.ndarray,
                    active_player_n: int,
                    curr_player_n: int) -> np.ndarray:
        """Return the regret-matching strategy for iteration *t*.

        :param t: current (1-based) iteration number; feeds the gamma discount.
        :param gamma: DCFR discount exponent for the average-strategy sum.
        :param realizationWeight: per-card reach probabilities of the player
            acting at this node.
        :param active_player_n: player to act at this node.
        :param curr_player_n: player whose regrets this traversal updates.
        :return: (NUM_CARDS, NUM_ACTIONS) strategy matrix.
        """
        regretSum = self.positiveRegretSum + self.negativeRegretSum
        _gamma = (t / (t + 1)) ** gamma
        # Discount/accumulate the average strategy only on the pass of the
        # player being updated, mirroring the DCFR alternating scheme.
        if active_player_n == curr_player_n:
            self.strategySum *= _gamma
        normalizingSum = np.zeros(self.NUM_CARDS)
        for k in range(self.NUM_CARDS):
            for a in range(self.NUM_ACTIONS):
                self.strategy[k][a] = regretSum[k][a] if regretSum[k][a] > 0 else 0
                normalizingSum[k] += self.strategy[k][a]
        for k in range(self.NUM_CARDS):
            for a in range(self.NUM_ACTIONS):
                if normalizingSum[k] > 0:
                    self.strategy[k][a] /= normalizingSum[k]
                else:
                    # No positive regret for this card: play uniformly.
                    self.strategy[k][a] = 1.0 / self.NUM_ACTIONS
                if active_player_n == curr_player_n:
                    self.strategySum[k][a] += realizationWeight[k] * self.strategy[k][a]
        return self.strategy

    # Get average information set mixed strategy across all training iterations
    def getAverageStrategy(self) -> np.ndarray:
        """Return the normalized average strategy (uniform where the sum is 0)."""
        avgStrategy = np.zeros((self.NUM_CARDS, self.NUM_ACTIONS))
        normalizingSum = np.zeros(self.NUM_CARDS)
        for k in range(self.NUM_CARDS):
            normalizingSum[k] += np.sum(self.strategySum[k])
        for k in range(self.NUM_CARDS):
            if normalizingSum[k] > 0:
                avgStrategy[k] = self.strategySum[k] / normalizingSum[k]
            else:
                avgStrategy[k] = 1 / self.NUM_ACTIONS
        return avgStrategy

    # show the results for every card
    def toString_m(self):
        """Print the average strategy and cumulative regret for each card."""
        AvSt = self.getAverageStrategy()
        regretSum = self.positiveRegretSum + self.negativeRegretSum
        # Fix: was hard-coded range(3); use NUM_CARDS so other deck sizes work.
        for i in range(self.NUM_CARDS):
            print('{:4}: {},\n regret {}'.format(str(i + 1) + self.infoSet, AvSt[i],
                                                 regretSum[i]))

    def toString(self) -> str:
        """Return a string with this info set's average strategy and regrets."""
        return '{:4}: {},\n regret {}'.format(self.infoSet,
                                              self.getAverageStrategy(),
                                              self.positiveRegretSum + self.negativeRegretSum)
class MKuhnTrainer:
    """Discounted CFR (DCFR) trainer for Kuhn poker in matrix form.

    Each betting history gets a single MNode whose strategy/regret tables
    are vectorized over all NUM_CARDS private cards, so one traversal of
    the betting tree updates every deal at once.
    """
    # Kuhn Poker definitions
    def __init__(self):
        # Maps betting-history string -> MNode (sorted for readable output).
        self.nodeMap = TreeMap()
        self.PASS = 0
        self.BET = 1
        self.NUM_ACTIONS = 2
        self.NUM_CARDS = 3
    def is_terminal(self, history: str) -> bool:
        """Return True iff *history* is one of Kuhn poker's five terminal histories."""
        return history in ['bp', 'bb', 'pp', 'pbb', 'pbp']
    def count_param(self,
                    phi: float,
                    iter_n: int) -> float:
        """DCFR discount factor iter_n**phi / (iter_n**phi + 1).

        +inf maps to 1 (no discounting, as in CFR+ for positive regrets)
        and -inf maps to 0 (that regret sum is zeroed each iteration).
        """
        if math.isinf(phi) and phi > 0:
            return 1
        elif math.isinf(phi) and phi < 0:
            return 0
        else:
            return (iter_n ** phi) / (iter_n ** phi + 1)
    # Counterfactual regret minimization iteration (one recursive traversal)
    def m_dcfr(self,
               iter_n: int,
               history: str,
               p0: np.ndarray,
               p1: np.ndarray,
               curr_player_n: int,
               alpha: float,
               beta: float,
               gamma: float) -> np.ndarray:
        """One DCFR traversal below *history*, updating curr_player_n's regrets.

        :param iter_n: 1-based iteration number, used by the discounts.
        :param history: betting history so far ('p' = pass/check, 'b' = bet).
        :param p0: per-card reach probabilities of player 0.
        :param p1: per-card reach probabilities of player 1.
        :param curr_player_n: the player whose regrets are updated this pass.
        :param alpha: discount exponent for positive regrets.
        :param beta: discount exponent for negative regrets.
        :param gamma: discount exponent for the average strategy.
        :return: (NUM_CARDS, NUM_CARDS) matrix of player-0 utilities, indexed
            by (player-0 card, player-1 card).
        """
        plays = len(history)
        player = plays % 2  # active player: the one to act at this node
        # return payoff for terminal states
        if plays > 1:
            if self.is_terminal(history):
                terminalPass = history[plays - 1] == 'p'
                doubleBet = history[plays - 2: plays] == "bb"
                # Payoff matrix for player 0; diagonal stays 0 (cards always differ).
                U = np.zeros((self.NUM_CARDS, self.NUM_CARDS))
                for i in range(self.NUM_CARDS):
                    for j in range(self.NUM_CARDS):
                        if i != j:
                            if terminalPass:
                                if history == "pp":
                                    U[i][j] = 1 if i > j else -1
                                elif history == 'bp':
                                    U[i][j] = 1
                                else:  # pbp
                                    U[i][j] = -1
                            elif doubleBet:
                                U[i][j] = 2 if i > j else -2
                return U
        infoSet = history
        # Get information set node or create it if nonexistent
        node = self.nodeMap.get(infoSet)
        if node is None:
            node = MNode(self.NUM_ACTIONS, self.NUM_CARDS)
            node.infoSet = infoSet
            self.nodeMap.put(infoSet, node)
        # For each action, recursively call m_dcfr with additional history and probability
        strategy = node.getStrategy(iter_n, gamma, p0 if player == 0 else p1, player, curr_player_n)
        util = np.zeros((self.NUM_CARDS, self.NUM_CARDS, self.NUM_ACTIONS))
        nodeUtil = np.zeros((self.NUM_CARDS, self.NUM_CARDS))
        for a in range(self.NUM_ACTIONS):
            nextHistory = history + ("p" if a == 0 else "b")
            if player == 0:
                util[:, :, a] = self.m_dcfr(iter_n, nextHistory, p0 * strategy[:, a], p1, curr_player_n,
                                            alpha, beta, gamma)
                for i in range(self.NUM_CARDS):
                    nodeUtil[i, :] += util[i, :, a] * strategy[i, a]
            else:
                util[:, :, a] = self.m_dcfr(iter_n, nextHistory, p0, p1 * strategy[:, a], curr_player_n,
                                            alpha, beta, gamma)
                for j in range(self.NUM_CARDS):
                    nodeUtil[:, j] += util[:, j, a] * strategy[j, a]
        # For each action, compute and accumulate counterfactual regret;
        # regrets are refreshed only for curr_player_n, and only on their move.
        if curr_player_n == player:
            _alpha = self.count_param(alpha, iter_n)
            _beta = self.count_param(beta, iter_n)
            for a in range(self.NUM_ACTIONS):
                # DCFR: discount accumulated regrets (by sign) before adding.
                node.positiveRegretSum *= _alpha
                node.negativeRegretSum *= _beta
                regret = util[:, :, a] - nodeUtil
                if player == 0:
                    # Weight regret by the opponent's per-card reach probabilities.
                    r_new = np.dot(regret, p1)
                else:
                    # Player 1 maximizes -U (zero-sum), weighted by p0.
                    r_new = np.dot(p0, -regret)
                r_new_sign = r_new >= 0
                for i in range(node.NUM_CARDS):
                    if r_new_sign[i]:
                        node.positiveRegretSum[i, a] += r_new[i]
                    else:
                        node.negativeRegretSum[i, a] += r_new[i]
        return nodeUtil
    # train Kuhn poker
    def train(self,
              iterations: int):
        """Run DCFR for *iterations* iterations, print the results, return self."""
        util = np.zeros((3, 3))
        for i in range(1, iterations + 1):
            # Alternate which player's regrets are updated on each pass.
            for player_n in range(2):
                util += self.m_dcfr(i, "", np.array([1] * 3), np.array([1] * 3), player_n,
                                    math.inf, -math.inf, 2)
        # Recorded runs for other (alpha, beta, gamma) settings:
        # CFR+: math.inf, -math.inf, 2: 1500 // -0.055552097912113935
        # 1.5, 0, 2: 1500 //-0.055527798514633075
        # 1, 1, 1
        agv = util / 2 / iterations / 6  # average game value
        print(np.sum(agv))
        print("Average game value: ", agv)
        for n in self.nodeMap.values():
            # NOTE(review): toString_m() prints internally and returns None,
            # so this statement also prints "None" after each node's output.
            print(n.toString_m())
        return self
if __name__ == '__main__':
    # Train the matrix-form Kuhn poker DCFR solver for 5000 iterations.
    trainer = MKuhnTrainer().train(5000)
| 40.004545 | 111 | 0.508465 |
ace4e9ce2c71c5bf78eb2ef9c472717f482ca314 | 6,510 | py | Python | test/utility.py | usc-isi-i2/datamart-api | da0792786fc342fc3cd6fbfcdeacae0eac6bff0d | [
"MIT"
] | null | null | null | test/utility.py | usc-isi-i2/datamart-api | da0792786fc342fc3cd6fbfcdeacae0eac6bff0d | [
"MIT"
] | 113 | 2020-06-22T18:40:53.000Z | 2021-07-06T04:18:40.000Z | test/utility.py | usc-isi-i2/datamart-api | da0792786fc342fc3cd6fbfcdeacae0eac6bff0d | [
"MIT"
] | 1 | 2020-10-23T07:37:51.000Z | 2020-10-23T07:37:51.000Z | import io
import csv
import os
import re
import typing
import pandas as pd
from requests import put
from requests import post, delete, get
_remove_pattern = re.compile(r'''["' ]''')
def edge_id(node1, label, node2):
return _remove_pattern.sub('', f'{node1}-{label}-{node2}')
def upload_data_put(file_path, url):
file_name = os.path.basename(file_path)
with open(file_path, mode='rb') as fd:
files = {
'file': (file_name, fd, 'application/octet-stream')
}
result = put(url, files=files)
return result
def create_dataset(p_url, return_edges=False, name='Unit Test Dataset', dataset_id='unittestdataset',
description='will be deleted in this unit test', url='http://unittest101.org'):
metadata = {
'name': name,
'dataset_id': dataset_id,
'description': description,
'url': url
}
if return_edges:
post_url = f'{p_url}/metadata/datasets?tsv=true'
else:
post_url = f'{p_url}/metadata/datasets'
return post(post_url, json=metadata)
def create_dataset_with_edges(
p_url, name='Unit Test Dataset', dataset_id='unittestdataset',
description='will be deleted in this unit test', url='http://unittest101.org',
extra_edges=[], delete_labels=[]):
qnode = 'Q' + dataset_id
edge_list = []
for label, node2 in [('P31', 'Q1172284'),
('label', f'"{name}"'),
('P1476', f'"{name}"'),
('description', f'"{description}"'),
('P2699', f'"{url}"'),
('P1813', f'"{dataset_id}"'),
('P5017', '^2021-03-05T10:14:11/14')] + extra_edges:
if label not in delete_labels:
edge_list.append([qnode, label, node2, f'{qnode}-{label}'])
edges = pd.DataFrame(edge_list, columns=['node1', 'label', 'node2', 'id'])
post_url = f'{p_url}/metadata/datasets'
return post(post_url, files={'file': io.StringIO(edges.to_csv(sep='\t', quoting=csv.QUOTE_NONE, index=False))})
def delete_dataset(url, dataset_id='unittestdataset'):
return delete(f'{url}/metadata/datasets/{dataset_id}')
def get_dataset(url, dataset_id='unittestdataset'):
return get(f'{url}/metadata/datasets/{dataset_id}')
def create_variable(p_url, dataset_id, variable_id='unittestvariable', name='unit test variable',
description: str = '', tag: typing.List[str] = [], return_edges=False):
metadata = {
'name': name,
'variable_id': variable_id
}
if description:
metadata['description'] = description
if tag:
metadata['tag'] = tag
if return_edges:
post_url = f'{p_url}/metadata/datasets/{dataset_id}/variables?tsv=true'
else:
post_url = f'{p_url}/metadata/datasets/{dataset_id}/variables'
return post(post_url, json=metadata)
def variable_edges(
variable_id: str, dataset_qnode: str = 'Qunittestdataset', property_pnode: str = "P1687",
label: str = "", description: str = "", qualifier_properties: typing.List[str] = [],
delete_labels = []
):
var_qnode = f'Q{variable_id}'
if not label:
label = f'Variable-{var_qnode}'
if not description:
description = f'Variable {var_qnode} for dataset {dataset_qnode}'
if 'P585' not in qualifier_properties:
qualifier_properties += ['P585']
if 'P248' not in qualifier_properties:
qualifier_properties += ['P248']
triples = [(var_qnode, 'P31', 'Q50701'),
(var_qnode, 'label', f'"{label}"'),
(var_qnode, 'P1476', f'"{label}"'), # Full name
(var_qnode, 'P1813', f'"{variable_id}"'), # Short name, i.e. variable identifier
(var_qnode, 'description', f'"{description}"'),
(var_qnode, 'P1687', property_pnode),
(var_qnode, 'P2006020004', dataset_qnode),
(dataset_qnode, 'P2006020003', var_qnode)]
triples += [(var_qnode, 'P2006020002', q) for q in qualifier_properties]
edge_list = []
for node1, label, node2 in triples:
if label not in delete_labels:
edge_list.append([node1, label, node2, edge_id(node1, label, node2)])
edges = pd.DataFrame(edge_list, columns=['node1', 'label', 'node2', 'id'])
return edges
def create_variables_with_edges(
p_url, dataset_id, tag: typing.List[str] = [], return_edges=False):
dataset_qnode = 'Q' + dataset_id
edges = pd.DataFrame.append(
variable_edges(f'variable-{dataset_id}-001', dataset_qnode),
variable_edges(f'variable-{dataset_id}-002', dataset_qnode)).reset_index(drop=True)
if return_edges:
return edges
else:
post_url = f'{p_url}/metadata/datasets/{dataset_id}/variables'
return post(post_url, files={'file': io.StringIO(edges.to_csv(sep='\t', quoting=csv.QUOTE_NONE, index=False))})
def get_variable(p_url, dataset_id='unittestdataset', variable_id='unittestvariable'):
if variable_id is None:
return get(f'{p_url}/metadata/datasets/{dataset_id}/variables')
else:
return get(f'{p_url}/metadata/datasets/{dataset_id}/variables/{variable_id}')
def delete_variable(url, dataset_id='unittestdataset', variable_id='unittestvariable'):
return delete(f'{url}/metadata/datasets/{dataset_id}/variables/{variable_id}')
def delete_variable_data(url, dataset_id='unittestdataset', variable_id='unittestvariable'):
return delete(f'{url}/datasets/{dataset_id}/variables/{variable_id}')
def update_variable_metadata(url, dataset_id='unittestdataset', variable_id='unittestvariable', name=None, description=None, tag=[]):
update = {}
if name:
update['name'] = name
if description:
update['description'] = description
if tag:
update['tag'] = tag
return put(f'{url}/metadata/datasets/{dataset_id}/variables/{variable_id}', json=update)
def update_dataset_metadata(datamart_url, dataset_id='unittestdataset', name=None, description=None, url=None):
update = {}
if name:
update['name'] = name
if description:
update['description'] = description
if url:
update['url'] = url
return put(f'{datamart_url}/metadata/datasets/{dataset_id}', json=update)
def get_data(datamart_url, dataset_id='unittestdataset', variable_id='unittestvariable'):
url = f'{datamart_url}/datasets/{dataset_id}/variables/{variable_id}'
return get(url)
| 37.413793 | 133 | 0.638095 |
ace4e9d3b8b18aace52631fdd5824bfd21c2f686 | 20,464 | py | Python | PythonAPI/my_workplace/repository/procgen.py | joao-paulo-alves/carla_vts | 92542ad69a9119734af7b97cf0dc4d887ecf354b | [
"MIT"
] | null | null | null | PythonAPI/my_workplace/repository/procgen.py | joao-paulo-alves/carla_vts | 92542ad69a9119734af7b97cf0dc4d887ecf354b | [
"MIT"
] | null | null | null | PythonAPI/my_workplace/repository/procgen.py | joao-paulo-alves/carla_vts | 92542ad69a9119734af7b97cf0dc4d887ecf354b | [
"MIT"
] | null | null | null | import glob
import os
import sys
import time
import carla
from carla import VehicleLightState as vls
import logging
import math
from numpy import random
##import imageio
from queue import Queue
from queue import Empty
from configparser import ConfigParser
import threading
from concurrent.futures import ThreadPoolExecutor
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
def saving(s):
    """Persist a (sensor_data, frame_number) pair to output/<frame>.png."""
    image, frame_number = s
    image.save_to_disk('output/%06d.png' % frame_number)
def Weather(world, parser):
    """Pick a random CARLA weather preset and apply it to *world*.

    The 'timeofday' config option restricts the draw: 1 keeps day/dusk/dawn
    presets (rejects indices 19-25), 2 keeps night presets only (indices
    19-25); any other value draws from the full range 1-28.

    Fixes applied:
    * `random` here is numpy.random (see the module imports), whose randint
      upper bound is EXCLUSIVE; the original randint(1, 28) could never draw
      preset 28 even though a branch for it exists. Draw with high=29 so all
      28 presets are reachable.
    * Previously any 'timeofday' other than 1 or 2 crashed with a NameError
      because `choice` was never assigned; fall back to the full range.

    :param world: carla.World to apply the weather to.
    :param parser: ConfigParser holding the [worldsettings] section.
    :return: the chosen preset index (1-28) so callers can adapt lights etc.
    """
    day_choice = parser.getint('worldsettings', 'timeofday')
    if day_choice == 1:
        choice = random.randint(1, 29)
        while 19 <= choice <= 25:
            choice = random.randint(1, 29)
    elif day_choice == 2:
        choice = random.randint(1, 29)
        while 1 <= choice <= 18 or 26 <= choice <= 28:
            choice = random.randint(1, 29)
    else:
        choice = random.randint(1, 29)
    # NOTE(review): carla.WeatherParameters.<Preset> looks like a shared class
    # attribute; mutating sun angles on it below may permanently alter the
    # preset for the rest of the process -- confirm against the carla API.
    # ------------------------Cloudy Day---------------------------------
    if choice == 1:
        world.set_weather(carla.WeatherParameters.CloudyNoon)
    elif choice == 2:
        world.set_weather(carla.WeatherParameters.WetNoon)
    elif choice == 3:
        world.set_weather(carla.WeatherParameters.WetCloudyNoon)
    elif choice == 4:
        world.set_weather(carla.WeatherParameters.SoftRainNoon)
    elif choice == 5:
        world.set_weather(carla.WeatherParameters.MidRainyNoon)
    elif choice == 6:
        world.set_weather(carla.WeatherParameters.HardRainNoon)
    elif choice == 7:
        world.set_weather(carla.WeatherParameters.CloudySunset)
    elif choice == 8:
        world.set_weather(carla.WeatherParameters.WetSunset)
    elif choice == 9:
        world.set_weather(carla.WeatherParameters.WetCloudySunset)
    elif choice == 10:
        world.set_weather(carla.WeatherParameters.SoftRainSunset)
    elif choice == 11:
        world.set_weather(carla.WeatherParameters.MidRainSunset)
    elif choice == 12:
        world.set_weather(carla.WeatherParameters.HardRainSunset)
    elif choice == 13:
        weather_conversion = carla.WeatherParameters.CloudySunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 14:
        weather_conversion = carla.WeatherParameters.WetSunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 15:
        weather_conversion = carla.WeatherParameters.WetCloudySunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 16:
        weather_conversion = carla.WeatherParameters.SoftRainSunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 17:
        weather_conversion = carla.WeatherParameters.MidRainSunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 18:
        weather_conversion = carla.WeatherParameters.HardRainSunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 19:
        weather_conversion = carla.WeatherParameters.CloudyNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 20:
        weather_conversion = carla.WeatherParameters.WetNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 21:
        weather_conversion = carla.WeatherParameters.WetCloudyNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 22:
        weather_conversion = carla.WeatherParameters.SoftRainNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 23:
        weather_conversion = carla.WeatherParameters.MidRainyNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 24:
        weather_conversion = carla.WeatherParameters.HardRainNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    # ------------------------------Clear Day----------------------------------------
    elif choice == 25:
        weather_conversion = carla.WeatherParameters.ClearNoon
        weather_conversion.sun_altitude_angle = weather_conversion.sun_altitude_angle - 180  # Night
        world.set_weather(weather_conversion)
    elif choice == 26:
        world.set_weather(carla.WeatherParameters.ClearSunset)
    elif choice == 27:
        weather_conversion = carla.WeatherParameters.ClearSunset
        weather_conversion.sun_azimuth_angle = weather_conversion.sun_azimuth_angle + 180  # Dawn
        world.set_weather(weather_conversion)
    elif choice == 28:
        world.set_weather(carla.WeatherParameters.ClearNoon)
    return choice
def get_actor_blueprints(world, filter, generation):
    """Return blueprints matching *filter*, optionally narrowed by generation.

    :param world: carla.World providing the blueprint library.
    :param filter: wildcard pattern passed to the blueprint library.
    :param generation: "all", or a generation number as a string ("1"/"2").
    :return: list of matching blueprints; [] when the generation is invalid.
    """
    bps = world.get_blueprint_library().filter(filter)
    if generation.lower() == "all":
        return bps
    # If the filter returns only one bp, we assume that this one is needed
    # and therefore we ignore the generation.
    if len(bps) == 1:
        return bps
    try:
        int_generation = int(generation)
        # Check if generation is in available generations
        if int_generation in [1, 2]:
            bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation]
            return bps
        else:
            print(" Warning! Actor Generation is not valid. No actor will be spawned.")
            return []
    # Fix: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only conversion failures should be handled here.
    except (ValueError, TypeError):
        print(" Warning! Actor Generation is not valid. No actor will be spawned.")
        return []
def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Hand a captured frame to the consumer as a (data, frame_number) tuple.

    Any per-frame processing (e.g. saving to disk) happens on the consumer
    side; this callback only enqueues the raw measurement.
    """
    frame_number = sensor_data.frame
    sensor_queue.put((sensor_data, frame_number))
def main():
parser = ConfigParser()
parser.read('config.ini')
number_of_vehicles = parser.getint('vehiclesettings', 'number_of_vehicles')
number_of_walkers = parser.getint('walkersettings', 'number_of_walkers')
seed = None
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
vehicles_list = []
walkers_list = []
all_id = []
client = carla.Client(parser.get('worldsettings', 'host'), parser.getint('worldsettings', 'port'))
client.set_timeout(40.0)
synchronous_master = False
random.seed(seed if seed is not None else int(time.time()))
try:
map_choice = random.randint(1, 6)
if map_choice == 6:
world = client.load_world('Town10HD')
else:
world = client.load_world('Town0%d' % map_choice)
# world = client.get_world()
traffic_manager = client.get_trafficmanager(parser.getint('worldsettings', 'tm_port'))
traffic_manager.set_global_distance_to_leading_vehicle(2.5)
if parser.getboolean('worldsettings', 'respawn'):
traffic_manager.set_respawn_dormant_vehicles(True)
if parser.getboolean('worldsettings', 'hybrid'):
traffic_manager.set_hybrid_physics_mode(True)
traffic_manager.set_hybrid_physics_radius(70.0)
if seed is not None:
traffic_manager.set_random_device_seed(seed)
settings = world.get_settings()
if not parser.getboolean('worldsettings', 'asynch'):
traffic_manager.set_synchronous_mode(True)
if not settings.synchronous_mode:
synchronous_master = True
settings.synchronous_mode = True
settings.fixed_delta_seconds = 1 / 60
settings.max_substep_delta_time = 1 / 60
else:
synchronous_master = False
else:
print("You are currently in asynchronous mode. If this is a traffic simulation, \
you could experience some issues. If it's not working correctly, switch to synchronous \
mode by using traffic_manager.set_synchronous_mode(True)")
if parser.getboolean('worldsettings', 'no_rendering'):
settings.no_rendering_mode = True
world.apply_settings(settings)
choice = Weather(world, parser)
blueprints = get_actor_blueprints(world, parser.get('worldsettings', 'filterv'),
parser.get('worldsettings', 'generationv'))
blueprintsWalkers = get_actor_blueprints(world, parser.get('worldsettings', 'filterw'),
parser.get('worldsettings', 'generationw'))
if parser.getboolean('worldsettings', 'safe'):
blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]
blueprints = [x for x in blueprints if not x.id.endswith('microlino')]
blueprints = [x for x in blueprints if not x.id.endswith('carlacola')]
blueprints = [x for x in blueprints if not x.id.endswith('cybertruck')]
blueprints = [x for x in blueprints if not x.id.endswith('t2')]
blueprints = [x for x in blueprints if not x.id.endswith('sprinter')]
blueprints = [x for x in blueprints if not x.id.endswith('firetruck')]
blueprints = [x for x in blueprints if not x.id.endswith('ambulance')]
blueprints = sorted(blueprints, key=lambda bp: bp.id)
spawn_points = world.get_map().get_spawn_points()
number_of_spawn_points = len(spawn_points)
if number_of_vehicles < number_of_spawn_points:
random.shuffle(spawn_points)
elif number_of_vehicles > number_of_spawn_points:
msg = 'requested %d vehicles, but could only find %d spawn points'
logging.warning(msg, number_of_vehicles, number_of_spawn_points)
number_of_vehicles = number_of_spawn_points
# @todo cannot import these directly.
SpawnActor = carla.command.SpawnActor
SetAutopilot = carla.command.SetAutopilot
FutureActor = carla.command.FutureActor
blueprint_library = world.get_blueprint_library()
# --------------
# Spawn vehicles
# --------------
batch = []
hero = parser.getboolean('worldsettings', 'hero')
for n, transform in enumerate(spawn_points):
if n >= number_of_vehicles:
break
blueprint = random.choice(blueprints)
if hero:
blueprint = blueprint_library.find('vehicle.tesla.model3')
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
if hero:
blueprint.set_attribute('role_name', 'hero')
hero = False
else:
blueprint.set_attribute('role_name', 'autopilot')
# spawn the cars and set their autopilot and light state all together
x = SpawnActor(blueprint, transform)
z = SetAutopilot(FutureActor, True, traffic_manager.get_port())
batch.append(x.then(z))
for response in client.apply_batch_sync(batch, synchronous_master):
if response.error:
logging.error(response.error)
else:
vehicles_list.append(response.actor_id)
# Set automatic vehicle lights update if specified
all_vehicle_actors = world.get_actors(vehicles_list)
attr = blueprint_library.find(parser.get('sensorsettings', 'bp'))
attr.set_attribute('image_size_x', parser.get('sensorsettings', 'x'))
attr.set_attribute('image_size_y', parser.get('sensorsettings', 'y'))
attr.set_attribute('fov', parser.get('sensorsettings', 'fov'))
attr.set_attribute('fstop', parser.get('sensorsettings', 'fstop'))
attr.set_attribute('sensor_tick', parser.get('sensorsettings', 'tick'))
sensor_location = carla.Location(1, 0, 1.2)
sensor_rotation = carla.Rotation(8.75, 0, 0)
sensor_transform = carla.Transform(sensor_location, sensor_rotation)
sensor = world.spawn_actor(attr, sensor_transform, attach_to=all_vehicle_actors[0],
attachment_type=carla.AttachmentType.Rigid)
if parser.getboolean('worldsettings', 'car_lights_on'):
if 1 <= choice <= 25:
all_vehicle_actors = world.get_actors(vehicles_list)
for actor in all_vehicle_actors:
traffic_manager.update_vehicle_lights(actor, True)
if 7 <= choice <= 27:
lights = world.get_lightmanager()
street = lights.get_all_lights(carla.LightGroup.Street)
lights.turn_on(street)
if 19 <= choice <= 25:
lights = world.get_lightmanager()
building = lights.get_all_lights(carla.LightGroup.Building)
lights.turn_on(building)
# -------------
# Spawn Walkers
# -------------
# some settings
percentagePedestriansRunning = parser.getfloat('walkersettings', 'perc_run') # how many pedestrians will run
percentagePedestriansCrossing = parser.getfloat('walkersettings',
'perc_cross') # how many pedestrians will walk through the road
if parser.getint('worldsettings', 'seedw'):
world.set_pedestrians_seed(parser.getint('worldsettings', 'seedw'))
random.seed(parser.getint('worldsettings', 'seedw'))
# 1. take all the random locations to spawn
spawn_points = []
for i in range(number_of_walkers):
spawn_point = carla.Transform()
loc = world.get_random_location_from_navigation()
if (loc != None):
spawn_point.location = loc
spawn_points.append(spawn_point)
# 2. we spawn the walker object
batch = []
walker_speed = []
for spawn_point in spawn_points:
walker_bp = random.choice(blueprintsWalkers)
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
# set the max speed
if walker_bp.has_attribute('speed'):
if (random.random() > percentagePedestriansRunning):
# walking
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
else:
# running
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
else:
print("Walker has no speed")
walker_speed.append(0.0)
batch.append(SpawnActor(walker_bp, spawn_point))
results = client.apply_batch_sync(batch, True)
walker_speed2 = []
for i in range(len(results)):
if results[i].error:
logging.error(results[i].error)
else:
walkers_list.append({"id": results[i].actor_id})
walker_speed2.append(walker_speed[i])
walker_speed = walker_speed2
# 3. we spawn the walker controller
batch = []
walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker')
for i in range(len(walkers_list)):
batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"]))
results = client.apply_batch_sync(batch, True)
for i in range(len(results)):
if results[i].error:
logging.error(results[i].error)
else:
walkers_list[i]["con"] = results[i].actor_id
# 4. we put together the walkers and controllers id to get the objects from their id
for i in range(len(walkers_list)):
all_id.append(walkers_list[i]["con"])
all_id.append(walkers_list[i]["id"])
all_actors = world.get_actors(all_id)
# wait for a tick to ensure client receives the last transform of the walkers we have just created
if parser.getboolean('worldsettings', 'asynch') or not synchronous_master:
world.wait_for_tick()
else:
world.tick()
# 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])
# set how many pedestrians can cross the road
world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
for i in range(0, len(all_id), 2):
# start walker
all_actors[i].start()
# set walk to random point
all_actors[i].go_to_location(world.get_random_location_from_navigation())
# max speed
all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))
print('spawned %d vehicles and %d walkers, press Ctrl+C to exit.' % (len(vehicles_list), len(walkers_list)))
# Example of how to use Traffic Manager parameters
traffic_manager.global_percentage_speed_difference(30.0)
spectator = world.get_spectator()
sensor_queue = Queue()
timer = 0
sensor.listen(lambda data: sensor_callback(data, sensor_queue, "camera01"))
executor = ThreadPoolExecutor(16)
t0 = time.time()
while True:
if not parser.getboolean('worldsettings', 'asynch') and synchronous_master:
world.tick()
transform = all_vehicle_actors[0].get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=25),
carla.Rotation(pitch=-90)))
else:
world.wait_for_tick()
if timer > 10:
break
if sensor_queue.qsize() > 0:
s = sensor_queue.get(True, 0.01)
# t = threading.Thread(target=saving, args=(s,))
# t.start()
f = executor.submit(saving, s)
timer += 1 / 60
print(timer)
t1 = time.time()
print(f'Done in {t1 - t0} seconds.')
finally:
world.tick()
sensor.destroy()
if not parser.getboolean('worldsettings', 'asynch') and synchronous_master:
settings = world.get_settings()
settings.synchronous_mode = False
settings.no_rendering_mode = False
settings.fixed_delta_seconds = None
settings.max_substep_delta_time = 0.1
world.apply_settings(settings)
print('\ndestroying %d vehicles' % len(vehicles_list))
client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list])
# stop walker controllers (list is [controller, actor, controller, actor ...])
for i in range(0, len(all_id), 2):
all_actors[i].stop()
print('\ndestroying %d walkers' % len(walkers_list))
client.apply_batch([carla.command.DestroyActor(x) for x in all_id])
time.sleep(0.5)
if __name__ == '__main__':
try:
#while True:
main()
except KeyboardInterrupt:
pass
finally:
print('\ndone.') | 42.901468 | 120 | 0.637021 |
ace4ea62b6b73bb9ee48ed4679ef4a3d489549be | 515 | py | Python | example.py | dwind/TextToCoNLL | cd712cd380604ca78cc65c8653287a26eb66c711 | [
"Apache-2.0"
] | 3 | 2020-06-09T18:16:31.000Z | 2021-02-18T10:14:29.000Z | example.py | dwind/TextToCoNLL | cd712cd380604ca78cc65c8653287a26eb66c711 | [
"Apache-2.0"
] | null | null | null | example.py | dwind/TextToCoNLL | cd712cd380604ca78cc65c8653287a26eb66c711 | [
"Apache-2.0"
] | 1 | 2020-10-09T12:24:31.000Z | 2020-10-09T12:24:31.000Z | import spacy
nlp = spacy.load('en_core_web_sm')
from TextToCoNLL import text_to_conll
text_to_conll(text='He is \n\nwalking \t\t on air. He is Michael Jordan.',
nlp=nlp,
delimiter=" ",
output_dir='TextToConll/output',
basename='example.conll',
spacy_attrs=['text', 'lemma_', 'ent_type_'],
default_values={'ent_type_': "O"},
exclude=['is_space'],
start_with_index=True,
verbose=1) | 30.294118 | 79 | 0.551456 |
ace4eb3a5f6836b98421c0f9c0262bb2e44cab2b | 68,586 | py | Python | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/ratio_based_results/cmp_namd/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/ratio_based_results/cmp_namd/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/ratio_based_results/cmp_namd/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.41227,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.526504,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.27169,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.840619,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.45565,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.834855,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 3.13112,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.482634,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 9.89611,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.42917,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0304731,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.372864,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.225367,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.802034,
'Execution Unit/Register Files/Runtime Dynamic': 0.25584,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 1.01394,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 2.17528,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 6.49413,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00155863,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00155863,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00136369,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000531254,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00323742,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00771837,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0147253,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.216651,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.45072,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.735845,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.42566,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.101684,
'L2/Runtime Dynamic': 0.0216703,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.52583,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.54786,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.171102,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.171102,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.3371,
'Load Store Unit/Runtime Dynamic': 3.56278,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.421909,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.843818,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.149737,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.151264,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0738898,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.81736,
'Memory Management Unit/Runtime Dynamic': 0.225154,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 31.6827,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.49728,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0610018,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.406378,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.96466,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 13.694,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.192857,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.354167,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.06275,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.339999,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.548406,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.276817,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.16522,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.225925,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.12548,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.200777,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0142611,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.174463,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.105469,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.37524,
'Execution Unit/Register Files/Runtime Dynamic': 0.11973,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.41577,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.898094,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.94259,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000723483,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000723483,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000631169,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000244892,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00151508,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00359321,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00690037,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.10139,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.210923,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.344368,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96396,
'Instruction Fetch Unit/Runtime Dynamic': 0.667175,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0475544,
'L2/Runtime Dynamic': 0.0100511,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.7124,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.19258,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0800813,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0800814,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.09056,
'Load Store Unit/Runtime Dynamic': 1.66759,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.197467,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.394934,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0700816,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0707948,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.034581,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.676491,
'Memory Management Unit/Runtime Dynamic': 0.105376,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 23.4935,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.528152,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0217673,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.162107,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.712026,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 6.10481,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.163763,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.331315,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.902532,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.288829,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.46587,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.235156,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.989855,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.191967,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.77844,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.170508,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0121148,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.148177,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0895962,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.318685,
'Execution Unit/Register Files/Runtime Dynamic': 0.101711,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.353117,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.762809,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.59107,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000615185,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000615185,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000536654,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0002082,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00128706,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00305408,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00586875,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0861311,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.47868,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.179212,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.29254,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.96309,
'Instruction Fetch Unit/Runtime Dynamic': 0.566807,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0402564,
'L2/Runtime Dynamic': 0.00852363,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.33947,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.01292,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0680161,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0680161,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.66066,
'Load Store Unit/Runtime Dynamic': 1.41637,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.167716,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.335432,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.059523,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0601267,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.340644,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0293817,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.599003,
'Memory Management Unit/Runtime Dynamic': 0.0895085,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 21.6309,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.448527,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0184896,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.137713,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.60473,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.277,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.107496,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.287121,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.592583,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.189896,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.306296,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.154608,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.6508,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.126335,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.10726,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.111952,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00796511,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0973519,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0589069,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.209303,
'Execution Unit/Register Files/Runtime Dynamic': 0.066872,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.231973,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.501255,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.91143,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000405582,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000405582,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000353742,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000137202,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000846202,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00201111,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00387152,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0566287,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.60207,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.117909,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.192336,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.9954,
'Instruction Fetch Unit/Runtime Dynamic': 0.372757,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0262862,
'L2/Runtime Dynamic': 0.00557896,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.61908,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.665934,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0447098,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0447098,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.83021,
'Load Store Unit/Runtime Dynamic': 0.931138,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.110247,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.220494,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.039127,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0395211,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.223963,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0193313,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.447285,
'Memory Management Unit/Runtime Dynamic': 0.0588524,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.9959,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.294494,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0121515,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0905535,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.397199,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.67695,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.05240383346806172,
'Runtime Dynamic': 0.05240383346806172,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.00813562,
'Runtime Dynamic': 0.00383982,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 94.8112,
'Peak Power': 127.923,
'Runtime Dynamic': 28.7567,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 94.803,
'Total Cores/Runtime Dynamic': 28.7528,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.00813562,
'Total L3s/Runtime Dynamic': 0.00383982,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.039387 | 124 | 0.681976 |
ace4ebe63bddfcf332645462e7373703242e4abe | 1,369 | py | Python | prediction.py | arturhgca/py-mpc | d62c26508bcdedaec31257985c59823acc62a1ac | [
"Apache-2.0"
] | 1 | 2019-07-20T13:46:13.000Z | 2019-07-20T13:46:13.000Z | prediction.py | arturhgca/py-mpc | d62c26508bcdedaec31257985c59823acc62a1ac | [
"Apache-2.0"
] | null | null | null | prediction.py | arturhgca/py-mpc | d62c26508bcdedaec31257985c59823acc62a1ac | [
"Apache-2.0"
] | null | null | null | # python libs
import numpy as np
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()})
# Module-level `global` is a no-op (module scope already is global scope);
# kept byte-identical, presumably a leftover mirroring the function below.
global cymodels
def initialize_models():
    """Bind the compiled Cython models to the module-global ``cymodels``.

    Currently a stub: the import is commented out, so ``cymodels`` is never
    actually bound and ``prediction`` below would raise NameError if called.
    """
    global cymodels
    # import cymodels # to be used with compiled models for improved performance
def prediction(models, controls, history_array, prediction_horizon, history_length, initial_tick, number_of_controls, number_of_variables):
    """Roll the compiled models forward ``prediction_horizon`` ticks.

    Returns one list of predicted values per control index (length
    ``number_of_controls``). Relies on the module-global ``cymodels``
    (see ``initialize_models``). ``models``, ``controls``,
    ``history_length`` and ``initial_tick`` are currently unused here.
    """
    predictions = [list() for i in range(number_of_controls)]
    for t in range(prediction_horizon):
        # Seed the new tick with the most recent variable values.
        new_tick = history_array[-number_of_variables:].copy()
        # NOTE(review): one 0 is prepended per iteration but only
        # number_of_variables entries are dropped below, so the history
        # grows by one element per tick — confirm this offset is intended.
        history_array = np.concatenate([[0], history_array])
        for i in range(number_of_controls):
            new_tick[i] = cymodels.eval(i, history_array)
            predictions[i].append(new_tick[i])
        # replacing controls:
        # new_tick[desired_indexes] = controls[((number_of_controls - 1) * t):((number_of_controls - 1) * (t + 1))]
        # update history with current tick and remove oldest tick to enforce history length
        history_array = np.concatenate([history_array[number_of_variables:], new_tick])
    return predictions
def prepare_tick_history(tick_history):
    """Flatten a sequence of tick objects into one 1-D numpy array.

    Each element of ``tick_history`` must expose ``get_numpy_array()``.
    The original chained ``np.append`` calls, copying the whole accumulated
    array on every step (O(n^2) overall); this version gathers the arrays
    first and concatenates once (O(n)). ``np.append`` ravels both operands,
    so one ravel-and-concatenate pass reproduces the chained result exactly.
    A single-element history returns that array unchanged (not raveled),
    matching the original behaviour; an empty history still raises IndexError.
    """
    arrays = [tick.get_numpy_array() for tick in tick_history]
    result = arrays[0]  # IndexError on empty input, same as before
    if len(arrays) > 1:
        result = np.concatenate([np.ravel(a) for a in arrays])
    return result
| 37 | 139 | 0.725347 |
ace4ec2965210647f14be89f3625d0ce366da748 | 9,483 | py | Python | experiments/runs/mnist25-6-6-6-5tanh/run2.py | EI-research-group/deep-ei | c8f6f203f429deca73c08dd0d25aafa93a2ff749 | [
"MIT"
] | 8 | 2020-11-26T01:41:37.000Z | 2022-01-24T13:15:12.000Z | experiments/runs/mnist25-6-6-6-5tanh/run2.py | EI-research-group/deep-ei | c8f6f203f429deca73c08dd0d25aafa93a2ff749 | [
"MIT"
] | null | null | null | experiments/runs/mnist25-6-6-6-5tanh/run2.py | EI-research-group/deep-ei | c8f6f203f429deca73c08dd0d25aafa93a2ff749 | [
"MIT"
] | 2 | 2021-08-25T11:49:06.000Z | 2022-01-09T09:19:50.000Z | import os
from tqdm.auto import tqdm
from pathlib import Path
from random import shuffle
from math import isclose, ceil
from itertools import combinations_with_replacement
import gzip
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import skimage.transform
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import torchvision
import torchvision.transforms as transforms
from deep_ei import topology_of, ei_of_layer, sensitivity_of_layer, vector_ei_of_layer, vector_and_pairwise_ei
########### PARAMS ############
# Experiment hyper-parameters for the 25-6-6-6-5 tanh MNIST network.
BINS = 8  # histogram bins used by the EI/sensitivity estimators below
LAYERS = [(25, 6), (6, 6), (6, 6), (6, 5)]  # (in_features, out_features) per linear layer
ACTIVATION = nn.Tanh()
RUNS = 3  # NOTE(review): only run 2 is actually executed below (`for run in [2]`)
FREQUENCY = 20 # epochs per measurement
EPOCHS = 500
BATCH_SIZE = 50
print("Total Measurements: {}".format(EPOCHS / FREQUENCY))
########### Set Device ############
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dtype = torch.float32
torch.set_default_dtype(dtype)
print("Using device: {}".format(device))
# Locate (and download with curl if missing) the pickled MNIST dataset,
# expected two directories above this script.
dir_path = Path().absolute()
dataset_path = dir_path.parent.parent / "data/mnist.pkl.gz"
if not dataset_path.exists():
    print('Downloading dataset with curl ...')
    if not dataset_path.parent.exists():
        os.mkdir(dataset_path.parent)
    url = 'http://ericjmichaud.com/downloads/mnist.pkl.gz'
    os.system('curl -L {} -o {}'.format(url, dataset_path))
print('Download failed') if not dataset_path.exists() else print('Dataset acquired')
f = gzip.open(dataset_path, 'rb')
mnist = pickle.load(f)
f.close()
print('Loaded data to variable `mnist`')
# Keep only digits 0-4 and shrink each one-hot label from 10 to 5 classes.
mnist0_4 = list(filter(lambda t: t[1].argmax() < 5, mnist))
for i, t in enumerate(mnist0_4):
    idx = t[1].argmax()
    new_onehot = np.zeros(5,)
    new_onehot[idx] = 1.0
    mnist0_4[i] = (t[0], new_onehot)
print('Reduced dataset to only 0-4 examples')
# ///////////////////////////////////////////
# DEFINE `Dataset`
# ///////////////////////////////////////////
class MNISTDataset(Dataset):
    """MNIST Digits Dataset."""
    def __init__(self, data, width=5, transform=None):
        """We save the dataset images as torch.tensor since saving
        the dataset in memory inside a `Dataset` object as a
        python list or a numpy array causes a multiprocessiing-related
        memory leak."""
        # ``data`` is a sequence of (image, one-hot-label) pairs.
        self.images, self.labels = zip(*data)
        self.images = torch.from_numpy(np.array(self.images)).to(dtype)  # uses module-level ``dtype``
        self.labels = torch.tensor(self.labels).to(dtype)
        self.width = width  # side length the 28x28 image is resized to
        self.transform = transform
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Resize the flat 784-pixel image down to (width, width), then flatten.
        image, label = self.images[idx], self.labels[idx]
        image = skimage.transform.resize(image.reshape((28, 28)), (self.width, self.width))
        if self.transform:
            image, label = self.transform((image, label))
        return image.flatten(), label
# Split 0-4 examples into 25k train / remainder test; the *_metrics_loader
# variants iterate the same data unshuffled, for deterministic evaluation.
training_data = MNISTDataset(mnist0_4[:25000], width=5)
testing_data = MNISTDataset(mnist0_4[25000:], width=5)
training_loader = torch.utils.data.DataLoader(training_data,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True)
training_metrics_loader = torch.utils.data.DataLoader(training_data,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)
testing_loader = torch.utils.data.DataLoader(testing_data,
                                             batch_size=BATCH_SIZE,
                                             shuffle=False)
testing_metrics_loader = torch.utils.data.DataLoader(testing_data,
                                                     batch_size=BATCH_SIZE,
                                                     shuffle=False)
batches_per_epoch = ceil(len(training_data) / BATCH_SIZE)
print("Batches / epoch: {}".format(batches_per_epoch))
print("Given frequency: {}".format(FREQUENCY))
print("Adjusted frequency: {}".format(1 / batches_per_epoch * round(FREQUENCY * batches_per_epoch)))
########### Weight Initializer ############
# Name -> torch initializer mapping. NOTE(review): this dict is not consulted
# by weight_initializer below, which hard-codes only the 'paper' scheme.
initializers = {
    'kaiming': None, # (default)
    'xavier_uniform': nn.init.xavier_uniform_,
    'xavier_normal': nn.init.xavier_normal_,
    'paper': nn.init.uniform_
}
def weight_initializer(name):
    """Return a function usable with ``nn.Module.apply`` for scheme ``name``.

    Only the 'paper' scheme does anything: uniform weights in
    [-1/sqrt(fan_in), 1/sqrt(fan_in)] for ``nn.Linear`` layers. Any other
    name yields a no-op initializer (the framework default is kept).
    """
    def init_weights(module):
        # Guard clauses: only the 'paper' scheme touches Linear layers.
        if name != 'paper':
            return
        if not isinstance(module, nn.Linear):
            return
        bound = 1 / np.sqrt(module.in_features)
        nn.init.uniform_(module.weight, a=-bound, b=bound)
    return init_weights
for run in [2]:
    ######### Set Seeds ##########
    # Fix every RNG so the run is reproducible; the run index is the seed.
    torch.set_default_dtype(torch.float32)
    np.random.seed(run)
    torch.manual_seed(run)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    print("STARTING RUN {}".format(run))
    output_dir = Path("run{}-frames".format(run))
    if not output_dir.exists():
        output_dir.mkdir()
    # Build the MLP: a bias-free Linear + ACTIVATION pair per (in, out) entry.
    ls = []
    for (in_n, out_n) in LAYERS:
        ls.append(nn.Linear(in_n, out_n, bias=False))
        ls.append(ACTIVATION)
    network = nn.Sequential(*ls).to(device)
    network.apply(weight_initializer('paper'))
    print(network)
    top = topology_of(network, input=torch.zeros((1, LAYERS[0][0])).to(device))
    optimizer = torch.optim.SGD(network.parameters(), lr=1e-2)
    loss_fn = torch.nn.MSELoss(reduction="sum")
    def compute_metrics():
        """Compute train/test loss+accuracy and per-layer EI/sensitivity."""
        outof = 0
        training_loss = 0
        training_accuracy = 0
        with torch.no_grad():
            for sample, target in training_metrics_loader:
                output = network(sample.to(device))
                _, pred = torch.max(output, 1)
                _, answer = torch.max(target.to(device), 1)
                training_accuracy += (pred == answer).sum().item()
                training_loss += loss_fn(output, target.to(device))
                outof += len(target)
        training_loss = float(training_loss / outof)
        training_accuracy = training_accuracy / outof
        outof = 0
        testing_loss = 0
        testing_accuracy = 0
        with torch.no_grad():
            for sample, target in testing_metrics_loader:
                output = network(sample.to(device))
                _, pred = torch.max(output, 1)
                _, answer = torch.max(target.to(device), 1)
                testing_accuracy += (pred == answer).sum().item()
                testing_loss += loss_fn(output, target.to(device))
                outof += len(target)
        testing_loss = float(testing_loss / outof)
        testing_accuracy = testing_accuracy / outof
        metrics = {
            'training_loss': training_loss,
            'training_accuracy': training_accuracy,
            'testing_loss': testing_loss,
            'testing_accuracy': testing_accuracy
        }
        # for (start_i, end_i) in combinations_with_replacement(range(len(LAYERS)), 2):
        # Only each layer against itself (start == end) is measured here.
        for i in range(len(LAYERS)):
            start_i = end_i = i
            start_l = ls[start_i * 2]  # layers alternate (Linear, activation), hence * 2
            end_l = ls[end_i * 2]
            if i == 0:
                in_r = (0, 1)  # first layer sees raw pixel inputs in [0, 1]
            else:
                in_r = (-1, 1)  # later layers see tanh outputs in [-1, 1]
            vector_ei, pairwise_ei = vector_and_pairwise_ei(end_l, top,
                                                            samples=int(1e7),
                                                            in_layer=start_l,
                                                            in_range=in_r,
                                                            in_bins=BINS,
                                                            out_range=(-1, 1),
                                                            out_bins=BINS,
                                                            activation=ACTIVATION,
                                                            device=device)
            pairwise_sensitivity = sensitivity_of_layer(end_l, top,
                                                        samples=5000,
                                                        in_layer=start_l,
                                                        in_range=in_r,
                                                        in_bins=BINS,
                                                        out_range=(-1, 1),
                                                        out_bins=BINS,
                                                        activation=ACTIVATION,
                                                        device=device)
            metrics[f"pairwise-ei:{start_i}-{end_i}"] = pairwise_ei
            metrics[f"pairwise-sensitivity:{start_i}-{end_i}"] = pairwise_sensitivity
            metrics[f"vector-ei:{start_i}-{end_i}"] = vector_ei
        return metrics
    num_batches = 0
    for epoch in tqdm(range(EPOCHS)):
        #######################################
        #        Compute Measures             #
        #######################################
        for sample, target in training_loader:
            if isclose((num_batches / batches_per_epoch) % FREQUENCY, 0, abs_tol=1e-7):
                #######################################
                #        Compute Measures             #
                #######################################
                # Snapshot metrics + model/optimizer state every FREQUENCY epochs.
                metrics = compute_metrics()
                metrics['batches'] = num_batches
                metrics['epochs'] = num_batches / batches_per_epoch
                metrics['model'] = network.state_dict()
                metrics['optimizer'] = optimizer.state_dict()
                name = output_dir / "batchnum-{}.frame".format(num_batches)
                torch.save(metrics, name)
            optimizer.zero_grad()
            batch_loss = loss_fn(network(sample.to(device)), target.to(device))
            batch_loss.backward()
            optimizer.step()
            num_batches += 1
| 37.932 | 110 | 0.555415 |
ace4ec933176ed6c774fb47dd8b20ae266600745 | 4,818 | py | Python | samples/python/ecalhdf5_rw/ecalhdf5_rw.py | SirArep/ecal | 9860efeb4ce0ef168630136d33947da02ecf0490 | [
"Apache-2.0"
] | 493 | 2019-06-03T13:30:46.000Z | 2022-03-26T16:18:57.000Z | samples/python/ecalhdf5_rw/ecalhdf5_rw.py | SirArep/ecal | 9860efeb4ce0ef168630136d33947da02ecf0490 | [
"Apache-2.0"
] | 249 | 2019-06-04T09:01:24.000Z | 2022-03-31T23:37:39.000Z | samples/python/ecalhdf5_rw/ecalhdf5_rw.py | SirArep/ecal | 9860efeb4ce0ef168630136d33947da02ecf0490 | [
"Apache-2.0"
] | 114 | 2019-06-05T00:04:25.000Z | 2022-03-22T10:22:04.000Z | # ========================= eCAL LICENSE =================================
#
# Copyright (C) 2016 - 2019 Continental Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ========================= eCAL LICENSE =================================
import sys
import random
import ecal.measurement.hdf5 as ecalhdf5
def main():
    """
    eCALHDF5 Write & Read Sample
    Step 1: create hdf5 file and add some data
    Step 2: read data from newly created file
    """
    ENTRY_COUNT = 30
    MAX_ENTRY_DATA_SIZE = 32767
    # Step 1: create hdf5 file and add some data
    # File properties
    output_dir = "ecalhdf5_rw_meas_folder"
    file_name = "measurement"
    channels = []
    channels.append(ecalhdf5.Channel("Image", b"Image description", "Image type"))
    channels.append(ecalhdf5.Channel("Status", b"Status description", "Status type"))
    max_size_per_file = 500 # MB
    # Access mode 1 = create/write (see ecal.measurement.hdf5 bindings).
    meas = ecalhdf5.Meas(output_dir, 1)
    meas.set_file_base_name(file_name)
    meas.set_max_size_per_file(max_size_per_file)
    for channel in channels:
        meas.set_channel_description(channel.name, channel.description)
        meas.set_channel_type(channel.name, channel.type)
    print("Creating {}/{}.hdf5 \n".format(output_dir, file_name))
    # Generate timestamps and random data and write them
    timestamp = 0
    for entry_no in range(ENTRY_COUNT):
        timestamp += 100000 # µs
        data_size = random.randint(1, MAX_ENTRY_DATA_SIZE)
        data = ""
        for i in range(data_size):
            data = data + chr(random.randint(0, 255))
        # randomly select to which channel to add the generated data
        index = random.randint(0, MAX_ENTRY_DATA_SIZE) % len(channels)
        channel_to_write_to = channels[index].name
        print(" Entry {}\t{}\ttimestamp: {}\tsize[b]: {}".format(entry_no, channel_to_write_to, timestamp, data_size))
        # Write entry to file
        meas.add_entry_to_file(str.encode(data), timestamp, timestamp, channel_to_write_to)
    print("\nTotal entries written: {}\n".format(ENTRY_COUNT))
    if meas.is_ok() == False:
        print("Write error!")
        sys.exit()
    meas.close()
    # Step 2: read data from newly created file
    print("******************************************************************************\n")
    # Access mode 0 = read.
    meas = ecalhdf5.Meas(output_dir, 0)
    # Alternately single file can be used
    # meas = ecalhdf5.Meas(output_dir + "/" + file_name + ".hdf5", 0)
    # Both file and directory path are supported
    if meas.is_ok() == False:
        print("Read error!")
        sys.exit()
    print("Reading {}\n".format(output_dir))
    print(" File version: {}".format(meas.get_file_version()))
    print(" Channels No: {}\n".format(len(meas.get_channel_names())))
    print(" Channels: \n")
    channel_names_set = meas.get_channel_names()
    for channel_name in channel_names_set:
        print(" Name: {}".format(channel_name))
        print(" Type: {}".format(meas.get_channel_type(channel_name)))
        print(" Description: {}".format(meas.get_channel_description(channel_name)))
        print(" Min timestamp: {}".format(meas.get_min_timestamp(channel_name)))
        print(" Max timestamp: {}".format(meas.get_max_timestamp(channel_name)))
        entries_info_read = meas.get_entries_info(channel_name)
        print(" Entries count: {}\n".format(len(entries_info_read)))
        """
        # Alternately range entries info can be used, for example get the entries from the first timestamp until "the middle"
        min_timestamp = meas.get_min_timestamp(channel_name)
        max_timestamp = meas.get_max_timestamp(channel_name)
        middle_timestamp = min_timestamp / 2 + max_timestamp / 2
        entries_info_read = meas.get_entries_info_range(channel_name, min_timestamp, middle_timestamp)
        print(" Entries count in timestamp interval [{}; {}]: {}\n\n".format(min_timestamp, middle_timestamp, len(entries_info_read)))
        print(" Reading entries info(timestamp ordered): \n")
        """
        for entry_read in entries_info_read:
            data_size = meas.get_entry_data_size(entry_read['id'])
            print(" Entry {}\tsnd_timestamp: {}\trcv_timestamp: {}\tsize[bytes]: {}".format(entry_read['id'], entry_read['snd_timestamp'], entry_read['rcv_timestamp'], data_size))
            entry_data = meas.get_entry_data(entry_read['id'])
    print("")
    meas.close()
# Run the write/read demo only when executed as a script.
if __name__ == "__main__":
    main()
| 35.167883 | 176 | 0.67144 |
ace4ed988c9fd9bdc9adfc6537d42c8ff45d1957 | 2,963 | py | Python | server/weaverbird/backends/sql_translator/steps/rank.py | davinov/weaverbird | 3f907f080729ba70be8872d6c5ed0fdcec9b8a9a | [
"BSD-3-Clause"
] | 54 | 2019-11-20T15:07:39.000Z | 2022-03-24T22:13:51.000Z | server/weaverbird/backends/sql_translator/steps/rank.py | ToucanToco/weaverbird | 7cbd3cc612437a876470cc872efba69526694d62 | [
"BSD-3-Clause"
] | 786 | 2019-10-20T11:48:37.000Z | 2022-03-23T08:58:18.000Z | server/weaverbird/backends/sql_translator/steps/rank.py | davinov/weaverbird | 3f907f080729ba70be8872d6c5ed0fdcec9b8a9a | [
"BSD-3-Clause"
] | 10 | 2019-11-21T10:16:16.000Z | 2022-03-21T10:34:06.000Z | from distutils import log
from weaverbird.backends.sql_translator.steps.utils.query_transformation import (
build_selection_query,
)
from weaverbird.backends.sql_translator.types import (
SQLPipelineTranslator,
SQLQuery,
SQLQueryDescriber,
SQLQueryExecutor,
SQLQueryRetriever,
)
from weaverbird.pipeline.steps import RankStep
def translate_rank(
    step: RankStep,
    query: SQLQuery,
    index: int,
    sql_query_retriever: SQLQueryRetriever = None,
    sql_query_describer: SQLQueryDescriber = None,
    sql_query_executor: SQLQueryExecutor = None,
    sql_translate_pipeline: SQLPipelineTranslator = None,
    subcall_from_other_pipeline_count: int = None,
) -> SQLQuery:
    """Translate a Rank pipeline step into an SQL CTE named RANK_STEP_<index>.

    Appends a RANK()/DENSE_RANK() window column (named ``<value_col>_RANK``
    unless the step provides ``new_column_name``), partitioned by
    ``step.groupby`` when present, and registers the new column with the
    metadata manager. The extra ``sql_*`` parameters are part of the shared
    translator-step signature and are unused here.
    """
    query_name = f'RANK_STEP_{index}'
    log.debug(
        '############################################################'
        f'query_name: {query_name}\n'
        '------------------------------------------------------------'
        f'step: {step}\n'
        f'query.transformed_query: {query.transformed_query}\n'
        f'query.metadata_manager.query_metadata: {query.metadata_manager.retrieve_query_metadata()}\n'
    )
    rank_mode = "DENSE_RANK()" if step.method == "dense" else "RANK()"
    step.new_column_name = (
        f"{step.value_col}_RANK" if step.new_column_name is None else step.new_column_name
    )
    # the rank query
    rank_query: str = ""
    order_by_query: str = ""
    if len(step.groupby) > 0:
        rank_query = (
            f", ({rank_mode} OVER (PARTITION BY {', '.join(step.groupby)} "
            f"ORDER BY {step.value_col} {step.order})) AS {step.new_column_name}"
        )
        order_on_groupby = ' ASC, '.join(step.groupby)
        if len(step.groupby) > 0:  # NOTE(review): always true inside this branch
            order_on_groupby += ' ASC'
        order_by_query = f"ORDER BY {step.new_column_name} ASC, {order_on_groupby}"
    else:
        rank_query = (
            f", ({rank_mode} OVER ("
            f"ORDER BY {step.value_col} {step.order})) AS {step.new_column_name}"
        )
        order_by_query = f"ORDER BY {step.new_column_name} ASC"
    final_query = (
        f" (SELECT {query.metadata_manager.retrieve_query_metadata_columns_as_str()}"
        f"{rank_query}"
        f" FROM {query.query_name} {order_by_query})"
    )
    # we add the column to the metadata
    query.metadata_manager.add_query_metadata_column(step.new_column_name, 'int')
    new_query = SQLQuery(
        query_name=query_name,
        transformed_query=f"{query.transformed_query}, {query_name} AS {final_query}",
        selection_query=build_selection_query(
            query.metadata_manager.retrieve_query_metadata_columns(), query_name
        ),
        metadata_manager=query.metadata_manager,
    )
    log.debug(
        '------------------------------------------------------------'
        f'SQLquery: {new_query.transformed_query}'
        '############################################################'
    )
    return new_query
| 33.670455 | 102 | 0.60513 |
ace4eebda0cf4144fcc5b2e9fd5d2eaded7df024 | 245 | py | Python | OrientacaoObjetos/Composicao/Main.py | DjCod3r/PythonScripts | 95e70ebb81d2bc37b0283daff8ee723c5d2a382c | [
"MIT"
] | null | null | null | OrientacaoObjetos/Composicao/Main.py | DjCod3r/PythonScripts | 95e70ebb81d2bc37b0283daff8ee723c5d2a382c | [
"MIT"
] | null | null | null | OrientacaoObjetos/Composicao/Main.py | DjCod3r/PythonScripts | 95e70ebb81d2bc37b0283daff8ee723c5d2a382c | [
"MIT"
] | null | null | null | from classEndereco import Endereco , Cliente
# Composition demo: each Cliente owns an Endereco created via add_endereco.
cliente1 = Cliente('Joao', 25)
cliente1.add_endereco('Sao Paulo', 'SP')
cliente2 = Cliente('Maria', 20)
cliente2.add_endereco('Rio de Janeiro', 'RJ')
# NOTE(review): cliente2 is created but never displayed afterwards.
print(cliente1.nome)
cliente1.mostra_endereco() | 24.5 | 45 | 0.755102 |
ace4ef55257ceca875871356b81e3eac982216fb | 3,470 | py | Python | Virus/settings.py | yyy624297803/Virus_git | 6c125f909131099355f3d40bd2e4cb514ccebcde | [
"Apache-2.0"
] | 1 | 2021-08-30T03:18:57.000Z | 2021-08-30T03:18:57.000Z | Virus/settings.py | yyy624297803/Virus_git | 6c125f909131099355f3d40bd2e4cb514ccebcde | [
"Apache-2.0"
] | null | null | null | Virus/settings.py | yyy624297803/Virus_git | 6c125f909131099355f3d40bd2e4cb514ccebcde | [
"Apache-2.0"
] | null | null | null | """
Django settings for Virus project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import pymysql
# Let Django's MySQLdb-based backend use the pure-Python pymysql driver.
pymysql.install_as_MySQLdb()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control — move it to an
# environment variable and rotate it before any deployment.
SECRET_KEY = 'p8xl*fbjb36z-*$bb%5+z_b#w-ji92lz6w&-i$!il=#t*@907%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Virus_s.apps.VirusSConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Virus.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Virus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): database credentials are hard-coded in source — read them
# from environment variables instead of committing them.
DATABASES = {
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # }
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'Virus',
        'USER': 'root',
        'PASSWORD': '990123',
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
] | 25.703704 | 91 | 0.67781 |
ace4ef71b6e7c3ae0a3fc5b6baa8c879709ed1b8 | 290 | py | Python | Intensivo-Python/Cap-8/Exer-Cap-8.10.py | RodrigoTAbreu/Python-3 | 9bf0578c1ed52283c8d8516a9052557bde038947 | [
"MIT"
] | null | null | null | Intensivo-Python/Cap-8/Exer-Cap-8.10.py | RodrigoTAbreu/Python-3 | 9bf0578c1ed52283c8d8516a9052557bde038947 | [
"MIT"
] | null | null | null | Intensivo-Python/Cap-8/Exer-Cap-8.10.py | RodrigoTAbreu/Python-3 | 9bf0578c1ed52283c8d8516a9052557bde038947 | [
"MIT"
] | null | null | null | magicos = ['josé','pedro','guilherme','antonio']
magic = []  # NOTE(review): unused; apparently a leftover from the exercise
def show_magicos(magicos):
    """Print every magician's name, one per indented line."""
    for nome in magicos:
        print('\t {}'.format(nome))
def make_great(magicos):
    """Prefix every name in ``magicos`` with 'O grande ', in place.

    Bug fix: the original appended to the list while iterating over it, so
    each append extended the iteration and the loop never terminated.
    Rewriting each slot by index visits every original entry exactly once.
    """
    for i, magico in enumerate(magicos):
        magicos[i] = 'O grande ' + magico
# Exercise 8-10: make every magician "great", then display the list below.
make_great(magicos)
show_magicos(magicos) | 24.166667 | 48 | 0.672414 |
ace4ef8ae9ad9bec15278725126484fd176c13cc | 4,525 | py | Python | dev_course/dl2/exp/nb_06.py | nebgor/fastai_docs | 9daa76023b701df07557332ef5e37d12f6e78828 | [
"Apache-2.0"
] | null | null | null | dev_course/dl2/exp/nb_06.py | nebgor/fastai_docs | 9daa76023b701df07557332ef5e37d12f6e78828 | [
"Apache-2.0"
] | null | null | null | dev_course/dl2/exp/nb_06.py | nebgor/fastai_docs | 9daa76023b701df07557332ef5e37d12f6e78828 | [
"Apache-2.0"
] | null | null | null |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/06_cuda_cnn_hooks_init.ipynb
from exp.nb_05b import *
torch.set_num_threads(2)
def normalize_to(train, valid):
    """Normalize both sets using the *training* set's mean and std."""
    mean, std = train.mean(), train.std()
    return normalize(train, mean, std), normalize(valid, mean, std)
class Lambda(nn.Module):
    """Wrap an arbitrary callable so it can be used as an nn.Module layer."""

    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):
        # Delegate the whole forward pass to the stored callable.
        return self.func(x)
def flatten(x):
    """Collapse every non-batch dimension of ``x`` into a single one."""
    batch = x.shape[0]
    return x.view(batch, -1)
class CudaCallback(Callback):
    """Training callback moving the model (at fit start) and every batch to the GPU."""
    def begin_fit(self): self.model.cuda()
    def begin_batch(self): self.run.xb,self.run.yb = self.xb.cuda(),self.yb.cuda()
class BatchTransformXCallback(Callback):
    """Callback applying ``tfm`` to the input batch before the forward pass."""
    # Callbacks are ordered by _order; 2 presumably runs after the defaults
    # (e.g. after device placement) — confirm against the Runner sorting.
    _order=2
    def __init__(self, tfm): self.tfm = tfm
    def begin_batch(self): self.run.xb = self.tfm(self.xb)
def view_tfm(*size):
    """Return a batch transform that reshapes inputs to ``(-1, *size)``."""
    def _inner(x):
        return x.view(-1, *size)
    return _inner
def get_runner(model, data, lr=0.6, cbs=None, opt_func=None, loss_func = F.cross_entropy):
    """Build a (Learner, Runner) pair for ``model`` on ``data``.

    ``opt_func`` defaults to SGD; ``cbs`` is a (possibly None) list of
    callback factories passed through ``listify``.
    """
    if opt_func is None: opt_func = optim.SGD
    opt = opt_func(model.parameters(), lr=lr)
    learn = Learner(model, opt, loss_func, data)
    return learn, Runner(cb_funcs=listify(cbs))
def children(m):
    """Return the immediate child modules of ``m`` as a list."""
    return [*m.children()]
class Hook():
    """Own a forward hook on module ``m``; ``f`` is invoked as f(self, mod, inp, out)."""

    def __init__(self, m, f):
        self.hook = m.register_forward_hook(partial(f, self))

    def remove(self):
        # Detach the hook from the module; safe to call more than once.
        self.hook.remove()

    def __del__(self):
        self.remove()
def append_stats(hook, mod, inp, outp):
    """Forward-hook callback recording the output's mean and std on the hook.

    Lazily creates ``hook.stats`` as a (means, stds) pair of lists.
    """
    stats = getattr(hook, 'stats', None)
    if stats is None:
        stats = ([], [])
        hook.stats = stats
    stats[0].append(outp.data.mean())
    stats[1].append(outp.data.std())
class ListContainer():
    """List-like wrapper supporting fancy indexing (bool masks, index lists).

    Relies on ``listify`` (star-imported from exp.nb_05b) to coerce ``items``
    into a plain list.
    """
    def __init__(self, items): self.items = listify(items)
    def __getitem__(self, idx):
        # int/slice: defer to plain list indexing
        if isinstance(idx, (int,slice)): return self.items[idx]
        if isinstance(idx[0],bool):
            assert len(idx)==len(self) # bool mask
            return [o for m,o in zip(idx,self.items) if m]
        # otherwise treat idx as a collection of integer indices
        return [self.items[i] for i in idx]
    def __len__(self): return len(self.items)
    def __iter__(self): return iter(self.items)
    def __setitem__(self, i, o): self.items[i] = o
    def __delitem__(self, i): del(self.items[i])
    def __repr__(self):
        # Show at most the first 10 items; replace the closing ']' with '...]'
        # when the container is longer than that.
        res = f'{self.__class__.__name__} ({len(self)} items)\n{self.items[:10]}'
        if len(self)>10: res = res[:-1]+ '...]'
        return res
from torch.nn import init
class Hooks(ListContainer):
    """A container of Hook objects, usable as a context manager."""

    def __init__(self, ms, f):
        super().__init__([Hook(m, f) for m in ms])

    def __enter__(self, *args):
        return self

    def __exit__(self, *args):
        self.remove()

    def __delitem__(self, i):
        # Detach the hook before dropping the reference to it.
        self[i].remove()
        super().__delitem__(i)

    def remove(self):
        for hook in self:
            hook.remove()
def get_cnn_layers(data, nfs, layer, **kwargs):
    """Build conv layers (kernel 5 for the first, 3 after) plus a
    pool / flatten / linear classification head sized to ``data.c``."""
    sizes = [1] + nfs
    body = [layer(sizes[i], sizes[i + 1], 5 if i == 0 else 3, **kwargs)
            for i in range(len(sizes) - 1)]
    head = [nn.AdaptiveAvgPool2d(1), Lambda(flatten), nn.Linear(sizes[-1], data.c)]
    return body + head
def conv_layer(ni, nf, ks=3, stride=2, **kwargs):
    """A Conv2d (padding ks//2, i.e. 'same'-style) followed by a GeneralRelu."""
    conv = nn.Conv2d(ni, nf, ks, padding=ks // 2, stride=stride)
    return nn.Sequential(conv, GeneralRelu(**kwargs))
class GeneralRelu(nn.Module):
    """ReLU variant with optional leak slope, constant shift, and max clamp."""

    def __init__(self, leak=None, sub=None, maxv=None):
        super().__init__()
        self.leak, self.sub, self.maxv = leak, sub, maxv

    def forward(self, x):
        out = F.relu(x) if self.leak is None else F.leaky_relu(x, self.leak)
        # The shift and clamp mutate the freshly-created activation tensor.
        if self.sub is not None:
            out.sub_(self.sub)
        if self.maxv is not None:
            out.clamp_max_(self.maxv)
        return out
def init_cnn(m, uniform=False):
    """Kaiming-initialize (with a=0.1) the first layer of every nn.Sequential
    child of ``m`` and zero its bias."""
    init_fn = init.kaiming_uniform_ if uniform else init.kaiming_normal_
    for child in m:
        if not isinstance(child, nn.Sequential):
            continue
        init_fn(child[0].weight, a=0.1)
        child[0].bias.data.zero_()
def get_cnn_model(data, nfs, layer, **kwargs):
    """Assemble the layer list from get_cnn_layers into one nn.Sequential."""
    layers = get_cnn_layers(data, nfs, layer, **kwargs)
    return nn.Sequential(*layers)
def get_learn_run(nfs, data, lr, layer, cbs=None, opt_func=None, uniform=False, **kwargs):
    """Convenience: build a CNN, initialize it, and wrap it in (Learner, Runner)."""
    model = get_cnn_model(data, nfs, layer, **kwargs)
    init_cnn(model, uniform=uniform)
    return get_runner(model, data, lr=lr, cbs=cbs, opt_func=opt_func)
from IPython.display import display, Javascript
def nb_auto_export():
    """Save the current notebook and re-export it to a script.

    Emits a Javascript snippet that saves the notebook and then runs
    ``notebook2script.py`` on it through the Jupyter kernel.

    Bug fix: the original built the export command string ``s`` but called
    ``ip.kernel.execute()`` with no argument, so the export never actually
    ran; the command is now passed to ``execute``.
    """
    display(Javascript("""{
const ip = IPython.notebook
if (ip) {
    ip.save_notebook()
    console.log('a')
    const s = `!python notebook2script.py ${ip.notebook_name}`
    if (ip.kernel) {
        console.log(s)
        ip.kernel.execute(s)
    }
}
}"""))
ace4ef9554d534189df964748630834219be102b | 961 | py | Python | lib/hydroqc/winter_credit/timerange.py | KerberosMorphy/webtio-hydroqc-addon | 7db4fbb4e752ba9c340ab4bb96f30660da64b5f7 | [
"MIT"
] | null | null | null | lib/hydroqc/winter_credit/timerange.py | KerberosMorphy/webtio-hydroqc-addon | 7db4fbb4e752ba9c340ab4bb96f30660da64b5f7 | [
"MIT"
] | null | null | null | lib/hydroqc/winter_credit/timerange.py | KerberosMorphy/webtio-hydroqc-addon | 7db4fbb4e752ba9c340ab4bb96f30660da64b5f7 | [
"MIT"
] | null | null | null | """Class describing an interval of time."""
class TimeRange:
    """An interval of time that may be flagged as a critical period."""

    def __init__(self, start, end, is_critical):
        """Store the interval bounds and its critical status."""
        self._start_date = start
        self._end_date = end
        self._is_critical = is_critical

    @property
    def start_date(self):
        """Beginning of the interval."""
        return self._start_date

    @property
    def end_date(self):
        """End of the interval."""
        return self._end_date

    @property
    def is_critical(self):
        """True when the interval is a critical period."""
        return self._is_critical

    def __repr__(self):
        """Readable representation; critical ranges are flagged."""
        name = self.__class__.__name__
        if self.is_critical:
            return f"<{name} - {self.start_date} - critical>"
        return f"<{name} - {self.start_date}>"
| 27.457143 | 84 | 0.604579 |
ace4efc6beaa750475e58023c0c3bee1613f9752 | 28,459 | py | Python | main/atlasRoutes.py | Splendens/atlas_biodiv_pdl | eff4bcc9193b76462ede0365b9faec3e0706d5d8 | [
"BSD-2-Clause"
] | 3 | 2018-07-31T14:30:18.000Z | 2020-11-21T06:43:18.000Z | main/atlasRoutes.py | Splendens/atlas_biodiv_pdl | eff4bcc9193b76462ede0365b9faec3e0706d5d8 | [
"BSD-2-Clause"
] | null | null | null | main/atlasRoutes.py | Splendens/atlas_biodiv_pdl | eff4bcc9193b76462ede0365b9faec3e0706d5d8 | [
"BSD-2-Clause"
] | 2 | 2018-11-23T10:00:30.000Z | 2018-11-23T22:33:11.000Z |
# -*- coding:utf-8 -*-
from flask import render_template, redirect, abort, url_for
from configuration import config
#from datetime import datetime
#from flask_weasyprint import HTML, render_pdf
from modeles.repositories import (
vmTaxonsRepository, vmObservationsRepository, vmAltitudesRepository,
vmMoisRepository, vmTaxrefRepository, vmStatsOrgaTaxonRepository,
vmCommunesRepository, vmEpciRepository, vmDepartementRepository,
vmObservationsMaillesRepository, vmMedias,
vmStatsOrgaCommRepository, vmStatsGroup2inpnCommRepository, vmStatsTaxonGroup2inpnCommRepository,
vmStatsOrgaEpciRepository, vmStatsGroup2inpnEpciRepository, vmStatsTaxonGroup2inpnEpciRepository,
vmStatsOrgaDptRepository, vmStatsGroup2inpnDptRepository, vmStatsTaxonGroup2inpnDptRepository,
vmStatsStatutTaxonCommRepository, vmStatsStatutTaxonEpciRepository, vmStatsStatutTaxonDptRepository,
vmCorTaxonAttribut, vmTaxonsMostView
)
from . import utils
from flask import Blueprint
main = Blueprint('main', __name__)
# Template settings shared by every rendered page; each view copies this
# dict and extends it with its own entries.
base_configuration = {
    'STRUCTURE': config.STRUCTURE,
    'NOM_APPLICATION': config.NOM_APPLICATION,
    'URL_APPLICATION': config.URL_APPLICATION,
    'AFFICHAGE_FOOTER': config.AFFICHAGE_FOOTER,
    'ID_GOOGLE_ANALYTICS': config.ID_GOOGLE_ANALYTICS,
    'STATIC_PAGES': config.STATIC_PAGES,
    'TAXHUB_URL': config.TAXHUB_URL if hasattr(config, 'TAXHUB_URL') else None,  # optional setting
    'GROS_JEU_DONNEES' : config.GROS_JEU_DONNEES
}
# The handlers below catch media URLs under each section's path and redirect
# them to the remote media server (config.REMOTE_MEDIAS_URL).
@main.route(
    '/espece/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def especeMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/commune/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def communeMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/epci/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def epciMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/departement/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def departementMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/liste/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def listeMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/groupe/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def groupeMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route(
    '/'+config.REMOTE_MEDIAS_PATH+'<image>',
    methods=['GET', 'POST']
)
def indexMedias(image):
    return redirect(config.REMOTE_MEDIAS_URL+config.REMOTE_MEDIAS_PATH+image)
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: latest observations, search lists and global statistics."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    # Latest observations: aggregated by grid cell or as raw points,
    # depending on configuration.
    if config.AFFICHAGE_MAILLE:
        observations = vmObservationsMaillesRepository.lastObservationsMailles(
            connection, config.NB_DAY_LAST_OBS, config.ATTR_MAIN_PHOTO
        )
    else:
        observations = vmObservationsRepository.lastObservations(
            connection, config.NB_DAY_LAST_OBS, config.ATTR_MAIN_PHOTO
        )
    # Data for the search auto-complete widgets.
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    mostViewTaxon = vmTaxonsMostView.mostViewTaxon(connection)
    stat = vmObservationsRepository.statIndex(connection)
    customStat = vmObservationsRepository.genericStat(
        connection, config.RANG_STAT
    )
    customStatMedias = vmObservationsRepository.genericStatMedias(
        connection, config.RANG_STAT
    )
    # Extend the shared template settings with home-page specific flags.
    configuration = base_configuration.copy()
    configuration.update({
        'HOMEMAP': True,
        'TEXT_LAST_OBS': config.TEXT_LAST_OBS,
        'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
        'AFFICHAGE_DERNIERES_OBS': config.AFFICHAGE_DERNIERES_OBS,
        'AFFICHAGE_EN_CE_MOMENT': config.AFFICHAGE_EN_CE_MOMENT,
        'AFFICHAGE_STAT_GLOBALES': config.AFFICHAGE_STAT_GLOBALES,
        'AFFICHAGE_RANG_STAT': config.AFFICHAGE_RANG_STAT,
        'COLONNES_RANG_STAT': config.COLONNES_RANG_STAT,
        'RANG_STAT_FR': config.RANG_STAT_FR,
        'MAP': config.MAP,
        'AFFICHAGE_INTRODUCTION': config.AFFICHAGE_INTRODUCTION,
        'AFFICHAGE_LOGOS_ORGAS': config.AFFICHAGE_LOGOS_ORGAS
    })
    connection.close()
    session.close()
    return render_template(
        'templates/index.html',
        observations=observations,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        mostViewTaxon=mostViewTaxon,
        stat=stat,
        customStat=customStat,
        customStatMedias=customStatMedias,
        configuration=configuration
    )
@main.route('/espece/<int:cd_ref>', methods=['GET', 'POST'])
def ficheEspece(cd_ref):
    """Render the species sheet for the taxon identified by *cd_ref*.

    Gathers taxonomy, phenology, media, descriptive attributes and
    observation statistics for the taxon (including its children, per the
    *Childs* repository methods) and passes them to the template.
    """
    session = utils.loadSession()
    connection = utils.engine.connect()
    cd_ref = int(cd_ref)
    taxon = vmTaxrefRepository.searchEspece(connection, cd_ref)
    statsorgataxon = vmStatsOrgaTaxonRepository.getStatsOrgaTaxonChilds(connection, cd_ref)
    months = vmMoisRepository.getMonthlyObservationsChilds(connection, cd_ref)
    synonyme = vmTaxrefRepository.getSynonymy(connection, cd_ref)
    communes = vmCommunesRepository.getCommunesObservationsChilds(
        connection, cd_ref
    )
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    taxonomyHierarchy = vmTaxrefRepository.getAllTaxonomy(session, cd_ref)
    # Media: main photo, carousel photos, audio/video and linked articles,
    # each selected by the configured media-attribute ids.
    firstPhoto = vmMedias.getFirstPhoto(
        connection, cd_ref, config.ATTR_MAIN_PHOTO
    )
    photoCarousel = vmMedias.getPhotoCarousel(
        connection, cd_ref, config.ATTR_OTHER_PHOTO
    )
    videoAudio = vmMedias.getVideo_and_audio(
        connection, cd_ref, config.ATTR_AUDIO, config.ATTR_VIDEO_HEBERGEE,
        config.ATTR_YOUTUBE, config.ATTR_DAILYMOTION, config.ATTR_VIMEO
    )
    articles = vmMedias.getLinks_and_articles(
        connection, cd_ref, config.ATTR_LIEN, config.ATTR_PDF
    )
    taxonDescription = vmCorTaxonAttribut.getAttributesTaxon(
        connection, cd_ref, config.ATTR_DESC, config.ATTR_COMMENTAIRE,
        config.ATTR_MILIEU, config.ATTR_CHOROLOGIE
    )
    orgas = vmObservationsRepository.getOrgasObservations(connection, cd_ref)
    observers = vmObservationsRepository.getObservers(connection, cd_ref)
    observationsMaille = vmObservationsMaillesRepository.getObservationsMaillesChilds(connection, cd_ref)
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'LIMIT_FICHE_LISTE_HIERARCHY': config.LIMIT_FICHE_LISTE_HIERARCHY,
        'AFFICHAGE_ORGAS_OBS_FICHEESP': config.AFFICHAGE_ORGAS_OBS_FICHEESP,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION,
        'GLOSSAIRE': config.GLOSSAIRE,
        'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
        'AFFICHAGE_SWITCHER': config.AFFICHAGE_SWITCHER,
        'AFFICHAGE_ATLAS_MAILLE_COMMUNALE': config.AFFICHAGE_ATLAS_MAILLE_COMMUNALE,
        'AFFICHAGE_ATLAS_MAILLE_CARREE': config.AFFICHAGE_ATLAS_MAILLE_CARREE,
        'AFFICHAGE_ATLAS_POINT': config.AFFICHAGE_ATLAS_POINT,
        'ZOOM_LEVEL_POINT': config.ZOOM_LEVEL_POINT,
        'LIMIT_CLUSTER_POINT': config.LIMIT_CLUSTER_POINT,
        'FICHE_ESPECE': True,
        'MAP': config.MAP
    })
    connection.close()
    session.close()
    return render_template(
        'templates/ficheEspece.html',
        taxon=taxon,
        listeTaxonsSearch=[],
        observations=[],
        cd_ref=cd_ref,
        statsorgataxon=statsorgataxon,
        months=months,
        synonyme=synonyme,
        communes=communes,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        observationsMaille=observationsMaille,
        taxonomyHierarchy=taxonomyHierarchy,
        firstPhoto=firstPhoto,
        photoCarousel=photoCarousel,
        videoAudio=videoAudio,
        articles=articles,
        taxonDescription=taxonDescription,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/commune/<insee>', methods=['GET', 'POST'])
def ficheCommune(insee):
    """Render the commune sheet for the commune identified by its INSEE code."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    listTaxons = vmTaxonsRepository.getTaxonsCommunes(connection, insee)
    listespeces = vmTaxonsRepository.getListeTaxonsCommunes(connection, insee)
    taxonProPatri = vmStatsStatutTaxonCommRepository.getNbTaxonsProPatriCommunes(connection, insee)
    statsStatutTaxonComm = vmStatsStatutTaxonCommRepository.getStatsStatutsTaxonsCommunes(connection, insee)
    infosCommune = vmCommunesRepository.infosCommune(connection, insee)
    epciCommune = vmCommunesRepository.epciCommune(connection, insee)
    commune = vmCommunesRepository.getCommuneFromInsee(connection, insee)
    statsorgataxoncomm = vmStatsOrgaCommRepository.getStatsOrgaTaxonCommChilds(connection, insee)
    statsorgadatacomm = vmStatsOrgaCommRepository.getStatsOrgaDataCommChilds(connection, insee)
    statsgroup2inpncomm = vmStatsGroup2inpnCommRepository.getStatsGroup2inpnCommChilds(connection, insee)
    statstaxongroup2inpncomm = vmStatsTaxonGroup2inpnCommRepository.getStatsTaxonGroup2inpnCommChilds(connection, insee)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    # Latest observations, grid-aggregated or individual depending on config.
    if config.AFFICHAGE_MAILLE:
        observations = vmObservationsMaillesRepository.lastObservationsCommuneMaille(
            connection, config.NB_LAST_OBS, insee
        )
    else:
        observations = vmObservationsRepository.lastObservationsCommune(
            connection, config.NB_LAST_OBS, insee
        )
    orgas = vmObservationsRepository.getOrgasCommunes(connection, insee)
    observers = vmObservationsRepository.getObserversCommunes(connection, insee)
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'NB_LAST_OBS': config.NB_LAST_OBS,
        'AFFICHAGE_ORGAS_OBS_FICHECOMM': config.AFFICHAGE_ORGAS_OBS_FICHECOMM,
        'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
        'MAP': config.MAP,
        'MYTYPE': 0,
        'PRESSION_PROSPECTION': config.PRESSION_PROSPECTION,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    return render_template(
        'templates/ficheCommune.html',
        insee=insee,
        listTaxons=listTaxons,
        listespeces=listespeces,
        taxonProPatri=taxonProPatri,
        statsStatutTaxonComm=statsStatutTaxonComm,
        infosCommune=infosCommune,
        epciCommune=epciCommune,
        referenciel=commune,
        statsorgataxoncomm=statsorgataxoncomm,
        statsorgadatacomm=statsorgadatacomm,
        statsgroup2inpncomm=statsgroup2inpncomm,
        statstaxongroup2inpncomm=statstaxongroup2inpncomm,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        observations=observations,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/epci/<nom_epci_simple>', methods=['GET', 'POST'])
def ficheEpci(nom_epci_simple):
    """Render the EPCI (inter-communal structure) sheet for *nom_epci_simple*."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    listTaxons = vmTaxonsRepository.getTaxonsEpci(connection, nom_epci_simple)
    listespeces = vmTaxonsRepository.getListeTaxonsEpci(connection, nom_epci_simple)
    taxonProPatri = vmStatsStatutTaxonEpciRepository.getNbTaxonsProPatriEpci(connection, nom_epci_simple)
    statsStatutTaxonEpci = vmStatsStatutTaxonEpciRepository.getStatsStatutsTaxonsEpci(connection, nom_epci_simple)
    infosEpci = vmEpciRepository.infosEpci(connection, nom_epci_simple)
    communesEpci = vmEpciRepository.communesEpciChilds(connection, nom_epci_simple)
    epci = vmEpciRepository.getEpciFromNomsimple(connection, nom_epci_simple)
    epciDpt = vmEpciRepository.getDptFromEpci(connection, nom_epci_simple)
    statsorgataxonepci = vmStatsOrgaEpciRepository.getStatsOrgaTaxonEpciChilds(connection, nom_epci_simple)
    statsorgadataepci = vmStatsOrgaEpciRepository.getStatsOrgaDataEpciChilds(connection, nom_epci_simple)
    statsgroup2inpnepci = vmStatsGroup2inpnEpciRepository.getStatsGroup2inpnEpciChilds(connection, nom_epci_simple)
    statstaxongroup2inpnepci = vmStatsTaxonGroup2inpnEpciRepository.getStatsTaxonGroup2inpnEpciChilds(connection, nom_epci_simple)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    # Latest observations, grid-aggregated or individual depending on config.
    if config.AFFICHAGE_MAILLE:
        observations = vmObservationsMaillesRepository.lastObservationsEpciMaille(
            connection, config.NB_LAST_OBS, nom_epci_simple
        )
    else:
        observations = vmObservationsRepository.lastObservationsEpci(
            connection, config.NB_LAST_OBS, nom_epci_simple
        )
    orgas = vmObservationsRepository.getOrgasEpci(connection, nom_epci_simple)
    observers = vmObservationsRepository.getObserversEpci(connection, nom_epci_simple)
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'NB_LAST_OBS': config.NB_LAST_OBS,
        'AFFICHAGE_ORGAS_OBS_FICHECOMM': config.AFFICHAGE_ORGAS_OBS_FICHECOMM,
        'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
        'MAP': config.MAP,
        'MYTYPE': 0,
        'PRESSION_PROSPECTION': config.PRESSION_PROSPECTION,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    return render_template(
        'templates/ficheEpci.html',
        nom_epci_simple=nom_epci_simple,
        listTaxons=listTaxons,
        listespeces=listespeces,
        taxonProPatri=taxonProPatri,
        statsStatutTaxonEpci=statsStatutTaxonEpci,
        infosEpci=infosEpci,
        communesEpci=communesEpci,
        referenciel=epci,
        epciDpt=epciDpt,
        statsorgataxonepci=statsorgataxonepci,
        statsorgadataepci=statsorgadataepci,
        statsgroup2inpnepci=statsgroup2inpnepci,
        statstaxongroup2inpnepci=statstaxongroup2inpnepci,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        observations=observations,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/departement/<num_dpt>', methods=['GET', 'POST'])
def ficheDepartement(num_dpt):
    """Render the department sheet for department number *num_dpt*.

    NOTE(review): the "latest observations" query block is commented out below
    and `observations` is not passed to the template; presumably disabled on
    purpose for large departments — confirm before re-enabling.
    """
    session = utils.loadSession()
    connection = utils.engine.connect()
    listTaxons = vmTaxonsRepository.getTaxonsDpt(connection, num_dpt)
    listespeces = vmTaxonsRepository.getListeTaxonsDpt(connection, num_dpt)
    taxonProPatri = vmStatsStatutTaxonDptRepository.getNbTaxonsProPatriDpt(connection, num_dpt)
    statsStatutTaxonDpt = vmStatsStatutTaxonDptRepository.getStatsStatutsTaxonsDpt(connection, num_dpt)
    infosDpt = vmDepartementRepository.infosDpt(connection, num_dpt)
    communesDpt = vmDepartementRepository.communesDptChilds(connection, num_dpt)
    epciDpt = vmDepartementRepository.epciDptChilds(connection, num_dpt)
    dpt = vmDepartementRepository.getDepartementFromNumdpt(connection, num_dpt)
    statsorgataxondpt = vmStatsOrgaDptRepository.getStatsOrgaTaxonDptChilds(connection, num_dpt)
    statsorgadatadpt = vmStatsOrgaDptRepository.getStatsOrgaDataDptChilds(connection, num_dpt)
    statsgroup2inpndpt = vmStatsGroup2inpnDptRepository.getStatsGroup2inpnDptChilds(connection, num_dpt)
    statstaxongroup2inpndpt = vmStatsTaxonGroup2inpnDptRepository.getStatsTaxonGroup2inpnDptChilds(connection, num_dpt)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    #if config.AFFICHAGE_MAILLE:
    #    if (config.TAILLE_MAILLES_DPT=='10'):
    #        observations = vmObservationsMaillesRepository.lastObservationsDptMaille10(
    #            connection, config.NB_LAST_OBS, num_dpt
    #        )
    #    else:
    #        observations = vmObservationsMaillesRepository.lastObservationsDptMaille(
    #            connection, config.NB_LAST_OBS, num_dpt
    #        )
    #else:
    #    observations = vmObservationsRepository.lastObservationsDpt(
    #        connection, config.NB_LAST_OBS, num_dpt
    #    )
    orgas = vmObservationsRepository.getOrgasDpt(connection, num_dpt)
    observers = vmObservationsRepository.getObserversDpt(connection, num_dpt)
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'NB_LAST_OBS': config.NB_LAST_OBS,
        'AFFICHAGE_ORGAS_OBS_FICHECOMM': config.AFFICHAGE_ORGAS_OBS_FICHECOMM,
        'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
        'TAILLE_MAILLES_DPT': config.TAILLE_MAILLES_DPT,
        'MAP': config.MAP,
        'MYTYPE': 0,
        'PRESSION_PROSPECTION': config.PRESSION_PROSPECTION,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    return render_template(
        'templates/ficheDepartement.html',
        num_dpt=num_dpt,
        listTaxons=listTaxons,
        listespeces=listespeces,
        taxonProPatri=taxonProPatri,
        statsStatutTaxonDpt=statsStatutTaxonDpt,
        infosDpt=infosDpt,
        communesDpt=communesDpt,
        epciDpt=epciDpt,
        referenciel=dpt,
        statsorgataxondpt=statsorgataxondpt,
        statsorgadatadpt=statsorgadatadpt,
        statsgroup2inpndpt=statsgroup2inpndpt,
        statstaxongroup2inpndpt=statstaxongroup2inpndpt,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        #observations=observations,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/BiodivPdL_liste_commune_<insee>.pdf')
def listeTaxonCommune_pdf(insee):
    """Generate a PDF listing the taxa of the commune identified by *insee*.

    Renders an HTML template to a string and converts it with WeasyPrint
    (render_pdf / HTML). The commented-out queries below are the parts of
    ficheCommune that are not needed for the PDF.
    """
    session = utils.loadSession()
    connection = utils.engine.connect()
    listTaxons = vmTaxonsRepository.getListeTaxonsCommunes(connection, insee)
    #infosCommune = vmCommunesRepository.infosCommune(connection, insee)
    #epciCommune = vmCommunesRepository.epciCommune(connection, insee)
    commune = vmCommunesRepository.getCommuneFromInsee(connection, insee)
    #statsorgacomm = vmStatsOrgaCommRepository.getStatsOrgaCommChilds(connection, insee)
    #statsgroup2inpncomm = vmStatsGroup2inpnCommRepository.getStatsGroup2inpnCommChilds(connection, insee)
    #statstaxongroup2inpncomm = vmStatsTaxonGroup2inpnCommRepository.getStatsTaxonGroup2inpnCommChilds(connection, insee)
    #communesSearch = vmCommunesRepository.getAllCommunes(session)
    #epciSearch = vmEpciRepository.getAllEpci(session)
    #departementSearch = vmDepartementRepository.getAllDepartement(session)
    #if config.AFFICHAGE_MAILLE:
    #    observations = vmObservationsMaillesRepository.lastObservationsCommuneMaille(
    #        connection, config.NB_LAST_OBS, insee
    #    )
    #else:
    #    observations = vmObservationsRepository.lastObservationsCommune(
    #        connection, config.NB_LAST_OBS, insee
    #    )
    #orgas = vmObservationsRepository.getOrgasCommunes(connection, insee)
    #observers = vmObservationsRepository.getObserversCommunes(connection, insee)
    configuration = base_configuration.copy()
    configuration.update({
    #    'NB_LAST_OBS': config.NB_LAST_OBS,
    #    'AFFICHAGE_ORGAS_OBS_FICHECOMM': config.AFFICHAGE_ORGAS_OBS_FICHECOMM,
    #    'AFFICHAGE_MAILLE': config.AFFICHAGE_MAILLE,
    #    'MAP': config.MAP,
    #    'MYTYPE': 0,
    #    'PRESSION_PROSPECTION': config.PRESSION_PROSPECTION,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    html = render_template(
        'static/custom/templates/listeTaxonCommune_pdf.html',
        insee=insee,
        now = (datetime.now()).strftime("%d-%m-%Y"),
        listTaxons=listTaxons,
        #infosCommune=infosCommune,
        #epciCommune=epciCommune,
        referenciel=commune,
        #statsorgacomm=statsorgacomm,
        #statsgroup2inpncomm=statsgroup2inpncomm,
        #statstaxongroup2inpncomm=statstaxongroup2inpncomm,
        #communesSearch=communesSearch,
        #epciSearch=epciSearch,
        #departementSearch=departementSearch,
        #observations=observations,
        #orgas=orgas,
        #observers=observers,
        configuration=configuration
    )  # Make a PDF straight from HTML in a string.
    return render_pdf(HTML(string=html))
@main.route('/BiodivPdL_liste_epci_<nom_epci_simple>.pdf')
def listeTaxonEpci_pdf(nom_epci_simple):
    """Generate a PDF listing the taxa of the EPCI *nom_epci_simple*."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    taxa = vmTaxonsRepository.getListeTaxonsEpci(connection, nom_epci_simple)
    epci_ref = vmEpciRepository.getEpciFromNomsimple(connection, nom_epci_simple)
    # Only the protection/heritage flags are needed for the printable list.
    configuration = base_configuration.copy()
    configuration.update({
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    # Render the HTML to a string, then convert that string to a PDF.
    rendered_html = render_template(
        'static/custom/templates/listeTaxonEpci_pdf.html',
        nom_epci_simple=nom_epci_simple,
        now=datetime.now().strftime("%d-%m-%Y"),
        listTaxons=taxa,
        referenciel=epci_ref,
        configuration=configuration
    )
    return render_pdf(HTML(string=rendered_html))
@main.route('/BiodivPdL_liste_departement_<num_dpt>.pdf')
def listeTaxonDpt_pdf(num_dpt):
    """Generate a PDF listing the taxa of department number *num_dpt*."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    taxa = vmTaxonsRepository.getListeTaxonsDpt(connection, num_dpt)
    dpt_ref = vmDepartementRepository.getDepartementFromNumdpt(connection, num_dpt)
    # Only the protection/heritage flags are needed for the printable list.
    configuration = base_configuration.copy()
    configuration.update({
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    session.close()
    connection.close()
    # Render the HTML to a string, then convert that string to a PDF.
    rendered_html = render_template(
        'static/custom/templates/listeTaxonDpt_pdf.html',
        num_dpt=num_dpt,
        now=datetime.now().strftime("%d-%m-%Y"),
        listTaxons=taxa,
        referenciel=dpt_ref,
        configuration=configuration
    )
    return render_pdf(HTML(string=rendered_html))
@main.route('/liste/<cd_ref>', methods=['GET', 'POST'])
def ficheRangTaxonomie(cd_ref):
    """Render the sheet for a taxonomic rank: child taxa list plus hierarchy."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    listTaxons = vmTaxonsRepository.getTaxonsChildsList(connection, cd_ref)
    referenciel = vmTaxrefRepository.getInfoFromCd_ref(session, cd_ref)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    taxonomyHierarchy = vmTaxrefRepository.getAllTaxonomy(session, cd_ref)
    orgas = vmObservationsRepository.getOrgasObservations(connection, cd_ref)
    observers = vmObservationsRepository.getObservers(connection, cd_ref)
    connection.close()
    session.close()
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'LIMIT_FICHE_LISTE_HIERARCHY': config.LIMIT_FICHE_LISTE_HIERARCHY,
        'AFFICHAGE_ORGAS_OBS_FICHETAXO': config.AFFICHAGE_ORGAS_OBS_FICHETAXO,
        'MYTYPE': 0,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION,
    })
    return render_template(
        'templates/ficheRangTaxonomique.html',
        listTaxons=listTaxons,
        referenciel=referenciel,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        taxonomyHierarchy=taxonomyHierarchy,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/groupe/<groupe>', methods=['GET', 'POST'])
def ficheGroupe(groupe):
    """Render the sheet for an INPN group: its taxa, observers and organisms."""
    session = utils.loadSession()
    connection = utils.engine.connect()
    groups = vmTaxonsRepository.getAllINPNgroup(connection)
    listTaxons = vmTaxonsRepository.getTaxonsGroup(connection, groupe)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    orgas = vmObservationsRepository.getGroupeOrgas(connection, groupe)
    observers = vmObservationsRepository.getGroupeObservers(connection, groupe)
    session.close()
    connection.close()
    # Copy so the page-specific keys do not leak into base_configuration.
    configuration = base_configuration.copy()
    configuration.update({
        'LIMIT_FICHE_LISTE_HIERARCHY': config.LIMIT_FICHE_LISTE_HIERARCHY,
        'AFFICHAGE_ORGAS_OBS_FICHEGROUPE': config.AFFICHAGE_ORGAS_OBS_FICHEGROUPE,
        'MYTYPE': 0,
        'AFFICHE_PATRIMONIALITE' : config.AFFICHE_PATRIMONIALITE,
        'PATRIMONIALITE': config.PATRIMONIALITE,
        'PROTECTION': config.PROTECTION
    })
    return render_template(
        'templates/ficheGroupe.html',
        listTaxons=listTaxons,
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        referenciel=groupe,
        groups=groups,
        orgas=orgas,
        observers=observers,
        configuration=configuration
    )
@main.route('/photos', methods=['GET', 'POST'])
def photos():
    """Render the photo gallery page grouped by INPN group.

    :return: rendered ``templates/galeriePhotos.html``
    """
    session = utils.loadSession()
    connection = utils.engine.connect()
    groups = vmTaxonsRepository.getINPNgroupPhotos(connection)
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    # Consistency fix: every other handler passes a copy of the shared
    # base_configuration; passing the dict itself risks accidental mutation
    # of module-level state by future template-context changes.
    configuration = base_configuration.copy()
    session.close()
    connection.close()
    return render_template(
        'templates/galeriePhotos.html',
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        groups=groups,
        configuration=configuration
    )
@main.route('/<page>', methods=['GET', 'POST'])
def get_staticpages(page):
    """Render a configured static page, or abort with 404 if unknown.

    :param page: key into ``config.STATIC_PAGES``
    """
    # Fix: validate *before* opening a DB session. Previously a session was
    # created first and abort(404) raised without closing it, leaking one
    # session per request to an unknown page.
    if page not in config.STATIC_PAGES:
        abort(404)
    static_page = config.STATIC_PAGES[page]
    session = utils.loadSession()
    communesSearch = vmCommunesRepository.getAllCommunes(session)
    epciSearch = vmEpciRepository.getAllEpci(session)
    departementSearch = vmDepartementRepository.getAllDepartement(session)
    # Consistency: pass a copy, like the other handlers, so the shared
    # base_configuration cannot be mutated through the template context.
    configuration = base_configuration.copy()
    session.close()
    return render_template(
        static_page['template'],
        communesSearch=communesSearch,
        epciSearch=epciSearch,
        departementSearch=departementSearch,
        configuration=configuration
    )
| 40.597718 | 130 | 0.740469 |
ace4f07fc75fc199b6b9ee5ff6269fd4a15f15ff | 60,763 | py | Python | mermaid/custom_pytorch_extensions.py | HastingsGreer/mermaid | bd13c5fc427eb8cd9054973a8eaaeb302078182d | [
"Apache-2.0"
] | 120 | 2019-10-29T23:53:02.000Z | 2022-03-30T02:59:58.000Z | mermaid/custom_pytorch_extensions.py | AlexanderChristgau/mermaid | ba07883cc3cb5982e4655048a434b4495cb49c6d | [
"Apache-2.0"
] | 10 | 2019-11-05T09:28:35.000Z | 2022-01-09T19:12:51.000Z | mermaid/custom_pytorch_extensions.py | AlexanderChristgau/mermaid | ba07883cc3cb5982e4655048a434b4495cb49c6d | [
"Apache-2.0"
] | 19 | 2019-11-10T13:34:39.000Z | 2022-03-13T20:30:10.000Z | """
This package implements pytorch functions for Fourier-based convolutions.
While this may not be relevant for GPU-implementations, convolutions in the spatial domain are slow on CPUs. Hence, this function should be useful for memory-intensive models that need to be run on the CPU or CPU-based computations involving convolutions in general.
.. todo::
Create a CUDA version of these convolutions functions. There is already a CUDA based FFT implementation available which could be built upon. Alternatively, spatial smoothing may be sufficiently fast on the GPU.
"""
from __future__ import print_function
from __future__ import absolute_import
# TODO
from builtins import range
from builtins import object
import torch
from torch.autograd import Function
import numpy as np
from torch.autograd import gradcheck
from .data_wrapper import USE_CUDA, FFTVal,AdaptVal, MyTensor
# if USE_CUDA:
# import pytorch_fft.fft as fft
from . import utils
def _symmetrize_filter_center_at_zero_1D(filter):
    """Mirror, in place, the upper half of a zero-centered 1D filter onto its lower half.

    Index 0 is the filter center; for even lengths the Nyquist bin (n//2) is
    left untouched. Requires a numpy array (uses negative-step slicing).
    """
    n = filter.shape[0]
    # Number of entries to mirror: (n//2 - 1) for even n, n//2 for odd n.
    m = (n - 1) // 2
    filter[1:m + 1] = filter[-1:-(m + 1):-1]
def _symmetrize_filter_center_at_zero_2D(filter):
    """Mirror, in place, a zero-centered 2D filter axis by axis (rows first, then columns).

    For each axis the entries above the center are copied, reversed, onto the
    entries below it; for even sizes the Nyquist row/column stays untouched.
    """
    rows, cols = filter.shape
    mr = (rows - 1) // 2
    filter[1:mr + 1, :] = filter[-1:-(mr + 1):-1, :]
    mc = (cols - 1) // 2
    filter[:, 1:mc + 1] = filter[:, -1:-(mc + 1):-1]
def _symmetrize_filter_center_at_zero_3D(filter):
    """Mirror, in place, a zero-centered 3D filter axis by axis (axis 0, then 1, then 2).

    For each axis the entries above the center are copied, reversed, onto the
    entries below it; for even sizes the Nyquist slice stays untouched.
    """
    d0, d1, d2 = filter.shape
    m0 = (d0 - 1) // 2
    filter[1:m0 + 1, :, :] = filter[-1:-(m0 + 1):-1, :, :]
    m1 = (d1 - 1) // 2
    filter[:, 1:m1 + 1, :] = filter[:, -1:-(m1 + 1):-1, :]
    m2 = (d2 - 1) // 2
    filter[:, :, 1:m2 + 1] = filter[:, :, -1:-(m2 + 1):-1]
def symmetrize_filter_center_at_zero(filter, renormalize=False):
    """
    Symmetrizes filter. The assumption is that the filter is already in the format for input to an FFT,
    i.e., that it has been transformed so that the center of the pixel is at index zero.

    :param filter: Input filter (in spatial domain); will be symmetrized (i.e., will change its value)
    :param renormalize: (bool) if True, will rescale the filter in place so that its entries sum to one
    :return: n/a (returns via call by reference)
    :raises ValueError: if the filter is not 1-, 2- or 3-dimensional
    """
    dim = len(filter.shape)
    if dim == 1:
        _symmetrize_filter_center_at_zero_1D(filter)
    elif dim == 2:
        _symmetrize_filter_center_at_zero_2D(filter)
    elif dim == 3:
        _symmetrize_filter_center_at_zero_3D(filter)
    else:
        raise ValueError('Only implemented for dimensions 1,2, and 3 so far')
    if renormalize:
        # Fix: the original `filter = filter / filter.sum()` only rebound the
        # local name, so the caller's array was never renormalized (the
        # function returns nothing). Divide in place instead.
        filter /= filter.sum()
def are_indices_close(loc):
    """
    This function takes a set of indices (as produced by np.where) and determines
    if they are roughly closeby. If not it returns *False* otherwise *True*.

    :param loc: Index locations as outputted by np.where
    :return: Returns if the indices are roughly closeby or not

    .. todo::
       There should be a better check for closeness of points. The implemented one is very crude.
    """
    # Crude criterion: along every axis, no index may be more than 2 away
    # from the maximum index on that axis.
    return all((abs(axis_idx - axis_idx.max())).max() <= 2 for axis_idx in loc)
def create_complex_fourier_filter(spatial_filter, sz, enforceMaxSymmetry=True, maxIndex=None, renormalize=False):
    """
    Creates a filter in the Fourier domain given a spatial array defining the filter

    :param spatial_filter: Array defining the filter.
    :param sz: Desired size of the filter in the Fourier domain.
    :param enforceMaxSymmetry: If set to *True* (default) forces the filter to be real and hence forces the filter
        in the spatial domain to be symmetric
    :param maxIndex: specifies the index of the maximum which will be used to enforceMaxSymmetry. If it is not
        defined, the maximum is simply computed
    :param renormalize: (bool) if true, the filter is renormalized to sum to one (useful for Gaussians for example)
    :return: Returns the complex coefficients for the filter in the Fourier domain and the maxIndex
    """
    # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
    sz = np.array(sz)
    if enforceMaxSymmetry:
        if maxIndex is None:
            maxIndex = np.unravel_index(np.argmax(spatial_filter), spatial_filter.shape)
        maxValue = spatial_filter[maxIndex]
        loc = np.where(spatial_filter == maxValue)
        nrOfMaxValues = len(loc[0])
        if nrOfMaxValues > 1:
            # the maximum must be (roughly) unique so the shift below is well defined
            if not are_indices_close(loc):
                raise ValueError('Cannot enforce max symmetry as maximum is not unique')
        # shift the maximum to index zero, then mirror to make the filter symmetric
        spatial_filter_max_at_zero = np.roll(spatial_filter, -np.array(maxIndex),
                                             list(range(len(spatial_filter.shape))))
        symmetrize_filter_center_at_zero(spatial_filter_max_at_zero, renormalize=renormalize)
        # the FT of a symmetric (real) kernel is real, so only the real part is kept
        if USE_CUDA:
            f_filter = create_cuda_filter(spatial_filter_max_at_zero, sz)
            ret_filter = f_filter[..., 0]  # only the real part
        else:
            f_filter = create_numpy_filter(spatial_filter_max_at_zero, sz)
            ret_filter = f_filter.real
        return ret_filter, maxIndex
    else:
        if USE_CUDA:
            # Fix: create_cuda_filter requires the target size; the original
            # call omitted `sz` and raised a TypeError on this code path.
            return create_cuda_filter(spatial_filter, sz), maxIndex
        else:
            return create_numpy_filter(spatial_filter, sz), maxIndex
def create_cuda_filter(spatial_filter, sz):
    """
    create cuda version filter, another one dimension is added to the output for computational convenience;
    besides, the output will not be the full complex result of shape (*, 2),
    where * is the shape of the input, but instead the last dimension will be halved, of size floor(Nd/2)+1
    (one-sided real FFT).

    NOTE(review): relies on torch.rfft, which was deprecated in PyTorch 1.7
    and removed in 1.8 (replaced by torch.fft.rfftn) -- confirm the pinned
    torch version before upgrading.

    :param spatial_filter: numpy array of shape N1 x ... x Nd, no batch dimension, no channel dimension
    :param sz: [N1, ..., Nd]
    :return: filter, with size [1, N1, ..., Nd-1, floor(Nd/2)+1, 2]
    """
    fftn = torch.rfft
    spatial_filter_th = torch.from_numpy(spatial_filter).float().cuda()
    # prepend a singleton batch dimension expected downstream
    spatial_filter_th = spatial_filter_th[None, ...]
    spatial_filter_th_fft = fftn(spatial_filter_th, len(sz))
    return spatial_filter_th_fft
def create_numpy_filter(spatial_filter, sz):
    """Return the (complex) n-dimensional FFT of *spatial_filter*, zero-padded/cropped to size *sz*."""
    frequency_filter = np.fft.fftn(spatial_filter, s=sz)
    return frequency_filter
# todo: maybe check if we can use rfft's here for better performance
def sel_fftn(dim):
    """
    Select the forward-FFT implementation for the active backend
    (GPU: torch.rfft, CPU: numpy.fft).

    :param dim: spatial dimension of the data
    :return: function pointer to the forward transform
    :raises ValueError: on the CPU path when dim is not 1, 2 or 3
    """
    if USE_CUDA:
        if dim not in (1, 2, 3):
            print('Warning, fft more than 3d is supported but not tested')
        # Fix: previously `f` was only assigned inside the dim-in-(1,2,3)
        # branch, so dim > 3 raised UnboundLocalError at `return f` even
        # though the warning states the transform is supported.
        return torch.rfft
    if dim == 1:
        return np.fft.fft
    if dim == 2:
        return np.fft.fft2
    if dim == 3:
        return np.fft.fftn
    raise ValueError('Only 3D cpu ifft supported')
def sel_ifftn(dim):
    """
    Select the inverse-FFT implementation for the active backend
    (GPU: torch.irfft, CPU: numpy.fft).

    :param dim: spatial dimension of the data
    :return: function pointer to the inverse transform
    :raises ValueError: on the CPU path when dim is not 1, 2 or 3
    """
    if USE_CUDA:
        if dim not in (1, 2, 3):
            print('Warning, fft more than 3d is supported but not tested')
        # Fix: unlike sel_fftn, the CUDA branch had no return statement at
        # all, so this function returned None on the GPU path; return the
        # transform explicitly (mirrors sel_fftn).
        return torch.irfft
    if dim == 1:
        return np.fft.ifft
    if dim == 2:
        return np.fft.ifft2
    if dim == 3:
        return np.fft.ifftn
    raise ValueError('Only 3D cpu ifft supported')
class FourierConvolution(Function):
"""
pyTorch function to compute convolutions in the Fourier domain: f = g*h
"""
    def __init__(self, complex_fourier_filter):
        """
        Constructor for the Fourier-based convolution

        :param complex_fourier_filter: Filter in the Fourier domain as created by *create_complex_fourier_filter*
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierConvolution, self).__init__()
        self.complex_fourier_filter = complex_fourier_filter
        if USE_CUDA:
            # the CUDA filter carries a leading singleton batch dimension
            # (added by create_cuda_filter), hence dim() - 1
            self.dim = complex_fourier_filter.dim() -1
        else:
            self.dim = len(complex_fourier_filter.shape)
        # select backend-specific forward/inverse FFT implementations once
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)
        """The filter in the Fourier domain"""
    def forward(self, input):
        """
        Performs the Fourier-based filtering

        The 3d cpu fft is not implemented in fftn; to avoid fusing with batch and channel, 3d is calculated in a loop.
        1d/2d cpu work well because fft and fft2 are built in; similarly, 1d/2d/3d gpu fft are also built in.
        In the gpu implementation, rfft is used for efficiency, which means the filter should be symmetric:
        (input_real+input_img)(filter_real+filter_img) = (input_real*filter_real-input_img*filter_img) + (input_img*filter_real+input_real*filter_img)i
        filter_img = 0, then get input_real*filter_real + (input_img*filter_real)i, i.e. ac + bci

        :param input: Image (batch x channel x spatial dims)
        :return: Filtered-image
        """
        if USE_CUDA:
            input = FFTVal(input,ini=1)
            f_input = self.fftn(input,self.dim,onesided=True)
            # drop the filter's singleton batch dimension, then broadcast it
            # over the input's batch/channel dims
            f_filter_real = self.complex_fourier_filter[0]
            f_filter_real=f_filter_real.expand_as(f_input[...,0])
            # multiplying a complex tensor by a real filter: duplicate the
            # real filter into both real and imaginary channels
            f_filter_real = torch.stack((f_filter_real,f_filter_real),-1)
            f_conv = f_input * f_filter_real
            dim_input = len(input.shape)
            dim_input_batch = dim_input-self.dim
            # signal_sizes is needed so irfft recovers the original (possibly
            # odd) spatial sizes from the one-sided spectrum
            conv_ouput_real = self.ifftn(f_conv, self.dim,onesided=True,signal_sizes=input.shape[dim_input_batch::])
            result = conv_ouput_real
            return FFTVal(result, ini=-1)
        else:
            if self.dim <3:
                # numpy fft/fft2 operate on the trailing axes, so batch and
                # channel dimensions pass through untouched
                conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) * self.complex_fourier_filter)
                result = conv_output.real # should in principle be real
            elif self.dim==3:
                # np.fft.fftn would also transform batch/channel axes, so
                # loop over them explicitly for the 3D case
                result = np.zeros(input.shape)
                for batch in range(input.size()[0]):
                    for ch in range(input.size()[1]):
                        conv_output = self.ifftn(self.fftn(input[batch,ch].detach().cpu().numpy()) * self.complex_fourier_filter)
                        result[batch,ch] = conv_output.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(result)
        # print( 'max(imag) = ' + str( (abs( conv_output.imag )).max() ) )
        # print( 'max(real) = ' + str( (abs( conv_output.real )).max() ) )
    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient with respect to the input image.

        The 3d cpu ifft is not available via ifftn here; to avoid fusing with the
        batch and channel dimensions, the 3d cpu case is computed in a loop over
        batch/channel. 1d/2d cpu work directly (ifft and ifft2 are built in), and
        1d/2d/3d gpu ffts are built in as well. On the gpu, irfft is used for
        efficiency, which requires the filter spectrum to be real (symmetric filter).

        :param grad_output: Gradient output of previous layer
        :return: Gradient including the Fourier-based convolution
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # input_imag = 0, then get ac + bci
        if USE_CUDA:
            grad_output = FFTVal(grad_output, ini=1)
            #print grad_output.view(-1,1).sum()
            f_go = self.fftn(grad_output,self.dim,onesided=True)
            # only the real part of the stored filter is used (imaginary part assumed
            # zero), so backward multiplies by the same spectrum as forward
            f_filter_real = self.complex_fourier_filter[0]
            f_filter_real = f_filter_real.expand_as(f_go[..., 0])
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_go * f_filter_real
            # number of leading (batch/channel) dimensions before the spatial ones
            dim_input = len(grad_output.shape)
            dim_input_batch = dim_input - self.dim
            grad_input = self.ifftn(f_conv,self.dim,onesided=True,signal_sizes=grad_output.shape[dim_input_batch::])
            # print(grad_input)
            # print((grad_input[0,0,12:15]))
            return FFTVal(grad_input, ini=-1)
        else:
            # if self.needs_input_grad[0]:
            numpy_go = grad_output.detach().cpu().numpy()
            # we use the conjugate because the assumption was that the spatial filter is real
            # THe following two lines should be correct
            if self.dim < 3:
                grad_input_c = (self.ifftn(np.conjugate(self.complex_fourier_filter) * self.fftn(numpy_go)))
                grad_input = grad_input_c.real
            elif self.dim == 3:
                grad_input = np.zeros(numpy_go.shape)
                assert grad_output.dim() == 5  # to ensure the behavior correct, we avoid more than 3 dimension fftn method
                for batch in range(grad_output.size()[0]):
                    for ch in range(grad_output.size()[1]):
                        grad_input_c = (self.ifftn(np.conjugate(self.complex_fourier_filter) *self.fftn(numpy_go[batch,ch])))
                        grad_input[batch,ch] = grad_input_c.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            # print(grad_input)
            # print((grad_input[0,0,12:15]))
            return torch.FloatTensor(grad_input)
        # print( 'grad max(imag) = ' + str( (abs( grad_input_c.imag )).max() ) )
        # print( 'grad max(real) = ' + str( (abs( grad_input_c.real )).max() ) )
class InverseFourierConvolution(Function):
    """
    pyTorch function to compute convolutions in the Fourier domain: f = g*h.
    But uses the inverse of the smoothing filter, i.e., divides by the
    (alpha-regularized) filter spectrum instead of multiplying by it.
    """

    def __init__(self, complex_fourier_filter):
        """
        Constructor for the Fourier-based deconvolution (WARNING: EXPERIMENTAL)

        :param complex_fourier_filter: Filter in the Fourier domain as created by *createComplexFourierFilter*
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(InverseFourierConvolution, self).__init__()
        self.complex_fourier_filter = complex_fourier_filter
        if USE_CUDA:
            self.dim = complex_fourier_filter.dim() - 1
        else:
            self.dim = len(complex_fourier_filter.shape)
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)
        # regularizing weight added to the filter spectrum before division
        self.alpha = 0.1

    def set_alpha(self, alpha):
        """
        Sets the regularizing weight

        :param alpha: regularizing weight
        """
        self.alpha = alpha

    def get_alpha(self):
        """
        Returns the regularizing weight

        :return: regularizing weight
        """
        return self.alpha

    def forward(self, input):
        """
        Performs the Fourier-based inverse filtering (division in the Fourier domain).

        :param input: Image
        :return: Filtered-image
        """
        # (a+bi)/c = (a/c) + (b/c)i -- the filter is assumed real, so the real and
        # imaginary FFT components are divided by the same (regularized) values
        if USE_CUDA:
            input = FFTVal(input, ini=1)
            f_input = self.fftn(input, self.dim, onesided=True)
            # BUGFIX: add alpha out-of-place; the previous in-place '+=' modified the
            # cached filter stored in self.complex_fourier_filter on every call
            f_filter_real = self.complex_fourier_filter[0] + self.alpha
            f_filter_real = f_filter_real.expand_as(f_input[..., 0])
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_input / f_filter_real
            dim_input = len(input.shape)
            dim_input_batch = dim_input - self.dim
            # signal_sizes restores the exact spatial sizes lost by the onesided transform
            conv_ouput_real = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=input.shape[dim_input_batch::])
            return FFTVal(conv_ouput_real, ini=-1)
        else:
            if self.dim < 3:
                conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) / (self.alpha + self.complex_fourier_filter))
                result = conv_output.real  # should in principle be real
            elif self.dim == 3:
                # 3d cpu fft is computed per (batch, channel) slice (no fused fftn available)
                result = np.zeros(input.shape)
                for batch in range(input.size()[0]):
                    for ch in range(input.size()[1]):
                        conv_output = self.ifftn(
                            self.fftn(input[batch, ch].detach().cpu().numpy()) / (self.alpha + self.complex_fourier_filter))
                        result[batch, ch] = conv_output.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(result)

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient w.r.t. the input (division by the conjugate regularized filter).

        :param grad_output: Gradient output of previous layer
        :return: Gradient including the Fourier-based convolution
        """
        if USE_CUDA:
            grad_output = FFTVal(grad_output, ini=1)
            f_go = self.fftn(grad_output, self.dim, onesided=True)
            # BUGFIX: out-of-place addition (see forward); in-place '+=' corrupted the cached filter
            f_filter_real = self.complex_fourier_filter[0] + self.alpha
            f_filter_real = f_filter_real.expand_as(f_go[..., 0])
            f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
            f_conv = f_go / f_filter_real
            dim_input = len(grad_output.shape)
            dim_input_batch = dim_input - self.dim
            # BUGFIX: signal_sizes previously sliced with dim_input (yielding an empty
            # tuple); use dim_input_batch so the spatial sizes are passed, consistent
            # with forward() and with FourierConvolution.backward
            grad_input = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=grad_output.shape[dim_input_batch::])
            return FFTVal(grad_input, ini=-1)
        else:
            numpy_go = grad_output.detach().cpu().numpy()
            # we use the conjugate because the assumption was that the spatial filter is real
            if self.dim < 3:
                grad_input_c = self.ifftn(self.fftn(numpy_go) / (self.alpha + np.conjugate(self.complex_fourier_filter)))
                grad_input = grad_input_c.real
            elif self.dim == 3:
                grad_input = np.zeros(numpy_go.shape)
                for batch in range(grad_output.size()[0]):
                    for ch in range(grad_output.size()[1]):
                        grad_input_c = (
                            self.ifftn(self.fftn(numpy_go[batch, ch]) / (self.alpha + np.conjugate(self.complex_fourier_filter))))
                        grad_input[batch, ch] = grad_input_c.real
            else:
                raise ValueError("cpu fft smooth should be 1d-3d")
            return torch.FloatTensor(grad_input)
def fourier_convolution(input, complex_fourier_filter):
    """
    Convenience wrapper for Fourier-based convolutions. Use this instead of
    instantiating :class:`FourierConvolution` directly: a fresh Function object
    is created for every call, which keeps autograd bookkeeping correct.

    :param input: Input image
    :param complex_fourier_filter: Filter in Fourier domain as generated by *createComplexFourierFilter*
    :return: the filtered image
    """
    # Build a new Function instance, then invoke it on the input; __call__
    # dispatches to forward() and records the operation for autograd.
    conv_fn = FourierConvolution(complex_fourier_filter)
    return conv_fn(input)
def inverse_fourier_convolution(input, complex_fourier_filter):
    """Filter *input* with the inverse (regularized) Fourier filter."""
    # fresh Function instance per call so autograd works properly
    deconv_fn = InverseFourierConvolution(complex_fourier_filter)
    return deconv_fn(input)
class GaussianFourierFilterGenerator(object):
    """
    Creates and caches complex Gaussian Fourier filters, as well as the
    corresponding |x|^2-weighted Gaussian filters needed for gradients with
    respect to the Gaussian standard deviation.

    A fixed number of cache slots is kept. A requested sigma that is already
    cached is reused; otherwise it fills an empty slot, and if no slot is free
    the entry with the closest standard deviation is overwritten.
    """

    def __init__(self, sz, spacing, nr_of_slots=1):
        """
        :param sz: image size (spatial dimensions)
        :param spacing: image spacing (one entry per spatial dimension)
        :param nr_of_slots: number of slots to hold Gaussians (to be able to support
            multi-Gaussian smoothing); typically set to the total number of desired
            Gaussians so that none of them need to be recomputed
        """
        self.sz = sz
        # image spacing
        self.spacing = spacing
        # volume of a pixel/voxel
        self.volumeElement = self.spacing.prod()
        # spatial dimension
        self.dim = len(spacing)
        self.nr_of_slots = nr_of_slots
        # all Gaussians are zero-centered
        self.mus = np.zeros(self.dim)
        # TODO: storing the identity map may be a little wasteful
        self.centered_id = utils.centered_identity_map(self.sz, self.spacing)
        # per-slot caches for the plain Gaussian filters (and their FFT max indices)
        self.complex_gaussian_fourier_filters = [None] * self.nr_of_slots
        self.max_indices = [None] * self.nr_of_slots
        self.sigmas_complex_gaussian_fourier_filters = [None] * self.nr_of_slots
        # per-slot caches for the |x|^2-weighted Gaussian filters
        self.complex_gaussian_fourier_xsqr_filters = [None] * self.nr_of_slots
        self.sigmas_complex_gaussian_fourier_xsqr_filters = [None] * self.nr_of_slots
        # BUGFIX: slot-aligned float lookup table for fast exact-sigma hits.
        # Previously this was an unbounded, append-only list whose .index() could
        # return an index >= nr_of_slots (IndexError) or point at a slot that had
        # since been overwritten.
        self.sigmas_complex_gaussian_fourier_filters_np = [None] * self.nr_of_slots

    def get_number_of_slots(self):
        """Returns the number of cache slots."""
        return self.nr_of_slots

    def get_number_of_currently_stored_gaussians(self):
        """Returns how many Gaussian filters are currently cached."""
        nr_of_gaussians = 0
        for s in self.sigmas_complex_gaussian_fourier_filters:
            if s is not None:
                nr_of_gaussians += 1
        return nr_of_gaussians

    def get_dimension(self):
        """Returns the spatial dimension this generator is set up for."""
        return self.dim

    def _compute_complex_gaussian_fourier_filter(self, sigma):
        """Computes the complex Fourier filter of an isotropic, normalized Gaussian with std sigma."""
        stds = sigma.detach().cpu().numpy() * np.ones(self.dim)
        gaussian_spatial_filter = utils.compute_normalized_gaussian(self.centered_id, self.mus, stds)
        complex_gaussian_fourier_filter, max_index = create_complex_fourier_filter(gaussian_spatial_filter, self.sz, True)
        return complex_gaussian_fourier_filter, max_index

    def _compute_complex_gaussian_fourier_xsqr_filter(self, sigma, max_index=None):
        """
        Computes the Fourier filter of the Gaussian multiplied by |x|^2 (needed for
        the gradient w.r.t. sigma). Requires the max_index obtained when the
        corresponding plain Gaussian filter was created.
        """
        if max_index is None:
            raise ValueError('A Gaussian filter needs to be generated / requested *before* any other filter')
        # TODO: maybe compute this jointly with the gaussian filter itself to avoid computing the spatial filter twice
        stds = sigma.detach().cpu().numpy() * np.ones(self.dim)
        gaussian_spatial_filter = utils.compute_normalized_gaussian(self.centered_id, self.mus, stds)
        gaussian_spatial_xsqr_filter = gaussian_spatial_filter * (self.centered_id ** 2).sum(axis=0)
        complex_gaussian_fourier_xsqr_filter, max_index = create_complex_fourier_filter(gaussian_spatial_xsqr_filter, self.sz, True, max_index)
        return complex_gaussian_fourier_xsqr_filter, max_index

    def _find_closest_sigma_index(self, sigma, available_sigmas):
        """
        For a given sigma, finds the slot index to use among available_sigmas:
        - if sigma is already stored, its (first) index is returned
        - otherwise the first empty slot (None) is returned
        - otherwise the slot with the closest sigma is returned (to be overwritten)

        :param sigma: requested standard deviation (tensor)
        :param available_sigmas: list of cached sigmas (or None for empty slots)
        :return: slot index for the closest sigma among available_sigmas
        """
        closest_i = None
        same_i = None
        empty_slot_i = None
        current_dist_sqr = None
        for i, s in enumerate(available_sigmas):
            if s is not None:
                # keep track of the slot with the closest stored sigma
                new_dist_sqr = (s - sigma) ** 2
                if current_dist_sqr is None or new_dist_sqr < current_dist_sqr:
                    current_dist_sqr = new_dist_sqr
                    closest_i = i
                # record the first slot holding (numerically) the same sigma
                if torch.isclose(sigma, s):
                    if same_i is None:
                        same_i = i
            else:
                # record the first empty slot
                if empty_slot_i is None:
                    empty_slot_i = i
        if same_i is not None:
            # already computed
            return same_i
        elif empty_slot_i is not None:
            # not computed yet, but there is a free slot
            return empty_slot_i
        elif closest_i is not None:
            # no free slot; overwrite the closest one
            return closest_i
        else:
            # nothing stored yet; cannot normally happen since an empty slot would have been found
            return 0

    def get_gaussian_xsqr_filters(self, sigmas):
        """
        Returns complex Gaussian Fourier filters multiplied with |x|^2 for the
        requested standard deviations; only recomputes filters whose sigma changed.

        :param sigmas: standard deviations of the filters as a list
        :return: the complex Gaussian xsqr Fourier filters as a list (same order as requested)
        """
        current_complex_gaussian_fourier_xsqr_filters = []
        # only recompute the ones that need to be recomputed
        for sigma in sigmas:
            i = self._find_closest_sigma_index(sigma, self.sigmas_complex_gaussian_fourier_xsqr_filters)
            if self.sigmas_complex_gaussian_fourier_xsqr_filters[i] is None:
                need_to_recompute = True
            elif self.complex_gaussian_fourier_xsqr_filters[i] is None:
                need_to_recompute = True
            elif torch.isclose(sigma, self.sigmas_complex_gaussian_fourier_xsqr_filters[i]):
                need_to_recompute = False
            else:
                need_to_recompute = True
            if need_to_recompute:
                print('INFO: Recomputing gaussian xsqr filter for sigma={:.2f}'.format(sigma))
                self.sigmas_complex_gaussian_fourier_xsqr_filters[i] = sigma  # .clone()
                # NOTE(review): self.max_indices[i] assumes the plain Gaussian filter for
                # this sigma occupies the same slot i; this holds when both filter types
                # are requested with the same sigma sequences -- confirm
                self.complex_gaussian_fourier_xsqr_filters[i], _ = self._compute_complex_gaussian_fourier_xsqr_filter(sigma, self.max_indices[i])
            current_complex_gaussian_fourier_xsqr_filters.append(self.complex_gaussian_fourier_xsqr_filters[i])
        return current_complex_gaussian_fourier_xsqr_filters

    def get_gaussian_filters(self, sigmas):
        """
        Returns complex Gaussian Fourier filters for the requested standard
        deviations; only recomputes filters whose sigma changed.

        :param sigmas: standard deviations of the filters as a list
        :return: the complex Gaussian Fourier filters as a list (same order as requested)
        """
        current_complex_gaussian_fourier_filters = []
        # only recompute the ones that need to be recomputed
        for sigma in sigmas:
            sigma_value = sigma.item()
            # fast path: exact float match in the slot-aligned lookup table
            if sigma_value in self.sigmas_complex_gaussian_fourier_filters_np:
                i = self.sigmas_complex_gaussian_fourier_filters_np.index(sigma_value)
            else:
                i = self._find_closest_sigma_index(sigma, self.sigmas_complex_gaussian_fourier_filters)
            if self.sigmas_complex_gaussian_fourier_filters[i] is None:
                need_to_recompute = True
            elif self.complex_gaussian_fourier_filters[i] is None:
                need_to_recompute = True
            elif torch.isclose(sigma, self.sigmas_complex_gaussian_fourier_filters[i]):
                need_to_recompute = False
            else:
                need_to_recompute = True
            if need_to_recompute:
                print('INFO: Recomputing gaussian filter for sigma={:.2f}'.format(sigma))
                self.sigmas_complex_gaussian_fourier_filters[i] = sigma  # .clone()
                # BUGFIX: keep the float lookup table in sync with the slot content
                # (was an unconditional append that could grow without bound and
                # desynchronize from the slot indices)
                self.sigmas_complex_gaussian_fourier_filters_np[i] = sigma_value
                self.complex_gaussian_fourier_filters[i], self.max_indices[i] = self._compute_complex_gaussian_fourier_filter(sigma)
            current_complex_gaussian_fourier_filters.append(self.complex_gaussian_fourier_filters[i])
        return current_complex_gaussian_fourier_filters
class FourierGaussianConvolution(Function):
    """
    pyTorch function to compute Gaussian convolutions in the Fourier domain: f = g*h.
    Also allows to differentiate through the Gaussian standard deviation.

    Base class providing the shared CUDA/CPU convolution and gradient helpers used
    by the single- and multi-Gaussian Fourier convolution Functions below.
    """
    def __init__(self, gaussian_fourier_filter_generator):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: generator object that creates and caches the Gaussian Fourier filters
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierGaussianConvolution, self).__init__()
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # spatial dimension, taken from the filter generator
        self.dim = self.gaussian_fourier_filter_generator.get_dimension()
        # dimension-appropriate fft/ifft implementations
        self.fftn = sel_fftn(self.dim)
        self.ifftn = sel_ifftn(self.dim)
    def _compute_convolution_CUDA(self,input,complex_fourier_filter):
        """GPU path: multiply the onesided FFT of *input* by the real part of the filter."""
        input = FFTVal(input, ini=1)
        f_input = self.fftn(input, self.dim, onesided=True)
        # only the real part of the filter is used (imaginary part assumed zero)
        f_filter_real = complex_fourier_filter[0]
        # broadcast over batch/channel, then duplicate for real/imag FFT components
        f_filter_real = f_filter_real.expand_as(f_input[..., 0])
        f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
        f_conv = f_input * f_filter_real
        dim_input = len(input.shape)
        dim_input_batch = dim_input - self.dim
        # signal_sizes restores the exact spatial sizes lost by the onesided transform
        conv_ouput_real = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=input.shape[dim_input_batch::])
        result = conv_ouput_real
        return FFTVal(result, ini=-1)
    def _compute_convolution_CPU(self,input,complex_fourier_filter):
        """CPU path: numpy fft-based convolution; 3d is looped over batch/channel."""
        if self.dim < 3:
            conv_output = self.ifftn(self.fftn(input.detach().cpu().numpy()) * complex_fourier_filter)
            result = conv_output.real  # should in principle be real
        elif self.dim == 3:
            result = np.zeros(input.shape)
            for batch in range(input.size()[0]):
                for ch in range(input.size()[1]):
                    conv_output = self.ifftn(self.fftn(input[batch, ch].detach().cpu().numpy()) * complex_fourier_filter)
                    result[batch, ch] = conv_output.real
        else:
            raise ValueError("cpu fft smooth should be 1d-3d")
        return torch.FloatTensor(result)
    # print( 'max(imag) = ' + str( (abs( conv_output.imag )).max() ) )
    # print( 'max(real) = ' + str( (abs( conv_output.real )).max() ) )
    def _compute_input_gradient_CUDA(self,grad_output,complex_fourier_filter):
        """GPU path: gradient w.r.t. the input; same spectrum multiplication as forward (real filter)."""
        grad_output = FFTVal(grad_output, ini=1)
        # print grad_output.view(-1,1).sum()
        f_go = self.fftn(grad_output, self.dim, onesided=True)
        f_filter_real = complex_fourier_filter[0]
        f_filter_real = f_filter_real.expand_as(f_go[..., 0])
        f_filter_real = torch.stack((f_filter_real, f_filter_real), -1)
        f_conv = f_go * f_filter_real
        dim_input = len(grad_output.shape)
        dim_input_batch = dim_input - self.dim
        grad_input = self.ifftn(f_conv, self.dim, onesided=True, signal_sizes=grad_output.shape[dim_input_batch::])
        return FFTVal(grad_input, ini=-1)
    def _compute_input_gradient_CPU(self,grad_output,complex_fourier_filter):
        """CPU path: gradient w.r.t. the input, using the conjugate filter spectrum."""
        numpy_go = grad_output.detach().cpu().numpy()
        # we use the conjugate because the assumption was that the spatial filter is real
        # THe following two lines should be correct
        if self.dim < 3:
            grad_input_c = (self.ifftn(np.conjugate(complex_fourier_filter) * self.fftn(numpy_go)))
            grad_input = grad_input_c.real
        elif self.dim == 3:
            grad_input = np.zeros(numpy_go.shape)
            assert grad_output.dim() == 5  # to ensure the behavior correct, we avoid more than 3 dimension fftn method
            for batch in range(grad_output.size()[0]):
                for ch in range(grad_output.size()[1]):
                    grad_input_c = (
                        self.ifftn(np.conjugate(complex_fourier_filter) * self.fftn(numpy_go[batch, ch])))
                    grad_input[batch, ch] = grad_input_c.real
        else:
            raise ValueError("cpu fft smooth should be 1d-3d")
        return torch.FloatTensor(grad_input)
    def _compute_sigma_gradient_CUDA(self,input,sigma,grad_output,complex_fourier_filter,complex_fourier_xsqr_filter):
        """
        Gradient w.r.t. the Gaussian std sigma (GPU path): combines the plain and the
        |x|^2-weighted convolutions, scaled by -dim/sigma and 1/sigma^3 respectively.

        NOTE(review): grad_output is converted to a numpy array while the convolution
        results stay torch (possibly CUDA) tensors -- mixing them in the products below
        looks suspect on the GPU; confirm (see the TODO below on gradient correctness).
        """
        convolved_input = self._compute_convolution_CUDA(input, complex_fourier_filter)
        grad_sigma = -1. / sigma * self.dim * (grad_output.detach().cpu().numpy() * convolved_input).sum()
        convolved_input_xsqr = self._compute_convolution_CUDA(input, complex_fourier_xsqr_filter)
        grad_sigma += 1. / (sigma ** 3) * (grad_output.detach().cpu().numpy() * convolved_input_xsqr).sum()
        return grad_sigma
    # TODO: gradient appears to be incorrect
    def _compute_sigma_gradient_CPU(self,input,sigma,grad_output,complex_fourier_filter,complex_fourier_xsqr_filter):
        """Gradient w.r.t. the Gaussian std sigma (CPU path); mirrors the CUDA variant."""
        convolved_input = self._compute_convolution_CPU(input,complex_fourier_filter)
        grad_sigma = -1./sigma*self.dim*(grad_output.detach().cpu().numpy()*convolved_input).sum()
        convolved_input_xsqr = self._compute_convolution_CPU(input,complex_fourier_xsqr_filter)
        grad_sigma += 1./(sigma**3)*(grad_output.detach().cpu().numpy()*convolved_input_xsqr).sum()
        return grad_sigma
class FourierSingleGaussianConvolution(FourierGaussianConvolution):
    """
    pyTorch function computing a single Gaussian convolution in the Fourier
    domain (f = g*h), with optional differentiation through the Gaussian
    standard deviation.
    """

    def __init__(self, gaussian_fourier_filter_generator, compute_std_gradient):
        """
        :param gaussian_fourier_filter_generator: creates/caches the Gaussian Fourier filters
        :param compute_std_gradient: if True the gradient w.r.t. the std is computed, otherwise it is set to 0
        """
        # spatial filter F is assumed real, hence conj(F(w)) = F(-w)
        super(FourierSingleGaussianConvolution, self).__init__(gaussian_fourier_filter_generator)
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        self.compute_std_gradient = compute_std_gradient
        # state cached by forward() for use in backward()
        self.input = None
        self.sigma = None
        self.complex_fourier_filter = None
        self.complex_fourier_xsqr_filter = None

    def forward(self, input, sigma):
        """
        Performs the Fourier-based Gaussian filtering (rfft is used on the GPU
        for efficiency, so the filter is assumed to have a real spectrum).

        :param input: Image
        :param sigma: standard deviation of the Gaussian
        :return: Filtered-image
        """
        # cache everything needed by backward()
        self.input = input
        self.sigma = sigma
        gaussian_filters = self.gaussian_fourier_filter_generator.get_gaussian_filters(sigma)
        xsqr_filters = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(sigma)
        self.complex_fourier_filter = gaussian_filters[0]
        self.complex_fourier_xsqr_filter = xsqr_filters[0]
        # real filter: (a+bi)*c = ac + bci
        if USE_CUDA:
            return self._compute_convolution_CUDA(input, self.complex_fourier_filter)
        return self._compute_convolution_CPU(input, self.complex_fourier_filter)

    # single output, hence a single incoming gradient
    def backward(self, grad_output):
        """
        Computes the gradients w.r.t. the input image and (optionally) the
        Gaussian standard deviation.

        :param grad_output: Gradient output of previous layer
        :return: (grad_input, grad_sigma) tuple
        """
        grad_input = None
        grad_sigma = None
        # gradient w.r.t. the input image
        if self.needs_input_grad[0]:
            input_grad_fn = (self._compute_input_gradient_CUDA if USE_CUDA
                             else self._compute_input_gradient_CPU)
            grad_input = input_grad_fn(grad_output, self.complex_fourier_filter)
        # gradient w.r.t. the standard deviation (zeroed when disabled)
        if not self.compute_std_gradient:
            grad_sigma = torch.zeros_like(self.sigma)
        elif self.needs_input_grad[1]:
            sigma_grad_fn = (self._compute_sigma_gradient_CUDA if USE_CUDA
                             else self._compute_sigma_gradient_CPU)
            grad_sigma = sigma_grad_fn(self.input, self.sigma, grad_output,
                                       self.complex_fourier_filter,
                                       self.complex_fourier_xsqr_filter)
        return grad_input, grad_sigma
def fourier_single_gaussian_convolution(input, gaussian_fourier_filter_generator, sigma, compute_std_gradient):
    """
    Convenience wrapper for Fourier-based single-Gaussian convolutions. Use this
    instead of instantiating :class:`FourierSingleGaussianConvolution` directly:
    a fresh Function object is created per call, which keeps autograd correct.

    :param input: Input image
    :param gaussian_fourier_filter_generator: generator which will create Gaussian Fourier filters (and caches them)
    :param sigma: standard deviation for the Gaussian filter
    :param compute_std_gradient: if set to True computes the gradient, otherwise sets it to 0
    :return: the filtered image
    """
    conv_fn = FourierSingleGaussianConvolution(gaussian_fourier_filter_generator, compute_std_gradient)
    return conv_fn(input, sigma)
class FourierMultiGaussianConvolution(FourierGaussianConvolution):
    """
    pyTorch function to compute multi Gaussian convolutions in the Fourier domain: f = g*h.
    The output is the weighted sum of the individual Gaussian convolutions.
    Also allows to differentiate through the Gaussian standard deviations and weights.
    """
    def __init__(self, gaussian_fourier_filter_generator,compute_std_gradients,compute_weight_gradients):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: class instance that creates and caches the Gaussian filters
        :param compute_std_gradients: if set to True the gradients for std are computed, otherwise they are filled w/ zero
        :param compute_weight_gradients: if set to True the gradients for weights are computed, otherwise they are filled w/ zero
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierMultiGaussianConvolution, self).__init__(gaussian_fourier_filter_generator)
        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # state cached by forward() for use in backward()
        self.complex_fourier_filters = None
        self.complex_fourier_xsqr_filters = None
        self.input = None
        self.weights = None
        self.sigmas = None
        self.nr_of_gaussians = None
        self.compute_std_gradients = compute_std_gradients
        self.compute_weight_gradients = compute_weight_gradients
    def forward(self, input, sigmas, weights):
        """
        Performs the Fourier-based multi-Gaussian filtering: the weighted sum of the
        individual Gaussian convolutions of the input.

        The 3d cpu fft is computed in a loop over batch/channel (see the base-class
        helpers); on the gpu rfft is used, so the filters should have real spectra.

        :param input: Image
        :param sigmas: standard deviations of the Gaussians (same length as weights)
        :param weights: weights of the individual Gaussians
        :return: Filtered-image
        """
        # cache everything needed by backward()
        self.input = input
        self.sigmas = sigmas
        self.weights = weights
        self.nr_of_gaussians = len(self.sigmas)
        nr_of_weights = len(self.weights)
        assert(self.nr_of_gaussians==nr_of_weights)
        self.complex_fourier_filters = self.gaussian_fourier_filter_generator.get_gaussian_filters(self.sigmas)
        self.complex_fourier_xsqr_filters = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(self.sigmas)
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # filter_imag =0, then get ac + bci
        ret = torch.zeros_like(input)
        for i in range(self.nr_of_gaussians):
            if USE_CUDA:
                ret += self.weights[i]*self._compute_convolution_CUDA(input,self.complex_fourier_filters[i])
            else:
                ret+= self.weights[i]*self._compute_convolution_CPU(input,self.complex_fourier_filters[i])
        return ret
    def _compute_input_gradient_CUDA_multi_gaussian(self,grad_output,complex_fourier_filters):
        """Weighted sum of the per-Gaussian input gradients (GPU path)."""
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self.weights[i]*self._compute_input_gradient_CUDA(grad_output, complex_fourier_filters[i])
        return grad_input
    def _compute_input_gradient_CPU_multi_gaussian(self,grad_output,complex_fourier_filters):
        """Weighted sum of the per-Gaussian input gradients (CPU path)."""
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self.weights[i] * self._compute_input_gradient_CPU(grad_output,complex_fourier_filters[i])
        return grad_input
    def _compute_sigmas_gradient_CUDA_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        """Per-Gaussian sigma gradients, each scaled by the corresponding weight (GPU path)."""
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self.weights[i] * self._compute_sigma_gradient_CUDA(input,sigmas[i],grad_output,
                                                                                 complex_fourier_filters[i],
                                                                                 complex_fourier_xsqr_filters[i])
        return grad_sigmas
    def _compute_sigmas_gradient_CPU_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        """Per-Gaussian sigma gradients, each scaled by the corresponding weight (CPU path)."""
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self.weights[i] * self._compute_sigma_gradient_CPU(input,sigmas[i],grad_output,
                                                                                complex_fourier_filters[i],
                                                                                complex_fourier_xsqr_filters[i])
        return grad_sigmas
    def _compute_weights_gradient_CUDA_multi_gaussian(self,input,weights,grad_output,complex_fourier_filters):
        """Weight gradients: inner product of grad_output with each Gaussian convolution (GPU path)."""
        grad_weights = torch.zeros_like(weights)
        for i in range(self.nr_of_gaussians):
            grad_weights[i] = (grad_output*self._compute_convolution_CUDA(input,complex_fourier_filters[i])).sum()
        return grad_weights
    def _compute_weights_gradient_CPU_multi_gaussian(self,input,weights,grad_output,complex_fourier_filters):
        """Weight gradients: inner product of grad_output with each Gaussian convolution (CPU path)."""
        grad_weights = torch.zeros_like(weights)
        for i in range(self.nr_of_gaussians):
            grad_weights[i] = (grad_output * self._compute_convolution_CPU(input, complex_fourier_filters[i])).sum()
        return grad_weights
    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradients with respect to the input image, the standard
        deviations, and the weights (the latter two optionally zeroed via the
        constructor flags).

        :param grad_output: Gradient output of previous layer
        :return: (grad_input, grad_sigmas, grad_weights) tuple
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = grad_sigmas = grad_weights = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        # first compute the gradient with respect to the input
        if self.needs_input_grad[0]:
            # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
            # input_imag =0, then get ac + bci
            if USE_CUDA:
                grad_input = self._compute_input_gradient_CUDA_multi_gaussian(grad_output,self.complex_fourier_filters)
            else:
                grad_input = self._compute_input_gradient_CPU_multi_gaussian(grad_output,self.complex_fourier_filters)
        # now compute the gradient with respect to the standard deviation of the filter
        if self.needs_input_grad[1]:
            if self.compute_std_gradients:
                if USE_CUDA:
                    grad_sigmas = self._compute_sigmas_gradient_CUDA_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
                else:
                    grad_sigmas = self._compute_sigmas_gradient_CPU_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
            else:
                grad_sigmas = torch.zeros_like(self.sigmas)
        # and the gradient with respect to the weights
        if self.needs_input_grad[2]:
            if self.compute_weight_gradients:
                if USE_CUDA:
                    grad_weights = self._compute_weights_gradient_CUDA_multi_gaussian(self.input,self.weights,grad_output,self.complex_fourier_filters)
                else:
                    grad_weights = self._compute_weights_gradient_CPU_multi_gaussian(self.input,self.weights,grad_output,self.complex_fourier_filters)
            else:
                grad_weights = torch.zeros_like(self.weights)
        # now return the computed gradients
        #print('gsigmas: min=' + str(grad_sigmas.min()) + '; max=' + str(grad_sigmas.max()))
        #print('gweight: min=' + str(grad_weights.min()) + '; max=' + str(grad_weights.max()))
        #print( 'gsigmas = ' + str( grad_sigmas))
        #print( 'gweight = ' + str( grad_weights))
        return grad_input, grad_sigmas, grad_weights
def fourier_multi_gaussian_convolution(input, gaussian_fourier_filter_generator, sigma, weights, compute_std_gradients=True, compute_weight_gradients=True):
    """
    Convenience wrapper for Fourier-based multi-Gaussian convolutions. Use this
    instead of instantiating :class:`FourierMultiGaussianConvolution` directly:
    a fresh Function object is created per call, which keeps autograd correct.

    :param input: Input image
    :param gaussian_fourier_filter_generator: generator which will create Gaussian Fourier filters (and caches them)
    :param sigma: standard deviations for the Gaussian filters (need to be positive)
    :param weights: weights for the multi-Gaussian kernel (need to sum up to one and need to be positive)
    :param compute_std_gradients: if set to True computes the gradients with respect to the standard deviations
    :param compute_weight_gradients: if set to True then gradients for the weights are computed, otherwise they are replaced w/ zero
    :return: the filtered image
    """
    conv_fn = FourierMultiGaussianConvolution(gaussian_fourier_filter_generator,
                                              compute_std_gradients,
                                              compute_weight_gradients)
    return conv_fn(input, sigma, weights)
class FourierSetOfGaussianConvolutions(FourierGaussianConvolution):
    """
    pyTorch function to compute a set of Gaussian convolutions (as in the multi-Gaussian) in the Fourier domain: f = g*h.
    Also allows to differentiate through the standard deviations. The output is not a single smoothed field, but the
    stack of all of them (one per standard deviation). This can then be fed into a subsequent neural network for
    further processing.
    """

    def __init__(self, gaussian_fourier_filter_generator,compute_std_gradients):
        """
        Constructor for the Fourier-based convolution

        :param gaussian_fourier_filter_generator: class instance that creates and caches the Gaussian filters
        :param compute_std_gradients: if set to True the gradients for the stds are computed, otherwise they are filled w/ zero
        """
        # we assume this is a spatial filter, F, hence conj(F(w))=F(-w)
        super(FourierSetOfGaussianConvolutions, self).__init__(gaussian_fourier_filter_generator)

        self.gaussian_fourier_filter_generator = gaussian_fourier_filter_generator
        # State below is populated in forward() and consumed in backward()
        # (legacy-autograd style: the Function instance carries the context).
        self.complex_fourier_filters = None
        # x^2-weighted filters; only needed for the d/d(sigma) gradients.
        self.complex_fourier_xsqr_filters = None
        self.input = None
        self.sigmas = None
        self.nr_of_gaussians = None
        self.compute_std_gradients = compute_std_gradients

    def forward(self, input, sigmas):
        """
        Performs the Fourier-based filtering for every standard deviation.

        Implementation notes:
        the 3d cpu fft is not implemented in fftn, to avoid fusing with batch and channel, here 3d is calculated in a loop;
        1d/2d cpu work well because fft and fft2 are inbuilt; similarly, 1d/2d/3d gpu fft is also inbuilt.
        In the gpu implementation, rfft is used for efficiency, which means the filter should be symmetric.

        :param input: Image
        :param sigmas: standard deviations, one per Gaussian
        :return: stack of filtered images, shape [nr_of_gaussians] + input.shape
        """
        # Cache everything backward() will need.
        self.input = input
        self.sigmas = sigmas
        self.nr_of_gaussians = len(self.sigmas)
        self.complex_fourier_filters = self.gaussian_fourier_filter_generator.get_gaussian_filters(self.sigmas)
        if self.compute_std_gradients:
            # Only build the (more expensive) x^2 filters when sigma gradients are requested.
            self.complex_fourier_xsqr_filters = self.gaussian_fourier_filter_generator.get_gaussian_xsqr_filters(self.sigmas)
        # TODO check if the xsqr should be put into an if statement here
        # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
        # filter_imag =0, then get ac + bci
        sz = input.size()
        # Output holds one smoothed copy of the input per Gaussian.
        new_sz = [self.nr_of_gaussians] + list(sz)
        ret = AdaptVal(MyTensor(*new_sz))
        for i in range(self.nr_of_gaussians):
            if USE_CUDA:
                ret[i,...] = self._compute_convolution_CUDA(input,self.complex_fourier_filters[i])
            else:
                ret[i,...] = self._compute_convolution_CPU(input,self.complex_fourier_filters[i])

        return ret

    def _compute_input_gradient_CUDA_multi_gaussian(self,grad_output,complex_fourier_filters):
        # Input gradient is the sum of the per-Gaussian input gradients (CUDA path).
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self._compute_input_gradient_CUDA(grad_output[i,...], complex_fourier_filters[i])
        return grad_input

    def _compute_input_gradient_CPU_multi_gaussian(self,grad_output,complex_fourier_filters):
        # Input gradient is the sum of the per-Gaussian input gradients (CPU path).
        grad_input = torch.zeros_like(self.input)
        for i in range(self.nr_of_gaussians):
            grad_input += self._compute_input_gradient_CPU(grad_output[i,...],complex_fourier_filters[i])
        return grad_input

    def _compute_sigmas_gradient_CUDA_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        # One sigma gradient per Gaussian; each uses its own filter pair (CUDA path).
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self._compute_sigma_gradient_CUDA(input,sigmas[i],grad_output[i,...],
                                                               complex_fourier_filters[i],
                                                               complex_fourier_xsqr_filters[i])
        return grad_sigmas

    def _compute_sigmas_gradient_CPU_multi_gaussian(self,input,sigmas,grad_output,complex_fourier_filters,complex_fourier_xsqr_filters):
        # One sigma gradient per Gaussian; each uses its own filter pair (CPU path).
        grad_sigmas = torch.zeros_like(sigmas)
        for i in range(self.nr_of_gaussians):
            grad_sigmas[i] = self._compute_sigma_gradient_CPU(input,sigmas[i],grad_output[i,...],
                                                              complex_fourier_filters[i],
                                                              complex_fourier_xsqr_filters[i])
        return grad_sigmas

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        """
        Computes the gradient

        Implementation notes:
        the 3d cpu ifft is not implemented in ifftn, to avoid fusing with batch and channel, here 3d is calculated in a loop;
        1d/2d cpu work well because ifft and ifft2 are inbuilt; similarly, 1d/2d/3d gpu fft is also inbuilt.
        In the gpu implementation, irfft is used for efficiency, which means the filter should be symmetric.

        :param grad_output: Gradient output of previous layer (one slice per Gaussian)
        :return: tuple (grad_input, grad_sigmas)
        """
        # Initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_input = grad_sigmas = None

        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.

        # first compute the gradient with respect to the input
        if self.needs_input_grad[0]:
            # (a+bi)(c+di) = (ac-bd) + (bc+ad)i
            # input_imag =0, then get ac + bci
            if USE_CUDA:
                grad_input = self._compute_input_gradient_CUDA_multi_gaussian(grad_output,self.complex_fourier_filters)
            else:
                grad_input = self._compute_input_gradient_CPU_multi_gaussian(grad_output,self.complex_fourier_filters)

        # now compute the gradient with respect to the standard deviation of the filter
        if self.needs_input_grad[1]:
            if self.compute_std_gradients:
                if USE_CUDA:
                    grad_sigmas = self._compute_sigmas_gradient_CUDA_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
                else:
                    grad_sigmas = self._compute_sigmas_gradient_CPU_multi_gaussian(self.input,self.sigmas,grad_output,self.complex_fourier_filters,self.complex_fourier_xsqr_filters)
            else:
                # Sigma gradients were not requested at construction time: report zeros.
                grad_sigmas = torch.zeros_like(self.sigmas)

        # now return the computed gradients
        return grad_input, grad_sigmas
def fourier_set_of_gaussian_convolutions(input, gaussian_fourier_filter_generator, sigma, compute_std_gradients=False):
    """Compute the stack of individual Gaussian smoothings of an image in the Fourier domain.

    Always use this convenience wrapper instead of instantiating
    :class:`FourierSetOfGaussianConvolutions` directly; a fresh Function
    object is created per call so that autograd works properly.

    :param input: input image
    :param gaussian_fourier_filter_generator: generator which creates (and caches) the Gaussian Fourier filters
    :param sigma: standard deviations for the Gaussian filters (need to be positive)
    :param compute_std_gradients: if True, gradients w.r.t. the standard deviations are computed; otherwise they are zero
    :return: tensor stacking one smoothed image per standard deviation
    """
    smoothing_op = FourierSetOfGaussianConvolutions(
        gaussian_fourier_filter_generator, compute_std_gradients
    )
    return smoothing_op(input, sigma)
def check_fourier_conv():
    """
    Convenience function to check the analytical gradient of the Fourier convolution
    against a numerical finite-difference approximation.

    :return: result of ``torch.autograd.gradcheck`` (True if the analytical and
        numerical gradients agree within tolerance)

    .. todo::
        The current check seems to fail in pyTorch. However, the gradient appears
        to be correct. Potentially an issue with the numerical gradient approximation.
    """
    # gradcheck takes a tuple of tensors as input, checks if your gradient
    # evaluated with these tensors is close enough to numerical
    # approximations and returns True if they all verify this condition.
    # TODO: Seems to fail at the moment, check why if there are issues with the gradient
    sz = np.array([20, 20], dtype='int64')
    dim = len(sz)
    mus = np.zeros(dim)
    stds = np.ones(dim)
    spacing = np.ones(dim)
    centered_id = utils.centered_identity_map(sz, spacing)
    # Normalized Gaussian used as the spatial convolution kernel.
    g = 100 * utils.compute_normalized_gaussian(centered_id, mus, stds)
    FFilter, _ = create_complex_fourier_filter(g, sz)
    input = AdaptVal(torch.randn([1, 1] + list(sz)))
    input.requires_grad = True
    test = gradcheck(FourierConvolution(FFilter), input, eps=1e-6, atol=1e-4)
    print(test)
    # Return the result as well (the docstring promised a boolean result, but the
    # original implementation only printed it). Callers that ignored the previous
    # None return value are unaffected.
    return test
def check_run_forward_and_backward():
    """
    Convenience function that runs the Fourier convolution once forward and once
    backward and prints the resulting input gradient.

    :return: None
    """
    sz = [20, 20]
    # Constant averaging kernel (sums to one over the 20x20 grid).
    kernel = 1 / 400. * np.ones(sz)
    fourier_filter, _ = create_complex_fourier_filter(kernel, sz, False)
    image = torch.randn(sz).float()
    image.requires_grad = True
    smoothed = FourierConvolution(fourier_filter)(image)
    # Back-propagate a random upstream gradient through the convolution.
    smoothed.backward(torch.randn(sz).float())
    print(image.grad)
| 45.652141 | 266 | 0.661274 |
ace4f2baeb237bfd035c79cc17c8cbc6c8898693 | 6,803 | py | Python | bindings/python/ensmallen_graph/datasets/string/brevibacteriumlinens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/brevibacteriumlinens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/brevibacteriumlinens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Brevibacterium linens.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:27:59.541557
The undirected graph Brevibacterium linens has 3786 nodes and 333522 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04655 and has 19 connected components, where the component with most
nodes has 3746 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 140, the mean node degree is 176.19, and
the node degree mode is 1. The top 5 most central nodes are 321955.AAGP01000001_gene3292
(degree 1286), 321955.AAGP01000013_gene703 (degree 1231), 321955.AAGP01000001_gene3190
(degree 1198), 321955.AAGP01000004_gene2750 (degree 1113) and 321955.AAGP01000059_gene1408
(degree 1082).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import BrevibacteriumLinens
# Then load the graph
graph = BrevibacteriumLinens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def BrevibacteriumLinens(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Brevibacterium linens graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Brevibacterium linens graph.

    References
    ---------------------
    Please cite the following if you use the data:

    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }

    Usage example
    ----------------------
    .. code:: python

        from ensmallen_graph.datasets.string import BrevibacteriumLinens

        graph = BrevibacteriumLinens()
        print(graph)

        # For link prediction with validation, split with a connected holdout:
        train_graph, validation_graph = graph.connected_holdout(
            train_size=0.8,
            random_state=42,
            verbose=True
        )
    """
    # Build the lazy retriever first, then invoke it to download (or load from
    # cache) and construct the graph object.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="BrevibacteriumLinens",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
ace4f2dbd5d6ddeed251fc56e9cd70cd33d89dca | 85,605 | py | Python | pretrained-model/alxlnet/pytorch/modeling_alxlnet.py | ebiggerr/malaya | be757c793895522f80b929fe82353d90762f7fff | [
"MIT"
] | 88 | 2021-01-06T10:01:31.000Z | 2022-03-30T17:34:09.000Z | pretrained-model/alxlnet/pytorch/modeling_alxlnet.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 43 | 2021-01-14T02:44:41.000Z | 2022-03-31T19:47:42.000Z | pretrained-model/alxlnet/pytorch/modeling_alxlnet.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 38 | 2021-01-06T07:15:03.000Z | 2022-03-19T05:07:50.000Z | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
)
from transformers.modeling_utils import (
PoolerAnswerClass,
PoolerEndLogits,
PoolerStartLogits,
PreTrainedModel,
SequenceSummary,
)
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Tokenizer class name referenced by the auto-generated docstrings.
_TOKENIZER_FOR_DOC = 'XLNetTokenizer'

# Identifiers of the published pretrained checkpoints for this architecture.
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'xlnet-base-cased',
    'xlnet-large-cased',
    # See all XLNet models at https://huggingface.co/models?filter=xlnet
]
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights = None):
    """ A map of modules from TF to PyTorch.
    I use a map to keep the PyTorch model as
    identical to the original PyTorch model as possible.

    :param model: the PyTorch model (full model, or its inner transformer)
    :param config: model configuration (``untie_r`` decides per-layer vs shared biases)
    :param tf_weights: dict of TF variable name -> numpy array, used to decide
        which optional heads are present in the checkpoint
    :return: dict mapping TF variable names to PyTorch parameters (or lists of them)
    """
    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        # NOTE: 'sequnece_summary' is spelled exactly as in the TF checkpoint
        # variable names — do not "fix" the typo or the lookup will miss.
        if (
            hasattr(model, 'sequence_summary')
            and 'model/sequnece_summary/summary/kernel' in tf_weights
        ):
            # We will load also the sequence summary
            tf_to_pt_map[
                'model/sequnece_summary/summary/kernel'
            ] = model.sequence_summary.summary.weight
            tf_to_pt_map[
                'model/sequnece_summary/summary/bias'
            ] = model.sequence_summary.summary.bias
        if (
            hasattr(model, 'logits_proj')
            and config.finetuning_task is not None
            and 'model/regression_{}/logit/kernel'.format(
                config.finetuning_task
            )
            in tf_weights
        ):
            # Task-specific regression head (only present for fine-tuned checkpoints).
            tf_to_pt_map[
                'model/regression_{}/logit/kernel'.format(
                    config.finetuning_task
                )
            ] = model.logits_proj.weight
            tf_to_pt_map[
                'model/regression_{}/logit/bias'.format(config.finetuning_task)
            ] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    # Note the second embedding table ('lookup_table_2' -> word_embedding2),
    # which is specific to this ALXLNet variant.
    tf_to_pt_map.update(
        {
            'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
            'model/transformer/word_embedding/lookup_table_2': model.word_embedding2.weight,
            'model/transformer/mask_emb/mask_emb': model.mask_emb,
        }
    )

    # Transformer blocks
    # The TF prefix 'layer_shared' does not depend on the layer index i, so the
    # same set of TF weights is mapped onto every PyTorch layer (ALBERT-style
    # cross-layer parameter sharing).
    for i, b in enumerate(model.layer):
        layer_str = 'model/transformer/layer_shared/'
        tf_to_pt_map.update(
            {
                layer_str
                + 'rel_attn/LayerNorm/gamma': b.rel_attn.layer_norm.weight,
                layer_str
                + 'rel_attn/LayerNorm/beta': b.rel_attn.layer_norm.bias,
                layer_str + 'rel_attn/o/kernel': b.rel_attn.o,
                layer_str + 'rel_attn/q/kernel': b.rel_attn.q,
                layer_str + 'rel_attn/k/kernel': b.rel_attn.k,
                layer_str + 'rel_attn/r/kernel': b.rel_attn.r,
                layer_str + 'rel_attn/v/kernel': b.rel_attn.v,
                layer_str + 'ff/LayerNorm/gamma': b.ff.layer_norm.weight,
                layer_str + 'ff/LayerNorm/beta': b.ff.layer_norm.bias,
                layer_str + 'ff/layer_1/kernel': b.ff.layer_1.weight,
                layer_str + 'ff/layer_1/bias': b.ff.layer_1.bias,
                layer_str + 'ff/layer_2/kernel': b.ff.layer_2.weight,
                layer_str + 'ff/layer_2/bias': b.ff.layer_2.bias,
            }
        )

    # Relative positioning biases
    if config.untie_r:
        # Untied: one bias set per layer, collected into lists so the TF
        # array's leading dimension can be split across layers at load time.
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        # Tied: a single shared bias set on the transformer itself.
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            'model/transformer/r_r_bias': r_r_list,
            'model/transformer/r_w_bias': r_w_list,
            'model/transformer/r_s_bias': r_s_list,
            'model/transformer/seg_embed': seg_embed_list,
        }
    )
    return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model

    :param model: PyTorch model to populate
    :param config: model configuration (forwarded to the name-map builder)
    :param tf_path: path to the TensorFlow checkpoint
    :return: the same ``model``, with weights copied in-place
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.'
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info('Loading TF weight {} with shape {}'.format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info('Importing {}'.format(name))
        if name not in tf_weights:
            # Mapped parameter has no counterpart in this checkpoint; skip it.
            logger.info(
                '{} not in tf pre-trained weights, skipping'.format(name)
            )
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        # TF stores these dense kernels transposed relative to torch.nn.Linear.
        if 'kernel' in name and (
            'ff' in name or 'summary' in name or 'logit' in name
        ):
            logger.info('Transposing')
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights: the leading dimension of the
            # TF array indexes the per-layer parameters collected in the list.
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    # Attach both shapes to the error to ease debugging.
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info(
                    'Initialize PyTorch weight {} for layer {}'.format(name, i)
                )
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info('Initialize PyTorch weight {}'.format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the consumed weight and its optimizer slots so the summary below
        # only lists what was never copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)
    logger.info(
        'Weights not copied to PyTorch model: {}'.format(
            ', '.join(tf_weights.keys())
        )
    )
    return model
# Mapping from activation-function names (as used in the config) to callables.
ACT2FN = {'gelu': gelu_new, 'relu': torch.nn.functional.relu, 'swish': swish}

# XLNet uses the standard PyTorch LayerNorm implementation.
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
    """Two-stream multi-head attention with relative positional encodings.

    Computes content-stream (h) attention and, when a query stream (g) is
    provided, the additional query-stream attention used during XLNet
    pre-training. Attention scores combine content-, position- and
    segment-based terms (see ``rel_attn_core``).
    """

    def __init__(self, config):
        super().__init__()

        # d_model must split evenly across attention heads.
        if config.d_model % config.n_head != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention '
                'heads (%d)' % (config.d_model, config.n_head)
            )

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        # 1/sqrt(d_head) scaling applied to the raw attention scores.
        self.scale = 1 / (config.d_head ** 0.5)

        # Per-head projections (d_model -> n_head x d_head) for query, key,
        # value, output, and the relative positional encodings (r).
        self.q = nn.Parameter(
            torch.FloatTensor(config.d_model, self.n_head, self.d_head)
        )
        self.k = nn.Parameter(
            torch.FloatTensor(config.d_model, self.n_head, self.d_head)
        )
        self.v = nn.Parameter(
            torch.FloatTensor(config.d_model, self.n_head, self.d_head)
        )
        self.o = nn.Parameter(
            torch.FloatTensor(config.d_model, self.n_head, self.d_head)
        )
        self.r = nn.Parameter(
            torch.FloatTensor(config.d_model, self.n_head, self.d_head)
        )

        # Learned biases added to the query for the position-based (r_r),
        # segment-based (r_s) and content-based (r_w) score terms, plus the
        # two-entry (same/different segment) segment embedding.
        self.r_r_bias = nn.Parameter(
            torch.FloatTensor(self.n_head, self.d_head)
        )
        self.r_s_bias = nn.Parameter(
            torch.FloatTensor(self.n_head, self.d_head)
        )
        self.r_w_bias = nn.Parameter(
            torch.FloatTensor(self.n_head, self.d_head)
        )
        self.seg_embed = nn.Parameter(
            torch.FloatTensor(2, self.n_head, self.d_head)
        )

        self.layer_norm = XLNetLayerNorm(
            config.d_model, eps = config.layer_norm_eps
        )
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        # Head pruning is not supported by this attention implementation.
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen = -1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        # Shift along dim 1 by reshaping, dropping the first row of the
        # transposed view, and reshaping back; then keep the first klen columns.
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(
            x, 1, torch.arange(klen, device = x.device, dtype = torch.long)
        )

        return x

    @staticmethod
    def rel_shift_bnij(x, klen = -1):
        # Same relative shift as ``rel_shift`` but for scores laid out as
        # (batch, n_head, i, j); the shift happens along the last dimension.
        x_size = x.shape

        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        # However, tracing doesn't like the nature of the slice, and if klen changes
        # during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(
            x, 3, torch.arange(klen, device = x.device, dtype = torch.long)
        )
        # x = x[:, :, :, :klen]

        return x

    def rel_attn_core(
        self,
        q_head,
        k_head_h,
        v_head_h,
        k_head_r,
        seg_mat = None,
        attn_mask = None,
        head_mask = None,
        output_attentions = False,
    ):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen = ac.shape[3])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum(
                'ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed
            )
            ef = torch.einsum('ijbs,ibns->bnij', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # Use a smaller additive constant in fp16 to avoid overflow.
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum(
                    'ijbn->bnij', attn_mask
                )
            else:
                attn_score = attn_score - 1e30 * torch.einsum(
                    'ijbn->bnij', attn_mask
                )

        # attention probability
        attn_prob = F.softmax(attn_score, dim = 3)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum('ijbn->bnij', head_mask)

        # attention output
        attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, v_head_h)

        if output_attentions:
            return attn_vec, torch.einsum('bnij->ijbn', attn_prob)

        return attn_vec

    def post_attention(self, h, attn_vec, residual = True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(
        self,
        h,
        g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems = None,
        target_mapping = None,
        head_mask = None,
        output_attentions = False,
    ):
        """Run relative attention on the content stream and, optionally, the query stream.

        :param h: content stream hidden states
        :param g: query stream hidden states, or None for single-stream attention
        :param attn_mask_h: attention mask for the content stream
        :param attn_mask_g: attention mask for the query stream
        :param r: relative positional encodings
        :param seg_mat: segment pairing matrix (or None)
        :param mems: cached hidden states from previous segments, prepended to keys/values
        :param target_mapping: mapping of prediction targets for the query stream
        :param head_mask: optional per-head mask
        :param output_attentions: if True, also return the attention probabilities
        :return: ``(output_h, output_g[, attn_prob])``; ``output_g`` is None in single-stream mode
        """
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim = 0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat = seg_mat,
                attn_mask = attn_mask_h,
                head_mask = head_mask,
                output_attentions = output_attentions,
            )

            if output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            # g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                # Project queries onto the target positions before attention,
                # then map the attended vectors back.
                q_head_g = torch.einsum(
                    'mbnd,mlb->lbnd', q_head_g, target_mapping
                )
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat = seg_mat,
                    attn_mask = attn_mask_g,
                    head_mask = head_mask,
                    output_attentions = output_attentions,
                )

                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum(
                    'lbnd,mlb->mbnd', attn_vec_g, target_mapping
                )
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat = seg_mat,
                    attn_mask = attn_mask_g,
                    head_mask = head_mask,
                    output_attentions = output_attentions,
                )

                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim = 0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat = seg_mat,
                attn_mask = attn_mask_h,
                head_mask = head_mask,
                output_attentions = output_attentions,
            )

            if output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
    """Position-wise feed-forward block of an XLNet layer.

    Expands ``d_model`` to ``d_inner``, applies the configured activation and
    dropout, projects back to ``d_model``, and layer-normalizes the result
    together with a residual connection to the input.
    """

    def __init__(self, config):
        super().__init__()
        self.layer_norm = XLNetLayerNorm(
            config.d_model, eps = config.layer_norm_eps
        )
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        # The activation may be given by name (looked up in ACT2FN) or as a callable.
        act = config.ff_activation
        self.activation_function = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, inp):
        """Apply the feed-forward network with a residual connection.

        :param inp: hidden states of size ``d_model`` in the last dimension
        :return: layer-normalized ``ffn(inp) + inp``
        """
        hidden = self.layer_1(inp)
        hidden = self.activation_function(hidden)
        hidden = self.dropout(hidden)
        hidden = self.layer_2(hidden)
        hidden = self.dropout(hidden)
        # Residual add happens before the layer norm.
        return self.layer_norm(hidden + inp)
class XLNetLayer(nn.Module):
    """One XLNet transformer block: relative attention followed by a shared
    position-wise feed-forward network applied to each active stream."""

    def __init__(self, config):
        super().__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(
        self,
        output_h,
        output_g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems = None,
        target_mapping = None,
        head_mask = None,
        output_attentions = False,
    ):
        """Run two-stream relative attention, then the feed-forward network.

        :return: ``(output_h, output_g[, attentions])`` — the same structure
            produced by :class:`XLNetRelativeAttention`, with the feed-forward
            applied to each non-None stream.
        """
        attn_outputs = self.rel_attn(
            output_h,
            output_g,
            attn_mask_h,
            attn_mask_g,
            r,
            seg_mat,
            mems = mems,
            target_mapping = target_mapping,
            head_mask = head_mask,
            output_attentions = output_attentions,
        )
        h_stream, g_stream = attn_outputs[:2]

        # The feed-forward is applied to the query stream first (when present)
        # and then to the content stream, matching the original evaluation
        # order (relevant for dropout RNG state).
        if g_stream is not None:
            g_stream = self.ff(g_stream)
        h_stream = self.ff(h_stream)

        # Re-attach any attention probabilities appended by rel_attn.
        return (h_stream, g_stream) + attn_outputs[2:]
class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    # Hooks used by PreTrainedModel.from_pretrained(): the configuration class,
    # the TF-checkpoint loader, and the attribute name of the base transformer.
    config_class = XLNetConfig
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = 'transformer'

    def _init_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding weights and the attention parameters are drawn from a
        normal distribution with std ``config.initializer_range``; LayerNorm is
        reset to identity (weight 1, bias 0).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(
                mean = 0.0, std = self.config.initializer_range
            )
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # All projection weights and relative-attention biases get the same
            # normal initialization.
            for param in [
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            ]:
                param.data.normal_(
                    mean = 0.0, std = self.config.initializer_range
                )
        elif isinstance(module, XLNetModel):
            # The mask embedding lives directly on the base model.
            module.mask_emb.data.normal_(
                mean = 0.0, std = self.config.initializer_range
            )
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Shared forward-arguments docstring fragment; `{0}` is filled with the
# expected input shape by ``add_start_docstrings_to_callable``.
# Fix: "You can only uses one of" -> "You can only use one of".
XLNET_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`transformers.BertTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.__call__` for details.
            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
            `What are attention masks? <../glossary.html#attention-mask>`__
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
            given to this model should not be passed as input ids as they have already been computed.
            `use_cache` has to be set to `True` to make use of `mems`.
        perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
            `What are token type IDs? <../glossary.html#token-type-ids>`_
        input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (:obj:`bool`):
            If `use_cache` is True, `mems` are returned and can be used to speed up decoding (see `mems`). Defaults to `True`.
        output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
            If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
    'The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.',
    XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
    """Bare XLNet encoder without any task-specific head.

    NOTE(review): unlike the reference XLNet, the token embedding here is
    factorized into ``vocab_size x 128`` (``word_embedding``) and
    ``128 x d_model`` (``word_embedding2``) matrices -- presumably an
    ALBERT-style parameter reduction; confirm against the training code.
    """

    def __init__(self, config):
        super().__init__(config)
        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer
        # Factorized input embedding (see class docstring).
        self.projection = 128
        self.word_embedding = nn.Embedding(config.vocab_size, self.projection)
        self.word_embedding2 = nn.Embedding(self.projection, config.d_model)
        # Learned embedding used for the query stream at target positions.
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList(
            [XLNetLayer(config) for _ in range(config.n_layer)]
        )
        self.dropout = nn.Dropout(config.dropout)
        self.init_weights()

    def get_input_embeddings(self):
        return self.word_embedding

    def set_input_embeddings(self, new_embeddings):
        self.word_embedding = new_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
        Args:
            qlen: Sequence length
            mlen: Mask length
        ::
                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal = 1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim = 1)
        if self.same_length:
            # Also mask positions further than qlen to the left so every
            # token attends to the same number of positions.
            mask_lo = torch.tril(attn_mask, diagonal = -1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim = 1)
        ret = ret.to(self.device)
        return ret

    def cache_mem(self, curr_out, prev_mem):
        # cache hidden states into memory.
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[: self.reuse_len]
        if prev_mem is None:
            new_mem = curr_out[-self.mem_len :]
        else:
            new_mem = torch.cat([prev_mem, curr_out], dim = 0)[-self.mem_len :]
        # Detached: gradients are not propagated through cached memories.
        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz = None):
        # Sinusoidal embedding of the relative position sequence.
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat(
            [torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim = -1
        )
        pos_emb = pos_emb[:, None, :]
        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)
        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz = None):
        # create relative positional encoding.
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype = torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))
        if self.bi_data:
            # Bidirectional data: concatenate forward and backward position
            # sequences; each covers half the batch when `bsz` is given.
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype = torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype = torch.float)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(
                    fwd_pos_seq, inv_freq, bsz // 2
                )
                bwd_pos_emb = self.positional_embedding(
                    bwd_pos_seq, inv_freq, bsz // 2
                )
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim = 1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
        pos_emb = pos_emb.to(self.device)
        return pos_emb

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
    )
    @add_code_sample_docstrings(
        tokenizer_class = _TOKENIZER_FOR_DOC, checkpoint = 'xlnet-base-cased'
    )
    def forward(
        self,
        input_ids = None,
        attention_mask = None,
        mems = None,
        perm_mask = None,
        target_mapping = None,
        token_type_ids = None,
        input_mask = None,
        head_mask = None,
        inputs_embeds = None,
        use_cache = True,
        output_attentions = None,
        output_hidden_states = None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        """
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                'You cannot specify both input_ids and inputs_embeds at the same time'
            )
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError(
                'You have to specify either input_ids or inputs_embeds'
            )
        token_type_ids = (
            token_type_ids.transpose(0, 1).contiguous()
            if token_type_ids is not None
            else None
        )
        input_mask = (
            input_mask.transpose(0, 1).contiguous()
            if input_mask is not None
            else None
        )
        attention_mask = (
            attention_mask.transpose(0, 1).contiguous()
            if attention_mask is not None
            else None
        )
        perm_mask = (
            perm_mask.permute(1, 2, 0).contiguous()
            if perm_mask is not None
            else None
        )
        target_mapping = (
            target_mapping.permute(1, 2, 0).contiguous()
            if target_mapping is not None
            else None
        )
        mlen = (
            mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        )
        klen = mlen + qlen
        dtype_float = self.dtype
        device = self.device
        # Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError(
                'Unsupported attention type: {}'.format(self.attn_type)
            )
        # data mask: input mask & perm mask
        # BUG FIX: the second half of this message used to be a dangling
        # string literal (a no-op expression statement), so it never reached
        # the AssertionError; it is now part of the assert message. Also
        # fixed the "compatbility" typo.
        assert input_mask is None or attention_mask is None, (
            'You can only use one of input_mask (uses 1 for padding) '
            'or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.'
        )
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None
        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(
                    data_mask
                )
                data_mask = torch.cat([mems_mask, data_mask], dim = 1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]
        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)
        if attn_mask is not None:
            # Content stream may not attend to the position itself;
            # subtracting the identity removes the diagonal.
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            if mlen > 0:
                non_tgt_mask = torch.cat(
                    [torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask],
                    dim = -1,
                )
            non_tgt_mask = (
                (attn_mask + non_tgt_mask[:, :, None, None]) > 0
            ).to(attn_mask)
        else:
            non_tgt_mask = None
        # Word embeddings and prepare h & g hidden states
        if inputs_embeds is not None:
            word_emb_k = inputs_embeds
        else:
            # Factorized embedding lookup: ids -> 128-dim -> d_model.
            output_middle = self.word_embedding(input_ids)
            word_emb_k = torch.matmul(
                output_middle, self.word_embedding2.weight
            )
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            # else:  # We removed the inp_q input which was same as target mapping
            #     inp_q_ext = inp_q[:, :, None]
            #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None
        # Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = torch.zeros(
                    [mlen, bsz], dtype = torch.long, device = device
                )
                cat_ids = torch.cat([mem_pad, token_type_ids], dim = 0)
            else:
                cat_ids = token_type_ids
            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes = 2).to(dtype_float)
        else:
            seg_mat = None
        # Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz = bsz)
        pos_emb = self.dropout(pos_emb)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = (
                    head_mask.unsqueeze(0)
                    .unsqueeze(0)
                    .unsqueeze(0)
                    .unsqueeze(0)
                )
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype = next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer
        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)
        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            if (
                self.mem_len is not None
                and self.mem_len > 0
                and use_cache is True
            ):
                # cache new mems
                new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if output_hidden_states:
                hidden_states.append(
                    (output_h, output_g) if output_g is not None else output_h
                )
            outputs = layer_module(
                output_h,
                output_g,
                attn_mask_h = non_tgt_mask,
                attn_mask_g = attn_mask,
                r = pos_emb,
                seg_mat = seg_mat,
                mems = mems[i],
                target_mapping = target_mapping,
                head_mask = head_mask[i],
                output_attentions = output_attentions,
            )
            output_h, output_g = outputs[:2]
            if output_attentions:
                attentions.append(outputs[2])
        # Add last hidden state
        if output_hidden_states:
            hidden_states.append(
                (output_h, output_g) if output_g is not None else output_h
            )
        output = self.dropout(output_g if output_g is not None else output_h)
        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(),)
        if self.mem_len is not None and self.mem_len > 0 and use_cache is True:
            outputs = outputs + (new_mems,)
        if output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(
                    h.permute(1, 0, 2).contiguous()
                    for hs in hidden_states
                    for h in hs
                )
            else:
                hidden_states = tuple(
                    hs.permute(1, 0, 2).contiguous() for hs in hidden_states
                )
            outputs = outputs + (hidden_states,)
        if output_attentions:
            if target_mapping is not None:
                # when target_mapping is provided, there are 2-tuple of attentions
                attentions = tuple(
                    tuple(
                        att_stream.permute(2, 3, 0, 1).contiguous()
                        for att_stream in t
                    )
                    for t in attentions
                )
            else:
                attentions = tuple(
                    t.permute(2, 3, 0, 1).contiguous() for t in attentions
                )
            outputs = outputs + (attentions,)
        return outputs  # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    """XLNet encoder plus a vocabulary projection for language modeling."""

    def __init__(self, config):
        super().__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length
        self.transformer = XLNetModel(config)
        # NOTE(review): with the factorized embedding in XLNetModel
        # (vocab_size x 128), tying lm_loss (d_model x vocab_size) to the
        # input embedding would be shape-incompatible -- confirm whether
        # weight tying is actually applied for this configuration.
        self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias = True)
        self.init_weights()

    def get_output_embeddings(self):
        return self.lm_loss

    def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
        """Build the (input_ids, perm_mask, target_mapping, mems) kwargs for
        one step of autoregressive generation."""
        # Add dummy token at the end (no attention on this one)
        effective_batch_size = input_ids.shape[0]
        dummy_token = torch.zeros(
            (effective_batch_size, 1),
            dtype = torch.long,
            device = input_ids.device,
        )
        input_ids = torch.cat([input_ids, dummy_token], dim = 1)
        # Build permutation mask so that previous tokens don't see last token
        sequence_length = input_ids.shape[1]
        perm_mask = torch.zeros(
            (effective_batch_size, sequence_length, sequence_length),
            dtype = torch.float,
            device = input_ids.device,
        )
        perm_mask[:, :, -1] = 1.0
        # We'll only predict the last token
        target_mapping = torch.zeros(
            (effective_batch_size, 1, sequence_length),
            dtype = torch.float,
            device = input_ids.device,
        )
        # BUG FIX: was `target_mapping[0, 0, -1] = 1.0`, which only marked
        # the first batch row and left an all-zero mapping for rows 1..B-1,
        # breaking batched generation. Mark the last position in every row,
        # consistent with `perm_mask[:, :, -1]` above.
        target_mapping[:, 0, -1] = 1.0
        inputs = {
            'input_ids': input_ids,
            'perm_mask': perm_mask,
            'target_mapping': target_mapping,
            'use_cache': kwargs['use_cache'],
        }
        # if past is defined in model kwargs then use it for faster decoding
        if past:
            inputs['mems'] = past
        return inputs

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
    )
    def forward(
        self,
        input_ids = None,
        attention_mask = None,
        mems = None,
        perm_mask = None,
        target_mapping = None,
        token_type_ids = None,
        input_mask = None,
        head_mask = None,
        inputs_embeds = None,
        use_cache = True,
        labels = None,
        output_attentions = None,
        output_hidden_states = None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
            Labels for masked language modeling.
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see `prepare_inputs_for_generation` fn and examples below)
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored, the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetLMHeadModel
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
        # We show how to setup inputs to predict a next token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0)  # We will predict the masked token
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0)  # We will predict the masked token
        labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
        assert labels.shape[0] == 1, 'only one word will be predicted'
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
        loss, next_token_logits = outputs[:2]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask = attention_mask,
            mems = mems,
            perm_mask = perm_mask,
            target_mapping = target_mapping,
            token_type_ids = token_type_ids,
            input_mask = input_mask,
            head_mask = head_mask,
            inputs_embeds = inputs_embeds,
            use_cache = use_cache,
            output_attentions = output_attentions,
            output_hidden_states = output_hidden_states,
        )
        logits = self.lm_loss(transformer_outputs[0])
        outputs = (logits,) + transformer_outputs[
            1:
        ]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            # Flatten the tokens; CrossEntropyLoss ignores label -100 by default.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            outputs = (loss,) + outputs
        return (
            outputs
        )  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        # Pools the sequence of hidden states into a single vector.
        self.sequence_summary = SequenceSummary(config)
        # Maps the pooled vector to one score per label.
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
    )
    @add_code_sample_docstrings(
        tokenizer_class = _TOKENIZER_FOR_DOC, checkpoint = 'xlnet-base-cased'
    )
    def forward(
        self,
        input_ids = None,
        attention_mask = None,
        mems = None,
        perm_mask = None,
        target_mapping = None,
        token_type_ids = None,
        input_mask = None,
        head_mask = None,
        inputs_embeds = None,
        use_cache = True,
        labels = None,
        output_attentions = None,
        output_hidden_states = None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`)
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask = attention_mask,
            mems = mems,
            perm_mask = perm_mask,
            target_mapping = target_mapping,
            token_type_ids = token_type_ids,
            input_mask = input_mask,
            head_mask = head_mask,
            inputs_embeds = inputs_embeds,
            use_cache = use_cache,
            output_attentions = output_attentions,
            output_hidden_states = output_hidden_states,
        )
        # Pool the last hidden states and project to label scores.
        summary = self.sequence_summary(transformer_outputs[0])
        logits = self.logits_proj(summary)
        # Keep mems, hidden states and attentions from the base model output.
        outputs = (logits,) + transformer_outputs[1:]
        if labels is not None:
            if self.num_labels == 1:
                # Single label => regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                # Multiple labels => classification with cross-entropy.
                loss = CrossEntropyLoss()(
                    logits.view(-1, self.num_labels), labels.view(-1)
                )
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        """Build the token-classification head on top of the bare XLNet encoder.

        Args:
            config: model configuration; ``num_labels`` and ``hidden_size``
                determine the classifier shape.
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        # Per-token linear classifier mapping hidden states to label logits.
        # NOTE(review): uses config.hidden_size while sibling heads use
        # config.d_model -- presumably aliases of the same value; confirm.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
@add_start_docstrings_to_callable(
XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
)
@add_code_sample_docstrings(
tokenizer_class = _TOKENIZER_FOR_DOC, checkpoint = 'xlnet-base-cased'
)
def forward(
self,
input_ids = None,
attention_mask = None,
mems = None,
perm_mask = None,
target_mapping = None,
token_type_ids = None,
input_mask = None,
head_mask = None,
inputs_embeds = None,
use_cache = True,
labels = None,
output_attentions = None,
output_hidden_states = None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.transformer(
input_ids,
attention_mask = attention_mask,
mems = mems,
perm_mask = perm_mask,
target_mapping = target_mapping,
token_type_ids = token_type_ids,
input_mask = input_mask,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
use_cache = use_cache,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[
1:
] # Keep mems, hidden states, attentions if there are in it
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss,
labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1)
)
outputs = (loss,) + outputs
return (
outputs
) # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLNetModel(config)
        # Pools the per-token hidden states into one summary vector per sequence.
        self.sequence_summary = SequenceSummary(config)
        # One scalar score per (example, choice) pair.
        self.logits_proj = nn.Linear(config.d_model, 1)
        self.init_weights()

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format(
            '(batch_size, num_choices, sequence_length)'
        )
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint='xlnet-base-cased'
    )
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        input_mask=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=True,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor`` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        """
        num_choices = (
            input_ids.shape[1]
            if input_ids is not None
            else inputs_embeds.shape[1]
        )
        # Flatten the choice dimension: every (example, choice) pair becomes
        # its own row, so the transformer runs on
        # (batch_size * num_choices, sequence_length) shaped inputs.
        flat_input_ids = (
            input_ids.view(-1, input_ids.size(-1))
            if input_ids is not None
            else None
        )
        flat_token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1))
            if token_type_ids is not None
            else None
        )
        flat_attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1))
            if attention_mask is not None
            else None
        )
        flat_input_mask = (
            input_mask.view(-1, input_mask.size(-1))
            if input_mask is not None
            else None
        )
        flat_inputs_embeds = (
            inputs_embeds.view(
                -1, inputs_embeds.size(-2), inputs_embeds.size(-1)
            )
            if inputs_embeds is not None
            else None
        )
        transformer_outputs = self.transformer(
            flat_input_ids,
            token_type_ids=flat_token_type_ids,
            input_mask=flat_input_mask,
            attention_mask=flat_attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        output = transformer_outputs[0]
        output = self.sequence_summary(output)
        logits = self.logits_proj(output)
        # Un-flatten: one score column per choice.
        reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + transformer_outputs[
            1:
        ]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels.view(-1))
            outputs = (loss,) + outputs
        return (
            outputs
        )  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        # Projects each token's hidden state to two scores: span start / span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint='xlnet-base-cased'
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=True,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        """
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the 2-channel projection into separate start/end score tensors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamping to sequence_length and then passing it as ignore_index
            # makes CrossEntropyLoss skip those samples entirely)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return (
            outputs
        )  # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Beam widths used by the inference-time span search.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top
        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        # Predicts whether the question is answerable (SQuAD 2.0 style).
        self.answer_class = PoolerAnswerClass(config)
        self.init_weights()

    @add_start_docstrings_to_callable(
        XLNET_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=True,
        start_positions=None,
        end_positions=None,
        is_impossible=None,
        cls_index=None,
        p_mask=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means token should be masked. 0.0 mean token is not masked.
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Example::
        >>> from transformers import XLNetTokenizer, XLNetForQuestionAnswering
        >>> import torch
        >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        >>> model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        >>> start_positions = torch.tensor([1])
        >>> end_positions = torch.tensor([3])
        >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[
            1:
        ]  # Keep mems, hidden states, attentions if there are in it
        if start_positions is not None and end_positions is not None:
            # --- Training branch: labels available, compute losses. ---
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(
                hidden_states,
                start_positions=start_positions,
                p_mask=p_mask,
            )
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(
                    hidden_states,
                    start_positions=start_positions,
                    cls_index=cls_index,
                )
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # --- Inference branch: no labels, run a two-stage beam search
            # over start positions (top start_n_top) and, for each of those
            # candidate starts, over end positions (top end_n_top). ---
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(
                start_logits, dim=-1
            )  # shape (bsz, slen)
            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(
                -1, -1, hsz
            )  # shape (bsz, start_n_top, hsz)
            # Gather the hidden states of the candidate start tokens.
            start_states = torch.gather(
                hidden_states, -2, start_top_index_exp
            )  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(
                -1, slen, -1, -1
            )  # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(
                hidden_states_expanded,
                start_states=start_states,
                p_mask=p_mask,
            )
            end_log_probs = F.softmax(
                end_logits, dim=1
            )  # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(
                -1, self.start_n_top * self.end_n_top
            )
            end_top_index = end_top_index.view(
                -1, self.start_n_top * self.end_n_top
            )
            start_states = torch.einsum(
                'blh,bl->bh', hidden_states, start_log_probs
            )  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(
                hidden_states,
                start_states=start_states,
                cls_index=cls_index,
            )  # Shape (batch size,): one single `cls_logits` for each sample
            outputs = (
                start_top_log_probs,
                start_top_index,
                end_top_log_probs,
                end_top_index,
                cls_logits,
            ) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
| 42.462798 | 304 | 0.604322 |
ace4f3b4b53799c9ce0f90bca1f0084fb7597799 | 63,562 | py | Python | python/ccxt/async_support/exmo.py | testtas9812/ccxt | 61bc084a913cc066dc34bdfd8e44f896dd04b337 | [
"MIT"
] | null | null | null | python/ccxt/async_support/exmo.py | testtas9812/ccxt | 61bc084a913cc066dc34bdfd8e44f896dd04b337 | [
"MIT"
] | null | null | null | python/ccxt/async_support/exmo.py | testtas9812/ccxt | 61bc084a913cc066dc34bdfd8e44f896dd04b337 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class exmo(Exchange):
    def describe(self):
        """Return the static description of the EXMO exchange.

        Merges EXMO-specific capabilities, endpoints, fee schedule and
        error-code mappings over the defaults from the base Exchange class.
        """
        return self.deep_extend(super(exmo, self).describe(), {
            'id': 'exmo',
            'name': 'EXMO',
            'countries': ['LT'],  # Lithuania
            'rateLimit': 350,  # once every 350 ms ≈ 180 requests per minute ≈ 3 requests per second
            'version': 'v1.1',
            # Unified-API capability flags.
            'has': {
                'cancelOrder': True,
                'CORS': None,
                'createOrder': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchFundingFees': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': 'emulated',
                'fetchOrderBook': True,
                'fetchOrderBooks': True,
                'fetchOrderTrades': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
                'fetchTradingFees': True,
                'fetchTransactions': True,
                'fetchWithdrawals': True,
                'withdraw': True,
            },
            # Mapping of unified timeframe codes to EXMO candle resolutions.
            'timeframes': {
                '1m': '1',
                '5m': '5',
                '15m': '15',
                '30m': '30',
                '45m': '45',
                '1h': '60',
                '2h': '120',
                '3h': '180',
                '4h': '240',
                '1d': 'D',
                '1w': 'W',
                '1M': 'M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766491-1b0ea956-5eda-11e7-9225-40d67b481b8d.jpg',
                'api': {
                    'public': 'https://api.exmo.com',
                    'private': 'https://api.exmo.com',
                    'web': 'https://exmo.me',
                },
                'www': 'https://exmo.me',
                'referral': 'https://exmo.me/?ref=131685',
                'doc': [
                    'https://exmo.me/en/api_doc?ref=131685',
                    'https://github.com/exmo-dev/exmo_api_lib/tree/master/nodejs',
                ],
                'fees': 'https://exmo.com/en/docs/fees',
            },
            # REST endpoints grouped by access level.
            'api': {
                'web': {
                    'get': [
                        'ctrl/feesAndLimits',
                        'en/docs/fees',
                    ],
                },
                'public': {
                    'get': [
                        'currency',
                        'currency/list/extended',
                        'order_book',
                        'pair_settings',
                        'ticker',
                        'trades',
                        'candles_history',
                        'required_amount',
                        'payments/providers/crypto/list',
                    ],
                },
                'private': {
                    'post': [
                        'user_info',
                        'order_create',
                        'order_cancel',
                        'stop_market_order_create',
                        'stop_market_order_cancel',
                        'user_open_orders',
                        'user_trades',
                        'user_cancelled_orders',
                        'order_trades',
                        'deposit_address',
                        'withdraw_crypt',
                        'withdraw_get_txid',
                        'excode_create',
                        'excode_load',
                        'code_check',
                        'wallet_history',
                        'wallet_operations',
                        'margin/user/order/create',
                        'margin/user/order/update',
                        'margin/user/order/cancel',
                        'margin/user/position/close',
                        'margin/user/position/margin_add',
                        'margin/user/position/margin_remove',
                        'margin/currency/list',
                        'margin/pair/list',
                        'margin/settings',
                        'margin/funding/list',
                        'margin/user/info',
                        'margin/user/order/list',
                        'margin/user/order/history',
                        'margin/user/order/trades',
                        'margin/user/order/max_quantity',
                        'margin/user/position/list',
                        'margin/user/position/margin_remove_info',
                        'margin/user/position/margin_add_info',
                        'margin/user/wallet/list',
                        'margin/user/wallet/history',
                        'margin/user/trade/list',
                        'margin/trades',
                        'margin/liquidation/feed',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'feeSide': 'get',
                    'tierBased': False,
                    'percentage': True,
                    'maker': self.parse_number('0.002'),
                    'taker': self.parse_number('0.002'),
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,  # fixed funding fees for crypto, see fetchFundingFees below
                },
            },
            'options': {
                # Unified network code -> EXMO network suffix.
                'networks': {
                    'ETH': 'ERC20',
                    'TRX': 'TRC20',
                },
            },
            # EXMO error codes/messages mapped to unified ccxt exception classes.
            'exceptions': {
                'exact': {
                    '40005': AuthenticationError,  # Authorization error, incorrect signature
                    '40009': InvalidNonce,  #
                    '40015': ExchangeError,  # API function do not exist
                    '40016': OnMaintenance,  # {"result":false,"error":"Error 40016: Maintenance work in progress"}
                    '40017': AuthenticationError,  # Wrong API Key
                    '40032': PermissionDenied,  # {"result":false,"error":"Error 40032: Access is denied for self API key"}
                    '40033': PermissionDenied,  # {"result":false,"error":"Error 40033: Access is denied, self resources are temporarily blocked to user"}
                    '40034': RateLimitExceeded,  # {"result":false,"error":"Error 40034: Access is denied, rate limit is exceeded"}
                    '50052': InsufficientFunds,
                    '50054': InsufficientFunds,
                    '50304': OrderNotFound,  # "Order was not found '123456789'"(fetching order trades for an order that does not have trades yet)
                    '50173': OrderNotFound,  # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
                    '50277': InvalidOrder,
                    '50319': InvalidOrder,  # Price by order is less than permissible minimum for self pair
                    '50321': InvalidOrder,  # Price by order is more than permissible maximum for self pair
                    '50381': InvalidOrder,  # {"result":false,"error":"Error 50381: More than 2 decimal places are not permitted for pair BTC_USD"}
                },
                'broad': {
                    'range period is too long': BadRequest,
                    'invalid syntax': BadRequest,
                    'API rate limit exceeded': RateLimitExceeded,  # {"result":false,"error":"API rate limit exceeded for x.x.x.x. Retry after 60 sec.","history":[],"begin":1579392000,"end":1579478400}
                },
            },
        })
async def fetch_trading_fees(self, params={}):
return {
'maker': self.fees['trading']['maker'],
'taker': self.fees['trading']['taker'],
}
def parse_fixed_float_value(self, input):
if (input is None) or (input == '-'):
return None
if input == '':
return 0
isPercentage = (input.find('%') >= 0)
parts = input.split(' ')
value = parts[0].replace('%', '')
result = float(value)
if (result > 0) and isPercentage:
raise ExchangeError(self.id + ' parseFixedFloatValue detected an unsupported non-zero percentage-based fee ' + input)
return result
async def fetch_funding_fees(self, params={}):
await self.load_markets()
currencyList = await self.publicGetCurrencyListExtended(params)
#
# [
# {"name":"VLX","description":"Velas"},
# {"name":"RUB","description":"Russian Ruble"},
# {"name":"BTC","description":"Bitcoin"},
# {"name":"USD","description":"US Dollar"}
# ]
#
cryptoList = await self.publicGetPaymentsProvidersCryptoList(params)
#
# {
# "BTC":[
# {"type":"deposit", "name":"BTC", "currency_name":"BTC", "min":"0.001", "max":"0", "enabled":true,"comment":"Minimum deposit amount is 0.001 BTC. We do not support BSC and BEP20 network, please consider self when sending funds", "commission_desc":"0%", "currency_confirmations":1},
# {"type":"withdraw", "name":"BTC", "currency_name":"BTC", "min":"0.001", "max":"350", "enabled":true,"comment":"Do not withdraw directly to the Crowdfunding or ICO address as your account will not be credited with tokens from such sales.", "commission_desc":"0.0005 BTC", "currency_confirmations":6}
# ],
# "ETH":[
# {"type":"withdraw", "name":"ETH", "currency_name":"ETH", "min":"0.01", "max":"500", "enabled":true,"comment":"Do not withdraw directly to the Crowdfunding or ICO address as your account will not be credited with tokens from such sales.", "commission_desc":"0.004 ETH", "currency_confirmations":4},
# {"type":"deposit", "name":"ETH", "currency_name":"ETH", "min":"0.01", "max":"0", "enabled":true,"comment":"Minimum deposit amount is 0.01 ETH. We do not support BSC and BEP20 network, please consider self when sending funds", "commission_desc":"0%", "currency_confirmations":1}
# ],
# "USDT":[
# {"type":"deposit", "name":"USDT(OMNI)", "currency_name":"USDT", "min":"10", "max":"0", "enabled":false,"comment":"Minimum deposit amount is 10 USDT", "commission_desc":"0%", "currency_confirmations":2},
# {"type":"withdraw", "name":"USDT(OMNI)", "currency_name":"USDT", "min":"10", "max":"100000", "enabled":false,"comment":"Do not withdraw directly to the Crowdfunding or ICO address as your account will not be credited with tokens from such sales.", "commission_desc":"5 USDT", "currency_confirmations":6},
# {"type":"deposit", "name":"USDT(ERC20)", "currency_name":"USDT", "min":"10", "max":"0", "enabled":true,"comment":"Minimum deposit amount is 10 USDT", "commission_desc":"0%", "currency_confirmations":2},
# {
# "type":"withdraw",
# "name":"USDT(ERC20)",
# "currency_name":"USDT",
# "min":"55",
# "max":"200000",
# "enabled":true,
# "comment":"Caution! Do not withdraw directly to a crowdfund or ICO address, as your account will not be credited with tokens from such sales. Recommendation: Due to the high load of ERC20 network, using TRC20 address for withdrawal is recommended.",
# "commission_desc":"10 USDT",
# "currency_confirmations":6
# },
# {"type":"deposit", "name":"USDT(TRC20)", "currency_name":"USDT", "min":"10", "max":"100000", "enabled":true,"comment":"Minimum deposit amount is 10 USDT. Only TRON main network supported", "commission_desc":"0%", "currency_confirmations":2},
# {"type":"withdraw", "name":"USDT(TRC20)", "currency_name":"USDT", "min":"10", "max":"150000", "enabled":true,"comment":"Caution! Do not withdraw directly to a crowdfund or ICO address, as your account will not be credited with tokens from such sales. Only TRON main network supported.", "commission_desc":"1 USDT", "currency_confirmations":6}
# ],
# "XLM":[
# {"type":"deposit", "name":"XLM", "currency_name":"XLM", "min":"1", "max":"1000000", "enabled":true,"comment":"Attention! A deposit without memo(invoice) will not be credited. Minimum deposit amount is 1 XLM. We do not support BSC and BEP20 network, please consider self when sending funds", "commission_desc":"0%", "currency_confirmations":1},
# {"type":"withdraw", "name":"XLM", "currency_name":"XLM", "min":"21", "max":"1000000", "enabled":true,"comment":"Caution! Do not withdraw directly to a crowdfund or ICO address, as your account will not be credited with tokens from such sales.", "commission_desc":"0.01 XLM", "currency_confirmations":1}
# ],
# }
#
result = {
'info': cryptoList,
'withdraw': {},
'deposit': {},
}
for i in range(0, len(currencyList)):
currency = currencyList[i]
currencyId = self.safe_string(currency, 'name')
code = self.safe_currency_code(currencyId)
providers = self.safe_value(cryptoList, currencyId, [])
for j in range(0, len(providers)):
provider = providers[j]
type = self.safe_string(provider, 'type')
commissionDesc = self.safe_string(provider, 'commission_desc')
newFee = self.parse_fixed_float_value(commissionDesc)
previousFee = self.safe_number(result[type], code)
if (previousFee is None) or ((newFee is not None) and (newFee < previousFee)):
result[type][code] = newFee
# cache them for later use
self.options['fundingFees'] = result
return result
async def fetch_currencies(self, params={}):
    """Fetch all currencies supported by the exchange.

    Merges the public extended currency list(id + description) with the
    crypto payment-providers list to derive per-currency activity flags,
    deposit/withdraw limits and the fixed withdrawal fee.

    :param dict params: extra parameters passed through to both endpoints
    :returns dict: unified currency structures keyed by currency code
    """
    currencyList = await self.publicGetCurrencyListExtended(params)
    #
    # [{"name":"BTC","description":"Bitcoin"}, {"name":"USD","description":"US Dollar"}, ...]
    #
    cryptoList = await self.publicGetPaymentsProvidersCryptoList(params)
    #
    # {"BTC":[{"type":"deposit"|"withdraw","name":...,"currency_name":...,"min":...,"max":...,"enabled":true,"comment":...,"commission_desc":...,"currency_confirmations":...}, ...], ...}
    #
    result = {}
    for k in range(0, len(currencyList)):
        entry = currencyList[k]
        currencyId = self.safe_string(entry, 'name')
        name = self.safe_string(entry, 'description')
        providers = self.safe_value(cryptoList, currencyId)
        active = False
        type = 'crypto'
        limits = {
            'deposit': {
                'min': None,
                'max': None,
            },
            'withdraw': {
                'min': None,
                'max': None,
            },
        }
        fee = None
        depositEnabled = None
        withdrawEnabled = None
        if providers is None:
            # no crypto providers -> fiat currency, considered active
            active = True
            type = 'fiat'
        else:
            for p in range(0, len(providers)):
                provider = providers[p]
                type = self.safe_string(provider, 'type')
                minValue = self.safe_number(provider, 'min')
                maxValue = self.safe_number(provider, 'max')
                if maxValue == 0.0:
                    # the exchange reports 0 to mean "no maximum"
                    maxValue = None
                activeProvider = self.safe_value(provider, 'enabled')
                if type == 'deposit':
                    if activeProvider and not depositEnabled:
                        depositEnabled = True
                    elif not activeProvider:
                        depositEnabled = False
                elif type == 'withdraw':
                    if activeProvider and not withdrawEnabled:
                        withdrawEnabled = True
                    elif not activeProvider:
                        withdrawEnabled = False
                if activeProvider:
                    active = True
                    # remember the limits(and withdrawal fee) of the provider
                    # with the smallest minimum seen so far
                    if (limits[type]['min'] is None) or (minValue < limits[type]['min']):
                        limits[type]['min'] = minValue
                        limits[type]['max'] = maxValue
                        if type == 'withdraw':
                            commissionDesc = self.safe_string(provider, 'commission_desc')
                            fee = self.parse_fixed_float_value(commissionDesc)
        code = self.safe_currency_code(currencyId)
        result[code] = {
            'id': currencyId,
            'code': code,
            'name': name,
            'type': type,
            'active': active,
            'deposit': depositEnabled,
            'withdraw': withdrawEnabled,
            'fee': fee,
            'precision': 8,
            'limits': limits,
            'info': providers,
        }
    return result
async def fetch_markets(self, params={}):
    """Fetch all trading pairs and their settings from the exchange.

    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified market structures(spot only)
    """
    response = await self.publicGetPairSettings(params)
    #
    # {
    #     "BTC_USD":{
    #         "min_quantity":"0.0001",
    #         "max_quantity":"1000",
    #         "min_price":"1",
    #         "max_price":"30000",
    #         "max_amount":"500000",
    #         "min_amount":"1",
    #         "price_precision":8,
    #         "commission_taker_percent":"0.4",
    #         "commission_maker_percent":"0.4"
    #     },
    # }
    #
    keys = list(response.keys())
    result = []
    for i in range(0, len(keys)):
        id = keys[i]
        market = response[id]
        # market ids look like "BTC_USD" -> unified symbol "BTC/USD"
        symbol = id.replace('_', '/')
        baseId, quoteId = symbol.split('/')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        # commissions are reported in percent; convert to fractions
        takerString = self.safe_string(market, 'commission_taker_percent')
        makerString = self.safe_string(market, 'commission_maker_percent')
        taker = self.parse_number(Precise.string_div(takerString, '100'))
        maker = self.parse_number(Precise.string_div(makerString, '100'))
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'type': 'spot',
            'spot': True,
            'margin': False,
            'future': False,
            'swap': False,
            'option': False,
            'optionType': None,
            'strike': None,
            'linear': None,
            'inverse': None,
            'contract': False,
            'contractSize': None,
            'settle': None,
            'settleId': None,
            'expiry': None,
            'expiryDatetime': None,
            'active': True,
            'taker': taker,
            'maker': maker,
            'limits': {
                'amount': {
                    'min': self.safe_number(market, 'min_quantity'),
                    'max': self.safe_number(market, 'max_quantity'),
                },
                'price': {
                    'min': self.safe_number(market, 'min_price'),
                    'max': self.safe_number(market, 'max_price'),
                },
                'cost': {
                    'min': self.safe_number(market, 'min_amount'),
                    'max': self.safe_number(market, 'max_amount'),
                },
            },
            'precision': {
                'amount': 8,
                'price': self.safe_integer(market, 'price_precision'),
            },
            'info': market,
        })
    return result
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch OHLCV candles for a market.

    The endpoint requires an explicit [from, to] window in seconds, so
    either `since` or `limit` must be supplied to derive it.

    :param str symbol: unified market symbol
    :param str timeframe: unified timeframe key(must exist in self.timeframes)
    :param int since: start of the window, milliseconds
    :param int limit: maximum number of candles(capped by options['fetchOHLCV']['maxLimit'])
    :param dict params: extra parameters passed through to the endpoint
    :raises ArgumentsRequired: if both since and limit are missing
    :raises BadRequest: if limit exceeds the maximum the endpoint serves
    :returns list: a list of [timestamp, open, high, low, close, volume]
    """
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        'resolution': self.timeframes[timeframe],
    }
    options = self.safe_value(self.options, 'fetchOHLCV')
    maxLimit = self.safe_integer(options, 'maxLimit', 3000)
    duration = self.parse_timeframe(timeframe)  # timeframe length in seconds
    now = self.milliseconds()
    if since is None:
        if limit is None:
            raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
        else:
            if limit > maxLimit:
                raise BadRequest(self.id + ' fetchOHLCV will serve ' + str(maxLimit) + ' candles at most')
            # window ending now, wide enough for `limit` candles(-1s to be inclusive)
            request['from'] = int(now / 1000) - limit * duration - 1
            request['to'] = int(now / 1000)
    else:
        # -1s so the candle at `since` itself is included
        request['from'] = int(since / 1000) - 1
        if limit is None:
            request['to'] = int(now / 1000)
        else:
            if limit > maxLimit:
                raise BadRequest(self.id + ' fetchOHLCV will serve ' + str(maxLimit) + ' candles at most')
            to = self.sum(since, limit * duration * 1000)
            request['to'] = int(to / 1000)
    response = await self.publicGetCandlesHistory(self.extend(request, params))
    #
    # {
    #     "candles":[
    #         {"t":1584057600000,"o":0.02235144,"c":0.02400233,"h":0.025171,"l":0.02221,"v":5988.34031761},
    #         {"t":1584144000000,"o":0.0240373,"c":0.02367413,"h":0.024399,"l":0.0235,"v":2027.82522329},
    #         {"t":1584230400000,"o":0.02363458,"c":0.02319242,"h":0.0237948,"l":0.02223196,"v":1707.96944997},
    #     ]
    # }
    #
    candles = self.safe_value(response, 'candles', [])
    return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
    """Convert a raw exchange candle into the unified OHLCV list.

    Raw shape: {"t": ms timestamp, "o": open, "c": close, "h": high, "l": low, "v": volume}

    :returns list: [timestamp, open, high, low, close, volume]
    """
    timestamp = self.safe_integer(ohlcv, 't')
    openPrice = self.safe_number(ohlcv, 'o')
    highPrice = self.safe_number(ohlcv, 'h')
    lowPrice = self.safe_number(ohlcv, 'l')
    closePrice = self.safe_number(ohlcv, 'c')
    volume = self.safe_number(ohlcv, 'v')
    return [timestamp, openPrice, highPrice, lowPrice, closePrice, volume]
def parse_balance(self, response):
    """Parse the raw userInfo payload into a unified balance structure.

    The exchange reports available funds under 'balances' and funds
    locked in open orders under 'reserved'.  The previous version only
    iterated the 'balances' keys, silently dropping any currency that
    appeared under 'reserved' alone; this version walks the union of
    both key sets so reserved-only balances are preserved.

    :param dict response: raw privatePostUserInfo response
    :returns dict: a unified balance structure
    """
    result = {'info': response}
    free = self.safe_value(response, 'balances', {})
    used = self.safe_value(response, 'reserved', {})
    # union of both key sets, preserving first-seen order
    currencyIds = list(free.keys())
    for reservedId in used:
        if reservedId not in free:
            currencyIds.append(reservedId)
    for i in range(0, len(currencyIds)):
        currencyId = currencyIds[i]
        code = self.safe_currency_code(currencyId)
        account = self.account()
        if currencyId in free:
            account['free'] = self.safe_string(free, currencyId)
        if currencyId in used:
            account['used'] = self.safe_string(used, currencyId)
        result[code] = account
    return self.safe_balance(result)
async def fetch_balance(self, params={}):
    """Query the account balance.

    Raw response shape:
    {"uid":131685, "server_date":1628999600, "balances":{"EXM":"0","USD":"0",...}, ...}

    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a unified balance structure
    """
    await self.load_markets()
    response = await self.privatePostUserInfo(params)
    return self.parse_balance(response)
async def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for a single market.

    :param str symbol: unified market symbol
    :param int limit: maximum number of levels per side
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a unified order book structure
    """
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'pair': market['id'],
    }
    if limit is not None:
        request['limit'] = limit
    response = await self.publicGetOrderBook(self.extend(request, params))
    # the response is keyed by market id
    orderbook = self.safe_value(response, market['id'])
    return self.parse_order_book(orderbook, symbol, None, 'bid', 'ask')
async def fetch_order_books(self, symbols=None, limit=None, params={}):
    """Fetch order books for several(or all) markets in one request.

    :param list symbols: unified symbols to fetch; None means all markets
    :param int limit: maximum number of levels per side
    :param dict params: extra parameters passed through to the endpoint
    :raises ExchangeError: if all market ids together would exceed the URL length limit
    :returns dict: order book structures keyed by symbol
    """
    await self.load_markets()
    ids = None
    if symbols is None:
        ids = ','.join(self.ids)
        # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
        if len(ids) > 2048:
            numIds = len(self.ids)
            raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
    else:
        ids = self.market_ids(symbols)
        ids = ','.join(ids)
    request = {
        'pair': ids,
    }
    if limit is not None:
        request['limit'] = limit
    response = await self.publicGetOrderBook(self.extend(request, params))
    result = {}
    marketIds = list(response.keys())
    for i in range(0, len(marketIds)):
        marketId = marketIds[i]
        # fall back to the raw market id when the market is unknown
        symbol = marketId
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
        result[symbol] = self.parse_order_book(response[marketId], symbol, None, 'bid', 'ask')
    return result
def parse_ticker(self, ticker, market=None):
    """Parse a raw ticker into a unified ticker structure.

    :param dict ticker: raw ticker as returned by the public ticker endpoint
    :param dict market: unified market the ticker belongs to(optional)
    :returns dict: a unified ticker structure
    """
    #
    # {
    #     "buy_price":"0.00002996",
    #     "sell_price":"0.00003002",
    #     "last_trade":"0.00002992",
    #     "high":"0.00003028",
    #     "low":"0.00002935",
    #     "avg":"0.00002963",
    #     "vol":"1196546.3163222",
    #     "vol_curr":"35.80066578",
    #     "updated":1642291733
    # }
    #
    # 'updated' is a unix timestamp in seconds
    timestamp = self.safe_timestamp(ticker, 'updated')
    market = self.safe_market(None, market)
    last = self.safe_string(ticker, 'last_trade')
    return self.safe_ticker({
        'symbol': market['symbol'],
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_string(ticker, 'high'),
        'low': self.safe_string(ticker, 'low'),
        'bid': self.safe_string(ticker, 'buy_price'),
        'bidVolume': None,
        'ask': self.safe_string(ticker, 'sell_price'),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': self.safe_string(ticker, 'avg'),
        'baseVolume': self.safe_string(ticker, 'vol'),
        'quoteVolume': self.safe_string(ticker, 'vol_curr'),
        'info': ticker,
    }, market, False)
async def fetch_tickers(self, symbols=None, params={}):
    """Fetch price tickers for all markets, optionally filtered by symbols.

    The endpoint returns a dictionary keyed by market id, e.g.
    {"ADA_BTC": {"buy_price":..., "sell_price":..., "last_trade":..., ...}, ...}

    :param list symbols: unified symbols to keep; None keeps everything
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: unified tickers keyed by symbol
    """
    await self.load_markets()
    response = await self.publicGetTicker(params)
    result = {}
    ids = list(response.keys())
    for i in range(0, len(ids)):
        marketId = ids[i]
        market = self.safe_market(marketId, None, '_')
        symbol = market['symbol']
        rawTicker = self.safe_value(response, marketId)
        result[symbol] = self.parse_ticker(rawTicker, market)
    return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
    """Fetch the price ticker for a single market.

    The public ticker endpoint always returns all markets; the one
    requested is picked out of the response by its market id.

    :param str symbol: unified market symbol
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a unified ticker structure
    """
    await self.load_markets()
    market = self.market(symbol)
    response = await self.publicGetTicker(params)
    ticker = response[market['id']]
    return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
    """Parse a raw public or private trade into a unified trade structure.

    :param dict trade: raw trade(see example payloads below)
    :param dict market: unified market the trade belongs to(optional)
    :returns dict: a unified trade structure
    """
    #
    # fetchTrades(public)
    #
    #     {
    #         "trade_id":165087520,
    #         "date":1587470005,
    #         "type":"buy",
    #         "quantity":"1.004",
    #         "price":"0.02491461",
    #         "amount":"0.02501426"
    #     },
    #
    # fetchMyTrades, fetchOrderTrades
    #
    #     {
    #         "trade_id": 3,
    #         "date": 1435488248,
    #         "type": "buy",
    #         "pair": "BTC_USD",
    #         "order_id": 12345,
    #         "quantity": 1,
    #         "price": 100,
    #         "amount": 100,
    #         "exec_type": "taker",
    #         "commission_amount": "0.02",
    #         "commission_currency": "BTC",
    #         "commission_percent": "0.2"
    #     }
    #
    # 'date' is a unix timestamp in seconds
    timestamp = self.safe_timestamp(trade, 'date')
    symbol = None
    id = self.safe_string(trade, 'trade_id')
    orderId = self.safe_string(trade, 'order_id')
    priceString = self.safe_string(trade, 'price')
    amountString = self.safe_string(trade, 'quantity')
    costString = self.safe_string(trade, 'amount')
    # 'type' holds the side here('buy'/'sell'), not the order type
    side = self.safe_string(trade, 'type')
    type = None
    marketId = self.safe_string(trade, 'pair')
    if marketId is not None:
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        else:
            # unknown market id - reconstruct the symbol from "BASE_QUOTE"
            baseId, quoteId = marketId.split('_')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
    if (symbol is None) and (market is not None):
        symbol = market['symbol']
    takerOrMaker = self.safe_string(trade, 'exec_type')
    fee = None
    feeCostString = self.safe_string(trade, 'commission_amount')
    if feeCostString is not None:
        feeCurrencyId = self.safe_string(trade, 'commission_currency')
        feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
        feeRateString = self.safe_string(trade, 'commission_percent')
        if feeRateString is not None:
            # NOTE(review): divisor is '1000', not '100', although the field
            # is named commission_percent - looks intentional but worth
            # confirming against the exchange docs before changing
            feeRateString = Precise.string_div(feeRateString, '1000', 18)
        fee = {
            'cost': feeCostString,
            'currency': feeCurrencyCode,
            'rate': feeRateString,
        }
    return self.safe_trade({
        'id': id,
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'order': orderId,
        'type': type,
        'side': side,
        'takerOrMaker': takerOrMaker,
        'price': priceString,
        'amount': amountString,
        'cost': costString,
        'fee': fee,
    }, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades for a market.

    The endpoint responds with trades keyed by market id, e.g.
    {"ETH_BTC": [{"trade_id":..., "date":..., "type":"buy", "quantity":..., "price":..., "amount":...}, ...]}

    :param str symbol: unified market symbol
    :param int since: earliest timestamp in milliseconds(filtered client-side)
    :param int limit: maximum number of trades(filtered client-side)
    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified trade structures
    """
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'pair': market['id'],
    }
    response = await self.publicGetTrades(self.extend(request, params))
    rawTrades = self.safe_value(response, market['id'], [])
    return self.parse_trades(rawTrades, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's own trades.

    :param str|list symbol: a single unified symbol or a non-empty list of symbols(required)
    :param int since: earliest timestamp in milliseconds(filtered client-side)
    :param int limit: maximum number of trades
    :param dict params: extra parameters passed through to the endpoint
    :raises ArgumentsRequired: if symbol is None or an empty list
    :returns list: unified trade structures across all requested markets
    """
    # a symbol is required but it can be a single string, or a non-empty array
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument(a single symbol or an array)')
    await self.load_markets()
    pair = None
    market = None
    if isinstance(symbol, list):
        numSymbols = len(symbol)
        if numSymbols < 1:
            raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a non-empty symbol array')
        marketIds = self.market_ids(symbol)
        pair = ','.join(marketIds)
    else:
        market = self.market(symbol)
        pair = market['id']
    request = {
        'pair': pair,
    }
    if limit is not None:
        request['limit'] = limit
    response = await self.privatePostUserTrades(self.extend(request, params))
    # response is a dictionary of trade lists keyed by market id
    result = []
    marketIds = list(response.keys())
    for i in range(0, len(marketIds)):
        marketId = marketIds[i]
        symbol = None
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
        else:
            # unknown market id - reconstruct the symbol from "BASE_QUOTE"
            baseId, quoteId = marketId.split('_')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
        items = response[marketId]
        trades = self.parse_trades(items, market, since, limit, {
            'symbol': symbol,
        })
        result = self.array_concat(result, trades)
    return self.filter_by_since_limit(result, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Create a spot or margin order.

    :param str symbol: unified market symbol
    :param str type: 'limit', 'market', 'stop', 'stop_limit' or 'trailing_stop'
    :param str side: 'buy' or 'sell'
    :param float amount: order quantity in base currency
    :param float price: order price(ignored for market orders; 0 is sent)
    :param dict params: extra parameters; supports 'client_id'/'clientOrderId'
        (must be a positive integer) and 'stop_price'/'stopPrice' for stop orders
    :raises BadRequest: if the client order id is not an integer
    :raises InvalidOrder: if a stop order is missing its stopPrice
    :returns dict: a minimal unified order structure(status 'open')
    """
    await self.load_markets()
    market = self.market(symbol)
    # the exchange encodes market orders as 'market_buy'/'market_sell'
    prefix = (type + '_') if (type == 'market') else ''
    orderType = prefix + side
    orderPrice = price
    if (type == 'market') and (price is None):
        orderPrice = 0
    request = {
        'pair': market['id'],
        # 'leverage': 2,
        'quantity': self.amount_to_precision(symbol, amount),
        # spot - buy, sell, market_buy, market_sell, market_buy_total, market_sell_total
        # margin - limit_buy, limit_sell, market_buy, market_sell, stop_buy, stop_sell, stop_limit_buy, stop_limit_sell, trailing_stop_buy, trailing_stop_sell
        'type': orderType,
        'price': self.price_to_precision(symbol, orderPrice),
        # 'stop_price': self.price_to_precision(symbol, stopPrice),
        # 'distance': 0,  # distance for trailing stop orders
        # 'expire': 0,  # expiration timestamp in UTC timezone for the order, unless expire is 0
        # 'client_id': 123,  # optional, must be a positive integer
        # 'comment': '',  # up to 50 latin symbols, whitespaces, underscores
    }
    method = 'privatePostOrderCreate'
    clientOrderId = self.safe_value_2(params, 'client_id', 'clientOrderId')
    if clientOrderId is not None:
        # re-read as integer to validate the type
        clientOrderId = self.safe_integer_2(params, 'client_id', 'clientOrderId')
        if clientOrderId is None:
            raise BadRequest(self.id + ' createOrder client order id must be an integer / numeric literal')
        else:
            request['client_id'] = clientOrderId
        params = self.omit(params, ['client_id', 'clientOrderId'])
    if (type == 'stop') or (type == 'stop_limit') or (type == 'trailing_stop'):
        stopPrice = self.safe_number_2(params, 'stop_price', 'stopPrice')
        if stopPrice is None:
            raise InvalidOrder(self.id + ' createOrder() requires a stopPrice extra param for a ' + type + ' order')
        else:
            params = self.omit(params, ['stopPrice', 'stop_price'])
            request['stop_price'] = self.price_to_precision(symbol, stopPrice)
            # stop orders go through the margin endpoint
            method = 'privatePostMarginUserOrderCreate'
    response = await getattr(self, method)(self.extend(request, params))
    id = self.safe_string(response, 'order_id')
    timestamp = self.milliseconds()
    status = 'open'
    return {
        'id': id,
        'info': response,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'status': status,
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'cost': None,
        'amount': amount,
        'remaining': amount,
        'filled': 0.0,
        'fee': None,
        'trades': None,
        'clientOrderId': clientOrderId,
        'average': None,
    }
async def cancel_order(self, id, symbol=None, params={}):
    """Cancel an open order by id.

    :param str id: the order id
    :param str symbol: unused by this exchange, kept for interface compatibility
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: the raw endpoint response
    """
    await self.load_markets()
    request = {
        'order_id': id,
    }
    return await self.privatePostOrderCancel(self.extend(request, params))
async def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by id via its private trades endpoint.

    Raw response shape:
    {"type":"buy", "in_currency":"BTC", "in_amount":"1", "out_currency":"USD",
     "out_amount":"100", "trades":[{"trade_id":..., "date":..., "pair":"BTC_USD",
     "order_id":..., "quantity":..., "price":..., "amount":...}, ...]}

    :param str id: the order id
    :param str symbol: unused by this exchange, kept for interface compatibility
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a unified order structure
    """
    await self.load_markets()
    orderId = str(id)
    request = {
        'order_id': orderId,
    }
    response = await self.privatePostOrderTrades(self.extend(request, params))
    parsed = self.parse_order(response)
    # the endpoint does not echo the order id back, so re-attach it
    return self.extend(parsed, {
        'id': orderId,
    })
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
    """Fetch the individual trades that filled a given order.

    Fix: self.market() requires the markets dictionary to be loaded, but
    unlike every sibling fetch method this one never called
    load_markets() first - added here so passing a symbol works on a
    fresh instance.

    :param str id: the order id
    :param str symbol: unified market symbol(optional, used for parsing only)
    :param int since: earliest timestamp in milliseconds(filtered client-side)
    :param int limit: maximum number of trades(filtered client-side)
    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified trade structures
    """
    await self.load_markets()
    market = None
    if symbol is not None:
        market = self.market(symbol)
    request = {
        'order_id': str(id),
    }
    response = await self.privatePostOrderTrades(self.extend(request, params))
    #
    # {
    #     "type": "buy",
    #     "in_currency": "BTC",
    #     "in_amount": "1",
    #     "out_currency": "USD",
    #     "out_amount": "100",
    #     "trades": [
    #         {
    #             "trade_id": 3,
    #             "date": 1435488248,
    #             "type": "buy",
    #             "pair": "BTC_USD",
    #             "order_id": 12345,
    #             "quantity": 1,
    #             "price": 100,
    #             "amount": 100,
    #             "exec_type": "taker",
    #             "commission_amount": "0.02",
    #             "commission_currency": "BTC",
    #             "commission_percent": "0.2"
    #         }
    #     ]
    # }
    #
    trades = self.safe_value(response, 'trades')
    return self.parse_trades(trades, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch all open orders, optionally filtered by symbol, since and limit.

    The endpoint responds with order lists keyed by market id.

    :param str symbol: unified market symbol to filter by(optional)
    :param int since: earliest timestamp in milliseconds(filtered client-side)
    :param int limit: maximum number of orders(filtered client-side)
    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified order structures
    """
    await self.load_markets()
    response = await self.privatePostUserOpenOrders(params)
    result = []
    marketIds = list(response.keys())
    for i in range(0, len(marketIds)):
        marketId = marketIds[i]
        market = self.markets_by_id[marketId] if (marketId in self.markets_by_id) else None
        parsed = self.parse_orders(response[marketId], market)
        result = self.array_concat(result, parsed)
    return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_order(self, order, market=None):
    """Parse a raw order(two possible shapes, see examples below) into a
    unified order structure, reconstructing fill/fee information from the
    embedded trades when present.

    :param dict order: raw order payload
    :param dict market: unified market the order belongs to(optional)
    :returns dict: a unified order structure
    """
    #
    # fetchOrders, fetchOpenOrders, fetchClosedOrders
    #
    #     {
    #         "order_id": "14",
    #         "created": "1435517311",
    #         "type": "buy",
    #         "pair": "BTC_USD",
    #         "price": "100",
    #         "quantity": "1",
    #         "amount": "100"
    #     }
    #
    # fetchOrder
    #
    #     {
    #         "type": "buy",
    #         "in_currency": "BTC",
    #         "in_amount": "1",
    #         "out_currency": "USD",
    #         "out_amount": "100",
    #         "trades": [
    #             {
    #                 "trade_id": 3,
    #                 "date": 1435488248,
    #                 "type": "buy",
    #                 "pair": "BTC_USD",
    #                 "order_id": 12345,
    #                 "quantity": 1,
    #                 "price": 100,
    #                 "amount": 100
    #             }
    #         ]
    #     }
    #
    id = self.safe_string(order, 'order_id')
    timestamp = self.safe_timestamp(order, 'created')
    symbol = None
    # 'type' holds the side here('buy'/'sell'), not the order type
    side = self.safe_string(order, 'type')
    if market is None:
        marketId = None
        if 'pair' in order:
            marketId = order['pair']
        elif ('in_currency' in order) and ('out_currency' in order):
            # fetchOrder shape - rebuild the market id from the currency pair
            if side == 'buy':
                marketId = order['in_currency'] + '_' + order['out_currency']
            else:
                marketId = order['out_currency'] + '_' + order['in_currency']
        if (marketId is not None) and (marketId in self.markets_by_id):
            market = self.markets_by_id[marketId]
    amount = self.safe_number(order, 'quantity')
    if amount is None:
        # fetchOrder shape - the base amount is on the side-dependent field
        amountField = 'in_amount' if (side == 'buy') else 'out_amount'
        amount = self.safe_number(order, amountField)
    price = self.safe_number(order, 'price')
    cost = self.safe_number(order, 'amount')
    filled = 0.0
    trades = []
    transactions = self.safe_value(order, 'trades', [])
    feeCost = None
    lastTradeTimestamp = None
    average = None
    numTransactions = len(transactions)
    if numTransactions > 0:
        feeCost = 0
        for i in range(0, numTransactions):
            trade = self.parse_trade(transactions[i], market)
            if id is None:
                id = trade['order']
            if timestamp is None:
                timestamp = trade['timestamp']
            # keep the earliest trade timestamp as the order timestamp
            if timestamp > trade['timestamp']:
                timestamp = trade['timestamp']
            filled = self.sum(filled, trade['amount'])
            feeCost = self.sum(feeCost, trade['fee']['cost'])
            trades.append(trade)
        lastTradeTimestamp = trades[numTransactions - 1]['timestamp']
    status = self.safe_string(order, 'status')  # in case we need to redefine it for canceled orders
    remaining = None
    if amount is not None:
        remaining = amount - filled
        # derive the status from the fill ratio
        if filled >= amount:
            status = 'closed'
        else:
            status = 'open'
    if market is None:
        # last resort - infer the market from the parsed trades
        market = self.get_market_from_trades(trades)
    feeCurrency = None
    if market is not None:
        symbol = market['symbol']
        feeCurrency = market['quote']
    if cost is None:
        if price is not None:
            cost = price * filled
    else:
        # cost is known - derive average/price from it when missing
        if filled > 0:
            if average is None:
                average = cost / filled
            if price is None:
                price = cost / filled
    fee = {
        'cost': feeCost,
        'currency': feeCurrency,
    }
    clientOrderId = self.safe_integer(order, 'client_id')
    return {
        'id': id,
        'clientOrderId': clientOrderId,
        'datetime': self.iso8601(timestamp),
        'timestamp': timestamp,
        'lastTradeTimestamp': lastTradeTimestamp,
        'status': status,
        'symbol': symbol,
        'type': 'limit',
        'timeInForce': None,
        'postOnly': None,
        'side': side,
        'price': price,
        'stopPrice': None,
        'cost': cost,
        'amount': amount,
        'filled': filled,
        'remaining': remaining,
        'average': average,
        'trades': trades,
        'fee': fee,
        'info': order,
    }
async def fetch_deposit_address(self, code, params={}):
    """Fetch the deposit address(and tag, if any) for a currency code.

    The endpoint returns addresses keyed by currency code; entries of the
    form "address,tag" carry a payment tag/memo after the comma.

    :param str code: unified currency code
    :param dict params: extra parameters passed through to the endpoint
    :returns dict: a unified deposit address structure
    """
    await self.load_markets()
    response = await self.privatePostDepositAddress(params)
    depositAddress = self.safe_string(response, code)
    address = None
    tag = None
    if depositAddress:
        parts = depositAddress.split(',')
        address = parts[0]
        if len(parts) > 1:
            tag = parts[1]
    self.check_address(address)
    return {
        'currency': code,
        'address': address,
        'tag': tag,
        'network': None,
        'info': response,
    }
def get_market_from_trades(self, trades):
    """Infer the market from a list of trades.

    Returns the market only when every trade shares a single 'pair'
    value; otherwise returns None.
    """
    grouped = self.index_by(trades, 'pair')
    pairs = list(grouped.keys())
    if len(pairs) == 1:
        return self.markets[pairs[0]]
    return None
async def withdraw(self, code, amount, address, tag=None, params={}):
    """Withdraw funds to an external address.

    :param str code: unified currency code
    :param float amount: the amount to withdraw
    :param str address: the destination address
    :param str tag: optional payment tag/memo(sent as 'invoice')
    :param dict params: extra parameters; supports 'network' to select
        the transport(e.g. ERC20/TRC20), mapped through options['networks']
    :returns dict: {'info': raw response, 'id': withdrawal task id}
    """
    tag, params = self.handle_withdraw_tag_and_params(tag, params)
    await self.load_markets()
    currency = self.currency(code)
    request = {
        'amount': amount,
        'currency': currency['id'],
        'address': address,
    }
    if tag is not None:
        request['invoice'] = tag
    networks = self.safe_value(self.options, 'networks', {})
    network = self.safe_string_upper(params, 'network')  # self line allows the user to specify either ERC20 or ETH
    network = self.safe_string(networks, network, network)  # handle ERC20>ETH alias
    if network is not None:
        request['transport'] = network
        params = self.omit(params, 'network')
    response = await self.privatePostWithdrawCrypt(self.extend(request, params))
    return {
        'info': response,
        'id': response['task_id'],
    }
def parse_transaction_status(self, status):
    """Map an exchange transaction status onto the unified 'ok'/'pending'.

    Unknown statuses pass through unchanged.
    """
    mapping = {
        'transferred': 'ok',
        'paid': 'ok',
        'pending': 'pending',
        'processing': 'pending',
        'verifying': 'pending',
    }
    return self.safe_string(mapping, status, status)
def parse_transaction(self, transaction, currency=None):
    """Parse a deposit/withdrawal record into a unified transaction.

    Two raw shapes are handled:
    - wallet/history rows(fetchTransactions):
        {"dt", "type": "deposit"|"withdrawal", "curr", "status", "provider", "amount", "account", "txid"}
    - wallet/operations rows(fetchWithdrawals):
        {"operation_id", "created", "updated", "type": "withdraw", "currency", "status",
         "amount", "provider", "commission", "account", "order_id", "extra": {"txid", ...}, ...}

    Fix: the two endpoints disagree on the withdrawal type string
    ("withdrawal" vs "withdraw"); both are recognized now, whereas
    previously only "withdrawal" triggered address extraction, the
    'withdraw' funding-fee lookup and the fee deduction from amount.

    :param dict transaction: raw transaction payload
    :param dict currency: unified currency structure(optional)
    :returns dict: a unified transaction structure
    """
    id = self.safe_string(transaction, 'order_id')
    timestamp = self.safe_timestamp_2(transaction, 'dt', 'created')
    updated = self.safe_timestamp(transaction, 'updated')
    amount = self.safe_number(transaction, 'amount')
    if amount is not None:
        # withdrawals may be reported with a negative sign
        amount = abs(amount)
    status = self.parse_transaction_status(self.safe_string_lower(transaction, 'status'))
    txid = self.safe_string(transaction, 'txid')
    if txid is None:
        # wallet/operations nests the txid under 'extra'
        extra = self.safe_value(transaction, 'extra', {})
        extraTxid = self.safe_string(extra, 'txid')
        if extraTxid != '':
            txid = extraTxid
    type = self.safe_string(transaction, 'type')
    isWithdrawal = (type == 'withdrawal') or (type == 'withdraw')
    currencyId = self.safe_string_2(transaction, 'curr', 'currency')
    code = self.safe_currency_code(currencyId, currency)
    address = None
    tag = None
    comment = None
    account = self.safe_string(transaction, 'account')
    if type == 'deposit':
        comment = account
    elif isWithdrawal:
        address = account
    if address is not None:
        # account looks like "DOGE: DBVy8pF1..." - strip the currency prefix
        parts = address.split(':')
        numParts = len(parts)
        if numParts == 2:
            address = self.safe_string(parts, 1)
            address = address.replace(' ', '')
    fee = None
    # fixed funding fees only(for now)
    if not self.fees['funding']['percentage']:
        key = 'withdraw' if isWithdrawal else 'deposit'
        feeCost = self.safe_number(transaction, 'commission')
        if feeCost is None:
            feeCost = self.safe_number(self.options['fundingFees'][key], code)
        # users don't pay for cashbacks, no fees for that
        provider = self.safe_string(transaction, 'provider')
        if provider == 'cashback':
            feeCost = 0
        if feeCost is not None:
            # the reported withdrawal amount includes the fee
            if isWithdrawal:
                amount = amount - feeCost
            fee = {
                'cost': feeCost,
                'currency': code,
                'rate': None,
            }
    network = self.safe_string(transaction, 'provider')
    return {
        'info': transaction,
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'currency': code,
        'amount': amount,
        'network': network,
        'address': address,
        'addressTo': address,
        'addressFrom': None,
        'tag': tag,
        'tagTo': tag,
        'tagFrom': None,
        'status': status,
        'type': type,
        'updated': updated,
        'comment': comment,
        'txid': txid,
        'fee': fee,
    }
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
    """Fetch deposit and withdrawal history via the wallet history endpoint.

    Raw response shape:
    {"result": true, "error": "", "begin": "...", "end": "...",
     "history": [{"dt":..., "type":"deposit"|"withdrawal", "curr":..., "status":...,
                  "provider":..., "amount":..., "account":..., "txid":...}, ...]}

    :param str code: unified currency code to filter by(optional)
    :param int since: earliest timestamp in milliseconds
    :param int limit: maximum number of transactions(filtered client-side)
    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified transaction structures
    """
    await self.load_markets()
    request = {}
    if since is not None:
        # the endpoint expects seconds
        request['date'] = int(since / 1000)
    currency = self.currency(code) if (code is not None) else None
    response = await self.privatePostWalletHistory(self.extend(request, params))
    history = response['history']
    return self.parse_transactions(history, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
    """Fetch withdrawal records via the wallet operations endpoint.

    Raw response shape:
    {"items": [{"operation_id":..., "created":..., "updated":..., "type":"withdraw",
                "currency":..., "status":..., "amount":..., "provider":...,
                "commission":..., "account":..., "order_id":...,
                "extra": {"txid":..., "excode":"", "invoice":""}, "error":""}, ...],
     "count": 23}

    :param str code: unified currency code to filter by(optional)
    :param int since: earliest timestamp in milliseconds(filtered client-side)
    :param int limit: maximum number of records(default: 100, maximum: 100)
    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified transaction structures
    """
    await self.load_markets()
    request = {
        'type': 'withdraw',
    }
    if limit is not None:
        request['limit'] = limit
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['currency'] = currency['id']
    response = await self.privatePostWalletOperations(self.extend(request, params))
    items = response['items']
    return self.parse_transactions(items, currency, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
if api != 'web':
url += self.version + '/'
url += path
if (api == 'public') or (api == 'web'):
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({'nonce': nonce}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def nonce(self):
        # Millisecond timestamp used as the request nonce for signed calls.
        return self.milliseconds()
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if ('result' in response) or ('errmsg' in response):
#
# {"result":false,"error":"Error 50052: Insufficient funds"}
# {"s":"error","errmsg":"strconv.ParseInt: parsing \"\": invalid syntax"}
#
success = self.safe_value(response, 'result', False)
if isinstance(success, basestring):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = None
message = self.safe_string_2(response, 'error', 'errmsg')
errorParts = message.split(':')
numParts = len(errorParts)
if numParts > 1:
errorSubParts = errorParts[0].split(' ')
numSubParts = len(errorSubParts)
code = errorSubParts[1] if (numSubParts > 1) else errorSubParts[0]
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback)
| 44.355897 | 365 | 0.488295 |
ace4f56ab12e67ac65935a86081c996c667b24f7 | 1,224 | py | Python | examples/send_heatmap_data_as_image.py | vohanhattan/real-time-streaming | d721585190acfee6945cf83b1ca0df15042f29ff | [
"MIT"
] | 7 | 2019-12-27T01:21:38.000Z | 2022-02-13T13:11:04.000Z | examples/send_heatmap_data_as_image.py | doc22940/Visualize-Realtime-Data-Stream-Chart-in-Flask | e629ca011b8d54ab1a0c74271ea238b7fe1e459e | [
"MIT"
] | 1 | 2021-10-04T21:39:47.000Z | 2021-10-04T23:41:55.000Z | examples/send_heatmap_data_as_image.py | doc22940/Visualize-Realtime-Data-Stream-Chart-in-Flask | e629ca011b8d54ab1a0c74271ea238b7fe1e459e | [
"MIT"
] | 4 | 2020-03-06T18:30:13.000Z | 2022-03-21T07:24:42.000Z | from VisClient import VisClient
import time
import base64
import random
"""
This Function is under development and is not currently working!
"""
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cv2
pixels = np.zeros((1080, 1920))
data_heat_value = 1
conn = VisClient("127.0.0.1",12345)
print("Creating and sending heatmap to server!!!")
while True:
# Add 100 points
for i in range(0,100):
# Add random point in heatmap
pixels[random.randint(0,1080-1)][random.randint(0,1920-1)] += data_heat_value
fig, ax = plt.subplots()
im = ax.imshow(pixels)
ax.set_title("Heatmap Example")
fig.tight_layout()
fig.canvas.draw()
#plt.show()
image = np.array(fig.canvas.renderer._renderer)
#cv2.imshow("test", image)
#cv2.waitKey(1)
retval, buffer = cv2.imencode('.png', image)
encoded_string = "data:image/png;base64,"+base64.b64encode(buffer).decode()
send_string = '{"id":696969969, "value":"'+encoded_string+'", "type":"image","name":""}'
plt.close(fig)
conn.send_large(send_string)
time.sleep(2)
| 21.473684 | 92 | 0.693627 |
ace4f5c1b403d2e1395adefcac794d67e766033b | 1,117 | py | Python | nowpp/tools.py | serazing/now-postprocess | 999e0af71934b42270ffb3e445df64fbb22717c8 | [
"MIT"
] | 2 | 2021-08-25T13:30:07.000Z | 2021-12-04T15:21:21.000Z | nowpp/tools.py | serazing/now-postprocess | 999e0af71934b42270ffb3e445df64fbb22717c8 | [
"MIT"
] | null | null | null | nowpp/tools.py | serazing/now-postprocess | 999e0af71934b42270ffb3e445df64fbb22717c8 | [
"MIT"
] | null | null | null | def start_client(nci_machine, threads_per_worker=4, memory_limit=None):
import os
from dask.distributed import LocalCluster, Client
if nci_machine == 'vdi':
local_dir = "/local/e14/gs9353/dask-workers/"
cluster = LocalCluster(threads_per_worker=threads_per_worker,
processes=True, local_directory=local_dir)
client = Client(cluster)
elif nci_machine == 'gadi':
local_dir = os.path.join(os.environ['PBS_JOBFS'],
'dask-worker-space')
n_workers = int(os.environ['PBS_NCPUS']) // threads_per_worker
if memory_limit is None:
memory_limit = f'{3.9 * threads_per_worker}gb'
client = Client(n_workers=n_workers,
threads_per_worker=threads_per_worker,
processes=True,
memory_limit=memory_limit,
local_directory=local_dir)
elif nci_machine == 'rajin':
raise ValueError('Raijin has been decommited')
else:
raise ValueError('No such machine')
return client
| 44.68 | 73 | 0.605192 |
ace4f69e8b636a8905dafde16ee4a9fdb53509bc | 219 | py | Python | Python/Curso em video/ex030.py | Erick-Paulino/exercicios-de-cursos | ed78087a27da359ee79fe6dae3ffd9393e7d5873 | [
"MIT"
] | null | null | null | Python/Curso em video/ex030.py | Erick-Paulino/exercicios-de-cursos | ed78087a27da359ee79fe6dae3ffd9393e7d5873 | [
"MIT"
] | null | null | null | Python/Curso em video/ex030.py | Erick-Paulino/exercicios-de-cursos | ed78087a27da359ee79fe6dae3ffd9393e7d5873 | [
"MIT"
] | null | null | null | '''Crie um programa que leia um número inteiro e mostre na tela se ele é PAR ou ÍMPAR.'''
n = int(input('digite um número: '))
if n % 2 == 0:
print(f'o número {n} é par.')
else:
print(f'o número {n} é impar.')
| 27.375 | 89 | 0.616438 |
ace4f84ea8d448d2a578777f8fe8c739b703d552 | 9,053 | py | Python | frappe/utils/__init__.py | gangadharkadam/vervefrappe | 45fdde00657dbac7667e9c5de3a2e8cd489b92f6 | [
"MIT"
] | null | null | null | frappe/utils/__init__.py | gangadharkadam/vervefrappe | 45fdde00657dbac7667e9c5de3a2e8cd489b92f6 | [
"MIT"
] | null | null | null | frappe/utils/__init__.py | gangadharkadam/vervefrappe | 45fdde00657dbac7667e9c5de3a2e8cd489b92f6 | [
"MIT"
] | 1 | 2018-03-21T20:38:32.000Z | 2018-03-21T20:38:32.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals
from werkzeug.test import Client
import os, sys, re, urllib
import frappe
# utility functions like cint, int, flt, etc.
from frappe.utils.data import *
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
	"""
	Returns the CSV value of `v`, For example:

		* apple becomes "apple"
		* hi"there becomes "hi""there"

	Deprecated helper kept for import_docs.py.
	"""
	v = cstr(v)
	if not v: return ''
	# Quote only when the value contains a delimiter, newline or quote.
	if (',' in v) or ('\n' in v) or ('"' in v):
		if '"' in v: v = v.replace('"', '""')
		return '"'+v+'"'
	else: return v or ''
def get_fullname(user):
	"""get the full name (first name + last name) of the user from User"""
	# Cache lookups on frappe.local so repeated calls within one request
	# hit the database at most once per user.
	if not hasattr(frappe.local, "fullnames"):
		frappe.local.fullnames = {}
	if not frappe.local.fullnames.get(user):
		p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True)
		if p:
			# Join non-empty name parts; fall back to the user id when both are blank.
			frappe.local.fullnames[user] = " ".join(filter(None,
				[p.get('first_name'), p.get('last_name')])) or user
		else:
			frappe.local.fullnames[user] = user
	return frappe.local.fullnames.get(user)
def get_formatted_email(user):
	"""get email id of user formatted as: John Doe <johndoe@example.com>"""
	# "Administrator" has no real mailbox; return the literal name as-is.
	if user == "Administrator":
		return user
	from email.utils import formataddr
	fullname = get_fullname(user)
	return formataddr((fullname, user))
def extract_email_id(email):
	"""fetch only the email part of the email id"""
	from email.utils import parseaddr
	fullname, email_id = parseaddr(email)
	# Python 2: decode byte strings to unicode (basestring/unicode names).
	if isinstance(email_id, basestring) and not isinstance(email_id, unicode):
		email_id = email_id.decode("utf-8", "ignore")
	return email_id
def validate_email_add(email_str):
	"""Validates the email string"""
	email = extract_email_id(email_str)
	# The regex must match the entire lowercased address, not just a prefix.
	match = re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower())
	if not match:
		return False
	return match.group(0)==email.lower()
def random_string(length):
	"""generate a random string"""
	# NOTE: uses the non-cryptographic `random` module (and the Python 2
	# `string.letters` name) — do not use for security tokens.
	import string
	from random import choice
	return ''.join([choice(string.letters + string.digits) for i in range(length)])
def get_gravatar(email):
	"""Return the gravatar URL for *email* (retro-style fallback image)."""
	import md5
	return "https://secure.gravatar.com/avatar/{hash}?d=retro".format(hash=md5.md5(email).hexdigest())
def get_traceback():
	"""
	Returns the traceback of the Exception
	"""
	import traceback
	exc_type, value, tb = sys.exc_info()
	trace_list = traceback.format_tb(tb, None) + \
		traceback.format_exception_only(exc_type, value)
	# Python 2: join the frame lines as bytes and decode to unicode.
	body = "Traceback (innermost last):\n" + "%-20s %s" % \
		(unicode((b"").join(trace_list[:-1]), 'utf-8'), unicode(trace_list[-1], 'utf-8'))
	# Also log the traceback (prefixed with the current db name) if a logger exists.
	if frappe.logger:
		frappe.logger.error('Db:'+(frappe.db and frappe.db.cur_db_name or '') \
			+ ' - ' + body)
	return body
def log(event, details):
	# Log *details* at INFO level; *event* is currently unused.
	frappe.logger.info(details)
def dict_to_str(args, sep='&'):
	"""
	Converts a dictionary to URL
	"""
	# Python 2: urllib.quote; empty/None values become an empty string.
	t = []
	for k in args.keys():
		t.append(str(k)+'='+urllib.quote(str(args[k] or '')))
	return sep.join(t)
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
	"""
	Get dictionary of default values from the defaults, or a value if key is passed
	"""
	return frappe.db.get_defaults(key)
def set_default(key, val):
	"""
	Set / add a default value to defaults
	"""
	return frappe.db.set_default(key, val)
def remove_blanks(d):
	"""
	Delete keys whose value is '' or None from *d* (in place) and return it.
	"""
	blank_keys = [key for key in d if d[key] in ('', None)]
	for key in blank_keys:
		del d[key]
	return d
def pprint_dict(d, level=1, no_blanks=True):
	"""
	Pretty print a dictionary with indents

	Python 2 only: relies on d.keys() returning a sortable list.
	A '##comment' key is rendered as a leading '# ...' line instead of a pair.
	"""
	if no_blanks:
		remove_blanks(d)
	# make indent
	indent, ret = '', ''
	for i in range(0,level): indent += '\t'
	# add lines
	comment, lines = '', []
	kl = d.keys()
	kl.sort()
	# make lines
	for key in kl:
		if key != '##comment':
			tmp = {key: d[key]}
			lines.append(indent + str(tmp)[1:-1] )
	# add comment string
	if '##comment' in kl:
		ret = ('\n' + indent) + '# ' + d['##comment'] + '\n'
	# open
	ret += indent + '{\n'
	# lines
	ret += indent + ',\n\t'.join(lines)
	# close
	ret += '\n' + indent + '}'
	return ret
def get_common(d1,d2):
	"""
	Return the list of keys on which *d1* and *d2* agree (same key, equal value).
	"""
	common = []
	for key in d1:
		if key in d2 and d1[key] == d2[key]:
			common.append(key)
	return common
def get_common_dict(d1, d2):
	"""
	Return the key/value pairs that *d1* and *d2* have in common.
	"""
	return {key: value for key, value in d1.items() if key in d2 and d2[key] == value}
def get_diff_dict(d1, d2):
	"""
	Return the entries of *d2* whose keys do not appear in *d1*.
	"""
	return {key: value for key, value in d2.items() if key not in d1}
def get_file_timestamp(fn):
	"""
	Returns timestamp of the given file

	Returns None when the file does not exist (errno 2 / ENOENT);
	any other OSError is re-raised. Python 2 except syntax.
	"""
	from frappe.utils import cint
	try:
		return str(cint(os.stat(fn).st_mtime))
	except OSError, e:
		if e.args[0]!=2:
			raise
		else:
			return None
# to be deprecated
def make_esc(esc_chars):
	"""
	Return a function that backslash-escapes every character of *esc_chars*.
	"""
	def _escape(s):
		return ''.join([('\\' + c) if c in esc_chars else c for c in s])
	return _escape
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
	"""
	Backslash-escape every occurrence of the characters in *esc_chars* inside *s*.
	Falsy input (None, '') yields the empty string.
	"""
	if not s:
		return ""
	escaped = s
	for ch in esc_chars:
		escaped = escaped.replace(ch, '\\' + ch)
	return escaped
def unesc(s, esc_chars):
	"""
	Remove the backslash escapes added by `esc` for the characters in *esc_chars*.
	"""
	result = s
	for ch in esc_chars:
		result = result.replace('\\' + ch, ch)
	return result
def execute_in_shell(cmd, verbose=0):
	# Run *cmd* through the shell and return (stderr, stdout) as byte strings.
	# NOTE: shell=True executes the string via the shell — never pass
	# untrusted input. Python 2 print syntax.
	# using Popen instead of os.system - as recommended by python docs
	from subprocess import Popen
	import tempfile
	with tempfile.TemporaryFile() as stdout:
		with tempfile.TemporaryFile() as stderr:
			p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
			p.wait()
			stdout.seek(0)
			out = stdout.read()
			stderr.seek(0)
			err = stderr.read()
	if verbose:
		if err: print err
		if out: print out
	return err, out
def get_path(*path, **kwargs):
	# Join *path* components under `base` (kwarg), defaulting to the site path.
	base = kwargs.get('base')
	if not base:
		base = frappe.local.site_path
	return os.path.join(base, *path)
def get_site_base_path(sites_dir=None, hostname=None):
	# Both parameters are ignored (kept for backward compatibility).
	return frappe.local.site_path
def get_site_path(*path):
	# Path relative to the current site's base directory.
	return get_path(base=get_site_base_path(), *path)
def get_files_path(*path):
	# Path inside the site's public files directory.
	return get_site_path("public", "files", *path)
def get_backups_path():
	# Path of the site's private backups directory.
	return get_site_path("private", "backups")
def get_request_site_address(full_address=False):
	# Thin wrapper around get_url (defined elsewhere in frappe.utils.data).
	return get_url(full_address=full_address)
def encode_dict(d, encoding="utf-8"):
	# Encode all unicode string values of *d* in place (Python 2 semantics).
	for key in d:
		if isinstance(d[key], basestring) and isinstance(d[key], unicode):
			d[key] = d[key].encode(encoding)
	return d
def decode_dict(d, encoding="utf-8"):
	# Decode all byte-string values of *d* to unicode in place (Python 2 semantics).
	for key in d:
		if isinstance(d[key], basestring) and not isinstance(d[key], unicode):
			d[key] = d[key].decode(encoding, "ignore")
	return d
def get_site_name(hostname):
	# Strip an optional ":port" suffix from the hostname.
	return hostname.partition(':')[0]
def get_disk_usage():
	"""get disk usage of files folder"""
	files_path = get_files_path()
	if not os.path.exists(files_path):
		return 0
	# Shell out to `du`; the size in MB is the first field of the last data line.
	# NOTE(review): passes both -h and -m flags — presumably -m wins; verify.
	err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path))
	return cint(out.split("\n")[-2].split("\t")[0])
def touch_file(path):
	# Unix-style touch: create the file if missing and update its mtime.
	with open(path, 'a'):
		os.utime(path, None)
	return True
def get_test_client():
	# Werkzeug test client wrapping the frappe WSGI application.
	from frappe.app import application
	return Client(application)
def get_hook_method(hook_name, fallback=None):
	# Resolve the first method registered for *hook_name*; use *fallback*
	# when no hook is registered. Returns None if neither exists.
	method = (frappe.get_hooks().get(hook_name))
	if method:
		method = frappe.get_attr(method[0])
		return method
	if fallback:
		return fallback
def update_progress_bar(txt, i, l):
	# Redraw a 40-character console progress bar for item *i* of *l*,
	# prefixed with *txt* padded to 36 characters.
	lt = len(txt)
	if lt < 36:
		txt = txt + " "*(36-lt)
	complete = int(float(i+1) / l * 40)
	sys.stdout.write("\r{0}: [{1}{2}]".format(txt, "="*complete, " "*(40-complete)))
	sys.stdout.flush()
def get_html_format(print_path):
	# Read the HTML print format at *print_path* and inline any
	# {% include '...' %} directives by searching all installed apps.
	# Returns None if the file does not exist.
	html_format = None
	if os.path.exists(print_path):
		with open(print_path, "r") as f:
			html_format = f.read()
		for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format):
			# First app that provides the included file wins.
			for app_name in frappe.get_installed_apps():
				include_path = frappe.get_app_path(app_name, *path.split(os.path.sep))
				if os.path.exists(include_path):
					with open(include_path, "r") as f:
						html_format = html_format.replace(include_directive, f.read())
					break
		return html_format
def is_markdown(text):
	"""
	Heuristically decide whether *text* is markdown rather than HTML.
	Explicit markers win; otherwise treat text containing <p>/<br> tags as HTML.
	"""
	if "<!-- markdown -->" in text:
		return True
	if "<!-- html -->" in text:
		return False
	return not re.search("<p[\s]*>|<br[\s]*>", text)
def get_sites(sites_path=None):
	"""
	List the site folders under *sites_path* (default: current directory),
	excluding the shared 'assets' folder and plain files.
	"""
	import os
	root = sites_path or '.'
	sites = []
	for entry in os.listdir(root):
		if entry in ('assets',):
			continue
		if os.path.isdir(os.path.join(root, entry)):
			sites.append(entry)
	return sites
| 24.336022 | 169 | 0.660555 |
ace4fa1c63c208662a6acbb4cc6828a105498cba | 441,588 | py | Python | torch/testing/_internal/common_methods_invocations.py | daniellepintz/pytorch | 32fbeb170d57ab6a5af9ca6de23a54a6a910a433 | [
"Intel"
] | null | null | null | torch/testing/_internal/common_methods_invocations.py | daniellepintz/pytorch | 32fbeb170d57ab6a5af9ca6de23a54a6a910a433 | [
"Intel"
] | null | null | null | torch/testing/_internal/common_methods_invocations.py | daniellepintz/pytorch | 32fbeb170d57ab6a5af9ca6de23a54a6a910a433 | [
"Intel"
] | 1 | 2021-10-05T07:05:26.000Z | 2021-10-05T07:05:26.000Z | from functools import wraps, partial
from itertools import product, chain
import itertools
import collections
import copy
import operator
import random
import numbers
import unittest
import os
import torch
import numpy as np
from torch._six import inf
import collections.abc
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, Dict
from torch.testing import \
(make_non_contiguous, floating_types, floating_types_and, complex_types,
floating_and_complex_types, floating_and_complex_types_and,
all_types_and_complex_and, all_types_and, all_types_and_complex,
integral_types_and, all_types, double_types, make_tensor)
from .._core import _dispatch_dtypes
from torch.testing._internal.common_device_type import \
(onlyOnCPUAndCUDA, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver,
skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride, toleranceOverride, tol)
from torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater
from torch.testing._internal.common_utils import \
(is_iterable_of_tensors,
random_symmetric_matrix, random_symmetric_psd_matrix,
make_fullrank_matrices_with_distinct_singular_values,
random_symmetric_pd_matrix, make_symmetric_matrices,
make_symmetric_pd_matrices, random_square_matrix_of_rank,
random_fullrank_matrix_distinct_singular_value,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,
torch_to_numpy_dtype_dict, TEST_WITH_ASAN,
GRADCHECK_NONDET_TOL,)
import torch.testing._internal.opinfo_helper as opinfo_helper
from setuptools import distutils
if TEST_SCIPY:
import scipy.special
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
# Unique value to distinguish default from anything else
_NOTHING = object()
class DecorateInfo(object):
    """Metadata describing which tests of an operator should be wrapped in the
    given decorators. A test is decorated when it matches every non-None
    filter (class name, test name, device type, dtype) and active_if holds."""
    __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
    def __init__(self, decorators, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        # A single decorator may be passed bare; normalize to a list either way.
        if isinstance(decorators, collections.abc.Sequence):
            self.decorators = list(decorators)
        else:
            self.decorators = [decorators]
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.dtypes = dtypes
        self.active_if = active_if
    def is_active(self, cls_name, test_name, device_type, dtype):
        # A None filter matches everything; dtypes is a membership test.
        matches_cls = self.cls_name is None or self.cls_name == cls_name
        matches_test = self.test_name is None or self.test_name == test_name
        matches_device = self.device_type is None or self.device_type == device_type
        matches_dtype = self.dtypes is None or dtype in self.dtypes
        return self.active_if and matches_cls and matches_test and matches_device and matches_dtype
class SkipInfo(DecorateInfo):
    """Describes which test, or type of tests, should be skipped when testing
    an operator. Any test that matches all provided arguments will be skipped.
    The skip will only be checked if the active_if argument is True."""
    def __init__(
            self, cls_name=None, test_name=None, *, device_type=None, dtypes=None, active_if=True,
            expected_failure=False):
        """
        Args:
            cls_name: the name of the test class to skip
            test_name: the name of the test within the test class to skip
            device_type: the devices for which to skip the tests
            dtypes: the dtypes for which to skip the tests
            active_if: whether tests matching the above arguments should be skipped
            expected_failure: whether to assert that skipped tests fail
        """
        # expected_failure=True turns the "skip" into an xfail assertion
        # instead of silently skipping the matched tests.
        decorator = unittest.expectedFailure if expected_failure else unittest.skip("Skipped!")
        super().__init__(decorators=decorator, cls_name=cls_name, test_name=test_name,
                         device_type=device_type, dtypes=dtypes, active_if=active_if)
class SampleInput(object):
    """Represents sample inputs to a function."""
    __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
    def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
        # input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).
        # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
        # op with TensorList inputs do not support method or inplace variants.
        assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)
        self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input
        self.args = args
        self.kwargs = kwargs if kwargs is not None else {}
        self.output_process_fn_grad = output_process_fn_grad
        self.name = name
        # Specifies if `self.input` is broadcasted or not,
        # given that the operator supports broadcasting.
        # This field is used to verify the behavior for inplace variant.
        #
        # If a SampleInput is marked with `broadcasts_input=True`,
        # it is verified that we get a `RuntimerError` with this sample,
        # and inplace variant. Also inplace grad{grad} tests are skipped,
        # for such inputs (as they will error out otherwise).
        self.broadcasts_input = broadcasts_input
    def _repr_helper(self, formatter):
        # Helper function to return the details of the SampleInput as `str`
        # It consolidates all the fields of SampleInput and allows,
        # formatting the fields like `input`, `args`, etc with `formatter`
        # callable to customize the representation.
        # Look at `summary` method for example.
        arguments = [
            f'input={formatter(self.input)}',
            f'args={formatter(self.args)}',
            f'kwargs={formatter(self.kwargs)}',
            f'output_process_fn_grad={self.output_process_fn_grad}',
            f'broadcasts_input={self.broadcasts_input}',
            f'name={repr(self.name)}']
        return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
    def __repr__(self):
        """Verbatim representation (fields are rendered with repr-like default)."""
        return self._repr_helper(lambda x: x)
    def summary(self):
        # Returns the SampleInput details in a more
        # friendly format.
        # It formats `Tensor` and `TensorList`
        # in a more condensed representation.
        def formatter(arg):
            # Format any instance of `Tensor` (standalone, in list, or in dict)
            # by Tensor[TensorShape]
            # Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
            if isinstance(arg, torch.Tensor):
                shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
                return f"Tensor[{shape}]"
            elif isinstance(arg, dict):
                return {k: formatter(v) for k, v in arg.items()}
            elif is_iterable_of_tensors(arg):
                return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
            elif isinstance(arg, (list, tuple)):  # Handle list, tuple
                return "(" + ",".join(map(formatter, arg)) + ")"
            return repr(arg)
        return self._repr_helper(formatter)
    # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
    def numpy(self):
        # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
        # Numbers, strings, and bool are preserved as is
        # Lists, tuples and dicts are handled by calling this function recursively
        def to_numpy(x):
            def _np(t):
                return t.detach().cpu().numpy()
            if isinstance(x, torch.Tensor):
                return _np(x)
            elif isinstance(x, list):
                return list(map(to_numpy, x))
            elif isinstance(x, tuple):
                return tuple(map(to_numpy, x))
            elif isinstance(x, dict):
                return {k: to_numpy(v) for k, v in x.items()}
            elif isinstance(x, (numbers.Number, bool, str)):
                return x
            raise ValueError("Unknown type {0}!".format(type(x)))
        sample_np_input, np_args, np_kwargs = to_numpy(self.input), to_numpy(self.args), to_numpy(self.kwargs)
        return (sample_np_input, np_args, np_kwargs)
class AliasInfo(object):
    """Class holds alias information. For example, torch.abs ->
    torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
    """
    def __init__(self, alias_name):
        self.name = alias_name
        # Resolve possibly-qualified names like "linalg.norm" under torch.
        self.op = _getattr_qual(torch, alias_name)
        # Method/inplace variants may not exist; default to None.
        self.method_variant = getattr(torch.Tensor, alias_name, None)
        self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
    def __call__(self, *args, **kwargs):
        # Calling the AliasInfo calls the function variant.
        return self.op(*args, **kwargs)
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
    """getattr that accepts dotted names, e.g. _getattr_qual(torch, 'linalg.norm').

    Returns *default* (when given) if any component is missing; otherwise
    re-raises the AttributeError.
    """
    try:
        result = obj
        for attr in name.split('.'):
            result = getattr(result, attr)
        return result
    except AttributeError:
        if default is _NOTHING:
            raise
        return default
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# This note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do two things:
#
# 1) to simplify testing an operator
# 2) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# Both these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests still have to be written manually.
#
# The utility of OpInfos can also be motivated from a different perspective.
# PyTorch is a complicated framework with many interrelated systems, too
# many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support that's typically handled automatically just by
# defining an OpInfo. This is a helpful perspective to have, because it's often
# surprising to OpInfo writers that just implementing an OpInfo typically can't
# verify an operator is actually implemented correctly. "If an OpInfo doesn't
# validate my op works as expected, what's the point of it?" But the point of
# it is that it lets engineers focus on testing their operator logic instead
# of having to write tests for how the operator interacts with each of
# PyTorch's many systems. And, OK, sometimes it validates your op works
# the way you want and all you have to do is write an OpInfo and you're done
# testing... more on that below.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return a list of SampleInputs (see the class description above).
# Each SampleInput defines an "input", "args", "kwargs",
# an "output_process_fn_grad" function, the "broadcasts_input" bool and
# a "name".
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see the below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad and complex grad and gradgrad and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to the torch, torch.fft, torch.linalg,
# or torch.special namespaces then you should add an OpInfo for it. As
# mentioned a couple times above, implementing an OpInfo is not usually
# sufficient testing (unless the operator is a unary elementwise operator).
# The OpInfo will only test the properties described in the "WHAT'S TESTED"
# section. It DOES NOT verify that the operator is implemented correctly.
#
# We are currently reviewing if operators in the torch.nn.functional namespace
# will be added as OpInfos, but you are encouraged to add an OpInfo for
# such operators, too.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in opinfo_helper.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve, particularly for the
# torch, torch.fft, torch.linalg, and torch.special namespaces, and possibly
# for the torch.nn.functional namespace, too. In addition an analogous class,
# ModuleInfo, will be developed to improve module testing.
#
# We also expect at least two new OpInfo subclasses: BinaryUfuncInfo and
# ReductionInfo. Both will have new automated tests for correctness, too,
# which might make testing binary elementwise operations and reductions as
# simple as testing unary elementwise operations today.
# Classes and methods for the operator database
class OpInfo(object):
    """Operator information and helper functions for acquiring it."""
    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref=None,  # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
                            # If given, the op will be compared with its reference on each of its sample inputs.
                 # the following metadata describes the operator, its variants,
                 #   and its aliases, if any
                 aliases=None,  # iterable of aliases, e.g. ("absolute",) for torch.abs
                 variant_test_name='',  # additional string to include in the test name
                                        # this is useful when an op needs multiple OpInfos,
                                        # like divide does, often because it's really several
                                        # different ops behind the scenes
                 op=None,  # the function variant of the operation, populated as torch.<name> if None
                 method_variant=_NOTHING,  # explicitly specifies the method variant of the operator
                                           # if _NOTHING (default), the method variant will be autopopulated
                                           # if None, then the OpInfo specifies no method variant
                 inplace_variant=_NOTHING,  # explicitly specifies the inplace variant of the operator
                                            # if _NOTHING (default), the method variant will be autopopulated
                                            # if None, then the OpInfo specifies no method variant
                 # the following metadata are test directives for skipping or
                 #   modifying tests and a pointer to the op's sample inputs function
                 #   this function lets the OpInfo generate valid inputs
                 skips=tuple(),  # information about which tests to skip
                 decorators=tuple(),  # decorators to apply to generated tests
                 sample_inputs_func=None,  # function to generate sample inputs
                 # the following metadata relates to dtype support and is tested for correctness in test_ops.py
                 dtypes=floating_types(),  # dtypes this function is expected to work with
                 # the following dtypesIf... options override the dtypes value
                 #   on their respective device types
                 dtypesIfCPU=None,  # dtypes this function is expected to work with on CPU
                 dtypesIfCUDA=None,  # dtypes this function is expected to work with on CUDA
                 dtypesIfROCM=None,  # dtypes this function is expected to work with on ROCM
                 backward_dtypes=None,  # backward dtypes this function is expected to work with
                 backward_dtypesIfCPU=None,  # backward dtypes this function is expected to work with on CPU
                 backward_dtypesIfCUDA=None,  # backward dtypes this function is expected to work with on CUDA
                 backward_dtypesIfROCM=None,  # backward dtypes this function is expected to work with on ROCM
                 default_test_dtypes=None,  # dtypes to test with by default. Tests are instantiated with
                                            # these dtypes for the op unless otherwise specified.
                                            # This is helpful in reducing the test matrix.
                 # the following metadata describes the operators out= support
                 supports_out=True,  # whether the op supports the out kwarg
                                     # defaults to True, if the op does not allow the out kwarg or
                                     # supports it incorrectly then test_out in test_ops.py should fail
                 safe_casts_outputs=False,  # whether op allows safe casting when writing to out arguments
                 # the following metadata relates to autograd support
                 supports_autograd=True,  # whether the operation supports gradient computations
                                          # if true, gradient correctness is tested in test_ops.py
                                          # using the op's sample inputs
                 supports_gradgrad=True,  # whether the op supports second order gradients
                                          # if true, gradgrad correctness is tested in test_ops.py
                                          # (this value is ignored if supports_autograd=False)
                 supports_inplace_autograd=None,  # whether the operation supports inplace autograd
                                                  # if true, tested in test_ops.py
                                                  # defaults to supports_autograd's value
                 supports_forward_ad=False,  # Whether the operation support forward mode AD
                                             # If the value is True, we check that the gradients are correct
                                             # If the value is False, we test that forward grad is not implemented
                 gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs),  # wrapper function for gradcheck
                 check_batched_grad=True,  # whether to check batched grad when doing gradcheck
                 check_batched_gradgrad=True,  # whether to check batched grad grad when doing gradgradcheck
                 gradcheck_nondet_tol=0.0,  # tolerance for nondeterminism while performing gradcheck
                 gradcheck_fast_mode=None,  # Whether to use the fast implmentation for gradcheck/gradgradcheck.
                                            # When set to None, defers to the default value provided by the wrapper
                                            # function around gradcheck (testing._internal.common_utils.gradcheck)
                 # the following metadata relates to JIT support and is tested for correctness in test_ops.py
                 aten_name=None,  # name of the corresponding aten:: operator
                 assert_autodiffed=False,  # if a op's aten::node is expected to be symbolically autodiffed
                 autodiff_nonfusible_nodes=None,  # a list of strings with node names that are expected to be in a
                                                  # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
                                                  # default is populated to be ['aten::(name of Python operator)']
                 autodiff_fusible_nodes=None,  # a list of strings with node names that are expected to be in FusionGroups
                                               # inside of DifferentiableGraphs when this operation is autodiffed.
                                               # Ex: ['aten::add', 'aten::mm'], defaults to an empty list
                                               # Note: currently no ops use fusible nodes
                 # the following metadata relates to sparse support and is used in test_sparse.py
                 supports_sparse=False,  # whether the op supports sparse inputs
                 # the following metadata relates to complex support and is checked in test_ops.py
                 test_conjugated_samples=True,
                 test_neg_view=True,
                 assert_jit_shape_analysis=False,  # assert that jit shape analysis fully propagates shape
                 ):
        dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)
        # Validates the dtypes are generated from the dispatch-related functions
        for dtype_list in dtypes_args:
            assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
        self.name = name
        self.ref = ref
        # aten_name defaults to the Python-facing name when not given explicitly
        self.aten_name = aten_name if aten_name is not None else name
        self.variant_test_name = variant_test_name
        # Attribute to verify dynamic_dtypes are used.
        self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
            dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))
        if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
            # This is because, below we set dtypesIfCUDA to dtypes if they are None.
            assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \
                (f"To use dynamic dypes for operator {name}, "
                 "acquire the dtypes dynamically for argument `dtypesIfCUDA`."
                 "This is to ensure that CUDA dtypes are acquired correctly as they"
                 "differ from CPU dtypes occasionally")
        self.dtypes = set(dtypes)
        # NOTE: backward dtypes must be acquired before forward dtypes
        #   since they fallback to explicit (not implicit!) specifications of
        #   forward dtypes
        self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes
        # Each backward dtypesIf... falls back through the chain: its own explicit
        # value -> the generic backward dtypes -> the device's forward dtypes ->
        # the generic forward dtypes. ROCM additionally falls back through CUDA.
        self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (
            backward_dtypes if backward_dtypes is not None
            else dtypesIfCPU if dtypesIfCPU is not None
            else dtypes)
        self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (
            backward_dtypes if backward_dtypes is not None
            else dtypesIfCUDA if dtypesIfCUDA is not None
            else dtypes)
        self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (
            backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None
            else backward_dtypes if backward_dtypes is not None
            else dtypesIfROCM if dtypesIfROCM is not None
            else dtypesIfCUDA if dtypesIfCUDA is not None
            else dtypes)
        self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes
        self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
        self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA
        self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None
        # NOTE: if the op is unspecified it is assumed to be under the torch namespace
        self.op = op if op else _getattr_qual(torch, self.name)
        # Autopopulate the method variant from torch.Tensor when left as _NOTHING
        method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant
        # attributes like real, imag are not callable
        self.method_variant = method_variant if callable(method_variant) else None
        # Inplace variants follow the torch convention of a trailing underscore
        inplace_name = name + "_"
        self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \
            if inplace_variant is _NOTHING else inplace_variant
        self.operator_variant = getattr(operator, name, None)
        self.supports_out = supports_out
        self.safe_casts_outputs = safe_casts_outputs
        # skips are just decorators that skip tests, so they are folded together
        self.decorators = (*decorators, *skips)
        self.sample_inputs_func = sample_inputs_func
        self.assert_autodiffed = assert_autodiffed
        self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []
        if autodiff_nonfusible_nodes is None:
            self.autodiff_nonfusible_nodes = ['aten::' + self.name]
        else:
            self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes
        # autograd support
        self.supports_autograd = supports_autograd
        self.supports_inplace_autograd = supports_inplace_autograd
        if self.supports_inplace_autograd is None:
            self.supports_inplace_autograd = supports_autograd
        self.gradcheck_wrapper = gradcheck_wrapper
        self.supports_gradgrad = supports_gradgrad
        self.supports_forward_ad = supports_forward_ad
        self.check_batched_grad = check_batched_grad
        self.check_batched_gradgrad = check_batched_gradgrad
        self.gradcheck_nondet_tol = gradcheck_nondet_tol
        self.gradcheck_fast_mode = gradcheck_fast_mode
        self.supports_sparse = supports_sparse
        self.aliases = ()
        if aliases is not None:
            self.aliases = tuple(AliasInfo(a) for a in aliases)  # type: ignore[assignment]
        self.assert_jit_shape_analysis = assert_jit_shape_analysis
        self.test_conjugated_samples = test_conjugated_samples
        self.test_neg_view = test_neg_view
    def __call__(self, *args, **kwargs):
        """Calls the function variant of the operator."""
        return self.op(*args, **kwargs)
    def get_op(self):
        """Returns the function variant of the operator, torch.<op_name>."""
        return self.op
    def get_method(self):
        """Returns the method variant of the operator, torch.Tensor.<op_name>.
        Returns None if the operator has no method variant.
        """
        return self.method_variant
    def get_inplace(self):
        """Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
        Returns None if the operator has no inplace variant.
        """
        return self.inplace_variant
    def get_operator_variant(self):
        """Returns operator variant of the operator, e.g. operator.neg
        Returns None if the operator has no operator variant.
        """
        return self.operator_variant
    def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs but with the tensor input or first
        tensor in a sequence input conjugated.
        """
        # TODO: Remove the try/except once all operators have sample_inputs_func with
        #   **kwargs in their signature.
        try:
            samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
        except TypeError:
            samples = self.sample_inputs_func(self, device, dtype, requires_grad)
        conj_samples = list(samples)
        def conjugate(tensor):
            # conj() outside of autograd tracking, then restore the original
            # requires_grad flag on the conjugated view
            _requires_grad = tensor.requires_grad
            with torch.no_grad():
                tensor = tensor.conj()
            return tensor.requires_grad_(_requires_grad)
        for i in range(len(samples)):
            sample = conj_samples[i]
            # Note: it is assumed that the input here is either a tensor or tensorlist
            if isinstance(sample.input, torch.Tensor):
                sample.input = conjugate(sample.input)
            else:
                with torch.no_grad():
                    sample.input[0] = conjugate(sample.input[0])
        return tuple(conj_samples)
    def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs.
        These samples should be sufficient to test the function works correctly
        with autograd, TorchScript, etc.
        """
        # TODO: Remove the try/except once all operators have sample_inputs_func with
        #   **kwargs in their signature.
        try:
            samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
        except TypeError:
            samples = self.sample_inputs_func(self, device, dtype, requires_grad)
        if 'include_conjugated_inputs' in kwargs and kwargs.get('include_conjugated_inputs'):
            conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
            samples_list = list(samples)
            samples_list.extend(conj_samples)
            samples = tuple(samples_list)
        return samples
    def get_decorators(self, test_class, test_name, device, dtype):
        '''Returns the decorators targeting the given test.'''
        result = []
        for decorator in self.decorators:
            if isinstance(decorator, DecorateInfo):
                # DecorateInfos apply conditionally, based on the test identity
                if decorator.is_active(test_class, test_name, device, dtype):
                    result.extend(decorator.decorators)
            else:
                result.append(decorator)
        return result
    def supported_dtypes(self, device_type):
        # Returns the forward dtypes for the given device type ('cpu', 'cuda', ...)
        if device_type == 'cpu':
            return self.dtypesIfCPU
        if device_type == 'cuda':
            return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
        else:
            return self.dtypes
    def supported_backward_dtypes(self, device_type):
        # Returns the backward dtypes for the given device type, restricted to
        # the dtypes autograd can differentiate (floating point and complex,
        # plus bfloat16/float16); empty when the op has no autograd support.
        if not self.supports_autograd:
            return set()
        backward_dtypes = None
        if device_type == 'cpu':
            backward_dtypes = self.backward_dtypesIfCPU
        elif device_type == 'cuda':
            backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
        else:
            backward_dtypes = self.backward_dtypes
        allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)
        return set(allowed_backward_dtypes).intersection(backward_dtypes)
    def supports_complex_autograd(self, device_type):
        # True when any backward dtype on the given device type is complex
        if device_type == 'cpu':
            return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)
        if device_type == 'cuda':
            if TEST_WITH_ROCM:
                return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)
            else:
                return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)
        else:
            return any(dtype.is_complex for dtype in self.backward_dtypes)
    def supports_dtype(self, dtype, device_type):
        # True when the op supports `dtype` on the given device type
        return dtype in self.supported_dtypes(device_type)
    def default_test_dtypes(self, device_type):
        """Returns the default dtypes used to test this operator on the device.
        Equal to the operator's default_test_dtypes filtered to remove dtypes
        not supported by the device.
        """
        supported = self.supported_dtypes(device_type)
        return (supported if self._default_test_dtypes is None
                else supported.intersection(self._default_test_dtypes))
def _generate_reduction_inputs(device, dtype, requires_grad):
    """Yields tensors of assorted ranks (scalar, 1D, noncontiguous 2D, 4D)
    for exercising reduction operators."""
    shapes_and_extras = (
        ([], {}),
        ([2], {}),
        ([2, 3], {'noncontiguous': True}),
        ([3, 2, 1, 5], {}),
    )
    for shape, extra_kwargs in shapes_and_extras:
        yield make_tensor(shape, device, dtype, requires_grad=requires_grad, **extra_kwargs)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing inner and outer most dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for reduction operators: every generated input tensor is
    combined with every representative dim/keepdim combination, plus any extra
    op-specific args/kwargs produced by `generate_args_kwargs`."""
    def default_generate_args_kwargs(*args, **kwargs):
        # No extra positional or keyword arguments by default.
        yield tuple(), {}
    # TODO(@heitorschueroff) Once all reduction operators are using
    # ReductionOpInfo use op_info.supports_multiple_dims directly.
    multi_dim: bool = kwargs.get('supports_multiple_dims', True)
    # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
    # use op_info.genearte_args_kwargs directly.
    generate_args_kwargs = kwargs.get('generate_args_kwargs', default_generate_args_kwargs)
    samples: List[SampleInput] = []
    for tensor in _generate_reduction_inputs(device, dtype, requires_grad):
        for dim_kwargs in _generate_reduction_kwargs(tensor.ndim, multi_dim):
            for extra_args, extra_kwargs in generate_args_kwargs(tensor, **dim_kwargs):
                extra_kwargs.update(dim_kwargs)
                samples.append(SampleInput(tensor, args=extra_args, kwargs=extra_kwargs))
    return samples
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
    """Reduction operator information.
    An operator is a reduction operator if it reduces one or more dimensions of
    the input tensor to a single value. Reduction operators must implement the
    following signature:
    - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`
    ReductionOpInfo tests that reduction operators implement a consistent API.
    Optional features such as reducing over multiple dimensions are captured in
    the optional keyword parameters of the ReductionOpInfo constructor.
    If a reduction operator does not yet implement the full required API of
    reduction operators, this should be documented by skipping the failing
    tests rather than adding optional parameters to ReductionOpInfo.
    NOTE
    The API for reduction operators has not yet been finalized and some
    requirements may change.
    See tests in test/test_reductions.py
    """
    def __init__(
        self, name, *,
        # The identity value for the operator if it has one.
        identity: Optional[Any] = None,
        # The nan policy for the operator if it implements one.
        # - propagate: NaN values are propagated to the output
        # - omit: NaN values are discarded during the reduction
        nan_policy: Optional[str] = None,
        # Whether the operator supports reducing multiple dimensions.
        supports_multiple_dims: bool = True,
        # Whether the operator promotes integral to floating point dtypes.
        promotes_int_to_float: bool = False,
        # Whether the operator promotes all integral dtypes to int64.
        promotes_int_to_int64: bool = False,
        # If a specific dtype is given, then the operator always returns that
        # dtype irrespective of the input dtype. If None, the operator returns
        # the dtype according to the type promotion rules above.
        result_dtype: Optional[torch.dtype] = None,
        # ReductionOpInfo tests generate their own input, dim and keepdim
        # arguments and call this function to generate tuples of extra args and
        # kwargs to use when calling the op. This is required for operators that
        # have other required parameters besides the input tensor.
        generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
        # Options from the OpInfo base class
        **kwargs,
    ):
        assert nan_policy in (None, 'propagate', 'omit')
        # These are mutually exclusive options
        assert not (result_dtype and promotes_int_to_float)
        assert not (result_dtype and promotes_int_to_int64)
        assert not (promotes_int_to_float and promotes_int_to_int64)
        # Default sample_inputs_func for ReductionOpInfo which augments sample
        # inputs from sample_inputs_reduction with the args and kwargs from
        # generate_args_kwargs. This is only used if sample_inputs_func is None.
        def sample_inputs_func(*args, **kwargs):
            kwargs['supports_multiple_dims'] = supports_multiple_dims
            kwargs['generate_args_kwargs'] = generate_args_kwargs
            return sample_inputs_reduction(*args, **kwargs)
        # Override OpInfo defaults and call base class __init__
        # (reduction ops get no autopopulated inplace variant by default)
        kwargs.setdefault('inplace_variant', None)
        kwargs.setdefault('sample_inputs_func', sample_inputs_func)
        super(ReductionOpInfo, self).__init__(name, **kwargs)
        self.identity = identity
        self.nan_policy = nan_policy
        self.supports_multiple_dims = supports_multiple_dims
        self.promotes_int_to_float = promotes_int_to_float
        self.promotes_int_to_int64 = promotes_int_to_int64
        self.result_dtype = result_dtype
        self.generate_args_kwargs = generate_args_kwargs
def sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):
    """Default samples for unary elementwise ops: one 1D tensor of length L and
    one scalar (0-dim) tensor, both restricted to the op's open domain (the
    bounds are shrunk by _domain_eps to stay strictly inside it)."""
    low, high = op_info.domain
    if low is not None:
        low = low + op_info._domain_eps
    if high is not None:
        high = high - op_info._domain_eps
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=low, high=high, requires_grad=requires_grad)
    return (SampleInput(make((L,))), SampleInput(make(())))
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
class UnaryUfuncInfo(OpInfo):
    """Operator information for 'universal unary functions (unary ufuncs).'
    These are functions of a single tensor with common properties like:
    - they are elementwise functions
    - the input shape is the output shape
    - they typically have method and inplace variants
    - they typically support the out kwarg
    - they typically have NumPy or SciPy references
    See NumPy's universal function documentation
    (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
    about the concept of ufuncs.
    """
    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref,  # a reference function
                 dtypes=floating_types(),
                 dtypesIfCPU=None,
                 dtypesIfCUDA=None,
                 dtypesIfROCM=None,
                 default_test_dtypes=(
                     torch.uint8, torch.long, torch.half, torch.bfloat16,
                     torch.float32, torch.cfloat),  # dtypes which tests check by default
                 domain=(None, None),  # the [low, high) domain of the function
                 handles_large_floats=True,  # whether the op correctly handles large float values (like 1e20)
                 handles_extremals=True,  # whether the op correctly handles extremal values (like inf)
                 handles_complex_extremals=True,  # whether the op correctly handles complex extremals (like inf -infj)
                 supports_complex_to_float=False,  # op supports casting from complex input to real output safely eg. angle
                 sample_inputs_func=sample_inputs_unary,
                 sample_kwargs=lambda device, dtype, input: ({}, {}),
                 supports_sparse=False,
                 **kwargs):
        super(UnaryUfuncInfo, self).__init__(name,
                                             dtypes=dtypes,
                                             dtypesIfCPU=dtypesIfCPU,
                                             dtypesIfCUDA=dtypesIfCUDA,
                                             dtypesIfROCM=dtypesIfROCM,
                                             default_test_dtypes=default_test_dtypes,
                                             sample_inputs_func=sample_inputs_func,
                                             supports_sparse=supports_sparse,
                                             **kwargs)
        self.ref = ref
        self.domain = domain
        self.handles_large_floats = handles_large_floats
        self.handles_extremals = handles_extremals
        self.handles_complex_extremals = handles_complex_extremals
        self.supports_complex_to_float = supports_complex_to_float
        # test_unary_ufuncs.py generates its own inputs to test the consistency
        # of the operator on sliced tensors, non-contig tensors, etc.
        # `sample_kwargs` is a utility function to provide kwargs
        # along with those inputs if required (eg. clamp).
        # It should return two dictionaries, first holding kwarg for
        # torch operator and second one for reference NumPy operator.
        self.sample_kwargs = sample_kwargs
        # Epsilon to ensure grad and gradgrad checks don't test values
        # outside a function's domain.
        self._domain_eps = 1e-5
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for tensor_split covering tensor indices, index sequences,
    and integer section counts (with positive and negative dims)."""
    make_input = partial(make_tensor, device=device, dtype=dtype,
                         low=None, high=None, requires_grad=requires_grad)
    cases = (
        # Tensor indices.
        (torch.tensor([1, 2, 3]),),
        (torch.tensor(1),),
        (torch.tensor([1, 2, 3]), 1),
        (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),
        # Sequences of split points.
        ((2, 4),),
        ((2, 4), 1),
        ((2, 4), -1),
        # Integer number of sections.
        (3,),
        (3, 1),
        (3, -1),
    )
    return [SampleInput(make_input((S, S, S)), args=args) for args in cases]
def sample_inputs_linalg_det(op_info, device, dtype, requires_grad):
    """Samples for det covering generic, symmetric (psd/pd), rank-deficient,
    batched, and empty square matrices."""
    kw = dict(device=device, dtype=dtype)
    samples = []
    for matrix in [
        make_tensor((S, S), **kw),
        make_tensor((1, 1), **kw),  # 1x1
        random_symmetric_matrix(S, **kw),  # symmetric
        random_symmetric_psd_matrix(S, **kw),  # symmetric_psd
        random_symmetric_pd_matrix(S, **kw),  # symmetric_pd
        random_square_matrix_of_rank(S, S - 2, **kw),  # dim2_null
        random_square_matrix_of_rank(S, 1, **kw),  # rank1
        random_square_matrix_of_rank(S, 2, **kw),  # rank2
        random_fullrank_matrix_distinct_singular_value(S, **kw),  # distinct_singular_value
        make_tensor((3, 3, S, S), **kw),  # batched
        make_tensor((3, 3, 1, 1), **kw),  # batched_1x1
        random_symmetric_matrix(S, 3, **kw),  # batched_symmetric
        random_symmetric_psd_matrix(S, 3, **kw),  # batched_symmetric_psd
        random_symmetric_pd_matrix(S, 3, **kw),  # batched_symmetric_pd
        random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw),  # batched_distinct_singular_values
        make_tensor((0, 0), **kw),
        make_tensor((0, S, S), **kw),
    ]:
        # The factory helpers don't take requires_grad, so it is set afterwards.
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad):
    """Samples of (batches of) singular square matrices for det-like operators."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def make_singular_matrix_batch_base(size, rank):
        # Builds a batch of square matrices with the given deficient rank.
        assert size[-1] == size[-2]
        assert rank > 0 and rank <= size[-1]
        with torch.no_grad():
            n = size[-1]
            # An (n x rank) @ (rank x n) product has rank at most `rank`.
            a = make_arg(size[:-2] + (n, rank)) / 10
            b = make_arg(size[:-2] + (rank, n)) / 10
            x = a @ b
            lu, pivs = x.lu()
            p, l, u = torch.lu_unpack(lu, pivs)
            u_diag_abs = u.diagonal(0, -2, -1).abs()
            u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
            u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices
            # Rescale U's diagonal and force its (n - rank) smallest entries to
            # eps so the determinant is (numerically) zero while the matrix
            # stays well scaled.
            u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
            u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
            matrix = p @ l @ u
        # Sanity check: determinant is negligible relative to the matrix norm.
        assert (matrix.det().abs() < torch.finfo(dtype).eps * torch.linalg.matrix_norm(matrix)).all().item()
        matrix.requires_grad_(requires_grad)
        return matrix
    def sample_generator():
        # Unbatched and batched shapes up to 5x5, each with every deficient rank.
        for batch, size in product(((), (2,), (2, 2)), range(6)):
            shape = batch + (size, size)
            for rank in range(1, size):
                yield make_singular_matrix_batch_base(shape, rank)
    return [SampleInput(t) for t in sample_generator()]
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):
    """Samples for matrix_power with nonnegative and negative exponents;
    negative exponents use full-rank (invertible) matrices."""
    # (<matrix_size>, (<batch_sizes, ...>))
    test_sizes = [
        (1, ()),
        (2, (0,)),
        (2, (2,)),
    ]
    samples = []
    for matrix_size, batch_sizes in test_sizes:
        size = batch_sizes + (matrix_size, matrix_size)
        # Nonnegative powers are defined for any square matrix.
        for exponent in (0, 3, 5):
            t = make_tensor(size, device, dtype, requires_grad=requires_grad)
            samples.append(SampleInput(t, args=(exponent,)))
        # Negative powers need an invertible matrix.
        for exponent in (-4, -2, -1):
            t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)
            t.requires_grad = requires_grad
            samples.append(SampleInput(t, args=(exponent,)))
    return samples
def sample_inputs_hsplit(op_info, device, dtype, requires_grad):
    """Samples for hsplit: an even integer split of a 1D tensor and an
    index-list split of a 3D tensor."""
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=None, high=None, requires_grad=requires_grad)
    return (SampleInput(make((6,)), args=(2,)),
            SampleInput(make((S, S, S)), args=([1, 2, 3],)))
def sample_inputs_vsplit(op_info, device, dtype, requires_grad):
    """Samples for vsplit: an even integer split of a 2D tensor and an
    index-list split of a 3D tensor."""
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=None, high=None, requires_grad=requires_grad)
    return (SampleInput(make((6, S)), args=(2,)),
            SampleInput(make((S, S, S)), args=([1, 2, 3],)))
def sample_inputs_dsplit(op_info, device, dtype, requires_grad):
    """Samples for dsplit: an index-list split and an even integer split along
    the third dimension."""
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=None, high=None, requires_grad=requires_grad)
    return (SampleInput(make((S, S, S)), args=([1, 2, 3],)),
            SampleInput(make((S, S, 6)), args=(2,)))
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):
    """Samples for multi_dot. Each test case lists the sizes in the chain of
    multiplications, e.g. [2, 3, 4, 5] generates matrices
    (2, 3) @ (3, 4) @ (4, 5); some chains include zero-sized dimensions."""
    test_cases = [
        [1, 2, 1],
        [2, 0, 2],
        [0, 2, 2],
        [2, 2, 2, 2],
        [2, 3, 4, 5],
        [5, 4, 0, 2],
        [2, 4, 3, 5, 3, 2]
    ]
    result = []
    for sizes in test_cases:
        # Adjacent sizes pair up into the (rows, cols) of each matrix.
        tensors = [make_tensor(shape, device, dtype, requires_grad=requires_grad)
                   for shape in zip(sizes[:-1], sizes[1:])]
        result.append(SampleInput(tensors))
    return result
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for linalg.matrix_norm: the cross product of matrix/batched
    shapes, all supported orders, two dim pairs, and both keepdim settings."""
    sizes = ((2, 2), (2, 3, 2))
    ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)
    dims = ((-2, -1), (-1, 0))
    return [
        SampleInput(make_tensor(size, device, dtype, requires_grad=requires_grad),
                    args=(norm_ord, dim, keepdim))
        for size, norm_ord, dim, keepdim in product(sizes, ords, dims, [True, False])
    ]
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):
    """Samples for linalg.norm over vector, matrix, and higher-rank shapes
    (including empty ones), with the appropriate set of orders for each rank."""
    test_sizes = [
        (S,),
        (0,),
        (S, S),
        (0, 0),
        (S, 0),
        (0, S),
        (S, S, S),
        (0, S, S),
        (S, 0, S),
        (0, 0, 0),
    ]
    vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
    matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=None, high=None, requires_grad=requires_grad)
    inputs = []
    for test_size in test_sizes:
        ndim = len(test_size)
        for keepdim in [False, True]:
            # Default ord with just keepdim, valid for any rank.
            inputs.append(SampleInput(make(test_size), kwargs=dict(keepdim=keepdim)))
            # ord-specific samples only make sense for vectors and matrices.
            if ndim == 1:
                ords = vector_ords
            elif ndim == 2:
                ords = matrix_ords
            else:
                continue
            for norm_ord in ords:
                inputs.append(SampleInput(make(test_size),
                                          args=(norm_ord,),
                                          kwargs=dict(keepdim=keepdim)))
                if norm_ord in ['nuc', 'fro']:
                    # These matrix norms also accept an explicit dim pair.
                    inputs.append(SampleInput(make(test_size),
                                              kwargs=dict(ord=norm_ord,
                                                          keepdim=keepdim,
                                                          dim=(0, 1))))
    return inputs
def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for cosine_similarity: matching shapes with assorted dim/eps
    kwargs, plus one broadcasting pair."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Ordered as input_shape, dict of dim and eps
    cases: Tuple[tuple, dict] = (  # type: ignore[assignment]
        ((S, S), {'dim': 1}),
        ((S, 2), {'dim': -1}),
        ((S,), {'dim': 0, 'eps': 0.5}),
        ((), {'dim': 0}),
        ((S, S, M), {'dim': 2}),
        ((S, S), {})
    )
    samples = [SampleInput(make_arg(shape), args=(make_arg(shape),), kwargs=extra)
               for shape, extra in cases]
    # Test for Broadcasting
    samples.append(SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}))
    return samples
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for relu: scalar through 3D shapes with no extra arguments."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shapes = (
        (),
        (S,),
        (S, S),
        (S, M, S),
    )
    return [SampleInput(make_arg(shape)) for shape in shapes]
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for torch.norm: assorted p values on 2D inputs, dim/keepdim
    variants (with negated dims), and nonzero inputs for fractional p."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = (
        ((S, S), (2,), '2'),
        ((S, S), (0,), '0'),
        ((S, S), (0.5,), '0_5'),
        ((S, S), (1,), '1'),
        ((S, S), (3,), '3'),
        ((S, S), (-1,), 'neg_1'),
        ((S, S), (-2,), 'neg_2'),
        ((S, S), (-0.5,), 'neg_0_5'),
        ((S, S), (-1.5,), 'neg_1_5'),
    )
    cases_nonzero_input = (
        ((S, S, S), (1.5,), '1_5_default'),
        ((S, S, S), (1.5, 1), '1_5_dim'),
        ((S, S, S), (1.5, -1), '1_5_neg_dim'),
        ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
        ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
    )
    cases_negdim_base = (
        ((S, S), (-2, 1,), 'neg_2_2_dim'),
        ((S, S), (-1, 1,), 'neg_1_2_dim'),
        ((S, S), (0, 1,), '0_2_dim'),
        ((S, S), (1, 1,), '1_2_dim'),
        ((S, S), (2, 1,), '2_2_dim'),
        ((S, S), (3, 1,), '3_2_dim'),
        ((S, S, S), (2, 1), '2_dim'),
        ((S, S, S), (3, 1), '3_dim'),
        ((S, S, S), (2, 1, True), 'keepdim_2_dim'),
        ((S, S, S), (3, 1, True), 'keepdim_3_dim'),
        ((), (2, 0), '2_dim_scalar'),
        ((), (3, 0), '3_dim_scalar'),
        ((), (2, 0, True), 'keepdim_2_dim_scalar'),
        ((), (3, 0, True), 'keepdim_3_dim_scalar'),
    )
    # Each dim-taking case is duplicated with its dim negated.
    cases_negdim = []
    for shape, args, name in cases_negdim_base:
        cases_negdim.append((shape, args, name))
        negated = list(args)
        negated[1] *= -1
        cases_negdim.append((shape, tuple(negated), name.replace("_dim", "_neg_dim")))
    samples = [SampleInput(make_arg(shape), args=args, name=name)
               for shape, args, name in itertools.chain(cases, cases_negdim)]
    # Fractional p is only differentiable away from zero, so exclude zeros.
    samples.extend(SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)
                   for shape, args, name in cases_nonzero_input)
    return samples
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
    # Frobenius norm: default ord, explicit 'fro', and 'fro' with explicit dims.
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = (
        ((S, S), (), 'default'),
        ((S, S), ('fro',), 'fro_default'),
        ((S, S), ('fro', [0, 1],), 'fro'),
    )
    return [SampleInput(make_arg(shape), args=args, name=name)
            for shape, args, name in cases]
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
    # Nuclear norm of a single matrix, and of a batch with explicit dims.
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = (
        ((S, S), ('nuc',), 'nuc'),
        ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
    )
    return [SampleInput(make_arg(shape), args=args, name=name)
            for shape, args, name in cases]
def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
    # +inf / -inf norm orders, with and without an explicit dim.
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = (
        ((S, S), (-inf,), '-inf'),
        ((S, S), (inf,), 'inf'),
        ((S, S), (inf, 1,), 'inf_2_dim'),
        ((S, S), (inf, -1,), 'inf_2_neg_dim'),
    )
    return [SampleInput(make_arg(shape), args=args, name=name)
            for shape, args, name in cases]
def sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):
    # Cross product of (input size, ord, dim) with keepdim in {False, True}.
    size_1D = (S,)
    size_2D = (2, 2)

    # Orders under test: positive/zero/fractional/negative and +/-inf.
    ords = (2, 0, 0.9, 1, -2.1, inf, -inf)

    # 1-D inputs get dim=None and dim=(0,); 2-D inputs additionally (-1, 0).
    test_cases = [(size_1D, ord, dim) for ord in ords for dim in (None, (0,))]
    test_cases += [(size_2D, ord, dim) for ord in ords for dim in (None, (0,), (-1, 0))]

    inputs = []
    for test_size, ord, dim in test_cases:
        for keepdim in (False, True):
            inputs.append(SampleInput(
                make_tensor(
                    test_size, device, dtype,
                    low=None, high=None,
                    requires_grad=requires_grad),
                args=(ord,),
                kwargs=dict(
                    keepdim=keepdim,
                    dim=dim)))

    return inputs
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensor and have common properties
class BinaryUfuncInfo(OpInfo):
    """Operator information for 'universal binary functions (binary ufuncs).'

    These are elementwise functions of two tensors whose output shape follows
    from the input shapes; they typically have method and inplace variants,
    support the out kwarg, and have NumPy or SciPy references.

    See NumPy's universal function documentation
    (https://numpy.org/doc/stable/reference/ufuncs.html) for more details
    about the concept of ufuncs.
    """
    def __init__(self, name, *, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None, **kwargs):
        super().__init__(name, **kwargs)

        # Per-side make_tensor kwargs are stored on the OpInfo so valid
        # operands can be generated dynamically later on.
        self.lhs_make_tensor_kwargs = {} if lhs_make_tensor_kwargs is None else lhs_make_tensor_kwargs
        self.rhs_make_tensor_kwargs = {} if rhs_make_tensor_kwargs is None else rhs_make_tensor_kwargs
def _resolve_binay_pwise_kwargs(
        op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None
):
    """Resolve default values for :func:`sample_inputs_binary_pwise`.

    Unspecified values default to empty dicts, except that a
    :class:`BinaryUfuncInfo` supplies its own per-side make_tensor kwargs.

    NOTE(review): the name misspells "binary"; kept as-is because call sites
    depend on it.
    """
    is_binary_ufunc = isinstance(op_info, BinaryUfuncInfo)
    if op_kwargs is None:
        op_kwargs = {}
    if lhs_make_tensor_kwargs is None:
        lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if is_binary_ufunc else {}
    if rhs_make_tensor_kwargs is None:
        rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if is_binary_ufunc else {}
    return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs
def sample_inputs_binary_pwise(
    op_info,
    device,
    dtype,
    requires_grad,
    *,
    python_scalars=False,
    op_kwargs=None,
    lhs_make_tensor_kwargs=None,
    rhs_make_tensor_kwargs=None,
    **kwargs,
):
    """Sample inputs for binary pointwise ops.

    Generates tensor/tensor pairs (same-shape and broadcasting) plus
    tensor/scalar pairs; when ``python_scalars`` is True the scalar is a
    Python number instead of a 0-dim tensor.
    """
    op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binay_pwise_kwargs(
        op_info,
        op_kwargs=op_kwargs,
        lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
        rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
    )

    # One shared scalar rhs, built with the rhs constraints (e.g. nonzero).
    scalar = make_tensor((), device=device, dtype=dtype, **rhs_make_tensor_kwargs)
    if python_scalars:
        scalar = scalar.item()  # type: ignore[assignment]

    # Each entry is (lhs_shape, rhs_shape-or-scalar).
    shapes = [
        ((), scalar),
        ((S,), scalar),
        ((S, 1), (S,)),
        ((M, S), scalar),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
    ]

    sample_inputs = []
    for shape_lhs, shape_rhs_or_scalar in shapes:
        lhs = make_tensor(
            shape_lhs,
            device=device,
            dtype=dtype,
            requires_grad=requires_grad,
            **lhs_make_tensor_kwargs,
        )
        if isinstance(shape_rhs_or_scalar, tuple):
            # shape
            rhs = make_tensor(
                shape_rhs_or_scalar,
                device=device,
                dtype=dtype,
                requires_grad=requires_grad,
                **rhs_make_tensor_kwargs,
            )
            # The input broadcasts iff the broadcasted shape differs from lhs.
            broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs
        else:
            # scalar
            rhs = shape_rhs_or_scalar  # type: ignore[assignment]
            broadcasts_input = False

        sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input))
    return sample_inputs
def sample_inputs_add_sub(
    op_info,
    device,
    dtype,
    requires_grad,
    python_scalars=False,
    alpha=1,
    op_kwargs=None,
    lhs_make_tensor_kwargs=None,
    rhs_make_tensor_kwargs=None,
    **kwargs,
):
    """Sample inputs for add/sub: the generic binary pointwise samples plus
    one extra sample that exercises the ``alpha`` kwarg."""
    op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binay_pwise_kwargs(
        op_info,
        op_kwargs=op_kwargs,
        lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
        rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
    )

    sample_inputs = sample_inputs_binary_pwise(
        op_info,
        device,
        dtype,
        requires_grad,
        python_scalars=python_scalars,
        op_kwargs=op_kwargs,
        lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
        rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
        **kwargs,
    )

    # Extra same-shape sample carrying the alpha kwarg on top of op_kwargs.
    lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
    rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
    sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False))

    return sample_inputs
def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):
    # torch.t accepts tensors of rank <= 2; cover 2-D, 1-D and 0-dim inputs.
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shapes = ((1, 2), (2,), ())
    return tuple(SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):
    # Matrix-matrix product: an (S, M) @ (M, S) pair; for complex dtypes,
    # additionally a conjugated second operand and a transposed/conjugated pair.
    def make(shape):
        return make_tensor(shape, device, dtype, requires_grad=requires_grad)

    first_shape, second_shape = (S, M), (M, S)

    sample_inputs = [SampleInput(make(first_shape), args=(make(second_shape),))]

    if dtype.is_complex:
        sample_inputs.append(
            SampleInput(make(first_shape),
                        args=(make(second_shape).conj(),)))
        sample_inputs.append(
            SampleInput(make(first_shape).transpose(0, 1),
                        args=(make(second_shape).transpose(0, 1).conj(),)))

    return sample_inputs
def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for addmm (input + alpha * mat1 @ mat2 scaled by beta).

    alpha/beta can be overridden via kwargs; defaults are complex-valued for
    complex dtypes.  Includes lhs-broadcasting cases and, for complex dtypes,
    conjugate-transposed operands.
    """
    alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
    beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)
    # (input_shape, mat1_shape, mat2_shape, broadcasts_input)
    tests_list = [
        ((2, 3), (2, 2), (2, 3), False)
    ]
    tests_with_lhs_broadcasting = [
        ((1,), (2, 2), (2, 3), True),
        ((), (2, 2), (2, 3), True)
    ]
    test_cases = tests_list + tests_with_lhs_broadcasting  # type: ignore[operator]
    sample_inputs = []
    for shape_a, shape_b, shape_c, broadcasts_input in test_cases:
        sample_inputs.append(
            SampleInput(
                make_tensor(shape_a, device, dtype, requires_grad=requires_grad),
                args=(
                    make_tensor(shape_b, device, dtype,
                                requires_grad=requires_grad),
                    make_tensor(shape_c, device, dtype,
                                requires_grad=requires_grad)),
                kwargs={'alpha': alpha_val, 'beta': beta_val},
                broadcasts_input=broadcasts_input))
    if dtype.is_complex:
        shape = (3, 3)
        # Conjugate-transposed mat1 ...
        sample_inputs.append(
            SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
                        args=(
                            make_tensor(shape, device, dtype,
                                        requires_grad=requires_grad).t().conj(),
                            make_tensor(shape, device, dtype,
                                        requires_grad=requires_grad)),
                        kwargs={'alpha': alpha_val, 'beta': beta_val},))
        # ... and conjugate-transposed mat2.
        sample_inputs.append(
            SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
                        args=(
                            make_tensor(shape, device, dtype,
                                        requires_grad=requires_grad),
                            make_tensor(shape, device, dtype,
                                        requires_grad=requires_grad).t().conj()),
                        kwargs={'alpha': alpha_val, 'beta': beta_val},))
    return sample_inputs
def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):
    # Single matrix-vector product sample: (S, M) @ (M,).
    mat = make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)
    vec = make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)
    return (SampleInput(mat, args=(vec,)),)
def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):
    # Single batched matmul sample: batch of M matrices, (M, S, M) @ (M, M, S).
    batch1 = make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)
    batch2 = make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)
    return (SampleInput(batch1, args=(batch2,)),)
def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
    # Two 1-D operands; for complex dtypes also pair with a conjugated second
    # argument.  dot/vdot for (conj(input), conj(arg_tensor)) and
    # (conj(input), arg_tensor) is tested in test_conj_view (which tests
    # operations with only conjugated input tensor -- not conjugated arg
    # tensors).
    def make_vec(conj=False):
        vec = make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)
        return torch.conj(vec) if conj else vec

    sample_inputs = [SampleInput(make_vec(), args=(make_vec(),))]
    if dtype.is_complex:
        sample_inputs.append(SampleInput(make_vec(), args=(make_vec(conj=True),)))
    return sample_inputs
def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
    # addmv performs: beta * input + alpha * (mat @ vec)
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # (input_shape, mat_shape, vec_shape, beta, alpha, broadcasts_input)
    cases = (((S,), (S, M), (M,), 1, 1, False),
             ((S,), (S, M), (M,), 0.2, 0.6, False),
             ((1,), (S, M), (M,), 1, 1, True),
             ((1,), (S, M), (M,), 0.2, 0.6, True),
             ((), (S, M), (M,), 1, 1, True),
             ((), (S, M), (M,), 0.2, 0.6, True),
             )

    return [SampleInput(make_arg(inp), args=(make_arg(mat), make_arg(vec)),
                        kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts)
            for inp, mat, vec, beta, alpha, broadcasts in cases]
def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
    test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
                  ((1,), (S, S, S), (S, S, M), 1, 1, True),
                  ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
                  ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ((), (S, S, S), (S, S, M), 1, 1, True),
                  ((), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ]

    samples = []
    for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
        # For complex dtypes, first emit a variant with complex beta/alpha.
        if dtype.is_complex:
            samples.append(SampleInput(
                make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
                kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
                broadcasts_input=is_broadcasting))
        samples.append(SampleInput(
            make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
            kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting))
    return samples
def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
    # Each case: three operand shapes (input, tensor1, tensor2) and whether
    # the first operand broadcasts against the elementwise product/quotient.
    test_cases = [(((S, S), (S, S), (S, S)), False),
                  (((S, S), (S, 1), (1, S)), False),
                  (((1,), (S, S, 1), (1, S)), True),
                  (((), (), ()), False),
                  (((S, S), (), ()), True),
                  (((), (S, S, 1), (1, S)), True)
                  ]

    samples = []
    for input_args, broadcasts_input in test_cases:
        operands = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
                         for arg in input_args)
        # One sample with default `value`, one exercising the `value` kwarg
        # (the same tensors are shared between the two samples).
        samples.append(SampleInput(operands[0], args=operands[1:], broadcasts_input=broadcasts_input))
        samples.append(SampleInput(operands[0], args=operands[1:], kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))

    return tuple(samples)
def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for baddbmm (beta * input + alpha * batch1 @ batch2)."""
    # (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input)
    # NOTE(review): unlike sample_inputs_addbmm, the 4th/5th tuple slots are
    # unpacked as alpha then beta here -- confirm this ordering is intended.
    test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
                  ((1,), (S, S, S), (S, S, M), 1, 1, True),
                  ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
                  ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ((), (S, S, S), (S, S, M), 1, 1, True),
                  ((), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ]
    sample_inputs = []
    for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
        args = (make_tensor(input_shape, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad),
                make_tensor(batch1_shape, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad),
                make_tensor(batch2_shape, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad))
        sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
                                         kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))
        if dtype.is_complex:
            # Complex-scaled variant of the same shapes.
            sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
                                             kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
                                             broadcasts_input=broadcasts_input))

    if dtype.is_complex:
        # NOTE(review): this block reuses `beta` and `alpha` leaked from the
        # final loop iteration above (0.2 and 0.6) -- fragile; it breaks
        # silently if test_cases is reordered or emptied.
        shapes = [(S, S, S), (S, M, S), (S, S, M)]
        args = (make_tensor(shapes[0], device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad),
                make_tensor(shapes[1], device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad),
                make_tensor(shapes[2], device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad))
        sample_inputs.append(
            SampleInput(
                args[0].transpose(-1, 1), args=(args[1].transpose(-1, 1).conj(), args[2].transpose(-1, 1).conj()),
                kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))

    return tuple(sample_inputs)
def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):
    # addr performs: beta * input + alpha * outer(vec1, vec2)
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)

    def vec_pair():
        return (make_arg((S, )), make_arg((M, )))

    # Default beta/alpha with a matrix input ...
    input1 = SampleInput(make_arg((S, M)), args=vec_pair())
    # ... and with a scalar input that broadcasts against the outer product.
    input2 = SampleInput(make_arg(()), args=vec_pair(), broadcasts_input=True)

    # Dtype-appropriate non-default scaling factors.
    if dtype.is_complex:
        alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
    elif dtype.is_floating_point:
        alpha, beta = 0.2, 0.6
    else:
        alpha, beta = 2, 3

    input3 = SampleInput(make_arg((S, M)), args=vec_pair(),
                         kwargs=dict(beta=beta, alpha=alpha))
    input4 = SampleInput(make_arg(()), args=vec_pair(),
                         kwargs=dict(beta=beta, alpha=alpha),
                         broadcasts_input=True)

    return (input1, input2, input3, input4)
def sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):
    # x is unconstrained; y uses low=0 so log(y) is evaluated on [0, inf).
    x = make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
    y = make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad)
    return (SampleInput(x, args=(y,)),)
def sample_inputs_xlog1py(self, device, dtype, requires_grad):
    """Sample inputs for xlog1py: same-shape, broadcasting, zero-masked `x`,
    and Python-scalar `y` cases.  `y` uses low=-1 so 1 + y stays positive."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def generator():
        # same shape
        yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))
        # rhs broadcast
        yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))
        # all zero `x`
        # in-place fill_ is wrapped in no_grad so it does not record autograd ops
        with torch.no_grad():
            x = make_arg((S, S))
            x.fill_(0)
        yield SampleInput(x, args=(make_arg((S, S), low=-1),))

        # randomly zero-masked `x`
        x = make_arg((S, S))
        y = make_arg((S, S), low=-1)
        with torch.no_grad():
            x[torch.rand(x.shape) > 0.5] = 0
        yield SampleInput(x, args=(y,))

        # Scalar x
        # `input` has to be a tensor
        # yield SampleInput(0, args=(make_arg((S, S), low=-1),))
        # yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))

        # Scalar y
        yield SampleInput(make_arg((S, S)), args=(-0.5,))
        yield SampleInput(make_arg((S, S)), args=(1.2,))

    return list(generator())
def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):
    # zero_ takes no arguments; cover scalar, 3-D and 1-D inputs.
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shapes = ((), (S, S, S), (S,))
    return [SampleInput(make_arg(shape)) for shape in shapes]
def sample_inputs_logsumexp(self, device, dtype, requires_grad):
    # (shape, dim, keepdim) combinations, including a scalar input.
    cases = (
        ((), (0,), True),
        ((S, S), (1,), True),
        ((S, S), (1,), False),
    )
    return tuple(
        SampleInput(
            make_tensor(shape, device, dtype,
                        low=None, high=None,
                        requires_grad=requires_grad),
            args=(dim, keepdim))
        for shape, dim, keepdim in cases)
def sample_inputs_logcumsumexp(self, device, dtype, requires_grad):
    # (shape, dim): a 3-D input reduced along two different dims, plus a scalar.
    cases = (
        ((S, S, S), 0),
        ((S, S, S), 1),
        ((), 0),
    )
    return tuple(
        SampleInput(
            make_tensor(shape, device, dtype,
                        low=None, high=None,
                        requires_grad=requires_grad),
            args=(dim,))
        for shape, dim in cases)
def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
    # trace takes a single square matrix and no additional arguments.
    matrix = make_tensor((S, S), device, dtype,
                         low=None, high=None,
                         requires_grad=requires_grad)
    return (SampleInput(matrix),)
def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
    # (shape, (p, dim, maxnorm)) cases, including a negative dim and p=inf.
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((S, S, S), (2, 1, 0.5)),
             ((S, S, S), (2, -1, 0.5)),
             ((S, S, S), (1, 2, 3)),
             ((S, S, S), (float('inf'), 2, 0.5)),
             )
    return [SampleInput(make_arg(shape), args=args) for shape, args in cases]
def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
    # (shape, (dim0, dim1)) pairs mixing positive/negative dims, and including
    # scalar and single-element inputs where both dims are 0.
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((1, 2, 3), (-1, -2)),
             ((1, 2, 3), (-1, 2)),
             ((1, 2, 3), (1, -2)),
             ((1, 2, 3), (1, 2)),
             ((), (0, 0)),
             ((1, ), (0, 0)),
             ((M, M), (0, 1)),
             ((S, S, S), (2, 0)), )
    return [SampleInput(make_arg(shape), args=dims) for shape, dims in cases]
def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):
    """Generate guaranteed-invertible inputs for linear algebra ops using
    random_fullrank_matrix_distinct_singular_value.

    The input is the itertools.product of 'batches' and 'ns' (8 SampleInputs
    in total).  'batches' covers: () single input, (0,) zero batched
    dimension, (2,) batch of two matrices, and (1, 1) a 1x1 batch.  'ns'
    gives 0x0 and 5x5 matrices; zero-sized dimensions are edge cases in the
    implementation and important to test for in order to avoid unexpected
    crashes.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    batches = [(), (0, ), (2, ), (1, 1)]
    ns = [5, 0]
    samples = []
    for batch, n in product(batches, ns):
        matrix = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
    # Square matrices, optionally batched.  All shapes are non-empty:
    # autograd is not supported for inputs with zero number of elements.
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    shapes = ((S, S),
              (2, S, S),
              (2, 1, S, S), )
    return [SampleInput(make_arg(shape)) for shape in shapes]
def np_sinc_with_fp16_as_fp32(x):
    # Wraps numpy's sinc so that fp16 inputs are promoted to fp32 before sinc
    # is invoked (the result then stays fp32).  Context: numpy's sinc returns
    # NaN when evaluated at 0 for fp16.
    if x.dtype == np.float16:
        x = x.astype(np.float32)
    return np.sinc(x)
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
    # (input_size, target_shape) pairs; each target is broadcast-compatible
    # with the input, including the scalar-to-scalar and scalar-to-3D cases.
    cases = (
        ((S, 1, 1), (S, S, S)),
        ((S, 1, S), (S, S, S)),
        ((S, 1), (S, S, S)),
        ((1,), (S, S, S)),
        ((1, S), (1, 1, S)),
        ((), ()),
        ((), (1, 3, 2)),
    )
    samples = []
    for size, shape in cases:
        t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
        samples.append(SampleInput(t, args=(shape,)))
    return tuple(samples)
def sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):
    # Values in [-32, 32); shift amounts in [0, 5) as a tensor, plus a Python
    # int shift of 2.  Shapes cover 3-D, 1-D and scalar inputs.
    shapes = (
        (S, S, S),
        (S,),
        (),
    )
    samples = []
    for size in shapes:
        value = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)
        shift = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)
        samples.append(SampleInput(value, args=(shift,)))
        samples.append(SampleInput(value, args=(2,)))
    return tuple(samples)
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for cdist: the cross product of shape pairs, p-norm
    values, and both compute_mode strings."""
    small_S = 2
    # (x1_shape, x2_shape) pairs, including broadcasting batch dims and
    # zero-sized dimensions.
    test_cases = (
        ((S, S, 2), (S, S + 1, 2)),
        ((S, S), (S, S)),
        ((S, S, S), (S, S, S)),
        ((3, 5), (3, 5)),
        ((2, 3, 5), (2, 3, 5)),  # https://github.com/pytorch/pytorch/issues/15511
        ((1, 2, 3), (1, 2, 3)),
        ((1, 1), (S, 1)),
        ((0, 5), (4, 5)),
        ((4, 5), (0, 5)),
        ((0, 4, 5), (3, 5)),
        ((4, 5), (0, 3, 5)),
        ((0, 4, 5), (1, 3, 5)),
        ((1, 4, 5), (0, 3, 5)),
        # Using S here would make this one test take 9s
        ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
        ((small_S, 1, 1, small_S), (1, small_S, small_S)),
        ((1, 1, small_S), (small_S, 1, small_S, small_S)),
    )

    samples = []
    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        # FIXME add an override for JIT and revert 0. back to 0
        # since it's accepted by eager
        for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
            for t1_size, t2_size in test_cases:
                # The args should never be non-contiguous as this is not supported in the backward
                samples.append(SampleInput(
                    make_tensor(t1_size, device, dtype, requires_grad=requires_grad, noncontiguous=False),
                    args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad, noncontiguous=False), p, cm)))

    return samples
def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)

    # Fill with a Python scalar and with a 0-dim tensor.  The tensor fill
    # value must not require grad; for requires_grad=False below,
    # check https://github.com/pytorch/pytorch/issues/59137
    cases = (((S, S, S), (1,)),
             ((), (1,)),
             ((S, S, S), (make_arg((), requires_grad=False),)))

    return [SampleInput(make_arg(shape), args=args) for shape, args in cases]
def sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for elementwise comparison ops (eq/ne/lt/...).

    Covers same-shape and lhs-broadcasting tensor pairs, plus pairs of
    element-for-element equal tensors so exact matches are also exercised.
    """
    # (lhs_shape, rhs_shape, broadcasts_input)
    test_cases = (
        ((S, S, S), (S, S, S), False),
        ((S, S, S), (), False),
        ((S, S, S), (1,), False),
        ((S,), (1,), False),
        ((), (), False),
    )
    test_cases_lhs_broadcasting = (
        ((S, 1, S), (S, S, S), True),
        ((1,), (S, S, S), True),
        ((1, S), (1, 1, S), True),
        ((), (0,), True),
        ((), (S, S, S), True),
    )
    cases = test_cases + test_cases_lhs_broadcasting
    sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,
                                                 requires_grad=requires_grad),
                                     args=(make_tensor(second_shape, device, dtype,
                                                       requires_grad=requires_grad),),
                                     broadcasts_input=broadcasts_input)
                         for first_shape, second_shape, broadcasts_input in cases)
    # Nested-list literals turned into pairs of identical tensors below.
    equal_tensors_non_bool = (
        ([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),
        ([[[6, 5]], [[1, -5]]]),
        ([[2], [-1]]),
        ([0, -6]),
        ([3],),
    )
    equal_tensors_bool = (
        ([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),
        ([[[1, 1]], [[1, 0]]]),
        ([[1], [0]]),
        ([0, 1]),
        ([1],),
    )
    more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool
    # Both tensors are built from the same `elements`, so they compare equal
    # elementwise.
    more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,
                                                requires_grad=requires_grad),
                                   args=(torch.tensor(elements, device=device, dtype=dtype,
                                                      requires_grad=requires_grad),))
                       for elements in more_cases)
    sample_inputs = [*sample_inputs, *more_inputs]
    return tuple(sample_inputs)
def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
    # Stack three equally-shaped matrices along dim 0.
    tensors = [make_tensor((S, S), device, dtype, requires_grad=requires_grad)
               for _ in range(3)]
    return (SampleInput(tensors, args=(0,)),)
def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # (shape1, shape2, cat kwargs): shapes may differ along the cat dim, be
    # empty, or omit `dim` entirely to exercise the default.
    cases: Tuple[tuple, tuple, dict] = (  # type: ignore[assignment]
        ((S, S), (S, S), {'dim': -1}),
        ((S, S), (S, S), {'dim': 1}),
        ((M, S), (S, S), {'dim': 0}),  # different shapes
        ((1, 2, 3), (1, 2, 3), {'dim': -2}),
        ((0,), (0,), {'dim': 0}),  # empty tensor
        ((0, S), (S, S), {'dim': 0}),
        ((1,), (1,), {})  # dim not passed, fallback to default
    )

    # Loop variable renamed so it no longer shadows the **kwargs parameter.
    return [SampleInput([make_arg(shape1), make_arg(shape2)], kwargs=sample_kwargs)
            for shape1, shape2, sample_kwargs in cases]
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
    # A sequence of three equally-shaped matrices; no extra args needed.
    tensors = [make_tensor((S, S), device, dtype, requires_grad=requires_grad)
               for _ in range(3)]
    return (SampleInput(tensors),)
def sample_inputs_hypot(op_info, device, dtype, requires_grad):
    # hypot takes two same-shaped tensors.
    lhs = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
    rhs = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
    return (SampleInput(lhs, args=(rhs,)),)
def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)
    samples = [
        # 2-D input gathered along dim 0 and dim 1.
        SampleInput(make_arg((M, S)),
                    args=(0, gather_variable((S, S), 1, M, True, device=device))),
        SampleInput(make_arg((M, S)),
                    args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
        # 0-dim input with a 1-element index tensor.
        SampleInput(make_arg(()),
                    args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
        # 0-dim index tensor against 1-D and 0-dim inputs.
        SampleInput(make_arg((S,)),
                    args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
        SampleInput(make_arg(()),
                    args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
    ]
    return tuple(samples)
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for take_along_dim, covering broadcasting of both the
    indices and the input, and the call without an explicit `dim`."""
    return (SampleInput(make_tensor((S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S), 1, S, True, device=device), 0)),

            # `indices` broadcast
            SampleInput(make_tensor((S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),

            # `self` broadcast
            SampleInput(make_tensor((1, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),

            # without `dim` arg
            SampleInput(make_tensor((S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
            # NOTE(review): this sample duplicates the previous one exactly —
            # likely unintended; consider removing one of the pair.
            SampleInput(make_tensor((S, S), device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
            )
def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
    # Shape/kwargs combinations: full reduction, dim, dim+keepdim, and the
    # same three variants for a scalar input.
    test_cases: Tuple[tuple, dict] = (  # type: ignore[assignment]
        ((S, S, S), {}),
        ((S, S, S), {'dim': 1}),
        ((S, S, S), {'dim': 1, 'keepdim': True}),
        ((), {'dim': 0}),
        ((), {}),
        ((), {'dim': 0, 'keepdim': True}),
    )

    return [SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
                        kwargs=sample_kwargs)
            for shape, sample_kwargs in test_cases]
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.diff with n fixed at 1.

    Positional args after the input are (n, dim, prepend, append); prepend
    and append tensors are only supplied where the case provides a shape.
    """
    # (input_size, dim, prepend_size, append_size)
    test_cases = (
        ((1,), 0, None, None),
        ((S,), 0, None, None),
        ((S, 1), 0, None, None),
        ((S, 1), 1, None, None),
        ((S, S), 0, None, None),
        ((S, S), 1, None, None),
        ((S, S), 0, (1, S), (2, S)),
        ((S, S), 0, None, (2, S)),
        ((S, S, S), 1, None, None),
        ((S, S, S), 1, (S, 1, S), (S, 1, S)),)

    sample_inputs = []
    for size, dim, size_prepend, size_append in test_cases:
        # args = (input, n=1, dim, prepend, append)
        args = (make_tensor(size, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad), 1, dim,
                make_tensor(size_prepend, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_prepend else None,
                make_tensor(size_append, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_append else None)
        sample_inputs.append(SampleInput(args[0], args=args[1:]))

    return tuple(sample_inputs)
def sample_inputs_histogram(op_info, device, dtype, requires_grad):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # Includes 1-element (S, 1, S) and zero-element (S, 0, S) shapes.
    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    samples = []
    for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
        data = make_arg(size)
        weights = make_arg(size) if weighted else None

        # One sample with an integer bin count ...
        samples.append(SampleInput(data, args=(bin_ct,),
                                   kwargs=dict(weight=weights, density=density)))

        # ... and one with an explicit tensor of bin_ct + 1 bin edges.
        edges = make_arg((bin_ct + 1,))
        samples.append(SampleInput(data, args=(edges,),
                                   kwargs=dict(weight=weights, density=density)))

    return samples
def sample_inputs_gradient(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.gradient with scalar/list spacing and with
    explicit per-dimension coordinate tensors."""
    sample_inputs = []
    # (input_size, spacing, dim, edge_order) with numeric spacing.
    test_cases_float = (
        ((S,), None, None, 1),
        ((S,), 2., None, 1),
        ((S, S), None, None, 2),
        ((S, S), [2.0, 2.1], None, 1),
        ((S, S), [2.0, 2.1], (0, 1), 1),
        ((4, 4, 4), [2., 1.], (0, 1), 2),
    )
    for size, spacing, dim, edge_order in test_cases_float:
        t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))

    # (input_size, per-dim coordinate values, dim, edge_order); spacing is
    # passed as a list of 1-D coordinate tensors, one per entry in `dim`.
    test_cases_tensor = (
        ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
        ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
    )
    for size, coordinates, dim, edge_order in test_cases_tensor:
        t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
        coordinates_tensor_list = []
        for coords in coordinates:
            a = torch.tensor(coords, dtype=dtype, device=device)
            coordinates_tensor_list.append(a)
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))

    return tuple(sample_inputs)
def sample_inputs_index_select(op_info, device, dtype, requires_grad):
    """Sample inputs for index_select: a regular 3-D case plus 0-dim input edge cases."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, low=None, high=None,
                       requires_grad=requires_grad)
    samples = (
        # select two rows of a 3-D tensor along dim 0
        SampleInput(make_arg((S, S, S)), args=(0, index_variable(2, S, device=device))),
        # 0-dim input with a 1-D index
        SampleInput(make_arg(()), args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
        # 0-dim input with a 0-dim index
        SampleInput(make_arg(()), args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
    )
    return samples
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.__getitem__: basic slicing, advanced (list/tensor)
    indexing, Ellipsis, and boolean-mask indexing on a (S, S, S) tensor."""
    test_args = [
        ([1, 2],),
        (slice(0, 3),),
        ([slice(0, 3), 1],),
        ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
        ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
        ([slice(None), slice(None), [0, 3]],),
        ([slice(None), [0, 3], slice(None)],),
        ([[0, 3], slice(None), slice(None)],),
        ([[0, 3], [1, 2], slice(None)],),
        ([[0, 3], ],),
        ([[0, 3], slice(None)],),
        ([[0, 3], Ellipsis],),
        # mixed list / LongTensor advanced indexing
        ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
        # plain tensor index
        (index_variable(2, S, device=device),),
        # boolean mask guaranteed to select at least one element
        (mask_not_all_zeros((S,)),),
    ]
    return tuple(SampleInput(
        make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
        args=args)
        for args in test_args)
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for index_put with integer-index and boolean-mask forms,
    with and without accumulation."""
    inputs = []
    for accumulate in [False, True]:
        # Test with indices arg
        inputs.append(SampleInput(
            make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
            args=(
                (index_variable(2, S, device=device), ),
                make_tensor((2, S), device, dtype, low=None, high=None)),
            kwargs=dict(accumulate=accumulate)))
        # Test with mask arg
        # NOTE(review): the all-False mask below is created on the default device,
        # not `device` — confirm this is intended for CUDA runs.
        mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
        inputs.append(SampleInput(
            make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
            args=(
                (mask, ),
                make_tensor((S,), device, dtype, low=None, high=None),),
            kwargs=dict(accumulate=accumulate)))
    return inputs
# TODO: add a test for the nondeterminism of this operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for index_add covering (non)contiguous operands, the
    `alpha` kwarg, and 0-dim / one-element scalar cases."""
    # These tests are pretty much the same as those from index_copy.
    # Perhaps merge?
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    t = make_arg((S, S))
    s = make_arg((S, S))
    # non-contiguous target
    t_nonctg = t.transpose(0, 1)
    # non-contiguous source
    s_nonctg = s.transpose(0, 1)

    idx = make_arg((S,), dtype=torch.int64, low=0, high=S)
    idx_nonctg = make_arg((S,), dtype=torch.int64, low=0, high=S, noncontiguous=True)
    samples = [SampleInput(tensor, args=(1, idx, source))
               for tensor, idx, source in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg])]
    samples.extend(SampleInput(tensor, args=(1, idx, source), kwargs=dict(alpha=a))
                   for tensor, idx, source, a in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg], [-1, 0, 2]))

    # Add scalar cases.
    # BUG FIX: these must be lists, not generator expressions — the first
    # `extend` used to exhaust the generators, so the alpha variants in the
    # second `extend` iterated an empty product and were silently never added.
    scalar_sizes = [(), (1,)]
    ts = [make_arg(size) for size in scalar_sizes]
    idxs = [make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes]
    ss = [make_arg(size) for size in scalar_sizes]

    samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
    samples.extend(SampleInput(t, args=(0, idx, s), kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))
    return samples
def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.sort: a large 1-D tensor, small 3-D tensors over
    all dims/descending/stable combinations, and 0-dim edge cases."""
    def apply_grad(t):
        # requires_grad can only be set on dtypes that support autograd
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            t.requires_grad_(requires_grad)
    def small_3d_unique(dtype, device):
        # all-unique values make the sort order (and gradients) unambiguous
        res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
        res = res.to(dtype)
        apply_grad(res)
        return res
    def large_1d_unique(dtype, device):
        res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
        res = res.to(dtype)
        apply_grad(res)
        return res
    samples = []
    # Test case for large tensor.
    largesample = SampleInput(large_1d_unique(dtype, device))
    samples.append(largesample)
    # Test cases for small 3d tensors.
    # Imitates legacy tests from test/test_torch.py
    t = small_3d_unique(dtype, device)
    dims = range(-3, 3)
    flag = [True, False]
    for dim, descending, stable in product(dims, flag, flag):
        # default schema without stable sort
        samples.append(SampleInput(t, args=(dim, descending)))
        # schema with stable sort, no CUDA support yet
        if torch.device(device).type == 'cpu':
            samples.append(
                SampleInput(t, kwargs=dict(dim=dim, descending=descending, stable=stable))
            )
    # Test cases for scalar tensor
    scalar = torch.tensor(1, dtype=dtype, device=device)
    apply_grad(scalar)
    samples.append(SampleInput(scalar))
    samples.append(SampleInput(scalar, args=(0,)))
    samples.append(SampleInput(scalar, args=(0, True)))
    # Test cases for stable sort
    samples.append(SampleInput(scalar, kwargs=dict(stable=True)))
    samples.append(SampleInput(scalar, kwargs=dict(dim=0, stable=True)))
    samples.append(SampleInput(scalar, kwargs=dict(dim=0, descending=True, stable=True)))
    return samples
def sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for index_fill over every dim, (non)contiguous inputs and
    indices, scalar/tensor fill values, and duplicate indices."""
    samples = []
    t = make_tensor((S, S, S), device, dtype,
                    low=None, high=None,
                    requires_grad=requires_grad)
    fill_val = torch.tensor(-1 + 1j if t.is_complex() else -1)
    # non-contiguous input
    t01 = t.transpose(0, 1)
    t02 = t.transpose(0, 2)
    t12 = t.transpose(1, 2)
    idx = index_variable(1, S, device=device)
    # non-contiguous index
    idx_nonctg = torch.empty_strided((S,), (2,), device=device, dtype=torch.int64)
    idx_nonctg.copy_(idx)
    for d in range(t.dim()):
        for tensor in [t, t01, t02, t12]:
            samples.append(SampleInput(tensor, args=(d, idx, fill_val)))
            # negative-index variant
            samples.append(SampleInput(tensor, args=(d, -idx - 1, fill_val)))
            samples.append(SampleInput(tensor, args=(d, idx_nonctg, fill_val)))
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    index_tensor = partial(torch.tensor, device=device, dtype=torch.long)
    def unique_idx(numel, max_idx):
        # Generate unique random indices vector of `numel`
        # elements in range [0, max_idx).
        indices = random.sample(range(max_idx), numel)
        return index_tensor(indices)
    samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))
    samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))
    samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))
    samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))
    samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))
    # Duplicate indices
    samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))
    samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))
    return samples
def sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for binary max/min, exercising broadcasting and 0-dim operands."""
    # (lhs shape, rhs shape) pairs
    shape_pairs = (
        ((S, S, S), (S, S, S)),
        ((S, S, S), (S,)),
        ((S,), (S, S, S)),
        ((S, 1, S), (S, S)),
        ((S, S), (S, S)),
        ((), ()),
        ((S, S, S), ()),
        ((), (S, S, S)),
    )

    def make(shape):
        return make_tensor(shape, device, dtype, low=None, high=None,
                           requires_grad=requires_grad)

    return [SampleInput(make(lhs), args=(make(rhs),)) for lhs, rhs in shape_pairs]
def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for adaptive_avg_pool2d: (input shape, target output size) pairs."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Each entry is (input shape, output size); a None dimension is inferred
    # from the input, and a bare int applies to both spatial dims.
    cases = (
        ((1, 8, 8, 8), (5, 7)),
        ((2, 8, 8, 8), (None, 7)),
        ((1, 8, 4, 3), (5, None)),
        ((1, 8, 4, 3), (None, None)),
        ((1, 8, 4, 3), (5)),
    )

    return [SampleInput(make_arg(shape), args=(out_size,)) for shape, out_size in cases]
def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for F.normalize over assorted p-norms, dims, and eps values."""
    make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)

    # (input shape, kwargs) pairs; the empty dict exercises all defaults
    cases: Tuple[Tuple[int], dict] = (  # type: ignore[assignment]
        ((2, 1, 4, 5), {'p': 1., 'dim': 2}),
        ((2, 3, 4, 5), {'p': 2., 'dim': 1}),
        ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
        ((1, 3, 4, 5), {'p': -1., 'dim': 1}),
        ((1, 3, 4, 5), {'p': 0., 'dim': -1}),
        ((), {'p': 1.2, 'dim': 0}),
        ((2, 3, 4, 5), {}),
        ((2, 3, 4, 5), {'eps': 1e-4}))

    return [SampleInput(make_arg(shape), kwargs=kw) for shape, kw in cases]
def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for conv_transpose2d with varied stride/padding/output_padding/
    groups/dilation combinations, including a bias-less case and an all-defaults case."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4, 4), (3, 3, 3, 3), (3,),
         {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
        ((2, 2, 4, 4), (2, 2, 4, 5), (4,),
         {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
        ((1, 1, 4, 5), (1, 1, 4, 3), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 1, 4, 3), (1, 2, 3, 4), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5, 5), (4, 8, 3, 3), None,
         {})
    )

    def generator():
        for input_shape, weight, bias, kwargs in cases:
            # bias is optional; pass None through when the case omits it
            yield SampleInput(make_arg(input_shape), args=(
                make_arg(weight),
                make_arg(bias) if bias is not None else bias
            ), kwargs=kwargs)

    return list(generator())
def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
    """Sample inputs for layer_norm over varied input/normalized shapes and eps values."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, normalized_shape and a kwarg dict for eps
    cases: Tuple[Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
        ((2, 2, 3), (2, 3), {'eps': -0.5}),
        ((1,), (1,), {}),
        ((1, 2), (2,), {}),
        ((0, 1), (1,), {}),
    )

    def generator():
        for input_shape, normalized_shape, kwargs in cases:
            # Shape of weight and bias should be the same as normalized_shape
            weight = make_arg(normalized_shape)
            bias = make_arg(normalized_shape)
            yield SampleInput(
                make_arg(input_shape),
                args=(normalized_shape, weight, bias),
                kwargs=kwargs
            )
        # Without any optional args
        yield SampleInput(make_arg((1, 2)), args=((2,),))

        # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
        # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400

        # With weight and a `None` bias
        # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))

        # With `None` weight and bias (tests failing for this, see the link above)
        # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))

    return list(generator())
def sample_inputs_hardswish(self, device, dtype, requires_grad):
    """Sample inputs for hardswish; values span [-5, 5] to cover the -3..3
    transition region of the nonlinearity."""
    N = 5
    # N - 1 identically shaped random samples
    return [
        SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
                                requires_grad=requires_grad, low=-5, high=5))
        for _ in range(1, N)
    ]
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad):
    """Sample inputs for F.interpolate in the given `mode` (partially applied per OpInfo)."""
    N, C = 2, 3
    # NOTE: D/S/L here shadow the module-level size constants inside this function
    D = 4
    S = 3
    L = 5

    align_corners_options: Tuple[Any, ...] = (None,)
    if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
        align_corners_options = (True, False, None)
    # spatial ranks supported by each mode
    ranks_for_mode = {
        'nearest': [1, 2, 3],
        'linear': [1],
        'bilinear': [2],
        'bicubic': [2],
        'trilinear': [3],
        'area': [1, 2, 3]
    }

    def shape(size, rank, with_batch_channel=True):
        # Build a full (N, C, size, ...) shape or just the spatial part
        if with_batch_channel:
            return tuple([N, C] + ([size] * rank))
        return tuple([size] * rank)

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=-1, high=1)

    sample_inputs = []
    for align_corners in align_corners_options:
        for rank in ranks_for_mode[mode]:
            sample_inputs.extend([
                # resize via explicit output size (down and up)
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(S, rank, False), None, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(L, rank, False), None, mode, align_corners)),
                # resize via scale factor (up and down)
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 1.7, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 0.6, mode, align_corners)),
            ])

    return sample_inputs
def sample_inputs_gelu(self, device, dtype, requires_grad):
    """Sample inputs for gelu; values are limited to [-3, 3] around the nonlinearity."""
    N = 5
    # N - 1 identically shaped random samples
    return [
        SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
                                requires_grad=requires_grad, low=-3, high=3))
        for _ in range(1, N)
    ]
def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for dim-reducing max/min, including keepdim and 0-dim inputs."""
    # (input shape, args) where args is (dim,) or (dim, keepdim)
    cases = (
        ((S, S, S), (1,)),
        ((S, S, S), (1, True,)),
        ((), (0,)),
        ((), (0, True,)),
    )
    return [SampleInput(make_tensor(shape, device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=args)
            for shape, args in cases]
def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for full-reduction max/min: a 3-D tensor and a 0-dim tensor."""
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)
    return [
        SampleInput(make_arg((S, S, S))),
        SampleInput(make_arg(())),
    ]
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):
    """Sample inputs for quantile/nanquantile: scalar and tensor quantiles over
    the generic reduction inputs, with interpolation variants."""
    test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))
    test_interpolations = ['linear', 'midpoint']

    inputs = []
    for quantiles in test_quantiles:
        for t in _generate_reduction_inputs(device, dtype, requires_grad):
            # Add case without dim and keepdim kwargs
            inputs.append(SampleInput(t, args=(quantiles,)))
            for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
                # Interpolation kwarg for now is only supported when providing both dim and keepdim
                kwargs.setdefault('dim', 0)
                kwargs.setdefault('keepdim', False)
                for interpolation in test_interpolations:
                    kwargs['interpolation'] = interpolation
                    inputs.append(SampleInput(t, args=(quantiles,), kwargs=kwargs))

    return inputs
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
    """Sample inputs for count_nonzero (delegates to the generic reduction sampler)."""
    samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)
    # count_nonzero does not support keepdim yet, so strip it from every sample
    for sample in samples:
        sample.kwargs.pop('keepdim', None)
    return samples
def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad):
    """Sample inputs for leaky_relu: several identically shaped random (N, N) tensors."""
    N = 10
    # N - 1 samples
    return [
        SampleInput(make_tensor((N, N), device=device, dtype=dtype,
                                requires_grad=requires_grad))
        for _ in range(1, N)
    ]
def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for avg_pool2d across kernel/stride/padding/ceil_mode/
    count_include_pad/divisor_override combinations, plus an all-defaults case."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
             ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
             ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
             ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
             ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
             ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))

    def generator():
        for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
            yield SampleInput(make_arg(input_shape),
                              args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
        # Case with just input_shape and kernel_size.
        # BUG FIX: the trailing comma is required — `args=((3, 3))` is just
        # `args=(3, 3)`, which passed kernel_size=3 AND stride=3 instead of
        # a single kernel_size of (3, 3).
        yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
    return list(generator())
def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for topk on a 3-D tensor and a 0-dim tensor, sweeping the
    optional dim / largest / sorted arguments."""
    def make_input(size):
        return make_tensor(size, device, dtype, requires_grad=requires_grad)

    # argument tuples (k, [dim, [largest, [sorted]]]) for the (S, M, S) input
    args_3d = (
        (3,), (3, 1), (3, -2),
        (3, 1, True), (3, -2, True),
        (3, 1, True, True), (3, -2, True, True),
    )
    # and for the 0-dim input
    args_0d = (
        (1,), (1, 0), (1, -1),
        (1, 0, True), (1, -1, True),
        (1, 0, True, True), (1, -1, True, True),
    )

    samples = [SampleInput(make_input((S, M, S)), args=args) for args in args_3d]
    samples += [SampleInput(make_input(()), args=args) for args in args_0d]
    return samples
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
    """Single sample input for torch.outer: an S-vector with an M-vector."""
    lhs = make_tensor((S,), device, dtype, requires_grad=requires_grad)
    rhs = make_tensor((M,), device, dtype, requires_grad=requires_grad)
    return [SampleInput(lhs, args=(rhs,))]
def sample_inputs_dist(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.dist over all broadcastable shape pairs and two p-norms."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
    ps = (2, 4)
    return [SampleInput(make_arg(size_x), args=(make_arg(size_y), p))
            for size_x, size_y, p in product(sizes, sizes, ps)]
# TODO: add a test for the nondeterminism of this operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for index_copy with (non)contiguous operands and scalar edge cases."""
    def make_arg(shape, low=None, high=None, dtype=dtype):
        return make_tensor(shape, device=device, dtype=dtype,
                           low=low, high=high,
                           requires_grad=requires_grad)

    t = make_arg((S, S))
    s = make_arg((S, S))
    # non-contiguous input
    t01 = t.transpose(0, 1)
    # non-contiguous input
    s01 = s.transpose(0, 1)

    # idx is a permutation of 0...S-1 for this function to be deterministic
    idx = torch.randperm(S, device=device, dtype=torch.int64)
    # non-contiguous index
    idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]
    # index_copy_ does not support negative indices
    # idx_neg = -idx - 1
    samples = [SampleInput(tensor, args=(1, idx, source))
               for tensor, idx, source in product([t, t01], [idx, idx_nonctg], [s, s01])]

    # Add scalar cases
    scalar_sizes = [(), (1,)]
    ts = (make_arg(size) for size in scalar_sizes)
    idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)
    ss = (make_arg(size) for size in scalar_sizes)

    samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
    return samples
def sample_inputs_mode(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.mode with default, dim, and (dim, keepdim)
    arguments on 3-D and 0-dim inputs."""
    # (input shape, positional args)
    cases = (
        ((S, S, S), ()),
        ((S, S, S), (1,)),
        ((S, S, S), (1, True,)),
        ((), ()),
        ((), (0,)),
        ((), (0, True,)),
    )
    return [SampleInput(make_tensor(shape, device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=args)
            for shape, args in cases]
# TODO: add a test for the nondeterminism of this operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad):
    """Sample inputs for Tensor.put_ covering generic, scalar, and empty cases.

    Each sample is (target, (index, source, accumulate)).
    """
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    def gen_inputs():
        # Generic inputs: contiguous and non-contiguous targets/sources with
        # contiguous, non-contiguous, and negative index variants.
        tgt_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))
        src_gen = (make_arg((S,), noncontiguous=not ctg) for ctg in (True, False))
        idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
        idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]
        idx_neg = -idx - 1
        idx_list = [idx, idx_nonctg, idx_neg]
        for tgt, idx, src, acc in product(tgt_gen, idx_list, src_gen, (True, False)):
            yield SampleInput(input=tgt, args=(idx, src, acc))

        # Scalar cases
        scalar_sizes = [(), (1,)]
        tgt_gen = (make_arg(size) for size in scalar_sizes)
        idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
        src_gen = (make_arg(size) for size in scalar_sizes)
        for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
            yield SampleInput(input=tgt, args=(idx, src, acc))

        # Empty cases
        tgt_sizes = [(0,), (), (1,), (3, 2)]
        tgt_gen = (make_arg(size) for size in tgt_sizes)
        idx = make_idx((0,), high=1)
        src = make_arg((0,))
        # BUG FIX: iterate the freshly built target generator `tgt_gen`, not the
        # stale `tgt` loop variable left over from the scalar section above.
        for tgt, acc in product(tgt_gen, (True, False)):
            yield SampleInput(input=tgt, args=(idx, src, acc))

    return list(gen_inputs())
def sample_inputs_take(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.take covering generic, scalar, and empty cases."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    def gen_inputs():
        # Generic inputs: take S elements out of S * S
        src_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))
        idx = make_idx((S,), high=S * S)
        idx_nonctg = make_idx((S,), high=S * S, noncontiguous=True)
        # negative-index variant
        idx_neg = -idx - 1
        idx_list = [idx, idx_nonctg, idx_neg]
        for src, idx in product(src_gen, idx_list):
            yield SampleInput(input=src, args=(idx,))

        # Scalar cases
        scalar_sizes = [(), (1,)]
        src_gen = (make_arg(size) for size in scalar_sizes)
        idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
        for src, idx in product(src_gen, idx_gen):
            yield SampleInput(input=src, args=(idx,))

        # Empty cases
        src_sizes = [(0,), (), (1,), (3, 2)]
        src_gen = (make_arg(size) for size in src_sizes)
        idx = make_idx((0,), high=1)
        for src in src_gen:
            yield SampleInput(input=src, args=(idx,))

    return list(gen_inputs())
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad):
    """Sample inputs for movedim/moveaxis: full permutations with positive and
    negative axis lists."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, low=None, high=None,
                       requires_grad=requires_grad)
    samples = (
        # reverse all four dimensions using positive indices
        SampleInput(make_arg((4, 3, 2, 1)), args=([0, 1, 2, 3], [3, 2, 1, 0])),
        # same permutation expressed with negative indices
        SampleInput(make_arg((4, 3, 2, 1)), args=([0, -1, -2, -3], [-3, -2, -1, -0])),
    )
    return samples
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
    """Shared sample inputs for torch.repeat and torch.tile over shape/rep-dim
    combinations, including transposed (non-contiguous) views."""
    rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
    shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))

    if requires_grad:
        # Tests for variant_consistency_jit, grad, gradgrad
        # are slower. Use smaller bags of `rep_dims` and `shapes`
        # in this case.
        rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1))  # type: ignore[assignment]
        shapes = ((), (0,), (2,), (3, 2))  # type: ignore[assignment]

    tensors = [make_tensor(shape, device, dtype,
                           low=None, high=None,
                           requires_grad=requires_grad) for shape in shapes]

    samples = []
    for rep_dim, tensor in product(rep_dims, tensors):
        # also exercise the transposed view (non-contiguous for ndim >= 2)
        for t in (tensor, tensor.T):
            if op_info.name == 'repeat' and len(rep_dim) >= t.dim():
                # `torch.repeat` errors for `len(rep_dims) < t.dim()`,
                # so we filter such combinations.
                samples.append(SampleInput(t, args=(rep_dim,),))
            elif op_info.name == 'tile':
                samples.append(SampleInput(t, args=(rep_dim,),))

    return samples
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.narrow with positive/negative dims and zero-length slices."""
    # (input shape, (dim, start, length))
    cases = (
        ((S, S, S), (1, 2, 2)),
        ((S, S, S), (-1, 2, 2)),
        ((S, S, S), (1, 0, 0)),
        ((S, S, S), (-1, 0, 0)),
    )
    return [SampleInput(make_tensor(shape, device, dtype, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=args)
            for shape, args in cases]
def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.trapezoid: paired (y, x) tensors, dx-only cases,
    and dim variants."""
    # Each case: (y shape, x shape or None, kwargs); x=None means dx/default spacing
    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #{61619}
        # ((6,0), (6,0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    samples = []
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
                               requires_grad=requires_grad)
        if x_shape is not None:
            x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
                                   requires_grad=requires_grad)
            samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
        else:
            samples.append(SampleInput(y_tensor, kwargs=kwarg))
    return samples
def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.cumulative_trapezoid: paired (y, x) tensors,
    dx-only cases, and dim variants."""
    # Each case: (y shape, x shape or None, kwargs); x=None means dx/default spacing
    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #{61619}
        # ((6,0), (6,0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    samples = []
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
                               requires_grad=requires_grad)
        if x_shape is not None:
            x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
                                   requires_grad=requires_grad)
            samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
        else:
            samples.append(SampleInput(y_tensor, kwargs=kwarg))
    return samples
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.unsqueeze at boundary and negative axes, plus a 0-dim input."""
    # (input shape, axis to insert)
    cases = [
        ((3, 4, 5), 0),
        ((3, 4, 5), 1),
        ((3, 4, 5), 3),
        ((3, 4, 5), -1),
        ((3, 4, 5), -3),
        ((), 0),
    ]
    return [SampleInput(make_tensor(shape, device, dtype, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(axis,))
            for shape, axis in cases]
def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for nn.functional.unfold over the full product of shapes
    and (kernel_size, dilation, padding, stride) hyper-parameters."""
    shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
    kernel_sizes = (2, (2, 2), (3, 3))
    dilations = (1, 2, (1, 2))
    paddings = (0, 1, (1, 1))
    strides = (1, 2, (1, 2))

    samples = [
        SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
                    args=(kernel_size, dilation, padding, stride))
        for shape, kernel_size, dilation, padding, stride
        in product(shapes, kernel_sizes, dilations, paddings, strides)
    ]
    # One extra case relying on the default dilation/padding/stride
    samples.append(SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),
                               args=((3, 3),)))
    return samples
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.squeeze: all-dims squeeze plus explicit
    positive/negative dim arguments and a 0-dim input."""
    # (input shape, args) — empty args squeezes every singleton dimension
    cases = (
        ((S, 1, S, 1), ()),
        ((1, 1, 1, 1), ()),
        ((S, 1, S, 1), (1,)),
        ((S, 1, S, 1), (-1,)),
        ((S, 1, S, 1), (2,)),
        ((S, 1, S, 1), (-2,)),
        ((), (0, )),
    )
    return [SampleInput(make_tensor(shape, device, dtype, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=args)
            for shape, args in cases]
def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
    """Sample inputs for nn.functional.pad in the given padding `mode`.

    Each case is (input shape, pad tuple); the case tables differ per mode
    because each mode supports different ranks and pad lengths.
    """
    assert mode in ('constant', 'reflect', 'replicate', 'circular')
    if mode in ['reflect', 'replicate']:
        cases: tuple = (  # ignore
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    elif mode == 'constant':
        cases = (
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((1, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((0, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((0, 3, 3, 3), (1, 2)),
            ((0, 3, 3, 3), (0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((3, 3, 5, 5), (1, 2)),
            ((3, 3, 5, 5), (0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 2)),
            ((1, 3, 3, 3, 3), (0, 1)),
            ((1, 3, 3, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    else:  # mode == 'circular'
        if dtype == torch.bool:
            # test_dtypes fails on ASAN for some of the bool cases with:
            # runtime error: load of value 190, which is not a valid value for type 'bool'
            # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
            # Reference Issue: https://github.com/pytorch/pytorch/issues/63034
            cases = (
                ((2, 3, 3), (1, 2)),
                ((1, 3, 3), (1, 2)),
            )
        else:
            cases = (
                ((0, 3, 3), (1, 2)),
                ((0, 3, 3), (0, 1)),
                ((1, 3, 3), (1, 2)),
                ((1, 3, 3), (0, 1)),
                ((0, 3, 3, 3), (0, 2, 0, 1)),
                ((3, 3, 5, 5), (0, 2, 0, 1)),
                ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
                ((1, 3, 4, 4), (-1, 1, -2, 1)),
            )
    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def generator():
        if mode == 'constant':
            # Default args
            yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))
        if mode in ['reflect', 'replicate', 'circular']:
            for shape, pad in cases:
                yield SampleInput(make_inp(shape), args=(pad, mode))
        else:  # mode == 'constant'
            # constant mode additionally takes an explicit fill value
            for pad_value in (1., 2.):
                for shape, pad in cases:
                    yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
    return list(generator())
# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet
# Creates matrices with a positive nonzero determinant
def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.logdet: matrices adjusted to have a positive
    nonzero determinant so the log is well defined."""
    def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):
        # Clamp singular values away from zero, then flip one row's sign per
        # matrix if needed so the determinant matches the requested sign.
        u, s, vh = torch.linalg.svd(A, full_matrices=False)
        s.clamp_(min=min_singular_value)
        A = (u * s.unsqueeze(-2)) @ vh
        det = A.det()
        if sign is not None:
            if A.dim() == 2:
                if (det < 0) ^ (sign < 0):
                    A[0, :].neg_()
            else:
                cond = ((det < 0) ^ (sign < 0)).nonzero()
                if cond.size(0) > 0:
                    for i in range(cond.size(0)):
                        A[list(cond[i])][0, :].neg_()
        return A

    samples = []

    # cases constructed using make_tensor()
    tensor_shapes = (
        (S, S),
        (1, 1),
        (3, 3, S, S),
        (3, 3, 1, 1)
    )

    for shape in tensor_shapes:
        t = make_tensor(shape, device=device, dtype=dtype)
        d = make_nonzero_det(t).requires_grad_(requires_grad)
        samples.append(SampleInput(d))

    # cases constructed using:
    # 1) make_symmetric_matrices
    # 2) make_symmetric_pd_matrices
    # 3) make_fullrank_matrices_with_distinct_singular_values
    symmetric_shapes = (
        (S, S),
        (3, S, S),
    )

    def _helper(constructor, *shape, **kwargs):
        t = constructor(*shape, device=device, dtype=dtype)
        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
        samples.append(SampleInput(d))

    for shape in symmetric_shapes:
        _helper(make_symmetric_matrices, *shape)
        _helper(make_symmetric_pd_matrices, *shape)
        _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)

    return tuple(samples)
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
# This mimicks PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]
@wraps(fn)
def wrapped_fn(x):
# As the default dtype can change, acquire it when function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for torch.fft transforms; branches on `self.ndimensional`
    (set by SpectralFuncInfo) to pick tuple vs scalar s/dim arguments."""
    nd_tensor = make_tensor((S, S + 1, S + 2), device, dtype, low=None, high=None,
                            requires_grad=requires_grad)
    tensor = make_tensor((31,), device, dtype, low=None, high=None,
                         requires_grad=requires_grad)

    if self.ndimensional:
        # n-dimensional transforms accept tuples for `s` and `dim`
        return [
            SampleInput(nd_tensor, kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
            SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor, kwargs=dict(s=(8,))),
            SampleInput(tensor),
            *(SampleInput(nd_tensor, kwargs=dict(dim=dim))
              for dim in [-1, -2, -3, (0, -1)]),
        ]
    else:
        # 1-D transforms take a scalar length `n` and a single `dim`
        return [
            SampleInput(nd_tensor, kwargs=dict(n=10, dim=1, norm='ortho')),
            SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor, kwargs=dict(n=7)),
            SampleInput(tensor),
            *(SampleInput(nd_tensor, kwargs=dict(dim=dim))
              for dim in [-1, -2, -3]),
        ]
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
    """Operator information for torch.fft transforms."""

    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref=None,  # reference implementation (probably in the np.fft namespace)
                 dtypes=floating_and_complex_types(),
                 ndimensional: bool,  # whether the dim argument can be a tuple
                 sample_inputs_func=sample_inputs_spectral_ops,
                 decorators=None,
                 **kwargs):
        # Every spectral op requires an FFT-capable CPU build and is skipped on ROCm.
        combined_decorators = [] if decorators is None else list(decorators)
        combined_decorators.extend([skipCPUIfNoFFT, skipCUDAIfRocm])

        super().__init__(name=name,
                         dtypes=dtypes,
                         decorators=combined_decorators,
                         sample_inputs_func=sample_inputs_func,
                         **kwargs)
        # Default the reference to the same-named NumPy function (e.g. np.fft.fft).
        self.ref = ref if ref is not None else _getattr_qual(np, name)
        self.ndimensional = ndimensional
class ShapeFuncInfo(OpInfo):
    """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""

    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref,  # a reference function
                 dtypes=floating_types(),
                 dtypesIfCPU=None,
                 dtypesIfCUDA=None,
                 dtypesIfROCM=None,
                 sample_inputs_func=None,
                 **kwargs):
        # Everything is forwarded to OpInfo; the only extra state is the
        # reference implementation used for comparison testing.
        super().__init__(name,
                         dtypes=dtypes,
                         dtypesIfCPU=dtypesIfCPU,
                         dtypesIfCUDA=dtypesIfCUDA,
                         dtypesIfROCM=dtypesIfROCM,
                         sample_inputs_func=sample_inputs_func,
                         **kwargs)
        self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):
    # Produce N square tensors: all N x N when same_size, otherwise with
    # decreasing sizes N x N, (N-1) x (N-1), ..., 1 x 1.
    sizes = [N] * N if same_size else [N - i for i in range(N)]
    return [make_tensor((k, k), device, dtype, noncontiguous=noncontiguous) for k in sizes]
def get_foreach_method_names(name):
    """Resolve the torch._foreach_<name> variants and their non-foreach references.

    Returns a 4-tuple (op, inplace_op, ref, ref_inplace); any entry is None
    when the corresponding attribute does not exist.
    """
    foreach_name = "_foreach_" + name
    op = getattr(torch, foreach_name, None)
    inplace_op = getattr(torch, foreach_name + "_", None)
    # non-foreach references: torch.<name> and Tensor.<name>_
    ref = getattr(torch, name, None)
    ref_inplace = getattr(torch.Tensor, name + "_", None)
    return op, inplace_op, ref, ref_inplace
class ForeachFuncInfo(OpInfo):
    """Early version of a specialized OpInfo for foreach functions"""

    def __init__(self,
                 name,
                 dtypes=floating_and_complex_types(),
                 dtypesIfCPU=all_types_and_complex(),
                 dtypesIfCUDA=floating_and_complex_types_and(torch.half),
                 dtypesIfROCM=None,
                 safe_casts_outputs=True,
                 supports_alpha_param=False,
                 sample_inputs_func=sample_inputs_foreach,
                 **kwargs):
        # The OpInfo is registered under the "_foreach_"-prefixed name.
        super().__init__(
            "_foreach_" + name,
            dtypes=dtypes,
            dtypesIfCPU=dtypesIfCPU,
            dtypesIfCUDA=dtypesIfCUDA,
            dtypesIfROCM=dtypesIfROCM,
            safe_casts_outputs=safe_casts_outputs,
            sample_inputs_func=sample_inputs_func,
            **kwargs
        )
        # Resolve the method / in-place variants and the non-foreach references.
        (self.method_variant,
         self.inplace_variant,
         self.ref,
         self.ref_inplace) = get_foreach_method_names(name)
        self.supports_alpha_param = supports_alpha_param
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):
    """Sample inputs for cholesky_inverse: Cholesky factors of positive-definite
    Hermitian (symmetric) matrices, plus 0x0 and zero-batch edge cases.

    Each SampleInput receives its own independent tensor: previously the same
    factor was shared between the default and the upper=True sample, and
    sharing tensors between samples interferes with gradient checks.
    """
    # Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    inputs = (
        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix
        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices
        random_hermitian_pd_matrix(S, dtype=dtype, device=device),  # single matrix
        random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices
    )
    out = []
    for a in (torch.linalg.cholesky(t) for t in inputs):
        a.requires_grad = requires_grad
        out.append(SampleInput(a))
        # Independent copy for the upper=True variant to avoid tensor sharing.
        a_copy = a.clone().detach().requires_grad_(requires_grad)
        out.append(SampleInput(a_copy, kwargs=dict(upper=True)))
    return out
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for torch.linalg.lstsq: well-conditioned square systems,
    optionally batched."""
    from torch.testing._internal.common_utils import random_well_conditioned_matrix

    # NOTE: inputs are not marked with `requires_grad` since
    # linalg_lstsq is not differentiable
    samples = []
    for batch in ((), (3,), (3, 3)):
        shape = batch + (3, 3)
        lhs = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
        rhs = make_tensor(shape, device, dtype, low=None, high=None)
        samples.append(SampleInput(lhs, args=(rhs,)))
    return samples
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
    """
    Inputs for torch.linalg.householder_product (torch.orgqr):
    a (batch of) square or rectangular matrix plus a (batch of) tau vector.
    Empty, square, rectangular, batched square and batched rectangular
    variants are generated.
    """
    # Each column of the matrix is multiplied many times, which makes Jacobian
    # entries very large and degrades the finite-difference result of gradcheck.
    # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used.
    make_narrow = partial(make_tensor, device=device, dtype=dtype, low=-2, high=2,
                          requires_grad=requires_grad)
    make_any = partial(make_tensor, device=device, dtype=dtype, low=None, high=None,
                       requires_grad=requires_grad)

    matrix_and_tau_shapes = [
        ((S, S), (S,)),
        ((S + 1, S), (S,)),
        ((2, 1, S, S), (2, 1, S,)),
        ((2, 1, S + 1, S), (2, 1, S,)),
    ]
    samples = [SampleInput(make_narrow(m_shape), args=(make_narrow(tau_shape),))
               for m_shape, tau_shape in matrix_and_tau_shapes]
    # empty input, and a square input paired with an empty tau
    samples.append(SampleInput(make_any((0, 0)), args=(make_any((0,)),)))
    samples.append(SampleInput(make_narrow((S, S)), args=(make_any((0,)),)))
    return tuple(samples)
def sample_inputs_ormqr(op_info, device, dtype, requires_grad):
    """Sample inputs for ormqr: Householder reflectors + tau + an 'other'
    matrix, over all combinations of batch shapes, sizes (including empty),
    and the left/transpose flags."""
    make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    flags = [True, False]

    samples = []
    for batch, (m, n), left, transpose in product(batches, product(ns, ns), flags, flags):
        reflectors = make_input((*batch, m, n))
        tau = make_input((*batch, min(m, n)))
        # `other` is (m, n) when multiplying from the left, (n, m) from the right.
        other_shape = (m, n) if left else (n, m)
        other = make_input((*batch, *other_shape))
        samples.append(SampleInput(reflectors, args=(tau, other,),
                                   kwargs={"left": left, "transpose": transpose}))
    return tuple(samples)
def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    Always positive-definite inputs for torch.linalg.cholesky, built with
    random_hermitian_pd_matrix over the product of 'batches' and 'ns'
    (8 SampleInputs in total).

    'batches' cases: () single input, (0,) zero batched dimension,
    (2,) batch of two matrices, (1, 1) 1x1 batch of matrices.
    'ns' gives 0x0 and 5x5 matrices.
    Zero-sized dimensions are edge cases in the implementation and important
    to test for in order to avoid unexpected crashes.
    """
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    samples = []
    for batch, n in product([(), (0, ), (2, ), (1, 1)], [5, 0]):
        mat = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
        mat.requires_grad = requires_grad
        samples.append(SampleInput(mat))
    return samples
def sample_inputs_symeig(op_info, device, dtype, requires_grad=False):
    """Sample inputs for symeig: invertible matrices with a random `upper`
    flag and eigenvectors=True."""
    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.kwargs = {"upper": bool(np.random.choice([True, False])),
                         "eigenvectors": True}
        # Eigenvector signs are arbitrary, so take abs of the eigenvector
        # output to keep the loss gauge-invariant.
        sample.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
    return samples
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False):
    """
    This function generates input for torch.linalg.eig.

    (The previous docstring was copied from the eigh sampler and wrongly
    mentioned a UPLO keyword, which eig does not take.)
    """
    def out_fn(output):
        # Eigenvectors are defined only up to a phase/sign, so take abs of the
        # eigenvector output to keep the loss gauge-invariant.
        return output[0], abs(output[1])

    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.output_process_fn_grad = out_fn

    return samples
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    Inputs for torch.linalg.eigh/eigvalsh with a randomly chosen UPLO ("U" or
    "L") keyword argument.
    """
    def out_fn(output):
        # eigh returns a (eigenvalues, eigenvectors) tuple; eigvalsh returns a
        # plain tensor. Eigenvector phase is arbitrary, so abs() keeps the
        # loss gauge-invariant.
        if isinstance(output, tuple):
            return output[0], abs(output[1])
        return output

    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
        sample.output_process_fn_grad = out_fn
    return samples
def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):
    """Sample inputs for torch.linalg.slogdet on invertible matrices."""
    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        # Only the log-abs-det output (index 1) participates in the loss.
        sample.output_process_fn_grad = lambda output: output[1]
    return samples
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    Inputs for torch.linalg.pinv with the hermitian=True keyword argument.
    """
    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
    for sample in samples:
        sample.kwargs = {"hermitian": True}
    return samples
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):
    """
    Always-solvable inputs for torch.linalg.solve.

    random_fullrank_matrix_distinct_singular_value gives non-singular
    (= invertible, = solvable) matrices 'a'. The first input is the
    itertools.product of 'batches' and 'ns'; the second is the product of
    'batches', 'ns' and 'nrhs'. In total 18 SampleInputs are generated.

    'batches' cases: () single input, (0,) zero batched dimension,
    (2,) batch of two matrices. 'ns' gives 0x0 and 5x5 matrices, and 'nrhs'
    controls the number of vectors to solve for: () uses 1 implicitly,
    (1,) is the same but explicit, (3,) solves for 3 vectors. Zero-sized
    dimensions are implementation edge cases and important to test.

    'vector_rhs_allowed' controls whether nrhs = () is included:
    torch.solve / triangular_solve / cholesky_solve (as opposed to
    torch.linalg.solve) do not allow 1D tensors (vectors) as the
    right-hand side; once those legacy ops and their tests are removed,
    'vector_rhs_allowed' may be removed as well.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    batches = [(), (0, ), (2, )]
    ns = [5, 0]
    nrhs = [(), (1,), (3,)] if vector_rhs_allowed else [(1,), (3,)]

    samples = []
    for n, batch, rhs in product(ns, batches, nrhs):
        a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
        a.requires_grad = requires_grad
        b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)
        b.requires_grad = requires_grad
        samples.append(SampleInput(a, args=(b,)))
    return samples
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    Always-solvable inputs for the legacy solve functions (the ones that are
    not in the torch.linalg module). Unlike sample_inputs_linalg_solve the
    right-hand side of A x = b must have b.ndim >= 2 (no vectors), and the
    argument order is swapped: b is the input and A is the argument.
    """
    samples = sample_inputs_linalg_solve(
        op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
    )
    # Swap (A, b) -> (b, A) for the legacy calling convention.
    for sample in samples:
        sample.input, sample.args = sample.args[0], (sample.input,)
    return samples
def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for torch.lu: square and over/under-determined matrices,
    optionally batched, with both values of get_infos."""
    samples = []
    for batch_shape, get_infos, size_delta in product(((), (3,), (3, 3)),
                                                      (True, False),
                                                      (-2, -1, 0, +1, +2)):
        shape = batch_shape + (S + size_delta, S)
        t = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
        # args are (pivot=True, get_infos)
        samples.append(SampleInput(t, args=(True, get_infos)))
    return samples
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for lu_solve.

    Pairs a right-hand-side tensor `b` with the LU factors and pivots of an
    always-invertible matrix, over batched/empty sizes and every valid
    combination of requires_grad for the two differentiable inputs.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    batches = [(), (0, ), (2, )]  # no batch dim, zero-sized batch, batch of two
    ns = [5, 3, 0]  # matrix sizes, including empty
    nrhs = [0, 1, 6]  # number of right-hand-side columns

    def generate_samples():
        for n, batch, rhs in product(ns, batches, nrhs):
            a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
            requires_grad_options = (False,) if not requires_grad else (True, False)
            # we try all possible combinations of requires_grad for each input
            for lu_requires_grad, b_requires_grad in product(requires_grad_options, requires_grad_options):
                # when requires_grad == True, at least one input has to have requires_grad enabled
                if requires_grad and not lu_requires_grad and not b_requires_grad:
                    continue
                # we run LU several times to guarantee that the produced SampleInputs are independent
                # this is especially important when setting different requires_grad for same tensors!
                lu, pivs = a.lu()
                lu.requires_grad = lu_requires_grad
                b = torch.randn(*batch, n, rhs, dtype=dtype, device=device)
                b.requires_grad = b_requires_grad
                yield SampleInput(b, args=(lu, pivs))

    return list(generate_samples())
def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for lu_unpack: LU data/pivots derived from the lu()
    samples, plus rectangular variants of each."""
    samples = []
    for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):
        lu_data, pivots = lu_sample.input.lu()
        samples.append(SampleInput(lu_data, args=(pivots,)))

        # Rectangular variants: grow one of the two trailing dims by one,
        # keeping the other at the original row count.
        *batch_shape, n, _ = lu_data.shape
        for extra_rows, extra_cols in ((1, 0), (0, 1)):
            rect = make_tensor(
                (*batch_shape, n + extra_rows, n + extra_cols),
                device, dtype,
                requires_grad=False,
                low=None, high=None
            )
            rect_lu_data, rect_pivots = rect.lu()
            rect_lu_data.requires_grad_(requires_grad)
            samples.append(SampleInput(rect_lu_data, args=(rect_pivots,)))

    return samples
def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # (shifts,) or (shifts, dims) argument combinations, including negative
    # and oversized shifts and tuple-valued shifts/dims.
    arg_sets = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))
    return [SampleInput(make_arg((S, S, S)), args=args) for args in arg_sets]
def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # (k, dims) combinations, plus the all-defaults case.
    arg_sets = ((1, (0, 1),),
                (1, (1, 2),),
                (1, (1, -1),),
                ())
    return [SampleInput(make_arg((S, S, S)), args=args) for args in arg_sets]
def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for std/var covering dim / unbiased / keepdim /
    correction keyword combinations on 3D and 1D tensors."""
    make = partial(make_tensor, device=device, dtype=dtype,
                   low=None, high=None, requires_grad=requires_grad)
    tensor_nd = make((S, S, S))
    tensor_1d = make((S,))

    # NOTE: the samples intentionally reuse the same two tensors.
    return [
        SampleInput(tensor_nd),
        SampleInput(tensor_nd, kwargs=dict(dim=1)),
        SampleInput(tensor_nd, kwargs=dict(dim=1, unbiased=True, keepdim=True)),
        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=True, keepdim=True)),
        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=False, keepdim=False)),
        SampleInput(tensor_nd, kwargs=dict(dim=(1,), correction=S // 2)),
        SampleInput(tensor_nd, kwargs=dict(dim=None, correction=0, keepdim=True)),
    ]
def _generate_correlation_inputs(device, dtype, requires_grad):
    # 1D and 2D inputs (both orientations) shared by corrcoef/cov samplers.
    for shape in [(2,), (1, 2), (3, 2), (2, 3)]:
        yield make_tensor(shape, device, dtype, requires_grad=requires_grad)
def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):
    # corrcoef takes a single tensor argument and no keyword arguments.
    return list(map(SampleInput, _generate_correlation_inputs(device, dtype, requires_grad)))
def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.cov: bare tensors plus every combination of the
    correction / fweights / aweights keyword arguments."""
    inputs = []
    for t in _generate_correlation_inputs(device, dtype, requires_grad):
        inputs.append(SampleInput(t))
        # Observations are columns for 2D input, individual elements for 1D input.
        num_observations = t.numel() if t.ndimension() < 2 else t.size(1)
        fweights = make_tensor((num_observations,), device, torch.int, low=0, high=10, requires_grad=requires_grad)
        aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)
        # Each weight argument cycles through None and the generated tensor.
        for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):
            inputs.append(SampleInput(t, kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))
    return inputs
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
    """
    This function generates input for torch.svd with distinct singular values so that autograd is always stable.
    Matrices of different size:
        square matrix - S x S size
        tall matrix - S x (S-2)
        wide matrix - (S-2) x S
    and batched variants of above are generated.
    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd
    It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value

    # svd and linalg.svd returns V and V.conj().T, respectively. So we need to slice
    # along different dimensions when needed (this is used by
    # test_cases2:wide_all and wide_all_batched below)
    if is_linalg_svd:
        def slice_V(v):
            return v[..., :(S - 2), :]

        def uv_loss(usv):
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0]
            return u00 * v00_conj
    else:
        def slice_V(v):
            return v[..., :, :(S - 2)]

        def uv_loss(usv):
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0].conj()
            return u00 * v00_conj

    test_cases1 = (  # some=True (default)
        # loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.
        # the simplest choice to satisfy this requirement is to apply 'abs'.
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: usv[1]),  # 'check_grad_s'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[0])),  # 'check_grad_u'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[2])),  # 'check_grad_v'
        # this test is important as it checks the additional term that is non-zero only for complex-valued inputs
        # and when the loss function depends both on 'u' and 'v'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            uv_loss),  # 'check_grad_uv'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))),  # 'wide'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'wide_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall_batched'
    )
    test_cases2 = (  # some=False
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all_batched'
    )

    out = []
    # some=True samples: `some` for torch.svd maps to full_matrices=False for linalg.svd
    for a, out_fn in test_cases1:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': False}
        else:
            kwargs = {'some': True}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))

    # some=False samples: maps to full_matrices=True for linalg.svd
    for a, out_fn in test_cases2:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': True}
        else:
            kwargs = {'some': False}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))

    return out
def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # (input shape, permutation) pairs, including negative dims and a scalar input.
    cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
             ((1, 2, 3, 4), (0, -2, -1, 1)),
             ((), ()),
             ((1, 2, 3, 4), (2, 1, 3, 0))]
    return [SampleInput(make_arg(shape), args=(perm,)) for shape, perm in cases]
# Based on erstwhile method_tests tests & some tensor_op_tests for pow
def sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for pow, branched on the dtype family:

    - floating dtypes: tensor ** tensor (including broadcasting/resizing
      cases) and tensor ** python-scalar
    - complex dtypes: tensor ** scalar with real and complex exponents
    - bool: scalar exponents and tensor exponents of several dtypes
    - remaining (integer) dtypes: small positive integer exponents and a
      same-dtype tensor exponent

    The `additive_*` offsets are added to the generated tensors to shift
    their value ranges (e.g. away from zero).
    """
    samples = []

    if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
        # Each case is: (base shape, base low, base high, base additive offset,
        # base requires_grad, exponent shape, exponent low, exponent high,
        # exponent additive offset, exponent requires_grad, broadcasts_input).
        test_cases = (
            ((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),
            ((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),
            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, False, False),
            ((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),
        )
        tests_require_resizing = (
            ((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),
            ((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),
            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),
        )
        cases = test_cases + tests_require_resizing

        samples = list(SampleInput(make_tensor(shape_b, low=low_b, high=high_b,
                                               requires_grad=b_grad, device=device,
                                               dtype=dtype) + additive_b,
                                   args=(make_tensor(shape_e, low=low_e, high=high_e,
                                                     requires_grad=e_grad, device=device,
                                                     dtype=dtype) + additive_e,),
                                   broadcasts_input=broadcasts_input)
                       for shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,
                       high_e, additive_e, e_grad, broadcasts_input in cases)

        # tensor ** python-float cases
        tensor_scalar_inputs = (
            ((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),
            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))
        )
        more_samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,
                                                    high=high, low=low,
                                                    requires_grad=b_grad) + additive,
                                        args=exp)
                            for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)

        samples = [*samples, *more_samples]
    elif dtype in [torch.complex64, torch.complex128]:
        # complex base with real and imaginary scalar exponents
        args_tuple = (
            ((2, 2), 0, 5, requires_grad, (3.14,)),
            ((), 0, 1, requires_grad, (3.14,)),
            ((), 0, 1, requires_grad, (3.14j,))
        )
        samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,
                                               high=high, low=low,
                                               requires_grad=b_grad) + 1e-3 * (1 + 1j),
                                   args=arg)
                       for shape, low, high, b_grad, arg in args_tuple)
    elif dtype == torch.bool:
        # bool base with scalar exponents ...
        arg_tuple = (0, 1, 1., 2.3)
        samples = list(SampleInput(make_tensor((2, 2), device=device, dtype=dtype,
                                               requires_grad=requires_grad),
                                   args=(arg,))
                       for arg in arg_tuple)
        # ... and with tensor exponents of several dtypes
        dtypes_list = [torch.float64, torch.float32, torch.int64, torch.int32]
        more_samples = list(SampleInput(make_tensor((2, 2), device, dtype=torch.bool,
                                                    requires_grad=requires_grad),
                                        args=(make_tensor((2, 2), device, dtype=dtype,
                                                          requires_grad=requires_grad),))
                            for dtype in dtypes_list)
        samples = [*samples, *more_samples]
        # broadcasting bool base ** float exponent
        samples.append(SampleInput(make_tensor((2, 2, 2), device, dtype=torch.bool,
                                               requires_grad=requires_grad),
                                   args=(make_tensor((2, 1), device, dtype=torch.float64,
                                                     requires_grad=requires_grad),)))
    else:
        # integer dtypes: small positive integer exponents only
        exp_tuple = (1, 2, 3)
        samples = list(SampleInput(make_tensor((2, 2), device, dtype,
                                               requires_grad=requires_grad),
                                   args=(arg,))
                       for arg in exp_tuple)
        samples.append(SampleInput(make_tensor((2, 2), device, dtype,
                                               requires_grad=requires_grad),
                                   args=(make_tensor((2, 2), device, dtype,
                                                     requires_grad=requires_grad),)))
    return tuple(samples)
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
    # torch.svd variant: selects the `some=...` keyword form of the shared helper.
    return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):
    # torch.linalg.svd variant: selects the `full_matrices=...` keyword form of the shared helper.
    return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
    # All combinations of batch shapes and (m, n) sizes, including empty dims.
    batches = [(), (0, ), (2, ), (1, 1)]
    ns = [5, 2, 0]
    return [
        SampleInput(make_tensor((*batch, m, n), device, dtype, low=None, high=None,
                                requires_grad=requires_grad))
        for batch, (m, n) in product(batches, product(ns, ns))
    ]
def sample_inputs_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
    N = 10
    # N - 1 identically shaped samples (range(1, N) iterates N - 1 times).
    return [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
                                    requires_grad=requires_grad))
            for _ in range(1, N)]
def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample input for torch.eig: a single diagonalizable matrix constructed
    as V diag(w) V^-1 from random eigenvectors/eigenvalues, called with
    eigenvectors=True."""
    eigvecs = make_tensor((S, S), device=device, dtype=dtype,
                          low=None, high=None)
    eigvals = make_tensor((S,), device=device, dtype=dtype,
                          low=None, high=None)
    # we produce only diagonalizable inputs which do not have
    # complex eigenvalues for real inputs, as there is no
    # backward implementation for real inputs with complex
    # eigenvalues yet.
    input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
    input.requires_grad_(requires_grad)

    def process_output(eigpair):
        # Post-process eig's (eigenvalues, eigenvectors) output for gradcheck.
        eigvals, eigvecs = eigpair
        if dtype.is_complex:
            # eig produces eigenvectors which are normalized to 1 norm.
            # Note that if v is an eigenvector, so is v * e^{i \phi},
            # and |v| = |v * e^{i \phi}| = 1.
            # This, however, makes the eigenvector backward computation process
            # rather unstable unless the objective function is gauge-invariant,
            # that is if f(z) == f(|z|), for example.
            # Hence for complex inputs we ignore the phases and return only
            # the absolute values.
            return eigvals, eigvecs.abs()
        else:
            return eigvals, eigvecs

    return [
        SampleInput(
            input,
            kwargs=dict(eigenvectors=True),
            output_process_fn_grad=process_output
        ),
    ]
def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for einsum: vector, matrix, and batched contractions plus
    diagonal and ellipsis equations. Operands are passed as a list input with
    the equation string in args."""
    x = make_tensor((3,), device, dtype, requires_grad=requires_grad)
    y = make_tensor((4,), device, dtype, requires_grad=requires_grad)
    A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
    B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)
    C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)
    D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
    E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)
    H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
    I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)

    inputs = []

    # Vector operations
    inputs.append(SampleInput([x], args=('i->',)))                      # sum
    inputs.append(SampleInput([x, y], args=('i,j->ij',)))               # outer

    # Matrix operations
    inputs.append(SampleInput([A], args=("ij->i",)))                    # col sum
    inputs.append(SampleInput([A, B], args=("ij,kj->ik",)))             # matmul
    inputs.append(SampleInput([A, E], args=("ij,Ab->ijAb",)))           # matrix outer product

    # Tensor operations
    inputs.append(SampleInput([C, D], args=("aij,ajk->aik",)))          # batch matmul
    inputs.append(SampleInput([D, E], args=("aij,jk->aik",)))           # tensor matrix contraction
    inputs.append(SampleInput([C, B], args=("ijk,ik->j",)))             # non contiguous

    # Test diagonals
    inputs.append(SampleInput([I], args=('iji->j',)))                   # non-contiguous trace

    # Test ellipsis
    inputs.append(SampleInput([H], args=("i...->...",)))
    inputs.append(SampleInput([C, x], args=('...ik, ...j -> ij',)))

    return inputs
def sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    Inputs for torch.linalg.qr: the itertools.product of batch shapes and
    (m, n) sizes, including empty batches and zero-sized dimensions.
    """
    batches = [(), (0,), (2, ), (1, 1)]
    ns = [5, 2, 0]
    samples = []
    for batch, (m, n) in product(batches, product(ns, ns)):
        mat = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)
        samples.append(SampleInput(mat))
    return samples
def sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):
    """Sample inputs for geqrf over batch shapes and (m, n) sizes."""
    # Device type is fixed for the whole call, so compute it once.
    on_cuda = torch.device(device).type == 'cuda'
    samples = []
    for batch, (m, n) in product([(), (0, ), (2, ), (1, 1)],
                                 product([5, 2, 0], [5, 2, 0])):
        # TODO: CUDA path doesn't work with batched or empty inputs
        if on_cuda and (batch != () or m == 0 or n == 0):
            continue
        samples.append(SampleInput(make_tensor((*batch, m, n), device, dtype,
                                               low=None, high=None,
                                               requires_grad=requires_grad)))
    return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    # regular and zero-sized-dim inputs x several `dims` selections
    sizes = ((S, M, S), (S, 0, M))
    all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())
    return [SampleInput(make_arg(size), kwargs={"dims": dims})
            for size, dims in product(sizes, all_dims)]
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):
    # One regular and one zero-sized-dim input; fliplr/flipud take no extra args.
    shapes = ((S, M, S), (S, 0, M))
    return [SampleInput(make_tensor(shape, device, dtype, low=None, high=None,
                                    requires_grad=requires_grad))
            for shape in shapes]
def sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):
    """Sample inputs for fmod/remainder: tensor-with-scalar, tensor-with-tensor,
    and broadcasting variants. With autodiffed=True only the scalar-divisor
    cases are produced."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    if autodiffed:
        samples = (
            ((S, S, S), 1.5, False),
            ((), 1.5, False),
        )
    else:
        # Each case is (input shape, other shape-or-scalar, broadcasts_input).
        cases = (
            ((S, S, S), (), False),
            ((S, S, S), (S, S, S), False),
            ((S, S, S), (S,), False),
        )

        # Sample inputs with scalars as torch tensors
        cases_with_tensor_scalar = (
            ((), torch.tensor(1, dtype=dtype, device=device, requires_grad=False), False),
        )

        # Sample inputs with broadcasting
        cases_with_broadcasting = (
            ((S,), (S, S, S), True),
            ((S, 1, S), (S, S, S), True),
            ((), (S, S, S), True),
        )

        samples = cases + cases_with_tensor_scalar + cases_with_broadcasting  # type: ignore[assignment]

    def generator():
        for shape, arg_other, broadcasts_input in samples:
            if isinstance(arg_other, tuple):
                # divisor tensor; exclude_zero avoids zero divisors
                arg = make_arg(arg_other, requires_grad=False, exclude_zero=True)
            else:
                # shape_other is scalar or torch.tensor
                arg = arg_other
            yield(SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input))

    return list(generator())
# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for clamp with tensor bounds: full-tensor min/max bounds,
    0-dim sliced bounds, and a broadcastable min-only bound."""
    x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
    lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
    ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)

    def detach(tensor):
        # clone + detach so each sample's input is an independent leaf tensor
        return tensor.clone().detach_().requires_grad_(requires_grad)

    return [
        SampleInput(detach(x), args=(lb, ub)),
        SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
        SampleInput(detach(x), args=(detach(lb[:, :1]),)),
    ]
def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):
    """Sample inputs for clamp with scalar bounds, including min-only,
    max-only, and scalar (0-dim) tensor cases."""
    tensors = (
        make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
    )
    # uint8 cannot represent negative bounds.
    min_max_vals = ((2, 5), (3, 7)) if dtype is torch.uint8 else ((0, 1), (-1, 1))

    samples = [SampleInput(tensor, args=vals)
               for tensor, vals in product(tensors, min_max_vals)]
    # min-only and max-only variants
    samples.append(SampleInput(tensors[0], args=(0.5, None)))
    samples.append(SampleInput(tensors[0], args=(None, 0.5)))
    # scalar (0-dim) tensor input
    scalar_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None,
                                requires_grad=requires_grad)
    samples.append(SampleInput(scalar_tensor, args=(0.0, 1.0)))
    return samples
def sample_kwargs_clamp_scalar(device, dtype, input):
    """Random scalar (min, max) kwargs for clamp, chosen per dtype.

    Returns a pair of dicts: torch-style {'min', 'max'} and the numpy-style
    {'a_min', 'a_max'} mirror used by the reference implementation.
    """
    if dtype is torch.uint8:
        lo, hi = random.randint(1, 3), random.randint(4, 8)
    elif dtype.is_floating_point:
        lo, hi = random.uniform(-8, 0), random.uniform(1, 8)
    else:
        lo, hi = random.randint(-8, 0), random.randint(1, 8)
    return {'min': lo, 'max': hi}, {'a_min': lo, 'a_max': hi}
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
    """cross samples: default dim inference and an explicit dim=1."""
    def _tensor(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
    default_dim = SampleInput(_tensor((S, 3)), args=(_tensor((S, 3)),))
    explicit_dim = SampleInput(_tensor((S, 3, S)), args=(_tensor((S, 3, S)),),
                               kwargs={'dim': 1})
    return (default_dim, explicit_dim)
def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for cumprod: reductions over each dim of a 3-D tensor,
    scalar/empty tensors, zero-containing inputs, and a dtype kwarg case."""
    def make_arg(shape):
        # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
        return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
    def prod_zeros(dim_select):
        # Zero out entries at several positions along the two selected dims so
        # the backward pass exercises cumprod's zero-handling gradient branch.
        assert len(dim_select) == 2
        result = make_arg(3 * (S,))
        with torch.no_grad():
            result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
            result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
            result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
        return result
    # will not be needed once OpInfo tests support Iterables
    def sample_generator():
        for dim in range(3):
            yield SampleInput(make_arg((S, S, S)), args=(dim,))
        # Scalar tensors and empty tensor
        for size in [(), (1,), (0,)]:
            yield SampleInput(make_arg(size), args=(0,))
        yield SampleInput(prod_zeros([0, 1]), args=(1,))
        yield SampleInput(prod_zeros([0, 2]), args=(1,))
        yield SampleInput(prod_zeros([1, 2]), args=(1,))
        # test dtype kwarg
        yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})
    return list(sample_generator())
def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):
    """A single (S, 2) real tensor — the trailing-dim-2 layout view_as_complex requires."""
    tensor = make_tensor((S, 2), device, dtype, requires_grad=requires_grad)
    return [SampleInput(tensor)]
def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):
    """view_as_real samples: a matrix and a 0-d tensor."""
    shapes = ((S, S), ())
    return [SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad))
            for shape in shapes]
def sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):
    """copysign samples: tensor/tensor (with broadcasting) and tensor/scalar,
    including signed-zero scalars."""
    def tensor_of(*shape, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    # (input shape, other, broadcasts_input)
    shape_cases = [
        ((S, S, S), (S, S, S), False),  # no broadcast
        ((S, S, S), (S, S), False),     # broadcast rhs
        ((S, S), 3.14, False),          # plain scalar
        ((S, S), 0.0, False),           # scalar positive zero
        ((S, S), -0.0, False),          # scalar negative zero
        ((S, S), (S, S, S), True),      # broadcast lhs
        ((S, 1, S), (M, S), True),      # broadcast both operands
    ]
    samples = []
    for input_shape, other, broadcasts_input in shape_cases:
        second = tensor_of(*other) if isinstance(other, tuple) else other
        samples.append(SampleInput(tensor_of(*input_shape), args=(second,),
                                   broadcasts_input=broadcasts_input))
    return samples
def sample_inputs_prod(op_info, device, dtype, requires_grad):
    """Sample inputs for prod: full and per-dim reductions with and without
    keepdim/dtype kwargs, plus zero-containing inputs for the zero-gradient path.

    Fix: the keepdim variant previously set ``sample.kwargs['keepdim'] = True``
    on the SAME SampleInput object that had just been yielded; once the
    generator was materialized by ``list()`` both list entries shared
    ``keepdim=True`` and the keepdim=False case was silently lost. A fresh
    SampleInput is now built for the keepdim variant instead.
    """
    def make_arg(shape):
        # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
        return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
    def prod_single_zero():
        # A single zero entry: its gradient must come from the other factors.
        result = make_arg(2 * (S,))
        with torch.no_grad():
            result[0, 1] = 0
        return result
    # will not be needed once OpInfo tests support Iterables
    def sample_generator():
        for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
            yield SampleInput(sample.input)  # only Tensor, ignore other inputs
            yield sample
            # Emit the keepdim variant as a *new* SampleInput rather than
            # mutating `sample`, which is already part of the output.
            keepdim_kwargs = dict(sample.kwargs)
            keepdim_kwargs['keepdim'] = True
            yield SampleInput(sample.input, args=sample.args, kwargs=keepdim_kwargs)
        yield SampleInput(prod_single_zero())
        yield SampleInput(make_arg((3, 3, 3)), args=(1,))
        yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})
        # test zero scalar tensor
        zero = make_arg(())
        with torch.no_grad():
            zero.zero_()
        yield SampleInput(zero)
        yield SampleInput(zero, args=(0,))
        yield SampleInput(zero, args=(0,), kwargs={'keepdim': True})
    return list(sample_generator())
def sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):
    """nextafter(input, other) samples, including one where the input broadcasts."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    # (input shape, other shape, broadcasts_input)
    shape_pairs = (((S, S), (S, S), False),
                   ((S, S), (S,), False),
                   ((S, ), (S, S), True))
    samples = []
    for lhs_shape, rhs_shape, broadcasts_input in shape_pairs:
        samples.append(SampleInput(make_arg(lhs_shape), args=(make_arg(rhs_shape),),
                                   broadcasts_input=broadcasts_input))
    return samples
def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
    """diag samples: square and rectangular matrices with several diagonals, plus a vector."""
    def _tensor(shape):
        return make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
    matrices = (_tensor((M, M)), _tensor((3, 5)), _tensor((5, 3)))
    diagonal_args = ((), (2,), (-2,), (1,), (2,))
    samples = [SampleInput(mat, args=diag) for mat, diag in product(matrices, diagonal_args)]
    # Vector input: diag builds a matrix from it.
    samples.append(SampleInput(_tensor((M, ))))
    return samples
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
    """diagonal/diag_embed samples over 2-D (offset only) and 3-D (offset + dims) inputs."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    two_d_cases = product(((M, M), (3, 5), (5, 3)), ((), (2,), (-2,), (1,)))
    three_d_cases = product(((M, M, M),), ((1, 1, 2), (2, 0, 1), (-2, 0, 1)))
    return [SampleInput(make_arg(shape), args=args)
            for shape, args in chain(two_d_cases, three_d_cases)]
def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):
    """to_sparse samples; outputs are densified so gradients can be compared."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def densify(x):
        return x.to_dense()
    return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=densify),
            SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=densify))
# Used for both log_softmax and softmax
def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
    """Samples shared by softmax and log_softmax: 1-D/2-D/3-D shapes with
    positive and negative dims; with_dtype=True also passes dtype=torch.float64."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = [
        ((S, ), (0, )),
        ((S, S), (0, )),
        ((S, S), (1, )),
        ((S, S), (-1, )),
        ((S, M, S), (2, )),
    ]
    # PyTorch on XLA throws an error when passed with dim argument for 0d tensor.
    # See https://github.com/pytorch/xla/issues/3061 for more details.
    if torch.device(device).type != 'xla':
        cases.append(((), (0, )))
    return [
        SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)
        for shape, dim in cases
    ]
def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):
    """logit samples, with and without the eps argument, on 3-D and 0-d tensors
    kept strictly inside the op's open domain."""
    low, high = op_info.domain
    # Note: Operator is very sensitive at points near the
    # start and end of domain and leads to NaN for float16
    # if domain_eps is 1e-5.
    domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
    low = low + domain_eps
    high = high - domain_eps
    samples = (
        SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),
        SampleInput(make_tensor((S, S, S), device, dtype, low=low,
                                high=high, requires_grad=requires_grad), args=(0.2,)),
        SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),
        SampleInput(make_tensor((), device, dtype, low=low,
                                high=high, requires_grad=requires_grad), args=(0.2,)),
    )
    return samples
def sample_inputs_isin(op_info, device, dtype, requires_grad):
    """isin samples: test_elements drawn from the input itself so hits are guaranteed."""
    elements = make_tensor((L,), device, dtype, low=None, high=None, requires_grad=requires_grad)
    picked = torch.randint(0, L, size=[S])
    test_elements = elements[picked].clone()
    return [SampleInput(elements, args=(test_elements,))]
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):
    """masked_scatter samples: same-shape, broadcastable, and 0-d masks, plus a
    case where the input itself broadcasts against the mask."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def samples_generator():
        # same-shape mask
        yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))
        # mask broadcasts against the input
        yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))
        # 0-d boolean mask
        yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))
        # input broadcasts against the mask
        yield SampleInput(make_arg((S,)),
                          args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),
                          broadcasts_input=True)
    samples = tuple(samples_generator())
    return samples
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
    """masked_fill samples: scalar and 0-d tensor fill values, masks of several
    broadcastable shapes, and inputs that broadcast against the mask."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def sample_generator():
        yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
        yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))
        yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
        yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
        yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))
        yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
        # In the two cases below the input itself broadcasts against the mask.
        yield SampleInput(make_arg((S,)),
                          args=(torch.randn(S, S, device=device) > 0, make_arg(())),
                          broadcasts_input=True)
        yield SampleInput(make_arg((S,)),
                          args=(torch.randn(S, S, device=device) > 0, 10),
                          broadcasts_input=True)
    samples = tuple(sample_generator())
    return samples
def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):
    """masked_select samples: same-shape, broadcastable, and 0-d masks paired
    with matrix, vector, and 0-d inputs."""
    samples = (
        SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.randn(M, M, device=device) > 0,)),
        SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.randn((M,), device=device) > 0,)),
        SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.randn((M, M), device=device) > 0,)),
        SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.randn((M, M), device=device) > 0,)),
        SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.tensor(1, device=device, dtype=torch.bool),)),
        SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.tensor(1, device=device, dtype=torch.bool),)),
        SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
                    args=(torch.randn((M, M), device=device) > 0,)),
    )
    return samples
def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):
    """matrix_exp samples: one square matrix and one batched square matrix."""
    shapes = ((S, S), (S, S, S))
    return tuple(SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad))
                 for shape in shapes)
def sample_inputs_matmul(op_info, device, dtype, requires_grad):
    """Samples for matmul/__rmatmul__ covering vec-vec, mat-vec, vec-mat,
    mat-mat, batched, and zero-sized contractions. For __rmatmul__ the
    operands are swapped so the same shape table exercises the reflected op."""
    test_cases = (((L,), (L,)),
                  ((S, M), (M,)),
                  ((M,), (M, S)),
                  ((S, M), (M, S)),
                  ((S, 0), (0, M)),
                  ((S, S, M), (M,)),
                  ((S, S, M), (M, S)),
                  ((S, S, 0), (0, S)),
                  ((M,), (S, M, S)),
                  ((S, M), (S, M, S)),
                  ((0, 0), (S, 0, 0)),
                  ((S, S, M, M), (S, S, M, S)),
                  ((S, S, M, M), (M,)),
                  ((M,), (S, S, M, S)))
    sample_inputs = []
    for lhs_shape, rhs_shape in test_cases:
        lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
        rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
        if op_info.name == 'matmul':
            sample_inputs.append(SampleInput(lhs, args=(rhs,)))
        elif op_info.name == '__rmatmul__':
            # Reflected operator: rhs.__rmatmul__(lhs) computes lhs @ rhs.
            sample_inputs.append(SampleInput(rhs, args=(lhs,)))
        else:
            raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'")
    return tuple(sample_inputs)
def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,
                           requires_grad: bool,
                           *, variant: str) -> List[SampleInput]:
    """meshgrid samples in both calling conventions.

    variant='variadic' passes the tensors as separate positional args;
    variant='list' passes them as a single list argument.
    """
    if variant == 'variadic':
        def make_inputs(
                tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
                                                            List[torch.Tensor]],
                                                      Tuple[torch.Tensor, ...]]:
            # First tensor is the SampleInput's `input`, the rest go in `args`.
            return tensors[0], tuple(tensors[1:])
    elif variant == 'list':
        def make_inputs(
                tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
                                                            List[torch.Tensor]],
                                                      Tuple[torch.Tensor, ...]]:
            # The whole list is the `input`; no positional args.
            return tensors, ()
    else:
        raise ValueError(
            'Unsupported variant, must be one of {"variadic", "list"}. '
            f'Got "{variant}".')
    SCALAR = torch.Size([])
    VECTOR = torch.Size([3])
    test_cases: List[List[torch.Size]] = [
        [SCALAR],
        [VECTOR],
        [VECTOR, SCALAR],
        [VECTOR, SCALAR, VECTOR],
        [VECTOR, SCALAR, VECTOR, SCALAR],
    ]
    sample_inputs = []
    for shapes in test_cases:
        input, args = make_inputs(
            [make_tensor(shape, device, dtype, requires_grad=requires_grad)
             for shape in shapes])
        sample_inputs.append(SampleInput(input=input, args=args))
    return sample_inputs
def sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):
    """polar(abs, angle) samples; abs is constrained to be non-negative."""
    def _tensor(shape, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    matrix_case = SampleInput(_tensor((S, S), low=0), args=(_tensor((S, S)),))
    scalar_case = SampleInput(_tensor((), low=0), args=(_tensor(()),))
    return (matrix_case, scalar_case)
def sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):
    """complex(real, imag) samples: matrix pair and 0-d pair."""
    def _tensor(shape):
        return make_tensor(shape, device, dtype, requires_grad=requires_grad)
    return (SampleInput(_tensor((S, S)), args=(_tensor((S, S)),)),
            SampleInput(_tensor(()), args=(_tensor(()),)))
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
    """polygamma samples over matrix/0-d shapes with orders n in 1..5."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [SampleInput(make_arg(shape), args=(order,))
            for shape, order in product(((S, S), ()), (1, 2, 3, 4, 5))]
def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):
    """mvlgamma samples over matrix/0-d shapes with p in 1..5, with inputs
    bounded below by the p-dependent domain minimum."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    tensor_shapes = ((S, S), ())
    ns = (1, 2, 3, 4, 5)
    # Since the accepted lower bound for input
    # to mvlgamma depends on `p` argument,
    # the following function computes the lower bound
    # which we pass to `make_tensor`.
    def compute_min_val(p):
        return (p - 1.) / 2
    def generator():
        for shape, n in product(tensor_shapes, ns):
            min_val = compute_min_val(n)
            if not dtype.is_floating_point:
                # Round-up minimum value for integral dtypes
                min_val += 1
            yield SampleInput(make_arg(shape, low=min_val), args=(n,))
    return list(generator())
# Since `mvlgamma` has multiple entries,
# there are multiple common skips for the additional
# entries. Following function is a helper to that end.
def skips_mvlgamma(skip_redundant=False):
    """Common SkipInfo tuple shared by the mvlgamma OpInfo entries."""
    # outside domain values are hard error for mvlgamma op.
    base_skips = (SkipInfo('TestUnaryUfuncs', 'test_float_domains'),)
    if not skip_redundant:
        return base_skips
    # Redundant tests for the extra entries.
    return base_skips + (
        SkipInfo('TestGradients'),
        SkipInfo('TestJit'),
        SkipInfo('TestCommon'),
    )
# To test reference numerics against multiple values of argument `p`,
# we make multiple OpInfo entries with each entry corresponding to different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
# Class `MvlGammaInfo` already contains the basic information related to the operator,
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
    """UnaryUfuncInfo for one mvlgamma entry (one value of `p`); the shared
    fields are fixed here, the per-entry ones come from the constructor."""
    def __init__(self, variant_test_name, domain, skips, sample_kwargs):
        super(MvlGammaInfo, self).__init__(
            'mvlgamma',
            ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
            aliases=('special.multigammaln',),
            variant_test_name=variant_test_name,
            domain=domain,
            decorators=(precisionOverride({torch.float16: 5e-2}),),
            dtypes=all_types(),
            dtypesIfCPU=all_types_and(torch.bfloat16),
            dtypesIfCUDA=all_types_and(torch.half),
            sample_inputs_func=sample_inputs_mvlgamma,
            safe_casts_outputs=True,
            supports_forward_ad=True,
            skips=skips,
            sample_kwargs=sample_kwargs)
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
    """entr samples; when grads are needed the lower bound stays strictly inside the domain."""
    low, _ = op_info.domain
    if requires_grad:
        low = 0 + op_info._domain_eps
    shapes = ((L,), ())
    return tuple(SampleInput(make_tensor(shape, device, dtype,
                                         low=low,
                                         requires_grad=requires_grad))
                 for shape in shapes)
def sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):
    """zeta(x, q) samples: a no-grad tensor `q` and a scalar `q`, both within domain."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    tensor_other = make_arg((S,), low=2, requires_grad=False)
    return (
        SampleInput(make_arg((S,), low=1, requires_grad=requires_grad), args=(tensor_other,)),
        SampleInput(make_arg((S,), low=1, requires_grad=requires_grad), args=(3.,)),
    )
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`,
# supports `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for the i0/i1 Bessel ops; zeros are excluded for i0e (gradient
    discontinuity at 0) and explicitly included for the other ops."""
    samples = (SampleInput(make_tensor((S,), device, dtype,
                                       requires_grad=requires_grad)),
               SampleInput(make_tensor((), device, dtype,
                                       requires_grad=requires_grad)))
    if requires_grad and op_info.op == torch.special.i0e:
        # NOTE: `i0e`'s first-order gradient is not continuous
        # at `0`, hence we don't test `i0e` with any input being `0`.
        # TODO: Remove this when `make_tensor` supports excluding `0`.
        with torch.no_grad():
            for sample in samples:
                t = sample.input
                t[t == 0] = torch.finfo(dtype).eps  # type: ignore[index]
    elif requires_grad and op_info.op != torch.special.i0e:
        # Special Case for gradient
        # Sample with `0` in the input
        t = make_tensor((S,), device, dtype,
                        requires_grad=requires_grad)
        with torch.no_grad():
            t[0] = 0
        samples += (SampleInput(t),)  # type: ignore[assignment]
    return samples
def sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):
    """rsub samples in two variants.

    variant='tensor' pairs tensors (with broadcasting) and adds alpha kwargs
    valid for the dtype; variant='scalar' uses scalar `other` values and
    filters out complex alphas when neither operand is complex.
    """
    def _make_tensor_helper(shape, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):
        filtered_product = filter(filter_fn, product(args, alphas))  # type: ignore[var-annotated]
        return (SampleInput(input, args=(arg,), kwargs=dict(alpha=alpha))
                for (input, arg), alpha in filtered_product)
    int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j
    if variant == 'tensor':
        samples = (
            SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
            SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),
            SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),
            SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
            SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),
            SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),
        )
        # Only add alpha values that the input dtype can represent.
        if dtype.is_complex:
            alphas = [int_alpha, float_alpha, complex_alpha]
        elif dtype.is_floating_point:
            alphas = [int_alpha, float_alpha]
        else:
            alphas = [int_alpha]
        args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),
                (_make_tensor_helper((S, S)), _make_tensor_helper((S,))),
                (_make_tensor_helper(()), _make_tensor_helper(())))
        samples += tuple(_samples_with_alpha_helper(args, alphas))  # type: ignore[assignment]
    elif variant == 'scalar':
        # Scalar Other
        samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),
                   SampleInput(_make_tensor_helper(()), args=(0.5,)),
                   SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),
                   SampleInput(_make_tensor_helper(()), args=(1.5j,)),
                   SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),
                   SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))
        scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),
                       (_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),
                       (_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]
        alphas = [int_alpha, float_alpha, complex_alpha]
        def filter_fn(arg_alpha):
            arg, alpha = arg_alpha
            if isinstance(alpha, complex):
                if dtype.is_complex or isinstance(arg[1], complex):
                    return True
                else:
                    # complex alpha is valid only if either `self` or `other` is complex
                    return False
            # Non-Complex Alpha
            return True
        # Samples with alpha (scalar version) covers the following cases
        # self    | other   | alpha
        # -----------------------------------------
        # real    | real    | real (int and float)
        # real    | complex | real and complex
        # complex | real    | real and complex
        # complex | complex | real and complex
        #
        # It does not cover
        # real    | real    | complex
        # x = torch.randn(2, requires_grad=True, dtype=torch.float64)
        # torch.rsub(x, 1, alpha=1. + 1.6j)
        # RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)
        samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn))  # type: ignore[assignment]
    else:
        raise Exception("Invalid variant!")
    return samples
def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
    """Samples for cumsum/cumprod-style ops: a 3-D tensor over two dims plus a 0-d tensor."""
    def _tensor(shape, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    samples = [SampleInput(_tensor((S, S, S)), args=(0,)),
               SampleInput(_tensor((S, S, S)), args=(1,)),
               SampleInput(_tensor(()), args=(0,))]
    if supports_dtype_kwargs:
        # NOTE: if `dtype` is not same as input, then inplace variants fail with
        # `provided dtype must match the dtype of self tensor in cumsum`
        samples.append(SampleInput(_tensor((S, S, S)), args=(1,), kwargs={'dtype': dtype}))
    return samples
def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):
    """unfold(dimension, size, step) samples over 0-D through 4-D shapes and strides."""
    # (shape, (dimension, size, step))
    cases = (
        ((), (0, 1, 1)),
        ((S, S, S, S), (0, 3, 1)),
        ((S, S, S, S), (1, 3, 1)),
        ((S, S, S, S), (2, 3, 1)),
        ((S, S, S, S), (3, 3, 1)),
        ((S, S, S, S), (0, 3, 2)),
        ((S, S, S, S), (1, 3, 2)),
        ((S, S, S, S), (2, 3, 2)),
        ((S, S, S, S), (3, 3, 2)),
        ((S, S, S, S), (0, 4, 1)),
        ((S, S, S, S), (1, 4, 1)),
        ((S, S, S, S), (2, 4, 1)),
        ((S, S, S, S), (3, 4, 1)),
        ((M,), (0, 3, 1)),
        ((M,), (0, 3, 2)),
        ((M,), (0, 3, 3)),
        ((1000,), (0, 3, 11)),
        ((1000,), (0, 2, 27)),
        ((10, 10), (0, 1, 2)),
        ((10, 10), (1, 2, 3)),
        ((10, 10), (1, 2, 2)),
        ((S, S, S), (2, 3, 2)),
    )
    return [SampleInput(make_tensor(shape, device, dtype,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=args)
            for shape, args in cases]
def sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):
    """atan2 samples, including broadcasting in both directions and 0-d inputs."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # (input shape, other shape, broadcasts_input)
    shape_cases = (((S, S, S), (S, S, S), False),
                   ((), (), False),
                   ((S, S, S), (S,), False),
                   ((S,), (S, S, S), True),
                   ((S, 1, S), (S, S), True))
    return [SampleInput(make_arg(lhs), args=(make_arg(rhs),), broadcasts_input=bcast)
            for lhs, rhs, bcast in shape_cases]
def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):
    """split samples: `list_args=True` exercises the explicit-section-sizes
    overload (with positive/negative dims), otherwise an integer chunk size."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    if list_args:
        cases = (
            ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
            ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),
            ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)
        )
    else:
        cases = (  # type: ignore[assignment]
            ((S, S, S), (2,)),
            ((S, S, S), (S, 1)),
        )
    def generator():
        for shape, args in cases:
            yield SampleInput(make_arg(shape), args=args)
    return list(generator())
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
    """split_with_sizes samples: exact splits, a zero-sized chunk, and explicit dims."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    third = int(S / 3)
    cases = (((S, S, S), ([third, S - third * 2, third],)),
             ((S, S, S), ([third, S - third, 0],)),
             ((S, S, S), ([third, S - third * 2, third], 2)),
             ((S, S, S), ([third, S - third * 2, third], -2)))
    return [SampleInput(make_arg(shape), args=args) for shape, args in cases]
def sample_inputs_msort(op_info, device, dtype, requires_grad):
    """msort samples: a large 1-D tensor with all-unique values (so the sorted
    order is unambiguous) and a regular 3-D tensor."""
    def apply_grad(t):
        # requires_grad can only be set on floating-point tensors.
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            t.requires_grad_(requires_grad)
    def large_1d_unique(dtype, device):
        # randperm guarantees unique values before the dtype cast.
        res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
        res = res.to(dtype)
        apply_grad(res)
        return res
    samples = []
    # Test case for large tensor.
    largesample = SampleInput(large_1d_unique(dtype, device))
    sample = SampleInput(make_tensor((S, M, S), device, dtype,
                                     low=None, high=None,
                                     requires_grad=requires_grad))
    return [largesample, sample]
def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):
    """lerp(input, end, weight) samples: scalar and tensor weights, broadcasting
    on end/weight/input, plus complex scalar weights for complex dtypes."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    samples = (
        # no broadcast
        SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),
        # broadcast rhs
        SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),
        # scalar tensor
        SampleInput(make_arg(()), args=(make_arg(()), 0.4)),
        # broadcast rhs scalar-tensor
        SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),
        # broadcast rhs with weight tensor
        SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),
        # broadcast rhs and weight tensor
        SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),
        # broadcast_lhs
        SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
        # scalar broadcast_lhs
        SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
        # broadcast all
        SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
        # tensor broadcast all
        SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),
                    broadcasts_input=True),
    )
    if dtype.is_complex:
        # Complex weights are only valid for complex dtypes.
        samples = samples + (  # type: ignore[assignment]
            # no broadcast
            SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),
            SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),
            # broadcast rhs
            SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),
            SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),
            # scalar tensor
            SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),
            SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),
            # broadcast rhs scalar-tensor
            SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),
            SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),
        )
    return samples
def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):
    """tensordot samples: integer `dims` and explicit per-operand dim lists."""
    def _tensor(shape):
        return make_tensor(shape, device, dtype, requires_grad=requires_grad)
    cases = (((2, 2, 2), (2, 2, 2), (2)),
             ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])))
    return tuple(SampleInput(_tensor(first), args=(_tensor(second),),
                             kwargs=dict(dims=dims,))
                 for first, second, dims in cases)
def sample_inputs_kron(op_info, device, dtype, requires_grad):
    """kron samples: a single pair of 2-D tensors."""
    shape_pairs = (((S, S), (M, L)),)
    samples = []
    for lhs_shape, rhs_shape in shape_pairs:
        lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
        rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
        samples.append(SampleInput(lhs, args=(rhs,)))
    return tuple(samples)
def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):
    """inner samples: vector-vector and 0-d/matrix."""
    def _tensor(shape):
        return make_tensor(shape, device, dtype, requires_grad=requires_grad)
    return (
        SampleInput(_tensor((S, )), args=(_tensor((S, )),)),
        SampleInput(_tensor(()), args=(_tensor((S, S)),)),
    )
def sample_inputs_scatter(op_info, device, dtype, requires_grad):
    """scatter samples over several dims, partial-width sources, 0-d tensors,
    and a scalar src; reduce variants are added only when grads are off."""
    def _tensor(shape, dtype=dtype, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    def _gather(shape, index_dim, max_indices):
        return gather_variable(shape, index_dim, max_indices, device=device)
    zero = torch.tensor(0, dtype=torch.long, device=device)
    test_cases = (
        (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
        (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
        (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
        (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
        (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
        (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
        (_tensor(()), (0, zero.clone().detach(), _tensor(()))),
        (_tensor(()), (0, zero.clone().detach(), 2.5)),
    )
    samples = []
    for tensor, args in test_cases:
        samples.append(SampleInput(tensor, args=args))
        # reduce variants are only generated without requires_grad —
        # presumably because scatter's reduce modes lack autograd support
        # here; confirm before relying on it.
        if not requires_grad:
            samples.append(SampleInput(
                tensor.clone().detach(),
                args=args, kwargs={'reduce': 'add'}
            ))
            if dtype.is_floating_point:
                samples.append(SampleInput(
                    tensor.clone().detach(),
                    args=args, kwargs={'reduce': 'multiply'}
                ))
    return samples
def sample_inputs_scatter_add(op_info, device, dtype, requires_grad):
    """scatter_add samples over several dims, partial-width sources, and 0-d tensors."""
    def _tensor(shape, dtype=dtype, low=None, high=None):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    def _index(shape, index_dim, max_indices):
        return gather_variable(shape, index_dim, max_indices, device=device)
    scalar_index = torch.tensor(0, dtype=torch.long, device=device)
    cases = (
        (_tensor((M, S)), (0, _index((S, S), 1, M), _tensor((S, S)))),
        (_tensor((M, S)), (1, _index((S, S), 0, S), _tensor((S, S)))),
        (_tensor((M, S)), (-1, _index((S, S), 0, S), _tensor((S, S)))),
        (_tensor((M, S)), (0, _index((M, S // 2), 1, M), _tensor((M, S // 2)))),
        (_tensor((M, S)), (1, _index((M, S // 2), 0, S), _tensor((M, S // 2)))),
        (_tensor((M, S)), (-1, _index((M, S // 2), 0, S), _tensor((M, S // 2)))),
        (_tensor(()), (0, scalar_index.clone().detach(), _tensor(()))),
    )
    samples = []
    for tensor, args in cases:
        samples.append(SampleInput(tensor, args=args))
    return samples
def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
    """ravel samples: a 3-D tensor and a 0-d tensor."""
    shapes = ((S, S, S), ())
    return tuple(SampleInput(make_tensor(shape, device, dtype,
                                         low=None, high=None,
                                         requires_grad=requires_grad))
                 for shape in shapes)
def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
    """tril/triu samples: 2-D, batched, and 4-D inputs with/without a diagonal arg."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((M, M), ()),
             ((M, M), (2,),),
             ((S, M, M), ()),
             ((S, M, M), (2,)),
             ((3, 3, S, S), ()),)
    return [SampleInput(make_arg(shape), args=args) for shape, args in cases]
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
    """clone samples: a 3-D tensor and a 0-d tensor."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    return [SampleInput(make_arg(shape)) for shape in ((S, M, S), ())]
def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):
    """contiguous samples: one contiguous and one non-contiguous matrix."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    samples = [SampleInput(make_arg((S, S)))]
    samples.append(SampleInput(make_arg((S, S), noncontiguous=True)))
    return samples
def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for resize_ (takes a target shape) and resize_as_ (takes a
    template tensor)."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    cases = (((S, S, S), (S * S, S)),
             ((), ()),
             ((), (1, 1, 1)),
             )
    def generator():
        for shape, args_or_shape in cases:
            # Update `args` based on operator
            if op_info.name == 'resize_':
                # resize_ takes shape/tuple of ints,
                args = (args_or_shape, )
            elif op_info.name == 'resize_as_':
                # resize_as_ takes another tensor
                # NOTE(review): the template uses `shape` (the input's own
                # shape) rather than `args_or_shape`, so the resize is a
                # shape no-op — confirm whether that is intentional.
                args = (make_arg(shape, requires_grad=False), )  # type:ignore[assignment]
            else:
                raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")
            yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
    return list(generator())
def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for view/reshape; reshape (unlike view) also gets
    non-contiguous (transposed) inputs."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((S, S, S), (S * S, S)),
             ((S * S, S), (S, S, S)),
             ((S,), (S,)),
             ((), ()),
             ((), (1,)))

    samples = []
    for shape, target_shape in cases:
        t = make_arg(shape)
        samples.append(SampleInput(t, args=(target_shape, )))
        # view requires a compatible memory layout, so transposed inputs only test reshape
        if op_info.name != "view" and len(shape) >= 2:
            samples.append(SampleInput(t.transpose(0, 1), args=(target_shape, )))
    return samples
def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for view_as/reshape_as; reshape_as (unlike view_as) also
    gets non-contiguous (transposed) inputs. The template tensor never
    requires grad."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    shape_pairs = (((S, S, S), (S * S, S)),
                   ((), ()),
                   ((), (1, 1)),
                   )

    samples = []
    for shape, shape_other in shape_pairs:
        t = make_arg(shape, requires_grad=requires_grad)
        samples.append(SampleInput(t, args=(make_arg(shape_other, requires_grad=False),)))
        if op_info.name != "view_as" and len(shape) >= 2:
            samples.append(SampleInput(t.transpose(0, 1),
                                       args=(make_arg(shape_other, requires_grad=False),)))
    return samples
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for select(dim, index), covering negative dims and indices."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    shape_and_dim_index = (((S, S, S), (1, 2)),
                           ((S, S, S), (-1, 2)),
                           ((S, S, S), (-1, -1)),
                           ((S, S, S), (1, -1)),
                           ((S,), (0, 2))
                           )
    return [SampleInput(make_arg(shape), args=args) for shape, args in shape_and_dim_index]
def sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
    """Sample inputs for reflected binary operators (e.g. __radd__): a tensor
    paired with a Python scalar whose kind matches the tensor dtype."""
    other: Union[int, float, complex]
    if dtype.is_floating_point:
        other = 3.14
    elif dtype.is_complex:
        other = 3.14j
    else:
        other = 3

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)
    return [SampleInput(make_arg((S, S, S)), args=(other,)),
            SampleInput(make_arg(()), args=(other,))]
def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for expand: size-1 dims broadcast up, rank growth, and
    scalar cases."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    shape_to_sizes = (((S, 1, 1), (S, S, S)),
                      ((S, 1, S), (S, S, S)),
                      ((S, 1), (S, S, S)),
                      ((1,), (S, S, S)),
                      ((1, S), (1, 1, S)),
                      ((), ()),
                      ((), (1, 3, 2)),
                      )
    return [SampleInput(make_arg(shape), args=(sizes, )) for shape, sizes in shape_to_sizes]
def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for expand_as; the template tensor never requires grad."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    shape_pairs = (((S, 1, 1), (S, S, S)),
                   ((), ()),
                   ((), (1, 1)),
                   )
    return [SampleInput(make_arg(shape, requires_grad=requires_grad),
                        args=(make_arg(shape_other, requires_grad=False), ))
            for shape, shape_other in shape_pairs]
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.where(condition, input, other).

    Covers same-shape, broadcasting, and scalar cases. The boolean mask is
    guaranteed to contain at least one True element (except for empty tensors)
    so both branches of where are exercised.
    """
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def make_bool_mask(shape):
        # Make sure atleast one element is nonzero,
        # except for empty tensor
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)

        if mask_t.numel() == 0:
            return mask_t
        elif mask_t.numel() == 1:
            mask_t.fill_(True)
            return mask_t

        if mask_t.sum() == 0:
            # Flip one randomly chosen element to True.
            # Fix: random.randint is inclusive on BOTH ends, so the upper bound
            # must be size - 1; using the raw dimension size could produce an
            # out-of-range index and raise IndexError.
            def random_index(shape):
                return tuple(map(lambda max_idx: random.randint(0, max_idx - 1), shape))

            mask_t[random_index(mask_t.shape)] = True
            return mask_t

        return mask_t

    cases = (((M, M), (M, M), (M, M), False),
             ((M, 1, M), (M, M), (M, M, 1), True),
             ((), (), (), False),
             ((M, 1, M), (), (M, M, 1), True),
             ((), (M, M), (), True),)

    def generator():
        for shape, mask_shape, other_shape, broadcasts_input in cases:
            yield SampleInput(make_arg(shape),
                              args=(make_bool_mask(mask_shape), make_arg(other_shape)),
                              broadcasts_input=broadcasts_input)

    return list(generator())
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for chunk(chunks[, dim]), including a negative dim."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    shapes_and_args = (((S, S, S), (2,)),
                       ((S, S, S), (S, 1)),
                       ((S, S, S), (S, -1)))
    return [SampleInput(make_arg(shape, requires_grad=requires_grad), args=args)
            for shape, args in shapes_and_args]
def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for kthvalue(k[, dim[, keepdim]]) on 3D, 1D and scalar tensors."""
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)
    shapes_and_args = [
        ((S, S, S), (2,)),
        ((S, S, S), (2, 1,)),
        ((S, S, S), (2, -1,)),
        ((S, S, S), (2, 1, True,)),
        ((S, S, S), (2, -1, True,)),
        ((S,), (2, 0,)),
        ((S,), (2, 0, True,)),
        ((), (1,)),
        ((), (1, 0,)),
        ((), (1, 0, True)),
    ]
    return [SampleInput(make_arg(shape), args=args) for shape, args in shapes_and_args]
def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for one_hot over scalar/1D/3D index tensors, with both an
    inferred (-1) and an explicit num_classes."""
    samples = []
    for shape, num_classes in itertools.product(((), (S,), (L, M, S)), (-1, 10)):
        # Keep the indices inside the valid class range; when num_classes is
        # inferred (-1) the range is bounded at 10 instead.
        upper = 10 if num_classes == -1 else num_classes // 2
        indices = make_tensor(shape, device=device, dtype=dtype, low=0, high=upper,
                              requires_grad=requires_grad)
        samples.append(SampleInput(indices, kwargs=dict(num_classes=num_classes)))
    return samples
def sample_inputs_softplus(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for softplus: defaults, a custom beta, and a threshold low
    enough to exercise the linear branch."""
    def make_input(**extra):
        return make_tensor((S,), device=device, dtype=dtype, requires_grad=requires_grad, **extra)

    return [
        SampleInput(make_input()),
        SampleInput(make_input(), kwargs=dict(beta=3)),
        # low=1 guarantees elements above threshold=1
        SampleInput(make_input(low=1), kwargs=dict(threshold=1)),
    ]
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for linalg.tensorinv: a full-rank 12x12 matrix reshaped so
    that the leading `ind` dims and the trailing dims each multiply to 12."""
    def make_input():
        matrix = make_fullrank_matrices_with_distinct_singular_values(12, 12, device=device, dtype=dtype)
        return matrix.requires_grad_(requires_grad)

    # lhs / rhs shape can have any number of dimensions as long as their product equals 12
    shapes = [
        ((2, 2, 3), (12, 1)),
        ((4, 3), (6, 1, 2)),
    ]

    samples = []
    for shape_lhs, shape_rhs in shapes:
        inp = make_input().reshape(*shape_lhs, *shape_rhs)
        samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))
    return samples
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for F.mse_loss: matching input/target shapes and each
    reduction mode."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shapes_and_kwargs = [
        ((), None),
        ((S,), dict(reduction="mean")),
        ((S,), dict(reduction="sum")),
        ((S,), dict(reduction="none")),
        ((S, S), None),
        ((S, S, S), None),
    ]
    samples = []
    for shape, loss_kwargs in shapes_and_kwargs:
        samples.append(SampleInput(make(shape), args=(make(shape),), kwargs=loss_kwargs))
    return samples
def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for F.grid_sample: 2D and 3D sampling over every
    mode / padding_mode / align_corners combination (bicubic is 2D-only)."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batch_size, num_channels = 2, 3

    samples = []
    for dim in (2, 3):
        input = make((batch_size, num_channels, *[S] * dim))
        grid = make((batch_size, *[S] * dim, dim))
        # bicubic interpolation is only implemented for 2D sampling
        modes = ("bilinear", "nearest", "bicubic") if dim == 2 else ("bilinear", "nearest")
        for mode, padding_mode, align_corners in itertools.product(
                modes, ("zeros", "border", "reflection"), (False, True)):
            samples.append(
                SampleInput(
                    input,
                    args=(grid,),
                    kwargs=dict(
                        mode=mode,
                        padding_mode=padding_mode,
                        align_corners=align_corners,
                    )
                )
            )
    return samples
def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    # Sample inputs for F.nll_loss: (N=2, C=3) inputs with optional trailing
    # spatial dims, plus kwargs exercising weight, ignore_index and the
    # 'sum'/'mean' reductions.
    batch_size, num_classes = shape = (2, 3)

    input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [
        ((*shape, 1), dict()),
        ((*shape, 1, 2), dict()),
        ((*shape, 1, 2, 3), dict()),
        # per-class rescaling weights are made non-negative via .abs()
        (shape, dict(weight=make_tensor((num_classes,), device=device, dtype=dtype).abs())),
        (shape, dict(ignore_index=num_classes // 2)),
        (shape, dict(reduction="sum")),
        (shape, dict(reduction="mean")),
    ]

    sample_inputs = []
    for input_shape, kwargs in input_shape_and_kwargs:
        input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)

        # target holds class indices in [0, num_classes)
        # NOTE(review): requires_grad is forwarded to this torch.long target;
        # integer tensors cannot require grad — confirm make_tensor ignores the
        # flag for integral dtypes rather than raising.
        target = make_tensor(
            (batch_size, *input_shape[2:]),
            low=0,
            high=num_classes,
            device=device,
            dtype=torch.long,
            requires_grad=requires_grad
        )

        sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))

    return sample_inputs
# Database of OpInfo entries for the torch._foreach_* unary ops. Entries
# without explicit dtypes use the ForeachFuncInfo defaults.
foreach_unary_op_db: List[OpInfo] = [
    ForeachFuncInfo('exp'),
    ForeachFuncInfo('acos'),
    ForeachFuncInfo('asin'),
    ForeachFuncInfo('atan'),
    ForeachFuncInfo('cos'),
    ForeachFuncInfo('cosh'),
    ForeachFuncInfo('log'),
    ForeachFuncInfo('log10'),
    ForeachFuncInfo('log2'),
    ForeachFuncInfo('tan'),
    ForeachFuncInfo('tanh'),
    ForeachFuncInfo('sin'),
    ForeachFuncInfo('sinh'),

    ForeachFuncInfo(
        'neg',
        dtypes=all_types_and_complex(),
        dtypesIfCPU=all_types_and_complex(),
        dtypesIfCUDA=all_types_and_complex(),
        sample_inputs_func=sample_inputs_foreach,
        safe_casts_outputs=False,
    ),

    ForeachFuncInfo(
        'sqrt',
        dtypes=floating_types(),
        dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_and_complex_types_and(torch.half),
    ),

    ForeachFuncInfo(
        'ceil',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'erf',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'erfc',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'expm1',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'floor',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'log1p',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half),
    ),

    ForeachFuncInfo(
        'round',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'frac',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'reciprocal',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half),
    ),

    ForeachFuncInfo(
        'sigmoid',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half),
    ),

    ForeachFuncInfo(
        'trunc',
        dtypes=floating_types(),
        dtypesIfCPU=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
    ),

    ForeachFuncInfo(
        'abs',
        dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
        dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.half),
        dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
        safe_casts_outputs=False,
        supports_forward_ad=True,
    ),
]

# Database for torch._foreach_* binary ops (tensor-list op tensor-list/scalar);
# add/sub additionally take an alpha kwarg.
foreach_binary_op_db: List[OpInfo] = [
    ForeachFuncInfo(
        "add",
        dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        supports_alpha_param=True,
    ),
    ForeachFuncInfo(
        "sub",
        dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        supports_alpha_param=True,
    ),
    ForeachFuncInfo(
        "mul",
        dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
    ),
    ForeachFuncInfo(
        "div",
        dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
        dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
    ),
]

# Database for the ternary pointwise foreach ops (addcmul/addcdiv).
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
    ForeachFuncInfo(
        "addcmul",
        dtypesIfCPU=all_types_and_complex(),
        dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
    ),
    ForeachFuncInfo(
        "addcdiv",
        dtypesIfCPU=all_types_and_complex(),
        dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
    ),
]

# Database for the elementwise min/max foreach ops.
foreach_minmax_op_db: List[ForeachFuncInfo] = [
    ForeachFuncInfo(
        "maximum",
        dtypesIfCPU=all_types_and(torch.float16, torch.bfloat16, torch.bool),
        dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
    ),

    ForeachFuncInfo(
        "minimum",
        dtypesIfCPU=all_types_and(torch.float16, torch.bfloat16, torch.bool),
        dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
    ),
]
def reference_sign(x):
    """NumPy reference for torch.sign that also accepts bool inputs."""
    if x.dtype != np.bool_:
        return np.sign(x)
    # np.sign has no ufunc loop for bool -> bool, so compute in uint8 and cast back:
    # >>> np.sign(True)
    # ufunc 'sign' did not contain a loop
    # with signature matching types dtype('bool') -> dtype('bool')
    return np.sign(x, dtype=np.uint8).astype(np.bool_)
def reference_sgn(x):
    """NumPy reference for torch.sgn.

    For complex inputs torch.sgn returns input/abs(input), or 0 when
    abs(input) == 0. np.sign instead uses sign(x.real) (falling back to
    sign(x.imag)), so the complex case is computed explicitly here; real and
    bool inputs defer to reference_sign.
    """
    if x.dtype not in [np.complex64, np.complex128]:
        return reference_sign(x)

    out = x / np.abs(x)
    if out.ndim == 0:
        # 0-d arrays cannot be index-assigned; build a fresh zero scalar
        # for the x == 0 case instead.
        return np.array(complex(0, 0), dtype=x.dtype) if x == 0 else out

    out[x == 0] = complex(0, 0)
    return out
def reference_sigmoid(x):
    """NumPy/SciPy reference for torch.sigmoid.

    scipy.special.expit does not support complex dtypes, so those fall back to
    the explicit 1 / (1 + exp(-x)) formula.
    """
    if x.dtype in [np.complex64, np.complex128]:
        return (1 / (1 + np.exp(-x)))
    return scipy.special.expit(x)
def reference_logsigmoid(x):
    """Numerically stable NumPy reference for log(sigmoid(x)).

    Uses the identity log(sigmoid(x)) = -(max(0, -x) + log(exp(-max) + exp(-x - max)))
    to avoid overflow for large-magnitude inputs.
    """
    shift = np.maximum(x.dtype.type(0), -x)
    stabilized = np.exp(-shift) + np.exp(-x - shift)
    return -(shift + np.log(stabilized))
def reference_lgamma(x):
    """SciPy gammaln adjusted to match torch.lgamma conventions.

    - scipy.special.gammaln returns -inf at -inf, whereas torch (and C/C++,
      see cppreference lgamma) return +inf; remap -inf inputs to +inf first.
    - scipy promotes float16 input to float32 output, but torch.lgamma
      preserves float16, so cast back (this can turn finite SciPy values into
      inf due to float16's smaller range, matching torch).
    """
    if x.dtype.kind == 'f':
        x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)

    result = scipy.special.gammaln(x)

    if x.dtype == np.float16:
        result = result.astype(np.float16)

    return result
def reference_polygamma(x, n):
    """SciPy polygamma with the output cast to torch's default dtype.

    scipy.special.polygamma has inconsistent output dtypes:
    >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
    dtype('float64')
    >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
    dtype('float32')
    so normalize to the numpy equivalent of torch.get_default_dtype().
    """
    default_np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
    return scipy.special.polygamma(n, x).astype(default_np_dtype)
def reference_mvlgamma(x, d):
    """SciPy multivariate log-gamma, preserving float16 (scipy promotes it)."""
    result = scipy.special.multigammaln(x, d)
    return result.astype(np.float16) if x.dtype == np.float16 else result
def reference_softplus(input, beta=1, threshold=20):
    """NumPy reference for softplus.

    Elements with input * beta <= threshold use log(1 + exp(beta * x)) / beta;
    the remainder stay linear (identity), matching torch's overflow guard.
    """
    result = input.copy()
    smooth_region = input * beta <= threshold
    result[smooth_region] = np.log(1 + np.exp(beta * input[smooth_region])) / beta
    return result
def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:
    """NumPy reference for torch.nn.functional.one_hot.

    With num_classes == -1 the number of classes is inferred as max(a) + 1.
    The output appends a trailing one-hot axis of length num_classes and keeps
    the input's dtype.
    """
    if num_classes == -1:
        num_classes = int(np.amax(a) + 1)

    flat = np.zeros((a.size, num_classes), dtype=a.dtype)
    flat[np.arange(a.size), a.reshape(-1)] = 1
    return flat.reshape(*a.shape, -1)
def reference_mse_loss(input, target, reduction="mean"):
    """NumPy reference for F.mse_loss with 'mean'/'sum'/'none' reductions."""
    squared_error = (input - target) ** 2
    if reduction == "sum":
        return np.sum(squared_error)
    if reduction == "mean":
        return np.mean(squared_error)
    # reduction == "none": return the unreduced elementwise losses
    return squared_error
def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):
    """NumPy reference for F.layer_norm over the trailing normalized_shape dims.

    Normalizes with the biased (ddof=0) variance, then applies the optional
    elementwise affine transform (weight, bias).
    """
    feature_size = np.prod(normalized_shape)
    flat = inp.reshape(-1, feature_size)  # type: ignore[call-overload]
    mean = flat.mean(axis=-1, keepdims=True)
    var = flat.var(axis=-1, ddof=0, keepdims=True)

    normed = (flat - mean) / np.sqrt(var + eps)
    if weight is not None:
        normed = normed * weight.reshape(-1)
    if bias is not None:
        normed = normed + bias.reshape(-1)
    return normed.reshape(*inp.shape)
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
    """Gradcheck wrapper for functions that take Hermitian matrices as input.

    The finite-difference perturbations used by gradcheck do not preserve the
    Hermitian property, so the input is symmetrized first:
    input + input^H is Hermitian for any input.
    """
    hermitian_input = input + input.conj().transpose(-2, -1)
    return op(hermitian_input, *args, **kwargs)
def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
    """Gradcheck wrapper for functions that take lower or upper triangular matrices as input.

    They require a modified function because the finite-difference algorithm
    for calculating derivatives does not preserve the triangular property of the input.

    Fix: *args and **kwargs were previously accepted but silently dropped;
    they are now forwarded to ``op`` after the ``upper`` positional argument.
    (Also corrects the "wrpper" docstring typo.)
    """
    triangular_input = input.triu() if upper else input.tril()
    return op(triangular_input, upper, *args, **kwargs)
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
),
supports_inplace_autograd=False,
assert_autodiffed=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cuda" not implemented for 'BFloat16'
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble]),
)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False,
supports_forward_ad=True),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_binary_pwise),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=floating_and_complex_types_and(torch.float16),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=floating_and_complex_types_and(torch.float16),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),
OpInfo('addmv',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
backward_dtypesIfROCM=floating_types_and(torch.half),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_reference_testing')],
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# addbmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/55907
SkipInfo('TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=floating_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater else [],
torch.complex64, torch.complex128),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view', device_type='cuda')],
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# baddbmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_baddbmm),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# bmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
skips=(
# bmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_inplace_autograd=False,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types(),
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_forward_ad=True,
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Complex gradcheck tests asinh at points 0 + ix for x > 1 which are points
# where asinh is not differentiable
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
OpInfo('atan2',
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_atan2,
),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
)),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('bitwise_and',
dtypes=integral_types_and(torch.bool),
supports_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
supports_autograd=False),
OpInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist,
),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
# RuntimeError: torch.cholesky: U(1,1) is zero, singular U.
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),)),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_types(),
# TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
# with complex dtype.
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True
# for complex tensors
SkipInfo('TestCommon', 'test_dtypes'),
# cholesky_inverse does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),)),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_chunk,
supports_out=False),
OpInfo('clone',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_clone,
supports_forward_ad=True,
supports_out=False),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_contiguous,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('symeig',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_symeig,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
    # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. tensors
OpInfo('clamp',
aliases=('clip',),
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_clamp),
UnaryUfuncInfo('clamp',
variant_test_name='scalar',
aliases=('clip', ),
decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),
ref=np.clip,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/54841
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
),
sample_kwargs=sample_kwargs_clamp_scalar,
sample_inputs_func=sample_inputs_clamp_scalar),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_sparse=True,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex),
OpInfo('complex',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_complex,
supports_forward_ad=True,
),
OpInfo('copysign',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_copysign,
supports_inplace_autograd=False,
supports_forward_ad=True,
),
OpInfo('corrcoef',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_corrcoef,
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_cov,
supports_out=False,
supports_forward_ad=True,
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit'),)),
OpInfo('cross',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True,
skips=(
# AssertionError: UserWarning not triggered :
# Resized a non-empty tensor but did not warn about it.
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('cumsum',
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumsum does not handle correctly out= dtypes
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumprod does not handle correctly out= dtypes
SkipInfo('TestCommon', 'test_out',
dtypes=[torch.float32]),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
OpInfo('diff',
op=torch.diff,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diff),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_forward_ad=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="trunc"),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="floor"),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_binary_pwise,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
skips=(
# Because expand does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
supports_forward_ad=True,
supports_out=False),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_expand_as,
skips=(
# Because expand_as does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
supports_out=False),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('eq',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('fmod',
ref=np.fmod,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
OpInfo('remainder',
ref=np.remainder,
dtypesIfCPU=all_types_and(torch.float16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('remainder',
ref=np.remainder,
variant_test_name='autodiffed',
dtypesIfCPU=all_types_and(torch.float16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
# Reference for disabling extremals
# https://github.com/pytorch/pytorch/issues/51948
handles_extremals=False),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
ref=np.fft.fftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
ref=np.fft.hfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
ref=np.fft.rfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
ref=np.fft.rfftn,
ndimensional=True,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
ref=np.fft.ifft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
ref=np.fft.ifftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
ref=np.fft.ihfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
ref=np.fft.irfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
ref=np.fft.irfftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else _NOTHING,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_i0_i1),
UnaryUfuncInfo('special.i0e',
aten_name='special_i0e',
ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 3e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1',
aten_name='special_i1',
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float: 1e-4}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1e',
aten_name='special_i1e',
ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.ndtr',
aten_name='special_ndtr',
decorators=(precisionOverride({torch.bfloat16: 5e-3,
torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
safe_casts_outputs=True),
BinaryUfuncInfo('floor_divide',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
# skip testing torch.frexp as it is not supported by ROCm platform yet
decorators=[skipCUDAIfRocm],
supports_out=False,
supports_forward_ad=True,
skips=(
# skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs,
# while theses tests currently requires output to a single tensor.
SkipInfo('TestUnaryUfuncs', 'test_batch_vs_slicing'),
SkipInfo('TestUnaryUfuncs', 'test_contig_vs_every_other'),
SkipInfo('TestUnaryUfuncs', 'test_contig_vs_transposed'),
SkipInfo('TestUnaryUfuncs', 'test_non_contig_expand'),
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency'),
# skips test_reference_numerics due to error in Windows CI.
# The np.frexp returns exponent as np.intc dtype on Windows platform,
# and np.intc does not have the correspond torch dtype
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
OpInfo('ge',
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
dtypesIfCPU=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_geqrf,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('gt',
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=(
# following tests give a runtime error with undefined value tensor
# see discussion : https://github.com/pytorch/pytorch/issues/56660
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient),
OpInfo('inverse',
op=torch.inverse,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('isin',
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin),
OpInfo('kthvalue',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kthvalue),
OpInfo('le',
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('linalg.det',
op=torch.linalg.det,
aliases=('det', ),
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False),
OpInfo('linalg.det',
op=torch.linalg.det,
variant_test_name='singular',
aliases=('det', ),
dtypes=double_types(),
backward_dtypes=double_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_singular,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed
# Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570
SkipInfo('TestGradients', 'test_fn_grad', device_type='cuda', dtypes=(torch.complex128,)),
SkipInfo('TestCommon', 'test_dtypes'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
# This test fails because singular inputs cannot be reliably
# generated unless we're using double types
SkipInfo('TestOpInfo', 'test_unsupported_dtypes'),
SkipInfo('TestOpInfo', 'test_unsupported_backward',
dtypes=(torch.float32, torch.complex64,)),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
dtypes=floating_and_complex_types(),
# TODO: RuntimeError: While computing batched gradients,
# got: vmap: Calling Tensor.as_strided is not supported
# unless the batch dims being vmapped over are at the front of the tensor (in memory layout).
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
# RuntimeError: torch.linalg.cholesky: U(1,1) is zero, singular U.
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.cond',
aten_name='linalg_cond',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.eig',
aten_name='linalg_eig',
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eig,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigvals',
aten_name='linalg_eigvals',
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigh',
aten_name='linalg_eigh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigvalsh',
aten_name='linalg_eigvalsh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('linalg.householder_product',
aten_name='linalg_householder_product',
op=torch.linalg.householder_product,
aliases=('orgqr', ),
dtypes=floating_and_complex_types(),
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
op=torch.linalg.lstsq,
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
supports_autograd=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_matrix_power',
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
sample_inputs_func=sample_inputs_linalg_matrix_power,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('linalg.multi_dot',
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name='linalg_multi_dot',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('linalg.norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_norm,
aten_name='linalg_norm',
skips=(
# linalg.norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
dtypes=floating_and_complex_types(),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
# linalg.matrix_norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('linalg.qr',
aten_name='linalg_qr',
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_qr,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_slogdet,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.vector_norm',
op=torch.linalg.vector_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_vector_norm,
aten_name='linalg_vector_norm',
skips=(
# linalg.vector_norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat, torch.cdouble]),
)),
OpInfo('logaddexp',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
OpInfo('logaddexp2',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_autograd=False,
skips=(
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
OpInfo('lt',
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_lu_solve,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
supports_out=True,
sample_inputs_func=sample_inputs_lu_unpack,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# cuda gradchecks are slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),
)),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_fill,
supports_forward_ad=True,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
supports_forward_ad=True,
supports_out=False),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_masked_select),
OpInfo('matrix_exp',
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_matrix_exp,
supports_out=False,
),
OpInfo('matmul',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_matmul,
skips=(
# matmul does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('max',
op=torch.max,
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,),
OpInfo('max',
op=torch.max,
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True,
skips=(
# max does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),)),
OpInfo('max',
op=torch.max,
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('median',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of median do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# TODO: FIXME: complex inputs requiring grad error in forward
SkipInfo('TestCommon', 'test_dtypes'),
# TODO: review with var_mean tests in test_autograd.py
SkipInfo('TestJit', 'test_variant_consistency_jit'),
SkipInfo('TestGradients', 'test_fn_grad'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
SkipInfo('TestGradients', 'test_forward_mode_AD'))),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# TODO: FIXME: complex inputs requiring grad error in forward
SkipInfo('TestCommon', 'test_dtypes'),
# TODO: fix along with var_mean autograd tests
SkipInfo('TestJit', 'test_variant_consistency_jit'),
SkipInfo('TestGradients', 'test_fn_grad'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
SkipInfo('TestGradients', 'test_forward_mode_AD'))),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
# Our implementation corresponds to "ij" indexing for
# numpy.meshgrid, but its default value is "xy".
ref=lambda *tensors: np.meshgrid(*tensors, indexing='ij'),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
SkipInfo('TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_forward_ad=True),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
# Unlike the variant above, we do not use np.meshgrid as a
# ref since it does not officially support list of numpy
# arrays.
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_forward_ad=True),
OpInfo('min',
op=torch.min,
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,),
OpInfo('min',
op=torch.min,
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True,
skips=(
# min does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('min',
op=torch.min,
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
# TODO(@heitorschueroff) Add test for dtype kwarg
OpInfo('mean',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_reduction,
           # Need to skip out test because one of the overloads of mean does not support it
           # TODO(@heitorschueroff) fix this when implementing ReductionInfo
skips=(SkipInfo('TestCommon', 'test_out'),)),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile),
OpInfo('maximum',
op=torch.maximum,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('minimum',
op=torch.minimum,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
# `softmax` supports different dtypes based on whether `dtype` argument,
# is passed or not. Hence two OpInfo entries, one with dtype and other without.
OpInfo('softmax',
aliases=('nn.functional.softmax',),
aten_name='softmax',
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True,
supports_out=False),
OpInfo('softmax',
aliases=('nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_out=False),
OpInfo('nn.functional.normalize',
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize,
skips=(
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(onlyOnCPUAndCUDA,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax,
skips=(
# FIXME: aminmax does not check for safe casting to output
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_cosine_similarity),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.relu',
aten_name="relu",
supports_autograd=True,
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
dtypesIfCPU=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.layer_norm',
aten_name='layer_norm',
aliases=('layer_norm',),
ref=reference_layer_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),
'TestCommon', 'test_reference_testing'
),
unittest.skipIf("tbb" in os.getenv("BUILD_ENVIRONMENT", ""), "This test makes TBB Sad"),
],
sample_inputs_func=sample_inputs_layer_norm,),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# op name not found in JIT graph
# There are multiple aten ops, namely reflection_pad_{1,2,3}d
# so we can't use aten_name argument in opinfo
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# op name not found in JIT graph
# There are multiple aten ops, namely replication_pad_{1,2,3}d
# so we can't use aten_name argument in opinfo
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
check_batched_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_unfold,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
dtypesIfCPU=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
dtypes=floating_types(),
sample_inputs_func=sample_inputs_leaky_relu,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_out=False,
dtypesIfCPU=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_avgpool2d),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
ref=reference_logsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
# autodiff_nonfusible_nodes=["aten::log_sigmoid"],
decorators=[
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_normal'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_hard'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
],
),
OpInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_nextafter),
OpInfo('topk',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_topk,
skips=(
               # topk does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('nn.functional.hardshrink',
aten_name="hardshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
OpInfo('nn.functional.hardtanh',
aten_name="hardtanh",
dtypesIfCPU=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),
backward_dtypesIfCPU=all_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"],
),
OpInfo('nn.functional.gelu',
aten_name="gelu",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::gelu"]),
OpInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mm,
skips=(
# mm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mode,),
MvlGammaInfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
OpInfo('ne',
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_narrow),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
assert_autodiffed=True,),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_dist,
skips=(
# dist does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
OpInfo('permute',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_permute),
OpInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
           # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
           # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
           # unsupported on CPU.
backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_pow,
supports_inplace_autograd=False,
supports_forward_ad=True,
assert_autodiffed=True,
),
OpInfo('float_power',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_pow,
supports_forward_ad=True,
skips=(
SkipInfo('TestMathBits', 'test_conj_view', device_type='cuda'),),),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr,
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('roll',
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_roll),
OpInfo('rot90',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_rot90),
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
# Test fails in comparison on Nan as the `equal_nan` is True for
# comparing the CPU tensors.
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.complex64])
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_out=False,
assert_autodiffed=True),
OpInfo('split',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_out=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_split_with_sizes,
supports_out=False,
assert_autodiffed=True),
OpInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::add'],),
OpInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
supports_forward_ad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
OpInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
OpInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],
torch.complex64, torch.complex128),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],
torch.complex64, torch.complex128),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_matmul,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view')],
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=all_types_and(torch.bfloat16, torch.half),
dtypesIfCPU=floating_types_and(torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
# Support autograd after torch.remainder(Tensor, Tensor) supports
# autograd of the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
OpInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
OpInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_tensor',
supports_out=False,
supports_inplace_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=[torch.cfloat, torch.cdouble]),
),
sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_scalar',
supports_out=False,
supports_inplace_autograd=False,
sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),
assert_autodiffed=True,),
OpInfo('select',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_autograd=False,),
OpInfo('solve',
op=torch.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('std',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: std does support out in some signatures
supports_out=False,
assert_autodiffed=True,
),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "tanh_backward_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
supports_out=False,
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half),
dtypesIfCPU=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool),
dtypesIfCPU=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
# Passing numpy_kwargs via sample_kwargs, as numpy does comparison
# with BFloat16 in float, since it currently doesn't support BFloat16.
# Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
handles_complex_extremals=False),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16])),
safe_casts_outputs=True,
handles_complex_extremals=False),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.inv_ex',
aten_name='linalg_inv_ex',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_complex_to_float=True),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('eig',
op=torch.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_eig,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
skipCUDAIfRocm
],),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
supports_out=False,
sample_inputs_func=sample_inputs_einsum,
skips=(
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svdvals',
op=torch.linalg.svdvals,
aten_name='linalg_svdvals',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svdvals,
check_batched_gradgrad=False,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack]),
OpInfo('polar',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_polar),
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
# To test reference numerics against multiple values of argument `n`,
# we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).
# We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Probably related to the way the function is
# scripted for JIT tests (or maybe not).
# RuntimeError:
# Arguments for call are not valid.
# The following variants are available:
# aten::polygamma(int n, Tensor self) -> (Tensor):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> (Tensor(a!)):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# The original call is:
# File "<string>", line 3
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo('special.polygamma',
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name='special_polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Probably related to the way the function is
# scripted for JIT tests (or maybe not).
# RuntimeError:
# Arguments for call are not valid.
# The following variants are available:
# aten::polygamma(int n, Tensor self) -> (Tensor):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> (Tensor(a!)):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# The original call is:
# File "<string>", line 3
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_1',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_2',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_3',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_4',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4})),
OpInfo('ravel',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_reshape,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_reshape_as,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Because view does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_inputs_func=sample_inputs_view_reshape,
),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Because view_as does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_inputs_func=sample_inputs_view_as_reshape_as,
),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_fill),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_copy,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_index_select,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_add,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=False,
op=torch.Tensor.__getitem__,
sample_inputs_func=sample_inputs_getitem,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit'),)),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_sort,
skips=(
# sort does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter,),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_add,
supports_out=False),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True),
OpInfo('hypot',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hypot,
),
OpInfo('histogram',
dtypes=_dispatch_dtypes(), # histogram is only implemented on CPU
dtypesIfCPU=floating_types(),
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
SkipInfo('TestJit', 'test_variant_consistency_jit'),),),
OpInfo('cat',
ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),
aliases=('concat',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cat_concat,
supports_forward_ad=True,
assert_autodiffed=True,
skips=(
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
SkipInfo('TestJit', 'test_jit_alias_remapping'),)),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
SkipInfo('TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
check_batched_gradgrad=False,
skips=(
# torch.unfold does not exist so we get a RuntimeError.
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
# Skip operator schema test because this is a functional and not an operator
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
check_batched_gradgrad=False,
skips=(
# msort does not correctly warn when resizing out= inputs.
SkipInfo('TestCommon', 'test_out',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# torch.repeat does not exist so we get a RuntimeError.
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_repeat_tile),
OpInfo('squeeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_squeeze),
OpInfo('fill_',
op=lambda x, scalar: torch.fill_(x.clone(), scalar),
method_variant=None,
inplace_variant=torch.Tensor.fill_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_fill_),
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=None,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_cumulative_trapezoid),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=sample_unsqueeze),
OpInfo('var',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: revisit, some var signatures do support out (see std, too)
supports_out=False,
assert_autodiffed=True,
),
OpInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_xlogy),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('special.xlog1py',
aten_name='special_xlog1py',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_xlog1py),
OpInfo('special.zeta',
aten_name='special_zeta',
dtypes=all_types_and(torch.bool),
supports_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_binary_pwise),
# OpInfo entry to verify the gradient formula of `other`/`q`
OpInfo('special.zeta',
op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
aten_name='special_zeta',
variant_test_name='grad',
dtypes=all_types_and(torch.bool),
supports_autograd=True,
safe_casts_outputs=True,
skips=(
# Lambda doesn't work in JIT test
SkipInfo("TestJit", "test_variant_consistency_jit"),
),
sample_inputs_func=sample_inputs_zeta),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
aliases=('swapdims', 'swapaxes'),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kron),
OpInfo('inner',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Currently failing due to an INTERNAL_ASSERT_FAILED error.
# Reference: https://github.com/pytorch/pytorch/issues/56314
SkipInfo("TestJit", "test_variant_consistency_jit", dtypes=[torch.float32]),
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# TODO: FIXME: complex inputs requiring grad error in forward
SkipInfo('TestCommon', 'test_dtypes'),
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)
),
OpInfo('logcumsumexp',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', ),
ref=reference_sigmoid if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# TODO: FIXME: sigmoid fails on complex inputs that require grad
SkipInfo('TestCommon', 'test_dtypes'),
# Reference: https://github.com/pytorch/pytorch/issues/56012
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.complex64]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.complex64]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('special.entr',
ref=scipy.special.entr if TEST_SCIPY else _NOTHING,
aten_name='special_entr',
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-1,
torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_entr),
UnaryUfuncInfo('special.ndtri',
ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aten_name='special_ndtri',
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else _NOTHING,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
)),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else _NOTHING,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
safe_casts_outputs=True),
OpInfo(
'logdet',
supports_out=False,
sample_inputs_func=sample_inputs_logdet,
decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True),
UnaryUfuncInfo('logit',
ref=scipy.special.logit if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit,
safe_casts_outputs=True),
OpInfo('where',
# Currently only the `input` is tested in gradcheck.
# If we pass `condition` first, none of the input which supports
# autograd will be tested. Hence the following lambda.
op=lambda self, condition, other: torch.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
supports_out=False,
skips=(
# test does not work with passing lambda for op
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),
    # `torch.norm` has multiple code paths depending on the value of `p`.
    # These paths have different dtype support. Also, JIT supports
    # most variants but not all of them. So we split the OpInfo entries
    # for `norm` based on the code paths and JIT support.
OpInfo('norm',
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
)
),
OpInfo('norm',
variant_test_name='nuc',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
variant_test_name='fro',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
variant_test_name='inf',
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
               # The following 3 tests fail intermittently.
SkipInfo('TestJit', 'test_variant_consistency_jit',
device_type='cpu', dtypes=(torch.complex64,)),
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cpu', dtypes=(torch.complex128,)),
SkipInfo('TestGradients', 'test_fn_gradgrad',
device_type='cpu', dtypes=(torch.complex128,)),
)
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,),
UnaryUfuncInfo('special.erfcx',
ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,
aten_name='special_erfcx',
decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.softplus",
ref=reference_softplus,
sample_inputs_func=sample_inputs_softplus,
dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
skips=(
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
),
OpInfo(
"nn.functional.mse_loss",
ref=reference_mse_loss,
sample_inputs_func=sample_inputs_mse_loss,
supports_out=False,
dtypesIfCPU=floating_types_and(torch.float16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"nn.functional.grid_sample",
ref=_NOTHING,
dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
ReductionOpInfo(
'all',
identity=True,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: does not support passing keepdim without dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
SkipInfo('TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: does not support passing keepdim without dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
SkipInfo('TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=lambda a, dim=None, keepdim=False, **kwargs: np.amax(a, axis=dim, keepdims=keepdim, **kwargs),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=lambda a, dim=None, keepdim=False, **kwargs: np.amin(a, axis=dim, keepdims=keepdim, **kwargs),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
SkipInfo('TestReductions', 'test_dim_single_keepdim'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
SkipInfo('TestReductions', 'test_dim_multi_keepdim'),
SkipInfo('TestReductions', 'test_dim_multi_unsorted_keepdim'),
SkipInfo('TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
SkipInfo('TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
supports_out=False,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_prod,
skips=(
# FIXME: prod does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: sum does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=False,
promotes_int_to_int64=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: nansum does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: nansum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: nansum does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
OpInfo(
"nn.functional.nll_loss",
ref=_NOTHING,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
]
# Common operator groupings


def _ops_of_type(info_cls):
    """Return every op_db entry that is an instance of info_cls."""
    return [op for op in op_db if isinstance(op, info_cls)]


unary_ufuncs = _ops_of_type(UnaryUfuncInfo)
binary_ufuncs = _ops_of_type(BinaryUfuncInfo)
spectral_funcs = _ops_of_type(SpectralFuncInfo)
sparse_unary_ufuncs = [op for op in _ops_of_type(UnaryUfuncInfo) if op.supports_sparse is True]
shape_funcs = _ops_of_type(ShapeFuncInfo)
reduction_ops = _ops_of_type(ReductionOpInfo)
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
    """Return a random integer (long) tensor of `shape` with entries in [0, max_indices).

    A bare int `shape` is treated as a 1-D shape of that length.
    """
    if not isinstance(shape, tuple):
        shape = (shape,)
    # Draw uniform values in [0, 1), scale into [0, max_indices), then truncate.
    values = torch.rand(*shape, dtype=torch.double, device=device)
    return values.mul_(max_indices).floor_().long()
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
    """Build a 2-D long tensor of gather indices for a tensor indexed along `index_dim`.

    Every slice along `index_dim` holds distinct values drawn from
    range(max_indices). When `duplicate` is True, the first slice along the
    other (batch) dimension is overwritten with a copy of the second one.
    """
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    # One randomly-permuted slice per position along index_dim, truncated to
    # the batch-dimension length, then stacked into the requested shape.
    slices = [torch.randperm(max_indices, device=device)[:shape[batch_dim]]
              for _ in range(shape[index_dim])]
    index = torch.stack(slices, dim=index_dim)
    if duplicate:
        index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
    return index
def bernoulli_scalar():
    """Return a 0-dim bool tensor sampled in place from a Bernoulli(0.5) draw."""
    scalar = torch.tensor(0, dtype=torch.bool)
    return scalar.bernoulli_()
def mask_not_all_zeros(shape):
    """Return a random boolean mask of `shape` guaranteed to contain a True entry."""
    assert len(shape) > 0
    # Rejection-sample: an all-False mask is regenerated until at least one
    # element is True.
    while True:
        candidate = torch.randn(shape) > 0
        if candidate.any():
            return candidate
# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove
# these from here
def _compare_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
if row == 0 or col == 0:
# have to handle this separately as tril and triu does not take
# empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
else:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.tril(offset).nonzero().to(dtype).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.triu(offset).nonzero().to(dtype).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
        self, row, col, offset=0, dtype=torch.long, device='cpu'):
    """Spot-check tril_indices/triu_indices on large shapes.

    Only the trailing 99 index columns are compared against a dense
    nonzero() reference computed on CPU, to bound memory use; the CUDA
    cache is emptied between steps to reduce OOM risk on GPU devices.
    """
    l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \
        .nonzero()[-100:-1, :].transpose(0, 1).to(device)
    torch.cuda.empty_cache()
    r = torch.tril_indices(
        row, col, offset, dtype=dtype, device=device)[:, -100:-1]
    self.assertEqual(l, r)
    torch.cuda.empty_cache()
    l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \
        .nonzero()[-100:-1, :].transpose(0, 1).to(device)
    torch.cuda.empty_cache()
    r = torch.triu_indices(
        row, col, offset, dtype=dtype, device=device)[:, -100:-1]
    self.assertEqual(l, r)
    torch.cuda.empty_cache()
# Argument tuples for the tril/triu index comparison tests above.
# (
#   row
#   col
#   offset (optional)
#   dtype (optional)
# )
# Covers empty rows/cols, positive/negative/out-of-range offsets, tall,
# wide and square shapes, and several dtypes.
# NOTE(review): (1024, 1024, -500) appears twice below — presumably
# intentional repetition; confirm before deduplicating.
tri_tests_args = [
    (1, 1),
    (3, 3),
    (3, 3, 1),
    (3, 3, 2),
    (3, 3, 200),
    (3, 3, -1),
    (3, 3, -2),
    (3, 3, -200),
    (0, 3, 0),
    (0, 3, 1),
    (0, 3, -1),
    (3, 0, 0),
    (3, 0, 1),
    (3, 0, -1),
    (0, 0, 0),
    (0, 0, 1),
    (0, 0, -1),
    (3, 6, 0),
    (3, 6, 1),
    (3, 6, 3),
    (3, 6, 9),
    (3, 6, -1),
    (3, 6, -3),
    (3, 6, -9),
    (6, 3, 0),
    (6, 3, 1),
    (6, 3, 3),
    (6, 3, 9),
    (6, 3, -1),
    (6, 3, -3),
    (6, 3, -9),
    (258, 253, 1, torch.float32),
    (257, 258, 1, torch.float64),
    (258, 258, 1, torch.short),
    (3, 513, 1, torch.long),
    (513, 3, 1, torch.int),
    (513, 0, 1, torch.double),
    (1024, 1024),
    (1024, 1024, 500, torch.float32),
    (1024, 1024, 1023),
    (1024, 1024, -500),
    (1023, 1025),
    (1025, 1023, 1022),
    (1024, 1024, -500),
    (3, 2028),
    (3, 2028, 1),
    (3, 2028, -1),
    (2028, 3),
    (2028, 1),
    (2028, 1, -1)
]
# Very large (row, col[, offset]) cases for the large tril/triu check.
tri_large_tests_args: List[Tuple[int, ...]] = [
    # Large test cases below are deliberately commented out to speed up CI
    # tests and to avoid OOM error. When modifying implementations of
    # tril_indices and triu_indices, please enable these tests and make sure
    # they pass.
    #
    # (1, 268435455),
    # (5000, 5000),
    # (10000, 10000),
    # (268435455, 1),
    # (134217727, 2, 1),
    # (2, 134217727, 1),
    # (536870901, 1),
    # (1, 536870901),
    # (268435455, 2, 1),
    # (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
    """Extra tril/triu_indices checks.

    Verifies that an explicit ``layout=torch.strided`` matches the default,
    and that sparse layouts are rejected with a RuntimeError.
    """
    dense = torch.ones(
        3, 3, dtype=torch.long, device=device, layout=torch.strided)
    expected_lower = dense.tril(0).nonzero().transpose(0, 1)
    expected_upper = dense.triu(0).nonzero().transpose(0, 1)
    self.assertEqual(expected_lower, torch.tril_indices(3, 3, device=device))
    self.assertEqual(
        expected_lower,
        torch.tril_indices(3, 3, device=device, layout=torch.strided))
    self.assertEqual(expected_upper, torch.triu_indices(3, 3, device=device))
    self.assertEqual(
        expected_upper,
        torch.triu_indices(3, 3, device=device, layout=torch.strided))
    # Sparse layouts are unsupported and must raise.
    for factory in (torch.triu_indices, torch.tril_indices):
        self.assertRaises(
            RuntimeError,
            lambda factory=factory: factory(
                1, 1, device=device, layout=torch.sparse_coo))
# TODO: move into common_utils.py or the test suite(s) that use this
def unpack_variables(args):
    """Recursively rebuild nested tuples, returning non-tuple leaves as-is."""
    if not isinstance(args, tuple):
        return args
    return tuple(unpack_variables(item) for item in args)
class dont_convert(tuple):
    """Marker tuple: create_input passes instances through unconverted."""
    pass
# Wrapper marking a tensor argument that must not receive requires_grad
# when materialized by create_input.
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
# TODO: move into common_utils.py or the test suite(s) that use this
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
    """Materialize test-call arguments into tensors.

    Shape tuples become random tensors, callables are invoked with
    (dtype, device), ``non_differentiable`` wrappers are converted without
    gradients, and existing tensors are detached/cloned.  Returns the
    ``(args, kwargs)`` pair ready for the op under test.
    """
    if not isinstance(call_args, tuple):
        call_args = (call_args,)
    def map_arg(arg):
        def maybe_non_contig(tensor):
            return tensor if not non_contiguous else make_non_contiguous(tensor)
        def conjugate(tensor):
            return tensor.conj()
        # Sizes and dont_convert markers pass through untouched.
        if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
            return arg
        elif isinstance(arg, tuple) and len(arg) == 0:
            # Empty tuple -> random 0-dim (scalar) tensor.
            var = conjugate(torch.randn((), dtype=dtype, device=device))
            var.requires_grad = requires_grad
            return var
        elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
            # Tuple of sizes -> random tensor of that shape.
            return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
        # double check casting
        elif isinstance(arg, non_differentiable):
            # Upcast float->double / cfloat->cdouble; never set requires_grad.
            if isinstance(arg.tensor, torch.Tensor):
                if arg.tensor.dtype == torch.float:
                    return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))
                if arg.tensor.dtype == torch.cfloat:
                    return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))
                return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
            return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
        elif isinstance(arg, torch.Tensor):
            if arg.dtype == torch.float:
                arg = arg.double()
            if arg.dtype == torch.cfloat:
                arg = arg.to(torch.cdouble)
            if arg.is_complex() != dtype.is_complex:
                # NOTE(review): the trailing comma makes this a two-argument
                # RuntimeError — presumably one message was intended.
                raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
                                   "which is not supported for now")
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
            v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
            v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
            return v
        elif callable(arg):
            # Lazy factory: call it with the requested dtype/device.
            return map_arg(arg(dtype=dtype, device=device))
        else:
            return arg
    args_out = tuple(map_arg(arg) for arg in call_args)
    kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
    return args_out, kwargs_out
| 45.807884 | 131 | 0.598241 |
ace4fa32b62feebaaa6043bc03d12e08cad4eef8 | 388 | py | Python | src/twitchy/__init__.py | CodeSpent/Twitchy | 82a46b5fa1355ce46f29b8eb397d748a4f4cdb2a | [
"MIT"
] | 2 | 2021-02-28T03:06:23.000Z | 2021-08-23T01:55:19.000Z | src/twitchy/__init__.py | CodeSpent/twitch-helix | 82a46b5fa1355ce46f29b8eb397d748a4f4cdb2a | [
"MIT"
] | 5 | 2020-10-13T05:58:48.000Z | 2020-11-25T19:34:48.000Z | src/twitchy/__init__.py | CodeSpent/Twitchy | 82a46b5fa1355ce46f29b8eb397d748a4f4cdb2a | [
"MIT"
] | 3 | 2020-10-21T18:33:16.000Z | 2020-11-24T23:42:03.000Z | # -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
from .api import Helix
# Resolve the installed package version at import time.
try:
    # Change here if project is renamed and does not equal the package name
    dist_name = "twitchy"
    __version__ = get_distribution(dist_name).version
except DistributionNotFound:
    # Package is not installed (e.g. running from a source checkout).
    __version__ = "unknown"
finally:
    # Avoid leaking the pkg_resources helpers into the package namespace.
    del get_distribution, DistributionNotFound
| 27.714286 | 75 | 0.762887 |
ace4fa4b23343dc765a63dba20b5bd44a6ddce7f | 1,332 | py | Python | src/test/update_test.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 76 | 2020-04-03T01:21:47.000Z | 2021-12-06T02:54:53.000Z | src/test/update_test.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 7 | 2020-04-06T04:44:10.000Z | 2021-05-17T12:38:15.000Z | src/test/update_test.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 2 | 2020-10-23T10:25:16.000Z | 2020-10-28T14:16:57.000Z | import json
from nebulo.gql.relay.node_interface import NodeIdStructure
SQL_UP = """
CREATE TABLE account (
id serial primary key,
name text not null,
created_at timestamp without time zone default (now() at time zone 'utc')
);
INSERT INTO account (id, name) VALUES
(1, 'oliver'),
(2, 'rachel'),
(3, 'sophie');
"""
def test_update_mutation(client_builder):
    """updateAccount by nodeId changes the row and echoes clientMutationId."""
    client = client_builder(SQL_UP)
    account_id = 1
    # Relay-style opaque id for the account row under test.
    node_id = NodeIdStructure(table_name="account", values={"id": account_id}).serialize()
    query = f"""
    mutation {{
        updateAccount(input: {{
            clientMutationId: "gjwl"
            nodeId: "{node_id}"
            account: {{
                name: "Buddy"
            }}
        }}) {{
            clientMutationId
            account {{
                dd: id
                nodeId
                name
            }}
        }}
    }}
    """
    with client:
        resp = client.post("/", json={"query": query})
    assert resp.status_code == 200
    payload = json.loads(resp.text)
    assert isinstance(payload["data"]["updateAccount"]["account"], dict)
    assert payload["data"]["updateAccount"]["account"]["dd"] == account_id
    assert payload["data"]["updateAccount"]["account"]["nodeId"] == node_id
    assert payload["data"]["updateAccount"]["account"]["name"] == "Buddy"
    assert payload["data"]["updateAccount"]["clientMutationId"] == "gjwl"
    # NOTE(review): assumes the response always carries an "errors" key,
    # even on success — confirm against the server's response shape.
    assert len(payload["errors"]) == 0
| 25.132075 | 90 | 0.630631 |
ace4fa6e64891245379ce70ed859560bd47f1b29 | 778 | py | Python | sa/interfaces/igetobjectstatus.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/interfaces/igetobjectstatus.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/interfaces/igetobjectstatus.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# IGetObjectStatus interface
# ---------------------------------------------------------------------
# Copyright (C) 2007-2010 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC Modules
from noc.core.interface.base import BaseInterface
from .base import ListOfParameter, DictParameter, StringParameter, BooleanParameter
class IGetObjectStatus(BaseInterface):
    """Script interface: per-object up/down status report.

    Implementations return a list of ``{"name": <str>, "status": <bool>}``
    dictionaries.
    """
    returns = ListOfParameter(
        element=DictParameter(attrs={"name": StringParameter(), "status": BooleanParameter()})
    )
    # UI component used to preview this script's result.
    preview = "NOC.sa.managedobject.scripts.ShowObjectStatus"
| 35.363636 | 94 | 0.552699 |
ace4fb888dfee80b7d25a4f42eed14be64a1bbee | 3,817 | py | Python | views/ShowTreeView.py | leandromaia/mvc_by_python | c87b77f881ae49356047a378161218545163d00d | [
"MIT"
] | 1 | 2022-01-13T15:14:38.000Z | 2022-01-13T15:14:38.000Z | views/ShowTreeView.py | leandromaia/mvc_by_python | c87b77f881ae49356047a378161218545163d00d | [
"MIT"
] | null | null | null | views/ShowTreeView.py | leandromaia/mvc_by_python | c87b77f881ae49356047a378161218545163d00d | [
"MIT"
] | 2 | 2021-10-19T23:37:36.000Z | 2021-11-30T18:54:42.000Z | import tkinter as tk
from tkinter import ttk
from views.View import View
class ShowTreeView(tk.Tk, View):
    '''View responsible for showing registered customers'''
    # Widget padding, per-column width and table header labels.
    PAD = 10
    COLUMN_WIDTH = 200
    THEADER = [
        "Id", "First name", "Last name", "Zipcode", "Price paid"
    ]

    def __init__(self, controller):
        '''Controller of this view'''
        super().__init__()
        self.title("Show Customers")
        self.showTreeViewController = controller
        self._make_mainFrame()
        self._make_title()
        self._show_customers()

    def _make_mainFrame(self):
        '''Creates view's frame'''
        self.frame_main = ttk.Frame(self)
        self.frame_main.pack(padx=self.PAD, pady=self.PAD)

    def _make_title(self):
        '''Sets view's title'''
        title = ttk.Label(self.frame_main, text="Customers Manager - Show", font=("Helvetica", 20))
        title.pack(padx=self.PAD, pady=self.PAD)

    def update(self):
        '''Refreshes view'''
        # Rebuild the customer table from scratch with fresh data.
        self.frame_customers.destroy()
        self._show_customers()

    def _contextMenu_display(self, event):
        '''Displays view's context menu (bound to right-click on a row)'''
        self.contextMenu = tk.Menu(self.frame_main, tearoff=0)
        self.contextMenu.add_command(label="Edit", command=lambda: self.showTreeViewController.btnEdit(self.contextMenu_selectedId))
        self.contextMenu.add_command(label="Delete", command=self.showTreeViewController.btnDel)
        # Take data from the row that was clicked
        # Ex: tv.item(data) => {'text': 1, 'image': '', 'values': ['name', 'lastname', 3213, '321.00'], 'open': 0, 'tags': ''}
        rowSelected = self.tv.identify_row(event.y)
        # Check if some data was taken
        if rowSelected:
            # Remember the clicked customer's id for the Edit command.
            self.contextMenu_selectedId = self.tv.item(rowSelected)['text']
            # Let the row that was clicked as selected
            self.tv.focus(rowSelected)
            self.tv.selection_set(rowSelected)
            # Open context menu
            self.contextMenu.selection = self.tv.set(rowSelected)
            self.contextMenu.post(event.x_root, event.y_root)

    def _show_customers(self):
        '''Displays customers on screen'''
        customers = self.showTreeViewController.getCustomers()
        self.frame_customers = tk.Frame(self.frame_main)
        self.frame_customers.pack(fill="x")
        frame_customersView = tk.Frame(self.frame_customers)
        frame_customersView.pack()
        # Create TreeView widget
        self.tv = ttk.Treeview(frame_customersView)
        # Create columns and name them (so them can be referenced)
        self.tv['columns'] = self.THEADER[1:]
        # Put columns info
        # Header: column "#0" shows the id; remaining columns are named.
        self.tv.heading("#0", text=self.THEADER[0], anchor=tk.W)
        self.tv.column("#0", anchor=tk.W, width=100)
        for i in range(1, len(self.THEADER)):
            self.tv.heading(self.THEADER[i], text=self.THEADER[i])
            self.tv.column(self.THEADER[i], anchor="center", width=self.COLUMN_WIDTH)
        # Data: first field is the row id, the rest fill the columns.
        for customer in customers:
            self.tv.insert("", tk.END, text=customer[0], values=customer[1:])
        # Put tree view on frame
        self.tv.grid(sticky=(tk.N, tk.S, tk.W, tk.E))
        self.tv.grid_rowconfigure(0, weight=1)
        self.tv.grid_columnconfigure(0, weight=1)
        # Add listener for enable the context menu
        self.tv.bind("<Button-3>", self._contextMenu_display)
        btn = ttk.Button(self.frame_customers, text="Update data", command=self.update)
        btn.pack()

    def main(self):
        '''Enters the Tk event loop.'''
        self.mainloop()

    def close(self):
        # No teardown needed for this view.
        return
| 35.342593 | 132 | 0.605973 |
ace4fbbfd7bd21345f0f4258a9fc898d21f8c6fc | 3,932 | py | Python | restricted_boltzmann_machine/Unsupervised.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | null | null | null | restricted_boltzmann_machine/Unsupervised.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | null | null | null | restricted_boltzmann_machine/Unsupervised.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | 1 | 2019-09-29T12:47:32.000Z | 2019-09-29T12:47:32.000Z | #!/usr/bin/python
'''
Learning Machines
Taught by Patrick Hebron at NYU ITP
Restricted Boltzmann Machine (RBM) implementation.
'''
import numpy as np
import Activation
from MnistReporter import *
import time
class Rbm:
    """Restricted Boltzmann Machine trained with contrastive divergence.

    ``continuous=True`` selects a CRBM variant that propagates real-valued
    activations instead of binomial samples between layers.
    """

    def __init__(self, name, sizeV, sizeH, continuous = False):
        # name labels training reports; sizeV/sizeH are layer widths.
        self.name = name
        self.is_crbm = continuous
        # Initialize weights:
        self.weights = np.array( np.random.uniform( -1.0 / sizeV, 1.0 / sizeV, ( sizeV, sizeH ) ) )
        # Initialize biases:
        self.biasH = np.zeros( sizeH )
        self.biasV = np.zeros( sizeV )

    def getErrorRate(self, samples, reconstructions):
        '''returns mean square error'''
        return np.mean( np.square( samples - reconstructions ) )

    def trainEpoch(self, training_samples, learn_rate, cd_steps, batch_size):
        """Run one CD-k epoch over mini-batches; returns mean reconstruction error."""
        error = 0.0
        num_rows = training_samples.shape[ 0 ]
        # Iterate over each training batch:
        for bstart in range( 0, num_rows, batch_size ):
            # Compute batch stop index:
            bstop = min( bstart + batch_size, num_rows )
            # Compute batch size:
            bsize = bstop - bstart
            # Compute batch multiplier:
            bmult = learn_rate * ( 1.0 / float( bsize ) )
            # Slice data:
            bsamples = training_samples[ bstart:bstop, : ]
            # Get hidden activations and samples:
            aH_0, sH_0 = self.getHiddenSample( bsamples )
            # Perform each contrastive divergence step:
            for i in range( cd_steps ):
                aV_inf, sV_inf, aH_inf, sH_inf = self.getGibbsHvh( ( sH_0 if i == 0 else sH_inf ) )
            # Update weights:
            self.weights += bmult * ( np.dot( bsamples.T, aH_0 ) - np.dot( sV_inf.T, aH_inf ) )
            # Update biases:
            self.biasV += bmult * np.mean( bsamples - sV_inf, axis = 0 )
            self.biasH += bmult * np.mean( aH_0 - aH_inf, axis = 0 )
            # Scale batch error and accumulate total:
            error += self.getErrorRate( bsamples, sV_inf ) * ( float( bsize ) / float( num_rows ) )
        # Return training error:
        return error

    def train(self, training_samples, validation_samples, learn_rate, cd_steps, epochs, batch_size = 10, report_freq = 10, report_buff = 100):
        """Train for ``epochs`` epochs, reporting train/validation error periodically."""
        # Setup error reporter:
        error_reporter = MnistUnsupervisedReporter( self.name, report_freq, report_buff )
        # Perform each training epoch:
        for epoch in range( epochs ):
            training_error = self.trainEpoch( training_samples, learn_rate, cd_steps, batch_size )
            # Report error, if applicable:
            if ( epoch + 1 ) % report_freq == 0:
                # Compute validation error:
                val_aH, val_sH, val_aV, val_sV = self.getGibbsVhv( validation_samples )
                validation_error = self.getErrorRate( validation_samples, val_sV )
                # Update error reporter:
                error_reporter.update( epoch, training_error, validation_error, validation_samples, val_sV )
        # Save final training visualization to image:
        error_reporter.saveImage( 'report_' + str(time.time()) + '_' + self.name + '_training.png' )

    def getHiddenActivations(self, inputV):
        """Sigmoid activations of the hidden layer given visible input."""
        return Activation.sigmoid( np.dot( inputV, self.weights ) + self.biasH )

    def getVisibleActivations(self, inputH):
        """Sigmoid activations of the visible layer given hidden input."""
        return Activation.sigmoid( np.dot( inputH, self.weights.T ) + self.biasV )

    def getHiddenSample(self, inputV, force_binomial = False):
        """Return (activations, sample) for the hidden layer.

        CRBMs pass activations through unless ``force_binomial`` is set.
        """
        aH = self.getHiddenActivations( inputV )
        return [ aH, self.getSample( aH ) if ( force_binomial or not self.is_crbm ) else aH ]

    def getVisibleSample(self, inputH, force_binomial = False):
        """Return (activations, sample) for the visible layer."""
        aV = self.getVisibleActivations( inputH )
        return [ aV, self.getSample( aV ) if ( force_binomial or not self.is_crbm ) else aV ]

    def getGibbsHvh(self, inputH, force_binomial = False):
        """One Gibbs step starting from hidden: hidden -> visible -> hidden."""
        aV, sV = self.getVisibleSample( inputH, force_binomial )
        aH, sH = self.getHiddenSample( sV, force_binomial )
        return [ aV, sV, aH, sH ]

    def getGibbsVhv(self, inputV, force_binomial = False):
        """One Gibbs step starting from visible: visible -> hidden -> visible."""
        aH, sH = self.getHiddenSample( inputV, force_binomial )
        aV, sV = self.getVisibleSample( sH, force_binomial )
        return [ aH, sH, aV, sV ]

    @staticmethod
    def getSample(activations):
        """Draw binomial (0/1) samples using activations as probabilities."""
        return np.random.binomial( 1, activations, activations.shape )
| 39.32 | 139 | 0.708291 |
ace4fccafa47121d9ae0f5147770e4ef7bd114f1 | 852 | py | Python | content/utils/models/genre.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | 2 | 2022-01-24T23:30:18.000Z | 2022-01-26T00:21:22.000Z | content/utils/models/genre.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | content/utils/models/genre.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | """
Created: 22 Apr. 2020
Author: Jordan Prechac
"""
from content.models import Genre
from content.utils.types import _validate_list
# -----------------------------------------------------------------------------
def add_genres_to_object(genres, obj, *args, **kwargs):
    """Add the given genres to ``obj.genres``.

    :param genres: a genre value or list of values (normalized by _validate_list)
    :param obj: model instance exposing a ``genres`` related manager
    :returns: True on completion
    """
    genres = _validate_list(genres)
    # get_or_create returns an (instance, created) tuple — keep the instance,
    # since related managers only accept model instances.
    # NOTE(review): the positional argument lands in get_or_create's
    # ``defaults`` parameter; presumably a lookup kwarg (e.g. name=x) was
    # intended — confirm the Genre lookup field.
    genre_objects = [Genre.objects.get_or_create(x)[0] for x in genres]
    obj.genres.add(*genre_objects)
    return True
def remove_genres_from_object(genres, obj, *args, **kwargs):
    """Remove the given genres from ``obj.genres``.

    :param genres: a genre value or list of values (normalized by _validate_list)
    :param obj: model instance exposing a ``genres`` related manager
    :returns: True on completion
    """
    genres = _validate_list(genres)
    # get_or_create returns an (instance, created) tuple — keep the instance.
    # NOTE(review): positional argument lands in ``defaults``; confirm the
    # intended Genre lookup field.
    genre_objects = [Genre.objects.get_or_create(x)[0] for x in genres]
    obj.genres.remove(*genre_objects)
    return True
def set_genres_on_object(genres, obj, *args, **kwargs):
    """Replace ``obj.genres`` with exactly the given genres.

    :param genres: a genre value or list of values (normalized by _validate_list)
    :param obj: model instance exposing a ``genres`` related manager
    """
    genres = _validate_list(genres)
    # get_or_create returns an (instance, created) tuple — keep the instance.
    # NOTE(review): positional argument lands in ``defaults``; confirm the
    # intended Genre lookup field.
    genre_objects = [Genre.objects.get_or_create(x)[0] for x in genres]
    # RelatedManager.set() takes a single iterable, not unpacked instances.
    obj.genres.set(genre_objects)
| 25.818182 | 79 | 0.661972 |
ace4fcd6adc892827b3a83c0c6e49f733e715ba0 | 6,758 | py | Python | preproc/defs.py | dlesbre/preprocessor | 097591e508bc44e2a2421707b6f32e7e09f53113 | [
"MIT"
] | 1 | 2020-12-30T19:30:42.000Z | 2020-12-30T19:30:42.000Z | preproc/defs.py | Lesbre/preprocessor | 521070f57f2f6be0717ce605d9dcee0e4fa91fae | [
"MIT"
] | null | null | null | preproc/defs.py | Lesbre/preprocessor | 521070f57f2f6be0717ce605d9dcee0e4fa91fae | [
"MIT"
] | null | null | null | """
This module contains various definitions and constants used by the preprocessor
namely:
- class ArgumentParserNoExit(argparse.ArgumentParser)
which raises an error rather than exit.
- class Position to represent position to command
- enum WarningMode to configure the Preprocessor
- function trim to pretty-print docstrings
- function process_string to process read string ("\\n" into newline)
- functions is_integer or to_integer to get ints from strings
- function get_identifier_name to find the first identifier in a string
"""
import argparse
import enum
import re
from typing import Tuple
PREPROCESSOR_NAME = "preproc"
PREPROCESSOR_VERSION = "0.0.1"

# A single identifier: letter/underscore followed by alphanumerics/underscores.
REGEX_IDENTIFIER: str = "[_a-zA-Z][_a-zA-Z0-9]*"
# Format template wrapping a pattern so it only matches a whole identifier.
REGEX_IDENTIFIER_WRAPPED: str = "(^|(?<=([^_a-zA-Z0-9]))){}((?=([^_a-zA-Z0-9]))|$)"
# Character classes marking the end / beginning boundary of an identifier.
REGEX_IDENTIFIER_END: str = "$|[^_a-zA-Z0-9]"
REGEX_IDENTIFIER_BEGIN: str = "^|[^_a-zA-Z]"
# A double-quoted string literal, allowing escaped quotes inside.
REGEX_STRING: str = '""|".*?[^\\\\]"'
# Optional sign and digits, with spaces after the sign and underscores allowed.
REGEX_INTEGER: str = r"-?\ *[0-9]+(?:[_0-9]*[0-9])?"
class Position:
    """represents a position to a command

    #1{% #2cmd#3 args#4 %}#5...#6{% endcmd %}#7
    - #1 - begin
    - #2 - cmd_begin
    - #3 - cmd_argbegin
    - #4 - cmd_end
    - #5 - end
    #6 and #7 values are meaningless if not a block
    - #6 - endblock_begin
    - #7 - endblock_end
    these values are relative to the start of the source file
    being scanned. For values relative to the start of the string
    use relative_begin, relative_end...
    offset represents the offset between current string and source"""

    # Absolute (source-file) offsets; ``offset`` converts between
    # source-relative and current-string-relative coordinates.
    offset: int = 0
    begin: int = 0
    end: int = 0
    cmd_begin: int = 0
    cmd_end: int = 0
    cmd_argbegin: int = 0
    endblock_begin: int = 0
    endblock_end: int = 0

    def to_relative(self: "Position", value: int) -> int:
        """transform a value relative to source into one relative to current string"""
        return value - self.offset

    def from_relative(self: "Position", value: int) -> int:
        """transform a value relative to current string into one relative to source"""
        return value + self.offset

    # Each relative_* property reads/writes the corresponding absolute
    # attribute through the to_relative/from_relative conversions.
    relative_begin: property = property(
        lambda self: self.to_relative(self.begin),
        lambda self, value: setattr(self, "begin", self.from_relative(value)),
        doc="same as begin, but relative to start of current parsed string\n"
        "(begin is relative to start of file)"
    )
    relative_end: property = property(
        lambda self: self.to_relative(self.end),
        lambda self, value: setattr(self, "end", self.from_relative(value)),
        doc="same as end, but relative to start of current parsed string\n"
        "(end is relative to start of file)"
    )
    relative_cmd_begin: property = property(
        lambda self: self.to_relative(self.cmd_begin),
        lambda self, value: setattr(self, "cmd_begin", self.from_relative(value)),
        doc="same as cmd_begin, but relative to start of current parsed string\n"
        "(cmd_begin is relative to start of file)"
    )
    relative_cmd_end: property = property(
        lambda self: self.to_relative(self.cmd_end),
        lambda self, value: setattr(self, "cmd_end", self.from_relative(value)),
        doc="same as cmd_end, but relative to start of current parsed string\n"
        "(cmd_end is relative to start of file)"
    )
    relative_cmd_argbegin: property = property(
        lambda self: self.to_relative(self.cmd_argbegin),
        lambda self, value: setattr(self, "cmd_argbegin", self.from_relative(value)),
        doc="same as cmd_argbegin, but relative to start of current parsed string\n"
        "(cmd_argbegin is relative to start of file)"
    )
    relative_endblock_begin: property = property(
        lambda self: self.to_relative(self.endblock_begin),
        lambda self, value: setattr(self, "endblock_begin", self.from_relative(value)),
        doc="same as endblock_begin, but relative to start of current parsed string\n"
        "(endblock_begin is relative to start of file)"
    )
    relative_endblock_end: property = property(
        lambda self: self.to_relative(self.endblock_end),
        lambda self, value: setattr(self, "endblock_end", self.from_relative(value)),
        doc="same as endblock_end, but relative to start of current parsed string\n"
        "(endblock_end is relative to start of file)"
    )

    def copy(self:"Position") -> "Position":
        """creates an independent copy"""
        new = Position()
        new.offset = self.offset
        new.begin = self.begin
        new.end = self.end
        new.cmd_begin = self.cmd_begin
        new.cmd_end = self.cmd_end
        new.cmd_argbegin = self.cmd_argbegin
        new.endblock_begin = self.endblock_begin
        new.endblock_end = self.endblock_end
        return new
@enum.unique
class TokenMatch(enum.IntEnum):
    """Used to represent Open/Closed tokens"""
    OPEN = 0
    CLOSE = 1
def process_string(string: str) -> str:
    """Decode backslash escape sequences into the characters they denote.

    Example: process_string("\\\\n") -> "\\n"
    """
    sentinel = "\x00"
    # Protect literal backslash pairs first so they cannot combine with a
    # following character into a spurious escape sequence.
    decoded = string.replace("\\\\", sentinel)
    for escape, char in (
        ("\\n", "\n"),
        ("\\t", "\t"),
        ("\\r", "\r"),
        ('\\"', '"'),
        ("\\'", "'"),
    ):
        decoded = decoded.replace(escape, char)
    # Restore the protected backslashes.
    return decoded.replace(sentinel, "\\")
class ArgumentParserNoExit(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of exiting.

    Parsing failures surface as ``argparse.ArgumentError`` so callers can
    recover from malformed command strings.
    """

    def error(self, message):
        raise argparse.ArgumentError(None, message)
def get_identifier_name(string: str) -> Tuple[str, str, int]:
    """Extract the first identifier from ``string``.

    Returns (identifier, rest_of_string, index_of_rest), or ("", "", -1)
    when the string does not start with an identifier.
    """
    pattern = r"\s*({})({}.*$)".format(REGEX_IDENTIFIER, REGEX_IDENTIFIER_END)
    match = re.match(pattern, string, re.DOTALL)
    if match is None:
        return ("", "", -1)
    return match.group(1), match.group(2), match.start(2)
def is_integer(string: str) -> bool:
    """Report whether to_integer() can safely convert ``string``."""
    candidate = string.strip()
    return bool(re.match(REGEX_INTEGER, candidate))
def to_integer(string: str) -> int:
    """Parse ``string`` as an int, ignoring spaces and underscore separators."""
    digits = string.strip().replace(" ", "").replace("_", "")
    return int(digits)
def trim(docstring: str) -> str:
    """Normalize a docstring for display (PEP 257 style).

    Expands tabs, strips the common leading indentation of all lines after
    the first, and drops blank lines at both ends.
    """
    if not docstring:
        return ''
    lines = docstring.expandtabs().splitlines()
    # Smallest indentation among non-blank continuation lines; 1000 acts
    # as a "none found" sentinel, mirroring PEP 257's reference code.
    indent = min(
        (len(line) - len(line.lstrip()) for line in lines[1:] if line.strip()),
        default=1000)
    # The first line sits right after the opening quotes: strip it fully.
    trimmed = [lines[0].strip()]
    if indent < 1000:
        trimmed.extend(line[indent:].rstrip() for line in lines[1:])
    # Drop trailing then leading blank lines.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return '\n'.join(trimmed)
ace4fd69647e23d202dd68630f1ba6eae946a348 | 1,105 | py | Python | project/program/forms/presenter_form.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 7 | 2018-10-09T19:14:37.000Z | 2019-11-25T13:43:38.000Z | project/program/forms/presenter_form.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 16 | 2018-11-01T21:42:17.000Z | 2019-03-10T16:59:25.000Z | project/program/forms/presenter_form.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 5 | 2018-10-28T17:33:06.000Z | 2018-11-22T00:12:55.000Z | from django import forms
from django.utils.translation import ugettext_lazy as _
from parler.admin import TranslatableModelForm
from ..models import Activity, Presenter
# Activity type choices with a leading "No" option so admins can opt out
# of creating an activity for the presenter.
CREATE_ACTIVITY_CHOICES = (
    ('', _('No')), # Add No as option
) + Activity.TYPE_CHOICES
class PresenterModelForm(TranslatableModelForm):
    '''
    Custom admin form for Presenters.
    Adds the option to create an empty unpublished activity of a given type.
    '''
    # Optional activity type; '' (the "No" choice) means skip creation.
    create_empty_activity = forms.ChoiceField(
        required=False,
        label=_('Assign empty activity'),
        help_text=_('This will create an empty and unpublished ' \
                    'activity of the given type with the same ' \
                    'photo as the presenter\'s'),
        choices=CREATE_ACTIVITY_CHOICES,
    )

    class Meta:
        model = Presenter
        fields = (
            'first',
            'last',
            'occupation',
            'short_bio',
            'quote',
            'image',
            'image_shadows',
            'link',
            'create_empty_activity',
            'is_published',
        )
| 26.309524 | 71 | 0.58914 |
ace4fe1adcf87182b2b2aa10508f7fcc0dbfa568 | 4,308 | py | Python | python_modules/dagster/dagster_tests/core_tests/storage_tests/test_event_log.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/storage_tests/test_event_log.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/storage_tests/test_event_log.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | null | null | null | import time
import pytest
from dagster import seven
from dagster.core.events import DagsterEvent, DagsterEventType, EngineEventData
from dagster.core.events.log import DagsterEventRecord
from dagster.core.storage.event_log import (
CREATE_EVENT_LOG_SQL,
INSERT_EVENT_SQL,
EventLogInvalidForRun,
InMemoryEventLogStorage,
SqliteEventLogStorage,
)
def test_in_memory_event_log_storage_run_not_found():
    """An unknown run id yields an empty log list rather than raising."""
    storage = InMemoryEventLogStorage()
    assert storage.get_logs_for_run('bar') == []
def test_in_memory_event_log_storage_store_events_and_wipe():
    """Storing an event makes it retrievable; wipe() clears all runs."""
    storage = InMemoryEventLogStorage()
    assert len(storage.get_logs_for_run('foo')) == 0
    # Minimal engine-event record tied to run id 'foo'.
    storage.store_event(
        DagsterEventRecord(
            None,
            'Message2',
            'debug',
            '',
            'foo',
            time.time(),
            dagster_event=DagsterEvent(
                DagsterEventType.ENGINE_EVENT.value,
                'nonce',
                event_specific_data=EngineEventData.in_process(999),
            ),
        )
    )
    assert len(storage.get_logs_for_run('foo')) == 1
    storage.wipe()
    assert len(storage.get_logs_for_run('foo')) == 0
def test_filesystem_event_log_storage_run_not_found():
    """SQLite-backed storage also returns [] for an unknown run id."""
    with seven.TemporaryDirectory() as tmpdir_path:
        storage = SqliteEventLogStorage(tmpdir_path)
        assert storage.get_logs_for_run('bar') == []
def test_filesystem_event_log_storage_store_events_and_wipe():
    """SQLite-backed store/retrieve round trip, then wipe() clears it."""
    with seven.TemporaryDirectory() as tmpdir_path:
        storage = SqliteEventLogStorage(tmpdir_path)
        assert len(storage.get_logs_for_run('foo')) == 0
        # Minimal engine-event record tied to run id 'foo'.
        storage.store_event(
            DagsterEventRecord(
                None,
                'Message2',
                'debug',
                '',
                'foo',
                time.time(),
                dagster_event=DagsterEvent(
                    DagsterEventType.ENGINE_EVENT.value,
                    'nonce',
                    event_specific_data=EngineEventData.in_process(999),
                ),
            )
        )
        assert len(storage.get_logs_for_run('foo')) == 1
        storage.wipe()
        assert len(storage.get_logs_for_run('foo')) == 0
def test_event_log_delete():
    """delete_events() removes all stored events for the given run."""
    with seven.TemporaryDirectory() as tmpdir_path:
        storage = SqliteEventLogStorage(tmpdir_path)
        assert len(storage.get_logs_for_run('foo')) == 0
        # Minimal engine-event record tied to run id 'foo'.
        storage.store_event(
            DagsterEventRecord(
                None,
                'Message2',
                'debug',
                '',
                'foo',
                time.time(),
                dagster_event=DagsterEvent(
                    DagsterEventType.ENGINE_EVENT.value,
                    'nonce',
                    event_specific_data=EngineEventData.in_process(999),
                ),
            )
        )
        assert len(storage.get_logs_for_run('foo')) == 1
        storage.delete_events('foo')
        assert len(storage.get_logs_for_run('foo')) == 0
def test_filesystem_event_log_storage_run_corrupted():
    """Non-SQLite garbage in the run's file raises EventLogInvalidForRun."""
    with seven.TemporaryDirectory() as tmpdir_path:
        storage = SqliteEventLogStorage(tmpdir_path)
        # Overwrite the run's backing file with data sqlite cannot parse.
        with open(storage.filepath_for_run_id('foo'), 'w') as fd:
            fd.write('some nonsense')
        with pytest.raises(EventLogInvalidForRun) as exc:
            storage.get_logs_for_run('foo')
        assert exc.value.run_id == 'foo'
def test_filesystem_event_log_storage_run_corrupted_bad_data():
    """Rows that do not deserialize to event records raise EventLogInvalidForRun."""
    with seven.TemporaryDirectory() as tmpdir_path:
        storage = SqliteEventLogStorage(tmpdir_path)
        # Malformed payload: '{bar}' is not a valid serialized event.
        with storage._connect('foo') as conn:  # pylint: disable=protected-access
            conn.cursor().execute(CREATE_EVENT_LOG_SQL)
            conn.cursor().execute(INSERT_EVENT_SQL, ('{bar}', None, None))
        with pytest.raises(EventLogInvalidForRun) as exc:
            storage.get_logs_for_run('foo')
        assert exc.value.run_id == 'foo'
        # Wrong payload type (a plain number) must fail the same way.
        with storage._connect('bar') as conn:  # pylint: disable=protected-access
            conn.cursor().execute(CREATE_EVENT_LOG_SQL)
            conn.cursor().execute(INSERT_EVENT_SQL, ('3', None, None))
        with pytest.raises(EventLogInvalidForRun) as exc:
            storage.get_logs_for_run('bar')
        assert exc.value.run_id == 'bar'
| 34.464 | 81 | 0.618152 |
ace4fe9861781b0074fb60a62313205d7c87af3b | 18,687 | py | Python | lib/model/rpn/rpn_distil_cifrcn.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | 1 | 2022-03-11T03:08:04.000Z | 2022-03-11T03:08:04.000Z | lib/model/rpn/rpn_distil_cifrcn.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | 3 | 2022-03-07T03:04:34.000Z | 2022-03-25T12:28:09.000Z | lib/model/rpn/rpn_distil_cifrcn.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | null | null | null | from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from model.utils.config import cfg
from .proposal_layer import _ProposalLayer
from .anchor_target_layer import _AnchorTargetLayer
from model.utils.net_utils import _smooth_l1_loss
import numpy as np
import math
import pdb
import time
#from model.faster_rcnn.discriminator import Discriminator
class _RPN_distil(nn.Module):
    """ region proposal network """
    # RPN variant for incremental detection: besides the standard objectness /
    # box-regression losses it can emit a feature-distillation loss computed
    # against a frozen "original" detector passed to forward().

    def __init__(self, din):
        # din: channel depth of the incoming backbone feature map (e.g. 512).
        super(_RPN_distil, self).__init__()

        self.din = din  # get depth of input feature map, e.g., 512
        self.anchor_scales = cfg.ANCHOR_SCALES
        self.anchor_ratios = cfg.ANCHOR_RATIOS
        self.feat_stride = cfg.FEAT_STRIDE[0]

        # define the convrelu layers processing input feature map
        self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)

        # define bg/fg classifcation score layer
        self.nc_score_out = len(self.anchor_scales) * len(self.anchor_ratios) * 2  # 2(bg/fg) * 9 (anchors)
        self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # define anchor box offset prediction layer
        self.nc_bbox_out = len(self.anchor_scales) * len(self.anchor_ratios) * 4  # 4(coords) * 9 (anchors)
        self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)

        # define proposal layer
        self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)

        # define anchor target layer
        self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)

        # Loss placeholders, overwritten on every forward pass.
        # NOTE(review): .cuda() assumes a CUDA device is available — confirm.
        self.rpn_loss_cls = torch.Tensor([0]).cuda()
        self.rpn_loss_box = torch.Tensor([0]).cuda()

    @staticmethod
    def reshape(x, d):
        # View (N, C, H, W) as (N, d, C*H/d, W) so a softmax over dim=1 acts on
        # d channels (d=2 for bg/fg scoring, then back to nc_score_out).
        input_shape = x.size()
        x = x.view(
            input_shape[0],
            int(d),
            int(float(input_shape[1] * input_shape[2]) / float(d)),
            input_shape[3]
        )
        return x

    def forward(self, base_feat, im_info, gt_boxes, num_boxes, fasterRCNN_org=None):
        """Run the RPN over ``base_feat``.

        Returns ``(rois, rpn_loss_cls, rpn_loss_box)``; when training AND a
        frozen teacher ``fasterRCNN_org`` is given, returns a 4-tuple whose
        last element is the MSE distillation loss between the teacher's and
        this RPN's post-ReLU conv features.
        """
        batch_size = base_feat.size(0)

        # return feature map after convrelu layer
        rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
        # get rpn classification score
        rpn_cls_score = self.RPN_cls_score(rpn_conv1)

        # Softmax over the 2 (bg/fg) channels per anchor, then restore layout.
        rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
        rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)
        rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)

        # get rpn offsets to the anchor boxes
        rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)
        '''
        #################### cat old and new rpn pred #############################
        rpn_cls_prob=torch.cat((rpn_cls_prob_ori,rpn_cls_prob),dim=1)
        rpn_bbox_pred = torch.cat((rpn_bbox_pred_ori, rpn_bbox_pred), dim=1)
        rpn_cls_score = torch.cat((rpn_cls_score_ori,rpn_cls_score),dim=1)
        rpn_cls_score_reshape = torch.cat((rpn_cls_score_reshape_ori, rpn_cls_score_reshape), dim=1)
        ###########################################################################
        '''
        # proposal layer
        cfg_key = 'TRAIN' if self.training else 'TEST'

        rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data,
                                  im_info, cfg_key))

        # Reset per-pass loss accumulators (stay zero in eval mode).
        self.rpn_loss_cls = torch.Tensor([0]).cuda()
        self.rpn_loss_box = torch.Tensor([0]).cuda()

        # generating training labels and build the rpn loss
        if self.training:
            assert gt_boxes is not None

            rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))

            # compute classification loss
            rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
            rpn_label = rpn_data[0].view(batch_size, -1)

            # Keep only anchors labeled 0/1; label -1 means "ignore".
            rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))
            rpn_cls_score = torch.index_select(rpn_cls_score.view(-1, 2), 0, rpn_keep)
            rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)
            rpn_label = Variable(rpn_label.long())
            self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
            fg_cnt = torch.sum(rpn_label.data.ne(0))

            rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]

            # compute bbox regression loss
            rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
            rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
            rpn_bbox_targets = Variable(rpn_bbox_targets)

            self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                                rpn_bbox_outside_weights, sigma=3, dim=[1, 2, 3])

            if fasterRCNN_org:
                # Teacher forward pass on the SAME base features; only its RPN
                # conv output is used for distillation below.
                ################# ori fasterrcnn #####################################################
                rpn_conv1_ori = F.relu(fasterRCNN_org.RCNN_rpn.RPN_Conv(base_feat), inplace=True)
                '''
                rpn_cls_score_ori = fasterRCNN_org.RCNN_rpn.RPN_cls_score(rpn_conv1_ori)

                rpn_cls_score_reshape_ori = fasterRCNN_org.RCNN_rpn.reshape(rpn_cls_score_ori, 2)
                rpn_cls_prob_reshape_ori = F.softmax(rpn_cls_score_reshape_ori, 1)
                rpn_cls_prob_ori = fasterRCNN_org.RCNN_rpn.reshape(rpn_cls_prob_reshape_ori, fasterRCNN_org.RCNN_rpn.nc_score_out)

                rpn_bbox_pred_ori = fasterRCNN_org.RCNN_rpn.RPN_bbox_pred(rpn_conv1_ori)

                cfg_key_ori = 'TRAIN'

                rois_ori = fasterRCNN_org.RCNN_rpn.RPN_proposal((rpn_cls_prob_ori.data, rpn_bbox_pred_ori.data,
                                                                 im_info, cfg_key_ori))
                ############################################################################################
                ############################## distil rpn cls and bbox reg loss ##########################################
                rpn_data_ori = fasterRCNN_org.RCNN_rpn.RPN_anchor_target((rpn_cls_score_ori.data, gt_boxes, im_info, num_boxes))
                rpn_cls_score_ori = rpn_cls_score_reshape_ori.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
                rpn_label_ori = rpn_data_ori[0].view(batch_size, -1)
                rpn_keep_ori = Variable(rpn_label_ori.view(-1).ne(-1).nonzero().view(-1))
                rpn_cls_score_ori = torch.index_select(rpn_cls_score_ori.view(-1, 2), 0, rpn_keep_ori)
                ################### distillation loss #################
                l1_loss_fn = torch.nn.MSELoss(reduce=True, size_average=True) ###L1Loss
                # rcnn_cls_distil_loss=l1_loss_fn(cls_score_remove_add_cls,cls_score_org_rcnn) ### L2 loss
                ##### ce loss
                #cls_prob_org_rcnn = F.softmax(cls_score_org_rcnn, 1)
                rpn_cls_pred_ori = rpn_cls_score_ori.argmax(dim=1, keepdim=True).view(-1)
                self.rpn_cls_distil_loss = F.cross_entropy(rpn_cls_score, rpn_cls_pred_ori) ### cross_entropy
                #cls_preb_org_rcnn = F.softmax(cls_score_org_rcnn / T, 1)
                #rcnn_cls_distil_loss = alpha * SoftCrossEntropy(cls_score_remove_add_cls / T, cls_preb_org_rcnn,
                #                                                 reduction='average')
                self.rpn_bbox_distil_loss = l1_loss_fn(rpn_bbox_pred_ori, rpn_bbox_pred) ### l1 loss
                #######################################################################################
                '''
                # ################################# distil rpn_conv loss ################################
                # rpn_conv1_fea = rpn_conv1.squeeze(dim=0)  # .mul(base_feat.squeeze(dim=0))
                # rpn_conv1_ori_fea = rpn_conv1_ori.squeeze(dim=0)  # .mul(base_feat_org.squeeze(dim=0))
                # rpn_conv1_fea_att_sum_c = torch.mean(rpn_conv1_fea, dim=0)  # /base_feat.shape[1]
                # rpn_conv1_ori_fea_att_sum_c = torch.mean(rpn_conv1_ori_fea, dim=0)  # /base_feat_org.shape[1]
                # rpn_conv1_norm = torch.norm(rpn_conv1_fea_att_sum_c, p=2, keepdim=True)
                # rpn_conv1_org_norm = torch.norm(rpn_conv1_ori_fea_att_sum_c, p=2, keepdim=True)
                # # base_fea_norm = base_fea_att_sum_c/torch.norm(base_fea_att_sum_c, p=2, keepdim=True)
                # # base_fea_org_norm = base_fea_org_att_sum_c/torch.norm(base_fea_org_att_sum_c, p=2, keepdim=True)
                # NOTE(review): reduce=/size_average= are deprecated MSELoss args
                # (equivalent to reduction='mean' on modern torch).
                l1_loss_fn = torch.nn.MSELoss(reduce=True, size_average=True)
                self.rpn_conv1_distil_loss = l1_loss_fn(rpn_conv1, rpn_conv1_ori)
                #######################################################################################
                return rois, self.rpn_loss_cls, self.rpn_loss_box, self.rpn_conv1_distil_loss  # ,self.rpn_cls_distil_loss,self.rpn_bbox_distil_loss

        return rois, self.rpn_loss_cls, self.rpn_loss_box
class _RPN_distil_residual(nn.Module):
    """ region proposal network """
    # RPN variant whose distillation term compares the RESIDUAL between the
    # incremental model's and the original model's RPN conv features against a
    # dedicated "residual" network's features (see forward()).

    def __init__(self, din):
        # din: channel depth of the incoming backbone feature map (e.g. 512).
        super(_RPN_distil_residual, self).__init__()

        self.din = din  # get depth of input feature map, e.g., 512
        self.anchor_scales = cfg.ANCHOR_SCALES
        self.anchor_ratios = cfg.ANCHOR_RATIOS
        self.feat_stride = cfg.FEAT_STRIDE[0]

        # define the convrelu layers processing input feature map
        self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)

        # define bg/fg classifcation score layer
        self.nc_score_out = len(self.anchor_scales) * len(self.anchor_ratios) * 2  # 2(bg/fg) * 9 (anchors)
        self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # define anchor box offset prediction layer
        self.nc_bbox_out = len(self.anchor_scales) * len(self.anchor_ratios) * 4  # 4(coords) * 9 (anchors)
        self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)

        # define proposal layer
        self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)

        # define anchor target layer
        self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)

        # Plain-int loss placeholders (unlike _RPN_distil's CUDA tensors);
        # replaced with tensors during a training forward pass.
        self.rpn_loss_cls = 0
        self.rpn_loss_box = 0

    @staticmethod
    def reshape(x, d):
        # View (N, C, H, W) as (N, d, C*H/d, W) so a softmax over dim=1 acts on
        # d channels (d=2 for bg/fg scoring, then back to nc_score_out).
        input_shape = x.size()
        x = x.view(
            input_shape[0],
            int(d),
            int(float(input_shape[1] * input_shape[2]) / float(d)),
            input_shape[3]
        )
        return x

    def forward(self, base_feat, im_info, gt_boxes, num_boxes, fasterRCNN_org=None, fasterRCNN_residual=None, base_feat_org=None, base_feat_residual=None, base_feat_inc=None):
        """Run the RPN over ``base_feat``.

        Returns ``(rois, rpn_loss_cls, rpn_loss_box)``; when training AND both
        ``fasterRCNN_org`` and ``fasterRCNN_residual`` are given (together with
        their respective base features), returns a 4-tuple whose last element
        is an L1 loss between the norm of the (incremental - original) RPN
        feature residual and the norm of the residual network's RPN features.
        """
        batch_size = base_feat.size(0)

        # return feature map after convrelu layer
        rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
        # get rpn classification score
        rpn_cls_score = self.RPN_cls_score(rpn_conv1)

        # Softmax over the 2 (bg/fg) channels per anchor, then restore layout.
        rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
        rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)
        rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)

        # get rpn offsets to the anchor boxes
        rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)
        '''
        #################### cat old and new rpn pred #############################
        rpn_cls_prob=torch.cat((rpn_cls_prob_ori,rpn_cls_prob),dim=1)
        rpn_bbox_pred = torch.cat((rpn_bbox_pred_ori, rpn_bbox_pred), dim=1)
        rpn_cls_score = torch.cat((rpn_cls_score_ori,rpn_cls_score),dim=1)
        rpn_cls_score_reshape = torch.cat((rpn_cls_score_reshape_ori, rpn_cls_score_reshape), dim=1)
        ###########################################################################
        '''
        # proposal layer
        cfg_key = 'TRAIN' if self.training else 'TEST'

        rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data,
                                  im_info, cfg_key))

        # Reset per-pass loss accumulators (stay zero in eval mode).
        self.rpn_loss_cls = 0
        self.rpn_loss_box = 0

        # generating training labels and build the rpn loss
        if self.training:
            assert gt_boxes is not None

            rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))

            # compute classification loss
            rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
            rpn_label = rpn_data[0].view(batch_size, -1)

            # Keep only anchors labeled 0/1; label -1 means "ignore".
            rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))
            rpn_cls_score = torch.index_select(rpn_cls_score.view(-1, 2), 0, rpn_keep)
            rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)
            rpn_label = Variable(rpn_label.long())
            self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
            fg_cnt = torch.sum(rpn_label.data.ne(0))

            rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]

            # compute bbox regression loss
            rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
            rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
            rpn_bbox_targets = Variable(rpn_bbox_targets)

            self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                                rpn_bbox_outside_weights, sigma=3, dim=[1, 2, 3])

            if fasterRCNN_org and fasterRCNN_residual:
                # Teacher / residual / incremental RPN conv features, each from
                # its own base feature map.
                ################# ori fasterrcnn #####################################################
                rpn_conv1_ori = F.relu(fasterRCNN_org.RCNN_rpn.RPN_Conv(base_feat_org), inplace=True)
                rpn_conv1_residual = F.relu(fasterRCNN_residual.RCNN_rpn.RPN_Conv(base_feat_residual), inplace=True)
                rpn_conv1_inc = F.relu(self.RPN_Conv(base_feat_inc), inplace=True)
                '''
                rpn_cls_score_ori = fasterRCNN_org.RCNN_rpn.RPN_cls_score(rpn_conv1_ori)

                rpn_cls_score_reshape_ori = fasterRCNN_org.RCNN_rpn.reshape(rpn_cls_score_ori, 2)
                rpn_cls_prob_reshape_ori = F.softmax(rpn_cls_score_reshape_ori, 1)
                rpn_cls_prob_ori = fasterRCNN_org.RCNN_rpn.reshape(rpn_cls_prob_reshape_ori, fasterRCNN_org.RCNN_rpn.nc_score_out)

                rpn_bbox_pred_ori = fasterRCNN_org.RCNN_rpn.RPN_bbox_pred(rpn_conv1_ori)

                cfg_key_ori = 'TRAIN'

                rois_ori = fasterRCNN_org.RCNN_rpn.RPN_proposal((rpn_cls_prob_ori.data, rpn_bbox_pred_ori.data,
                                                                 im_info, cfg_key_ori))
                ############################################################################################
                '''
                '''
                ############################## distil rpn cls and bbox reg loss ##########################################
                rpn_data_ori = fasterRCNN_org.RCNN_rpn.RPN_anchor_target((rpn_cls_score_ori.data, gt_boxes, im_info, num_boxes))
                rpn_cls_score_ori = rpn_cls_score_reshape_ori.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
                rpn_label_ori = rpn_data_ori[0].view(batch_size, -1)
                rpn_keep_ori = Variable(rpn_label_ori.view(-1).ne(-1).nonzero().view(-1))
                rpn_cls_score_ori = torch.index_select(rpn_cls_score_ori.view(-1, 2), 0, rpn_keep_ori)
                ################### distillation loss #################
                l1_loss_fn = torch.nn.MSELoss(reduce=True, size_average=True) ###L1Loss
                # rcnn_cls_distil_loss=l1_loss_fn(cls_score_remove_add_cls,cls_score_org_rcnn) ### L2 loss
                ##### ce loss
                #cls_prob_org_rcnn = F.softmax(cls_score_org_rcnn, 1)
                rpn_cls_pred_ori = rpn_cls_score_ori.argmax(dim=1, keepdim=True).view(-1)
                self.rpn_cls_distil_loss = F.cross_entropy(rpn_cls_score, rpn_cls_pred_ori) ### cross_entropy
                #cls_preb_org_rcnn = F.softmax(cls_score_org_rcnn / T, 1)
                #rcnn_cls_distil_loss = alpha * SoftCrossEntropy(cls_score_remove_add_cls / T, cls_preb_org_rcnn,
                #                                                 reduction='average')
                self.rpn_bbox_distil_loss = l1_loss_fn(rpn_bbox_pred_ori, rpn_bbox_pred) ### l1 loss
                #######################################################################################
                '''
                ################################# distil rpn_conv loss ################################
                rpn_conv1_fea = rpn_conv1_inc.squeeze(dim=0)  # rpn_conv1 # .mul(base_feat.squeeze(dim=0))
                rpn_conv1_ori_fea = rpn_conv1_ori.squeeze(dim=0)  # .mul(base_feat_org.squeeze(dim=0))
                # Residual of incremental vs. original features.
                rpn_conv1_res_fea = rpn_conv1_fea - rpn_conv1_ori_fea
                rpn_conv1_residual_fea = rpn_conv1_residual.squeeze(dim=0)  # .mul(base_feat_org.squeeze(dim=0))
                # Channel-wise mean -> spatial attention maps.
                rpn_conv1_res_fea_att_sum_c = torch.mean(rpn_conv1_res_fea, dim=0)  # /base_feat_org.shape[1]
                rpn_conv1_residual_fea_att_sum_c = torch.mean(rpn_conv1_residual_fea, dim=0)  # /base_feat_org.shape[1]
                # rpn_conv1_norm = torch.norm(rpn_conv1_fea_att_sum_c, p=2, keepdim=True)
                # rpn_conv1_org_norm = torch.norm(rpn_conv1_ori_fea_att_sum_c, p=2, keepdim=True)
                # L2 norms of the attention maps; the loss matches scalar norms,
                # not the maps themselves.
                rpn_conv1_res_norm = torch.norm(rpn_conv1_res_fea_att_sum_c, p=2, keepdim=True)
                rpn_conv1_residual_norm = torch.norm(rpn_conv1_residual_fea_att_sum_c, p=2, keepdim=True)
                # base_fea_norm = base_fea_att_sum_c/torch.norm(base_fea_att_sum_c, p=2, keepdim=True)
                # base_fea_org_norm = base_fea_org_att_sum_c/torch.norm(base_fea_org_att_sum_c, p=2, keepdim=True)
                # NOTE(review): reduce=/size_average= are deprecated L1Loss args
                # (equivalent to reduction='mean' on modern torch).
                l1_loss_fn = torch.nn.L1Loss(reduce=True, size_average=True)
                self.rpn_conv1_distil_loss = l1_loss_fn(rpn_conv1_res_norm, rpn_conv1_residual_norm)
                #######################################################################################
                return rois, self.rpn_loss_cls, self.rpn_loss_box, self.rpn_conv1_distil_loss  # ,self.rpn_cls_distil_loss,self.rpn_bbox_distil_loss

        return rois, self.rpn_loss_cls, self.rpn_loss_box
ace4ff298e8faf29c723a3cf28356a2979159920 | 5,998 | py | Python | datasette/views/index.py | ghing/datasette | 5a184a5d211d3226e0417ee5cf8476cd887cd35e | [
"Apache-2.0"
] | 2 | 2020-01-05T20:59:16.000Z | 2020-08-27T06:25:43.000Z | datasette/views/index.py | jsfenfen/datasette | f3a087a578ae2c418103ad144b08c2fc8ad9c31d | [
"Apache-2.0"
] | null | null | null | datasette/views/index.py | jsfenfen/datasette | f3a087a578ae2c418103ad144b08c2fc8ad9c31d | [
"Apache-2.0"
] | null | null | null | import hashlib
import json
from datasette.utils import check_visibility, CustomJSONEncoder
from datasette.utils.asgi import Response, Forbidden
from datasette.version import __version__
from .base import BaseView
# Truncate table list on homepage at:
TRUNCATE_AT = 5

# Only attempt counts if database less than this size in bytes:
COUNT_DB_SIZE_LIMIT = 100 * 1024 * 1024  # 100 MB
class IndexView(BaseView):
    """Homepage view: summarizes every visible database, its tables and views.

    Serves JSON when a format suffix is requested, otherwise renders
    index.html.
    """

    name = "index"

    def __init__(self, datasette):
        # datasette: the application object; exposes databases, permissions,
        # metadata and CORS settings used below.
        self.ds = datasette

    async def get(self, request, as_format):
        # The whole page requires view-instance permission.
        await self.check_permission(request, "view-instance")
        databases = []
        for name, db in self.ds.databases.items():
            # Skip databases the actor may not see; remember whether the
            # database itself is private (shown as a lock in the UI).
            visible, database_private = await check_visibility(
                self.ds,
                request.actor,
                "view-database",
                name,
            )
            if not visible:
                continue
            table_names = await db.table_names()
            hidden_table_names = set(await db.hidden_table_names())

            # Collect SQL views the actor may see.
            views = []
            for view_name in await db.view_names():
                visible, private = await check_visibility(
                    self.ds,
                    request.actor,
                    "view-table",
                    (name, view_name),
                )
                if visible:
                    views.append({"name": view_name, "private": private})

            # Perform counts only for immutable or DBS with <= COUNT_TABLE_LIMIT tables
            table_counts = {}
            if not db.is_mutable or db.size < COUNT_DB_SIZE_LIMIT:
                table_counts = await db.table_counts(10)
                # If any of these are None it means at least one timed out - ignore them all
                if any(v is None for v in table_counts.values()):
                    table_counts = {}

            # Per-table summary entries, restricted by view-table permission.
            tables = {}
            for table in table_names:
                visible, private = await check_visibility(
                    self.ds,
                    request.actor,
                    "view-table",
                    (name, table),
                )
                if not visible:
                    continue
                table_columns = await db.table_columns(table)
                tables[table] = {
                    "name": table,
                    "columns": table_columns,
                    "primary_keys": await db.primary_keys(table),
                    "count": table_counts.get(table),
                    "hidden": table in hidden_table_names,
                    "fts_table": await db.fts_table(table),
                    "num_relationships_for_sorting": 0,
                    "private": private,
                }

            if request.args.get("_sort") == "relationships" or not table_counts:
                # We will be sorting by number of relationships, so populate that field
                all_foreign_keys = await db.get_all_foreign_keys()
                for table, foreign_keys in all_foreign_keys.items():
                    count = len(foreign_keys["incoming"] + foreign_keys["outgoing"])
                    tables[table]["num_relationships_for_sorting"] = count

            hidden_tables = [t for t in tables.values() if t["hidden"]]
            visible_tables = [t for t in tables.values() if not t["hidden"]]

            # Most-connected / biggest / alphabetically-last tables first,
            # truncated to the homepage limit.
            tables_and_views_truncated = list(
                sorted(
                    (t for t in tables.values() if t not in hidden_tables),
                    key=lambda t: (
                        t["num_relationships_for_sorting"],
                        t["count"] or 0,
                        t["name"],
                    ),
                    reverse=True,
                )[:TRUNCATE_AT]
            )

            # Only add views if this is less than TRUNCATE_AT
            if len(tables_and_views_truncated) < TRUNCATE_AT:
                num_views_to_add = TRUNCATE_AT - len(tables_and_views_truncated)
                for view in views[:num_views_to_add]:
                    tables_and_views_truncated.append(view)

            databases.append(
                {
                    "name": name,
                    "hash": db.hash,
                    # Stable per-database color; fall back to md5 of the name
                    # when the database has no content hash.
                    "color": db.hash[:6]
                    if db.hash
                    else hashlib.md5(name.encode("utf8")).hexdigest()[:6],
                    "path": self.database_url(name),
                    "tables_and_views_truncated": tables_and_views_truncated,
                    "tables_and_views_more": (len(visible_tables) + len(views))
                    > TRUNCATE_AT,
                    "tables_count": len(visible_tables),
                    "table_rows_sum": sum((t["count"] or 0) for t in visible_tables),
                    "show_table_row_counts": bool(table_counts),
                    "hidden_table_rows_sum": sum(
                        t["count"] for t in hidden_tables if t["count"] is not None
                    ),
                    "hidden_tables_count": len(hidden_tables),
                    "views_count": len(views),
                    "private": database_private,
                }
            )

        if as_format:
            # JSON rendering, keyed by database name.
            headers = {}
            if self.ds.cors:
                headers["Access-Control-Allow-Origin"] = "*"
            return Response(
                json.dumps({db["name"]: db for db in databases}, cls=CustomJSONEncoder),
                content_type="application/json; charset=utf-8",
                headers=headers,
            )
        else:
            return await self.render(
                ["index.html"],
                request=request,
                context={
                    "databases": databases,
                    "metadata": self.ds.metadata(),
                    "datasette_version": __version__,
                    # True when anonymous users could NOT see this page.
                    "private": not await self.ds.permission_allowed(
                        None, "view-instance", default=True
                    ),
                },
            )
| 39.202614 | 92 | 0.502501 |
ace4ffbe257f81700c038a8af0575be538b556b1 | 6,050 | py | Python | bc/standardpages/migrations/0012_auto_20200331_1646.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-02-27T07:27:17.000Z | 2021-02-27T07:27:17.000Z | bc/standardpages/migrations/0012_auto_20200331_1646.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | null | null | null | bc/standardpages/migrations/0012_auto_20200331_1646.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-06-09T15:56:54.000Z | 2021-06-09T15:56:54.000Z | # Generated by Django 2.2.10 on 2020-03-31 15:46
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
("standardpages", "0011_merge_20200331_1633"),
]
operations = [
migrations.AlterField(
model_name="informationpage",
name="body",
field=wagtail.core.fields.StreamField(
[
(
"heading",
wagtail.core.blocks.CharBlock(
classname="full title",
icon="title",
template="patterns/molecules/streamfield/blocks/heading_block.html",
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
("image", wagtail.images.blocks.ImageChooserBlock()),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
]
),
),
("embed", wagtail.embeds.blocks.EmbedBlock()),
(
"local_area_links",
wagtail.core.blocks.StructBlock(
[
(
"introduction",
wagtail.core.blocks.RichTextBlock(
default="<p>Select your local area for information:</p>",
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
),
),
(
"aylesbury_vale_url",
wagtail.core.blocks.URLBlock(
label="Aylesbury Vale URL", required=False
),
),
(
"chiltern_url",
wagtail.core.blocks.URLBlock(
label="Chiltern URL", required=False
),
),
(
"south_bucks_url",
wagtail.core.blocks.URLBlock(
label="South Bucks URL", required=False
),
),
(
"wycombe_url",
wagtail.core.blocks.URLBlock(
label="Wycombe URL", required=False
),
),
(
"postscript",
wagtail.core.blocks.RichTextBlock(
default='<p>Or <a href="https://www.gov.uk/find-local-council">find your area based on your postcode</a>.</p>',
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
required=False,
),
),
]
),
),
("table", wagtail.contrib.table_block.blocks.TableBlock()),
(
"button",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(classname="title"),
),
(
"link_url",
wagtail.core.blocks.URLBlock(required=False),
),
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
required=False
),
),
]
),
),
]
),
),
]
| 42.013889 | 151 | 0.258017 |
ace4ffc5345fe1a694f5ec0ee5276bdcfd8bc6ba | 4,335 | py | Python | .history/predict_20210815200535.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | .history/predict_20210815200535.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | .history/predict_20210815200535.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | #----------------------------------------------------#
# 对视频中的predict.py进行了修改,
# 将单张图片预测、摄像头检测和FPS测试功能
# 整合到了一个py文件中,通过指定mode进行模式的修改。
#----------------------------------------------------#
import time
import cv2
import numpy as np
from PIL import Image
#from moviepy.editor import *
from moviepy.editor import AudioFileClip
import subprocess
from yolo import YOLO
if __name__ == "__main__":
    # --- One-off mux step -----------------------------------------------
    # Extract the audio track from the already-detected video, then mux it
    # back with ffmpeg (stream copy, no re-encode). exit() below means the
    # detection code further down is currently disabled.
    video_path = "./video/wzry_output.mp4"
    video = AudioFileClip(video_path)  # path of the source video
    # Fix: was "./viceo/wzry.mp3" (typo) while the audio was written to
    # "./video/wzry.mp3", so the ffmpeg input file never existed.
    music_path = "./video/wzry.mp3"
    video.write_audiofile(music_path)
    cmd = f'ffmpeg -i {video_path} -i {music_path} -codec copy ./video/out_music.mp4'
    subprocess.call(cmd, shell=True)  # run the command as if typed in a shell
    exit()

    yolo = YOLO()
    # -------------------------------------------------------------------------
    # mode selects what to run:
    #   'predict' - single-image prediction
    #   'video'   - video file / webcam detection
    #   'fps'     - FPS benchmark
    # -------------------------------------------------------------------------
    mode = "video"
    # -------------------------------------------------------------------------
    # video_path: input video; 0 means the webcam
    # video_save_path: output path; "" means do not save
    # video_fps: fps of the saved video
    # These three are only used when mode == 'video'.
    # When saving, let the video finish (or press Esc) so the writer is
    # released and the output file is finalized correctly.
    # -------------------------------------------------------------------------
    video_path = './video/wzry.mp4'
    video_save_path = "./video/wzry_output.mp4"
    video_fps = 25.0

    if mode == "predict":
        # Notes:
        # 1. For batch prediction, walk a folder with os.listdir() and open each
        #    image with Image.open (see get_dr_txt.py for a full example).
        # 2. Save results with r_image.save("img.jpg").
        # 3. Box coordinates (top, left, bottom, right) are available inside
        #    yolo.detect_image's drawing section, where crops, extra overlay
        #    text, or per-class counting can also be added.
        while True:
            img = input('Input image filename:')
            try:
                image = Image.open(img)
            except:
                print('Open Error! Try again!')
                continue
            else:
                r_image = yolo.detect_image(image)
                r_image.show()

    elif mode == "video":
        capture = cv2.VideoCapture(video_path)
        out = None
        if video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)

        fps = 0.0
        while True:
            t1 = time.time()
            # Grab one frame; None signals end of stream.
            ref, frame = capture.read()
            if frame is None:
                break
            # BGR -> RGB, wrap as PIL Image for the detector.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = Image.fromarray(np.uint8(frame))
            # Run detection, then back to BGR for OpenCV display.
            frame = np.array(yolo.detect_image(frame))
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            # Running average of the instantaneous FPS.
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("fps= %.2f" % (fps))
            frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            cv2.imshow("video", frame)
            c = cv2.waitKey(1) & 0xff
            if out is not None:
                out.write(frame)
            if c == 27:  # Esc stops playback
                break

        capture.release()
        # Fix: out.release() was called unconditionally before, raising
        # NameError whenever video_save_path was "".
        if out is not None:
            out.release()
        cv2.destroyAllWindows()

    elif mode == "fps":
        test_interval = 100
        img = Image.open('img/street.jpg')
        tact_time = yolo.get_FPS(img, test_interval)
        print(str(tact_time) + ' seconds, ' + str(1 / tact_time) + 'FPS, @batch_size 1')

    else:
        raise AssertionError("Please specify the correct mode: 'predict', 'video' or 'fps'.")
| 36.125 | 111 | 0.5391 |
ace50147728f5441bd52779b0911c5cf4cc9e3a4 | 10,589 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/operations/_shared_galleries_operations.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/operations/_shared_galleries_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/operations/_shared_galleries_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional `cls` response hook accepted by operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer for request building; client-side validation is
# disabled because the request builders validate arguments themselves.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    location: str,
    *,
    shared_to: Optional[Union[str, "_models.SharedToValues"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists shared galleries for a location."""
    api_version = kwargs.pop('api_version', "2020-09-30")  # type: str
    accept = "application/json"

    # Fill the URL template with the escaped path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        location=_SERIALIZER.url("location", location, 'str'),
    )

    # Query string: api-version is mandatory, sharedTo only when provided.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if shared_to is not None:
        query['sharedTo'] = _SERIALIZER.query("shared_to", shared_to, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    location: str,
    gallery_unique_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that fetches one shared gallery by unique name."""
    api_version = kwargs.pop('api_version', "2020-09-30")  # type: str
    accept = "application/json"

    # Fill the URL template with the escaped path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        location=_SERIALIZER.url("location", location, 'str'),
        galleryUniqueName=_SERIALIZER.url("gallery_unique_name", gallery_unique_name, 'str'),
    )

    # Query string: only the mandatory api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
class SharedGalleriesOperations(object):
"""SharedGalleriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wire this operation group to the shared client pipeline, the client
        # configuration, and the model (de)serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        location: str,
        shared_to: Optional[Union[str, "_models.SharedToValues"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.SharedGalleryList"]:
        """List shared galleries by subscription id or tenant id.

        :param location: Resource location.
        :type location: str
        :param shared_to: The query parameter to decide what shared galleries to fetch when doing
         listing operations. Default value is None.
        :type shared_to: str or ~azure.mgmt.compute.v2020_09_30.models.SharedToValues
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SharedGalleryList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.SharedGalleryList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-09-30")  # type: str

        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SharedGalleryList"]
        # Statuses mapped to exceptions so ARM errors surface as typed errors.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # service-provided nextLink verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    api_version=api_version,
                    shared_to=shared_to,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    api_version=api_version,
                    shared_to=shared_to,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize a page; return (next_link, items) for ItemPaged.
            deserialized = self._deserialize("SharedGalleryList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Send one page request through the client pipeline.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries"}  # type: ignore
@distributed_trace
def get(
self,
location: str,
gallery_unique_name: str,
**kwargs: Any
) -> "_models.SharedGallery":
"""Get a shared gallery by subscription id or tenant id.
:param location: Resource location.
:type location: str
:param gallery_unique_name: The unique name of the Shared Gallery.
:type gallery_unique_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedGallery, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.SharedGallery
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedGallery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
location=location,
gallery_unique_name=gallery_unique_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedGallery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}"} # type: ignore
| 40.262357 | 190 | 0.656814 |
ace5016035104c9ff2b199e734ceeefc82a00091 | 6,263 | py | Python | code/track.py | swhan0329/VDL_speed_estimation | f1b162dae7bb3253417164e0e22185eea46028f2 | [
"MIT"
] | 6 | 2021-03-28T19:25:27.000Z | 2022-01-04T03:23:35.000Z | code/track.py | swhan0329/VDL_speed_estimation | f1b162dae7bb3253417164e0e22185eea46028f2 | [
"MIT"
] | 3 | 2021-03-28T19:27:52.000Z | 2021-04-24T02:16:25.000Z | code/track.py | swhan0329/VDL_speed_estimation | f1b162dae7bb3253417164e0e22185eea46028f2 | [
"MIT"
] | 1 | 2021-04-17T04:48:06.000Z | 2021-04-17T04:48:06.000Z | class TrackState:
    """
    Enumeration type for the single target track state. Newly created tracks are
    classified as `tentative` until enough evidence has been collected. Then,
    the track state is changed to `confirmed`. Tracks that are no longer alive
    are classified as `deleted` to mark them for removal from the set of active
    tracks.
    """
    Tentative = 1  # newly created, not yet backed by enough detections
    Confirmed = 2  # confirmed by >= n_init consecutive measurement updates
    Deleted = 3  # dead; scheduled for removal from the active track set
class Track:
    """
    A single target track with state space `(x, y, a, h)` and associated
    velocities, where `(x, y)` is the center of the bounding box, `a` is the
    aspect ratio and `h` is the height.
    Parameters
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.
    max_age : int
        The maximum number of consecutive misses before the track state is
        set to `Deleted`.
    feature : Optional[ndarray]
        Feature vector of the detection this track originates from. If not None,
        this feature is added to the `features` cache.
    Attributes
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    hits : int
        Total number of measurement updates.
    age : int
        Total number of frames since first occurance.
    time_since_update : int
        Total number of frames since last measurement update.
    state : TrackState
        The current track state.
    features : List[ndarray]
        A cache of features. On each measurement update, the associated feature
        vector is added to this list.
    ## Added by KETI to calculate vehicle speed using VDLs ##
    time_passing_vline_start : int
        Start time to calculate own speed
    time_passing_vline_end: int
        End time to calculate own speed
    class_name : string
        Name of the vehicle (Car, Truck, Bus)
    driving_lane : int
        A number of driving lane
    speed : int
        Speed of the vehicle
    speed_time: int
        Interval time between time_passing_vline_start and time_passing_vline_end
    speed_update: bool
        Flag that determines whether to calculate speed or not
    """
    def __init__(self, mean, covariance, track_id, n_init, max_age,
                 feature=None, class_name=None):
        self.mean = mean
        self.covariance = covariance
        self.track_id = track_id
        self.hits = 1  # measurement-update count; starts at 1 for the initiating detection
        self.age = 1
        self.time_since_update = 0  # consecutive misses; reset by update()
        self.state = TrackState.Tentative  # every new track starts unconfirmed
        self.features = []
        if feature is not None:
            self.features.append(feature)
        self._n_init = n_init
        self._max_age = max_age
        # KETI speed-estimation bookkeeping (documented in the class docstring)
        self.time_passing_vline_start = 0
        self.time_passing_vline_end = 0
        self.class_name = class_name  # initially the detector's numeric label; see get_class()
        self.driving_lane = 0
        self.speed = 0
        self.speed_time = 0
        self.speed_update = True
    def to_tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        Returns
        -------
        ndarray
            The bounding box.
        """
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]  # width = aspect ratio * height
        ret[:2] -= ret[2:] / 2  # shift box center to its top-left corner
        return ret
    def to_tlbr(self):
        """Get current position in bounding box format `(min x, miny, max x,
        max y)`.
        Returns
        -------
        ndarray
            The bounding box.
        """
        ret = self.to_tlwh()
        ret[2:] = ret[:2] + ret[2:]  # (w, h) -> (max x, max y)
        return ret
    def get_class(self):
        # Map the detector's numeric class label to its name. The result is
        # cached by overwriting self.class_name, so later calls are no-ops.
        if self.class_name == 0:
            self.class_name = 'bus'
        elif self.class_name == 1:
            self.class_name = 'car'
        elif self.class_name == 2:
            self.class_name = 'truck'
        return self.class_name
    def predict(self, kf):
        """Propagate the state distribution to the current time step using a
        Kalman filter prediction step.
        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.
        """
        self.mean, self.covariance = kf.predict(self.mean, self.covariance)
        self.age += 1
        self.time_since_update += 1  # assume a miss until update() says otherwise
    def update(self, kf, detection):
        """Perform Kalman filter measurement update step and update the feature
        cache.
        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.
        detection : Detection
            The associated detection.
        """
        self.mean, self.covariance = kf.update(
            self.mean, self.covariance, detection.to_xyah())
        self.features.append(detection.feature)
        self.hits += 1
        self.time_since_update = 0
        # promote to Confirmed once enough consecutive detections have matched
        if self.state == TrackState.Tentative and self.hits >= self._n_init:
            self.state = TrackState.Confirmed
    def mark_missed(self):
        """Mark this track as missed (no association at the current time step).
        """
        if self.state == TrackState.Tentative:
            # a miss before confirmation kills the track immediately
            self.state = TrackState.Deleted
        elif self.time_since_update > self._max_age:
            # confirmed tracks are only deleted after max_age consecutive misses
            self.state = TrackState.Deleted
    def is_tentative(self):
        """Returns True if this track is tentative (unconfirmed).
        """
        return self.state == TrackState.Tentative
    def is_confirmed(self):
        """Returns True if this track is confirmed."""
        return self.state == TrackState.Confirmed
    def is_deleted(self):
        """Returns True if this track is dead and should be deleted."""
        return self.state == TrackState.Deleted
ace501f59a4a37ae639e06ecba971f69eadf9331 | 32,523 | py | Python | tests/model_test.py | Jaymon/prom | b9eab53f9cc9870a3212e96129671c36a749aa94 | [
"MIT"
] | 8 | 2018-04-10T17:42:34.000Z | 2022-01-14T09:20:23.000Z | tests/model_test.py | Jaymon/prom | b9eab53f9cc9870a3212e96129671c36a749aa94 | [
"MIT"
] | 85 | 2018-03-29T00:48:28.000Z | 2021-10-16T07:31:02.000Z | tests/model_test.py | firstopinion/prom | b9eab53f9cc9870a3212e96129671c36a749aa94 | [
"MIT"
] | 3 | 2019-02-19T23:50:37.000Z | 2021-05-12T02:07:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import pickle
import json
import datetime
import testdata
from . import BaseTestCase, EnvironTestCase
from prom.compat import *
from prom.model import Orm, OrmPool
from prom.config import Field, Index
import prom
class PickleOrm(Orm):
    """This is only needed to test the test_pickling() method.
    pickle can only round-trip classes that are importable at module scope,
    so this orm must live here rather than inside the test method itself.
    """
    foo = Field(int, True)  # required int field
    bar = Field(str, True)  # required str field
    ifoobar = Index("foo", "bar")  # composite index across both fields
class OrmPoolTest(BaseTestCase):
    def test_lifecycle(self):
        """Exercise OrmPool's LRU cache: lookups, reordering, and eviction.
        NOTE(review): the literal `[1]`/`[2]` key assertions assume the fresh
        table hands out sequential pks starting at 1 (pks[0] == 1,
        pks[1] == 2) -- confirm if the insert fixture ever changes.
        """
        orm_class = self.get_orm_class()
        pks = self.insert(orm_class, 10)
        # a pool of size 1 can only ever hold the most recently accessed orm
        pool = OrmPool(orm_class, 1)
        o = pool[pks[0]]
        self.assertEqual(pks[0], o.pk)
        # repeated hits on the same pk keep returning the cached orm
        o = pool[pks[0]]
        self.assertEqual(pks[0], o.pk)
        o = pool[pks[0]]
        self.assertEqual(pks[0], o.pk)
        # accessing a different pk evicts the previous sole entry each time
        pool[pks[1]]
        self.assertEqual([2], list(pool.pq.keys()))
        pool[pks[0]]
        self.assertEqual([1], list(pool.pq.keys()))
        pool[pks[1]]
        self.assertEqual([2], list(pool.pq.keys()))
        pool[pks[0]]
        self.assertEqual([1], list(pool.pq.keys()))
        # a pool one smaller than the row count evicts exactly the oldest entry
        pool = OrmPool(orm_class, len(pks) - 1)
        for pk in pks:
            o = pool[pk]
            self.assertEqual(pk, o.pk)
        # pks[0] was evicted, so the least recently used key is now pks[1]
        self.assertEqual(list(pool.pq.keys())[0], pks[1])
class OrmTest(EnvironTestCase):
def test_custom__id_pk(self):
orm_class = self.get_orm_class(
_id=Field(str, True, size=36, pk=True)
)
o = orm_class.create(_id="foo")
o2 = orm_class.query.one()
self.assertEqual(o.pk, o2.pk)
def test_field_access(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="2")
self.assertEqual(1, o.foo)
self.assertIsNone(o.pk)
with self.assertRaises(AttributeError):
o.fkdasljfdkfsalk
orm_class = self.get_orm_class()
orm_class._id = None
o = orm_class(foo=1, bar="2")
self.assertIsNone(o.pk)
self.assertIsNone(o._id)
self.assertEqual(1, o.foo)
with self.assertRaises(AttributeError):
o.fkdasljfdkfsalk
def test_aliases_1(self):
class Foo(Orm):
ip_address = Field(str, False, aliases=["ip"])
ip = "1.2.3.4"
f = Foo(ip="1.2.3.4")
self.assertEqual(ip, f.ip)
self.assertEqual(ip, f.ip_address)
f = Foo()
f.ip = ip
self.assertEqual(ip, f.ip)
self.assertEqual(ip, f.ip_address)
f = Foo(ip="1.2.3.4")
del f.ip
self.assertIsNone(f.ip)
self.assertIsNone(f.ip_address)
def test_alias_pk(self):
o = self.create_orm()
self.assertEqual(o.pk, o._id)
def test_removed_field(self):
orm_class = self.get_orm_class()
o = orm_class.create(foo=1, bar="2")
o.save()
orm_class.schema.fields.pop("bar")
for count, o in enumerate(orm_class.query, 1):
pass
self.assertLess(0, count)
def test_required_field_not_set_update(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="two")
o.save()
# create a new instance and make it look like an existing instance
o2 = orm_class(
foo=o.foo,
bar=o.bar,
)
o2._interface_pk = o.pk
fields = o2.to_interface()
self.assertFalse("_created" in fields)
o._created = None
with self.assertRaises(KeyError):
fields = o.to_interface()
del o._created
fields = o.to_interface()
self.assertFalse("_created" in fields)
def test_f_class_definition(self):
class FCD(Orm):
_id = _created = _updated = None
class foo(Field):
type = int
def fset(self, orm, v):
return super(FCD.foo, self).fset(orm, v) + 1
class bar(Field):
type = int
def fget(self, orm, v):
return super(FCD.bar, self).fget(orm, v) + 2
o = FCD(foo=1, bar=1)
self.assertEqual(2, o.foo)
self.assertEqual(3, o.bar)
class Foo(Field):
type = int
def fset(self, orm, v):
return super(Foo, self).fset(orm, v) + 1
class Bar(Field):
type = int
def fget(self, orm, v):
return super(Bar, self).fget(orm, v) + 2
orm_class = self.get_orm_class(
foo=Foo,
bar=Bar,
)
o = orm_class(foo=1, bar=1)
self.assertEqual(2, o.foo)
self.assertEqual(3, o.bar)
def test_to_interface_insert(self):
orm_class = self.get_orm_class(
foo=Field(int, True, default=1),
bar=Field(str, False)
)
o = orm_class()
fields = o.to_interface()
self.assertTrue("foo" in fields)
self.assertFalse("bar" in fields)
orm_class = self.get_orm_class()
o = orm_class()
# missing foo
with self.assertRaises(KeyError):
fields = o.to_interface()
o.foo = 1
# missing bar
with self.assertRaises(KeyError):
fields = o.to_interface()
o.bar = "2"
fields = o.to_interface()
self.assertFalse("_id" in fields)
def test_to_interface_update(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="2")
o.save()
fields = o.to_interface()
self.assertEqual(1, len(fields)) # _updated would be the only field
o.foo = None
with self.assertRaises(KeyError):
fields = o.to_interface()
o.foo = 1
o._id = None
with self.assertRaises(KeyError):
fields = o.to_interface()
def test_created_updated(self):
orm_class = self.get_orm_class()
now = datetime.datetime.utcnow()
o = orm_class(foo=1, bar="1")
self.assertIsNone(o._created)
self.assertIsNone(o._updated)
o.save()
self.assertLess(now, o._created)
self.assertLess(now, o._updated)
_created = o._created
_updated = o._updated
o.foo=2
o.save()
self.assertEqual(_created, o._created)
self.assertLess(_updated, o._updated)
def test_created_set(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="1")
_created = testdata.get_past_datetime()
o._created = _created
o.save()
o2 = o.query.eq_pk(o.pk).one()
self.assertEqual(_created, o2._created)
_created2 = testdata.get_past_datetime()
o2._created = _created2
o2.save()
o3 = o.query.eq_pk(o.pk).one()
self.assertEqual(_created2, o3._created)
def test_updated_set(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="1")
_updated = testdata.get_past_datetime()
o._updated = _updated
o.save()
o2 = o.query.eq_pk(o.pk).one()
self.assertEqual(_updated, o2._updated)
_updated2 = testdata.get_past_datetime()
o2._updated = _updated2
o2.save()
o3 = o.query.eq_pk(o.pk).one()
self.assertEqual(_updated2, o3._updated)
def test_hydrate_1(self):
"""make sure you can add/update and change the primary key and all of that
works as expected"""
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="1")
o.save()
self.assertLess(0, o.pk)
with self.assertRaises(orm_class.interface.UniqueError):
o2 = orm_class(_id=o.pk, foo=2, bar="2")
o2.save()
o3 = o.query.one_pk(o.pk)
self.assertTrue(o3.is_hydrated())
o3._id = o.pk + 1
self.assertNotEqual(o.pk, o3.pk)
o3.save()
o4 = o3.query.one_pk(o3.pk)
self.assertEqual(o3.pk, o4.pk)
self.assertNotEqual(o.pk, o3.pk)
def test_hydrate_2(self):
orm_class = self.get_orm_class(
foo=Field(int, True),
bar=Field(str, default=lambda *_, **__: "lambda bar"),
)
o = orm_class.hydrate(foo=1)
self.assertEqual("lambda bar", o.bar)
def test_no_pk(self):
orm_class = self.get_orm_class()
orm_class._id = None
pks = self.insert(orm_class, 1)
om1 = orm_class.query.one()
om2 = orm_class.query.is_foo(om1.foo).one()
self.assertEqual(om1.foo, om2.foo)
def test_int_pk(self):
"""Postgres was returning longs for primary keys in py2.7, this was different
behavior than SQLite and python 3, which returns int since 2.7+ transparently
handles ints of arbitrary size, this makes sure that ints are returned for
primary key"""
orm_class = self.get_orm_class()
o = orm_class.create(foo=1, bar="1")
self.assertTrue(isinstance(o.pk, int))
def test_create_pk(self):
"""there was a bug that if you set the pk then it wouldn't set the updated
or created datestamps, this makes sure that is fixed"""
orm_class = self.get_orm_class()
pk = testdata.get_int()
o = orm_class.create(foo=1, bar="1", _id=pk)
self.assertEqual(pk, o.pk)
def test_change_pk(self):
# create a row at pk 1
orm_class = self.get_orm_class()
o = orm_class.create(foo=1, bar="one")
self.assertEqual(1, o.pk)
# move that row to pk 2
o._id = 2
o.save()
o2 = o.query.one_pk(o.pk)
for k in o.schema.fields.keys():
self.assertEqual(getattr(o, k), getattr(o2, k))
o2.foo = 11
o2.save()
# now move it back to pk 1
o2._id = 1
o2.save()
o3 = o.query.one_pk(o2.pk)
for k in o2.schema.fields.keys():
self.assertEqual(getattr(o2, k), getattr(o3, k))
# we should only have 1 row in the db, our row we've changed from 1 -> 2 -> 1
self.assertEqual(1, o2.query.count())
def test_overrides(self):
class FOIndexOverride(Orm):
table_name = "FOIndexOverride_table"
_created = None
index_created = None
s = FOIndexOverride.schema
self.assertFalse("index_created" in s.indexes)
self.assertFalse("_created" in s.fields)
def test_field_iset(self):
"""make sure a field with an iset method will be called at the correct time"""
class FOFieldISetOrm(Orm):
table_name = "FOFieldISetOrm_table"
foo = Field(int)
@foo.isetter
def foo(self, val):
val = 100 if self.is_update() else 10
return val
#o = FOFieldISetOrm(foo=1)
o = FOFieldISetOrm()
o.insert()
self.assertEqual(10, o.foo)
o.foo = 20
o.update()
self.assertEqual(100, o.foo)
def test_field_iget(self):
"""make sure a field with an iget method will be called at the correct time"""
class FOFieldIGetOrm(Orm):
table_name = "FOFieldIGetOrm_table"
foo = Field(int)
@foo.igetter
def foo(cls, val):
return 1000
o = FOFieldIGetOrm()
o.foo = 20
o.insert()
o2 = o.query.one_pk(o.pk)
self.assertEqual(1000, o2.foo)
def test_iget_iset_insert_update(self):
"""Topher noticed when you set both iset and iget to wrap a value (in this case
to have a dict become a json string as it goes into the db but a dict any other
time) that the value would be iset correctly on insert/update, but then it wouldn't
go back to the iget value on success, this test makes sure that is fixed"""
class IGetSetInsertUpdateOrm(Orm):
table_name = "IGetSetInsertUpdateOrm_table"
#interface = self.get_interface()
foo = Field(str)
@foo.isetter
def foo(self, val):
if val is None: return val
return json.dumps(val)
@foo.igetter
def foo(cls, val):
if val is None: return val
return json.loads(val)
o = IGetSetInsertUpdateOrm()
o.foo = {"foo": 1, "bar": 2}
self.assertTrue(isinstance(o.foo, dict))
o.insert()
self.assertTrue(isinstance(o.foo, dict))
o.foo = {"foo": 2, "bar": 1}
self.assertTrue(isinstance(o.foo, dict))
o.update()
self.assertTrue(isinstance(o.foo, dict))
o2 = o.query.one_pk(o.pk)
self.assertTrue(isinstance(o2.foo, dict))
self.assertEqual(o.foo, o2.foo)
def test_field_getattr(self):
class FOFieldGAOrm(Orm):
table_name = "fofgaorm_table"
foo = Field(int)
@foo.fsetter
def foo(self, val):
return getattr(self, "bar", 10)
bar = Field(int)
@bar.fsetter
def bar(self, val):
return getattr(self, "foo", 10)
# this test passes if it doesn't raise an exception
o = FOFieldGAOrm()
def test_field_lifecycle(self):
orm_class = self.get_orm_class(
foo=Field(int)
)
o = orm_class.create(foo=1)
self.assertEqual(1, o.foo)
o.foo = 2
self.assertTrue("foo" in o.modified_fields)
o.save()
o2 = o.query.one_pk(o.pk)
self.assertEqual(2, o.foo)
del o.foo
self.assertEqual(None, o.foo)
self.assertFalse("foo" in o.modified_fields)
def test___delattr__(self):
class DAOrm(Orm):
table_name = "daorm_table"
foo = Field(int)
bar = Field(str)
o = DAOrm()
o.foo = 1
o.bar = "1"
self.assertEqual(1, o.foo)
self.assertEqual("1", o.bar)
del o.foo
self.assertEqual(None, o.foo)
del o.bar
self.assertEqual(None, o.bar)
o.che = "yay"
self.assertEqual("yay", o.che)
del o.che
with self.assertRaises(AttributeError):
o.che
def test___setattr__(self):
class SAOrm(Orm):
table_name = "saorm_table"
foo = Field(int)
bar = Field(str)
o = SAOrm()
o.foo = 1
o.bar = "1"
self.assertTrue(o.modified_fields)
o.save()
self.assertFalse(o.modified_fields)
o.foo = 2
self.assertTrue(o.modified_fields)
o.save()
self.assertFalse(o.modified_fields)
o.foo = None
o.bar = None
self.assertEqual(2, len(o.modified_fields))
def test_creation(self):
class COrm(Orm):
foo = Field(int)
bar = Field(str)
s = COrm.schema
self.assertTrue(s.foo)
self.assertTrue(s.bar)
self.assertTrue(s.pk)
self.assertTrue(s._created)
self.assertTrue(s._updated)
def test_none(self):
orm_class = self.get_orm_class()
orm_class.foo.required = False
orm_class.bar.required = False
t1 = orm_class()
t2 = orm_class(foo=None, bar=None)
self.assertEqual(t1.fields, t2.fields)
t1.save()
t2.save()
t11 = orm_class.query.one_pk(t1.pk)
t22 = orm_class.query.one_pk(t2.pk)
ff = lambda orm: orm.schema.normal_fields
self.assertEqual(ff(t11), ff(t22))
self.assertEqual(ff(t1), ff(t11))
self.assertEqual(ff(t2), ff(t22))
t3 = orm_class(foo=1)
self.assertEqual(1, t3.foo)
self.assertEqual(None, t3.bar)
t3.save()
self.assertEqual(1, t3.foo)
self.assertEqual(None, t3.bar)
t3 = orm_class.query.one_pk(t3.pk)
self.assertEqual(1, t3.foo)
self.assertEqual(None, t3.bar)
def test_jsonable(self):
orm_class = self.get_orm_class()
orm_class.dt = Field(datetime.datetime)
t = orm_class.hydrate(foo=1, bar="blah", dt=datetime.datetime.utcnow())
d = t.jsonable()
self.assertEqual(1, d['foo'])
self.assertEqual("blah", d['bar'])
self.assertTrue("dt" in d)
t = orm_class.hydrate(foo=1)
d = t.jsonable()
self.assertEqual(1, d['foo'])
self.assertFalse("bar" in d)
def test_modify_1(self):
class TM(Orm):
table_name = self.get_table_name()
bar = Field(str, True)
che = Field(str, False)
@che.fsetter
def che(self, field_val):
if field_val is None: return field_val
if not field_val.startswith('boom'):
raise ValueError("what the heck?")
return field_val
t = TM(bar='bam')
with self.assertRaises(ValueError):
t = TM(che='bam')
t = TM(che='boom')
self.assertIsNone(t.pk)
self.assertIsNone(t._created)
self.assertIsNone(t._updated)
def test_modify_2(self):
orm_class = self.get_orm_class(
foo=Field(str, False),
bar=Field(dict, False, default=dict),
che=Field(dict, False, default=dict),
)
t = orm_class()
self.assertTrue(t.is_modified())
for k in ["bar", "che"]:
self.assertTrue(k in t.modified_fields)
t.bar["foo"] = 1
t.save()
t2 = t.query.one_pk(t.pk)
self.assertEqual(t.bar["foo"], t2.bar["foo"])
t2.bar["foo"] = 2
t2.save()
t3 = t.query.one_pk(t.pk)
self.assertEqual(t3.bar["foo"], t2.bar["foo"])
def test_modify_none(self):
class TModifyNone(Orm):
table_name = self.get_table_name()
foo = Field(str, False)
o = TModifyNone()
o.foo = 1
o.save()
o2 = o.query.one_pk(o.pk)
o2.foo = None
o2.save()
self.assertIsNone(o2.foo)
o3 = o.query.one_pk(o.pk)
self.assertIsNone(o3.foo)
def test_modified_1(self):
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="2")
mfs = o.modified_fields
self.assertEqual(2, len(mfs))
for field_name in ["foo", "bar"]:
self.assertTrue(field_name in mfs)
o.save()
mfs = o.modified_fields
self.assertEqual(0, len(mfs))
o.foo += 1
mfs = o.modified_fields
self.assertEqual(1, len(mfs))
self.assertTrue("foo" in mfs)
o2 = o.requery()
mfs = o2.modified_fields
self.assertEqual(0, len(mfs))
def test_unicode(self):
"""
Jarid was having encoding issues, so I'm finally making sure prom only ever
returns unicode strings
"""
orm_class = self.get_orm_class()
table_name = self.get_table_name()
orm_class.schema = self.get_schema(
self.get_table_name(),
foo=Field(unicode, True),
bar=Field(str, True),
che=Field(str, False),
baz=Field(int, False),
)
t = orm_class.create(
foo=testdata.get_unicode_name(),
bar=testdata.get_unicode_words(),
che=testdata.get_unicode_words().encode('utf-8'),
baz=testdata.get_int(1, 100000)
)
t2 = orm_class.query.one_pk(t.pk)
self.assertEqual(t.foo, t2.foo)
self.assertEqual(t.bar, t2.bar)
#self.assertEqual(t.che, t2.che.encode('utf-8'))
self.assertEqual(t.che.decode("utf-8"), t2.che)
self.assertTrue(isinstance(t.baz, int))
def test_query(self):
orm_class = self.get_orm_class()
pks = self.old_insert(orm_class.interface, orm_class.schema, 5)
lc = orm_class.query.in_pk(pks).count()
self.assertEqual(len(pks), lc)
def test___int__(self):
orm_class = self.get_orm_class()
pk = self.old_insert(orm_class.interface, orm_class.schema, 1)[0]
t = orm_class.query.one_pk(pk)
self.assertEqual(pk, int(t))
def test_query_class(self):
"""make sure you can set the query class and it is picked up correctly"""
class QueryClassTormQuery(prom.Query):
pass
class QueryClassTorm(Orm):
query_class = QueryClassTormQuery
pass
other_orm_class = self.get_orm_class()
self.assertEqual(QueryClassTorm.query_class, QueryClassTormQuery)
self.assertEqual(other_orm_class.query_class, prom.Query)
def test_property_autodiscover(self):
m = testdata.create_module([
"import prom",
"",
"class FooQuery(prom.Query):",
" pass",
"",
"class Foo(prom.Orm):",
" schema = prom.Schema('foo')",
" query_class = FooQuery",
"",
"class BarQuery(prom.Query):",
" pass",
"",
"class Bar(Foo):",
" schema = prom.Schema('bar')",
" query_class = BarQuery",
" pass",
"",
"class CheQuery(prom.Query):",
" pass",
])
fooq = m.module()
# first try with the instance calling first
f = fooq.Foo()
self.assertEqual(f.query_class, fooq.Foo.query_class)
f = fooq.Foo()
self.assertEqual(f.query.__class__.__name__, fooq.Foo.query.__class__.__name__)
f = fooq.Foo()
self.assertEqual(f.interface, fooq.Foo.interface)
# now try with the class calling first
b = fooq.Bar()
self.assertEqual(fooq.Bar.query_class, b.query_class)
b = fooq.Bar()
self.assertEqual(fooq.Bar.query.__class__.__name__, b.query.__class__.__name__)
b = fooq.Bar()
self.assertEqual(fooq.Bar.interface, b.interface)
# now make sure we can manipulate it
fooq.Foo.query_class = fooq.CheQuery
f = fooq.Foo()
self.assertEqual(fooq.CheQuery, f.query_class)
self.assertEqual(fooq.CheQuery, fooq.Foo.query_class)
self.assertEqual(fooq.CheQuery, f.query.__class__)
self.assertEqual(fooq.CheQuery, fooq.Foo.query.__class__)
def test_interface(self):
i = self.get_interface()
#i = Torm.interface
self.assertFalse(i is None)
class TormInterface2Orm(Orm):
pass
i = TormInterface2Orm.interface
self.assertFalse(i is None)
# now let's make sure a different orm with a bad connection name gets flagged
class TormInterfaceOrm(Orm):
connection_name = "blkasdfjksdafjdkfklsd"
pass
with self.assertRaises(KeyError):
i = TormInterfaceOrm.interface
def test___init__(self):
orm_class = self.get_orm_class()
t = orm_class(foo=1)
self.assertTrue('foo' in t.modified_fields)
self.assertEqual(1, t.foo)
def test___init___default_fset(self):
orm_class = self.get_orm_class(
foo=Field(int, default=5),
bar=Field(int, fset=lambda o, v: 6 if v is None else v),
che=Field(int)
)
o = orm_class()
self.assertEqual(5, o.foo)
self.assertEqual(6, o.bar)
self.assertIsNone(o.che)
o.modify(che=7)
self.assertEqual(5, o.foo)
self.assertEqual(6, o.bar)
self.assertEqual(7, o.che)
o = orm_class(foo=1)
self.assertEqual(1, o.foo)
self.assertEqual(6, o.bar)
self.assertIsNone(o.che)
o.modify(che=7, bar=8)
self.assertEqual(1, o.foo)
self.assertEqual(8, o.bar)
self.assertEqual(7, o.che)
o = orm_class(foo=1, bar=2, che=3)
self.assertEqual(1, o.foo)
self.assertEqual(2, o.bar)
self.assertEqual(3, o.che)
def test_save(self):
orm_class = self.get_orm_class()
t = orm_class()
with self.assertRaises(KeyError):
t.save()
t = orm_class(foo=1, bar="value 1", this_is_ignored="as it should be")
self.assertEqual(1, t.foo)
self.assertEqual("value 1", t.bar)
self.assertIsNone(t.pk)
self.assertTrue(t.is_modified())
t.save()
self.assertIsNotNone(t.pk)
self.assertFalse(t.is_modified())
t.foo = 2
t.bar = "value 2"
self.assertTrue(t.is_modified())
t.save()
self.assertEqual(2, t.foo)
self.assertEqual("value 2", t.bar)
# set should only update timestamps and stuff without changing unmodified values
self.assertFalse(t.is_modified())
r = t.save()
self.assertTrue(r)
# make sure it persisted
t.interface.close()
t2 = orm_class.query.is_pk(t.pk).one()
self.assertFalse(t2.is_modified())
self.assertEqual(2, t2.foo)
self.assertEqual("value 2", t2.bar)
self.assertEqual(t.fields, t2.fields)
def test_delete(self):
t = self.get_orm(foo=1, bar="value 1")
r = t.delete()
self.assertFalse(r)
t.save()
self.assertTrue(t.pk)
_id = t.pk
t.delete()
self.assertFalse(t.pk)
self.assertTrue(t.is_modified())
# make sure it persists
t.interface.close()
t2 = t.query.one_pk(_id)
self.assertEqual(None, t2)
def test_create_1(self):
orm_class = self.get_orm_class()
t = orm_class.create(foo=1000, bar="value1000")
self.assertLess(0, t.pk)
self.assertEqual(1000, t.foo)
self.assertEqual("value1000", t.bar)
def test_create_2(self):
"""https://github.com/Jaymon/prom/issues/124"""
kwargs = {
"foo": 1,
"bar": "2"
}
orm_class = self.get_orm_class()
o = orm_class.create(**kwargs)
self.assertTrue(isinstance(o._created, datetime.datetime))
kwargs["_id"] = o.pk + 1
o2 = orm_class.create(**kwargs)
self.assertTrue(isinstance(o._created, datetime.datetime))
def test_fields(self):
orm_class = self.get_orm_class()
t = orm_class.create(foo=1000, bar="value1000")
d = t.fields
for f in t.schema.fields:
self.assertTrue(f in d)
# just make sure changing the dict doesn't mess up the Orm instance
d["_id"] = d["_id"] + 1
self.assertNotEqual(d["_id"], t.pk)
def test_pickling(self):
t = PickleOrm(foo=10000, bar="value10000")
p = pickle.dumps(t)
t2 = pickle.loads(p)
self.assertEqual(t.fields, t2.fields)
self.assertEqual(t.modified_fields, t2.modified_fields)
t.save()
p = pickle.dumps(t)
t2 = pickle.loads(p)
self.assertEqual(t.fields, t2.fields)
self.assertEqual(t.modified_fields, t2.modified_fields)
t2.foo += 1
t2.save()
t3 = PickleOrm.query.one_pk(t2.pk)
self.assertEqual(t3.fields, t2.fields)
def test_transaction(self):
"""with transaction context managers weren't working correctly when the
second insert would fail, the first insert was still going through, this
test helped me reproduce, diagnose, and fix the problem"""
# CRUD
class TransTorm1(Orm):
table_name = "trans_torm_1"
foo = Field(str, True)
@classmethod
def creation(cls, d):
with cls.interface.transaction():
d['foo'] = "foo"
tt = cls.create(**d)
d['tt1_id'] = tt.pk
m = TransTorm2.create(**d)
return tt
class TransTorm2(Orm):
table_name = "trans_torm_2"
bar = Field(str, True, max_size=10)
tt1_id = Field(TransTorm1, True)
TransTorm1.install()
TransTorm2.install()
# actual test starts here
self.assertEqual(0, TransTorm1.query.count())
#d = {"bar": testdata.get_ascii(32)}
d = {}
#with self.assertRaises(prom.InterfaceError):
with self.assertRaises(Exception):
tt = TransTorm1.creation(d)
self.assertEqual(0, TransTorm1.query.count())
def test_non_int_primary_key(self):
class Nipk(Orm):
table_name = "non_int_pk_1"
_id = Field(str, True, pk=True, max_size=64)
class Nipk2(Orm):
table_name = "non_int_pk_2"
nipk_id = Field(Nipk, True)
# since our pk no longer is auto-increment we always have to provide it
with self.assertRaises(prom.InterfaceError):
Nipk.create()
n = Nipk.create(_id="pk1")
self.assertEqual("pk1", n.pk)
self.assertEqual("pk1", n._id)
with self.assertRaises(ValueError):
pk = int(n)
self.assertEqual("pk1", str(n))
with self.assertRaises(prom.InterfaceError):
Nipk.create(_id="pk1")
n2 = Nipk2.create(nipk_id=n.pk)
self.assertEqual(n.pk, n2.nipk_id)
def test_failure_save(self):
"""test to make sure saving on a table that doesn't exist doesn't actually fail"""
class FailureSetTorm(Orm):
interface = self.get_interface()
schema = self.get_schema()
f = FailureSetTorm(foo=1, bar="value 1")
f.save()
self.assertTrue(f.pk)
def test_failure_get(self):
"""test to make sure getting on a table that doesn't exist works without raising
an error
"""
orm_class = self.get_orm_class()
o = orm_class(foo=1, bar="value 1")
o2 = o.query.one()
# we succeeded if no error was raised
    def test_fk(self):
        """fk() should return the stored pk value for the given foreign Orm
        class, None when the reference is unset, and raise ValueError for a
        class the Orm has no foreign key to."""
        mpath = testdata.create_module([
            "from prom import Field, Orm",
            "",
            "class Foo(Orm):",
            "    pass",
            "",
            "class Bar(Orm):",
            "    foo_id = Field(Foo, True)",
            "",
            "class Che(Orm):",
            "    foo_id = Field(Foo, False)",
            "    bar_id = Field(Bar, True)",
            "",
            "class Boo(Orm):",
            "    pass",
        ])
        Foo = mpath.module().Foo
        Bar = mpath.module().Bar
        Che = mpath.module().Che
        Boo = mpath.module().Boo
        # single foreign key
        b = Bar(foo_id=5)
        self.assertEqual(5, b.fk(Foo))
        # two foreign keys, looked up per target class
        c = Che(foo_id=10, bar_id=20)
        self.assertEqual(10, c.fk(Foo))
        self.assertEqual(20, c.fk(Bar))
        # unset references resolve to None
        c = Che()
        self.assertEqual(None, c.fk(Foo))
        self.assertEqual(None, c.fk(Bar))
        # Boo is unrelated to Che, so fk() must reject it
        with self.assertRaises(ValueError):
            c.fk(Boo)
    def test_subquery_1(self):
        """in_* and is_* query filters should accept another query object and
        run it as a subquery."""
        count = 10
        mpath = testdata.create_module([
            "from prom import Field, Orm",
            "",
            "class Foo(Orm):",
            "    pass",
            "",
            "class Bar(Orm):",
            "    foo_id = Field(Foo, True)",
        ])
        Foo = mpath.module().Foo
        Bar = mpath.module().Bar
        # one Bar per Foo row
        foo_ids = self.insert(Foo, count)
        for foo_id in foo_ids:
            Bar.create(foo_id=foo_id)
        # subquery matching all Foo pks returns every Bar
        q = Bar.query.in_foo_id(Foo.query.select_pk())
        self.assertEqual(count, len(q.get()))
        # subquery matching no Foo pks returns nothing
        q = Bar.query.in_foo_id(Foo.query.select_pk().gt_pk(count + 10000))
        self.assertEqual(0, len(q.get()))
        # is_* with a LIMIT 1 subquery matches exactly one row
        q = Bar.query.is_foo_id(Foo.query.select_pk().limit(1))
        self.assertEqual(1, len(q.get()))
    def test_subquery_2(self):
        """Similar test as subquery_1 but makes sure query_class works as expected also"""
        count = 10
        mpath = testdata.create_module([
            "from prom import Field, Orm, Query",
            "",
            "class Foo(Orm):",
            "    pass",
            "",
            "class BarQuery(Query):",
            "    pass",
            "",
            "class Bar(Orm):",
            "    foo_id = Field(Foo, True)",
            "    query_class = BarQuery",
        ])
        Foo = mpath.module().Foo
        Bar = mpath.module().Bar
        foo_ids = self.insert(Foo, count)
        for foo_id in foo_ids:
            Bar.create(foo_id=foo_id)
        # the custom query_class must still support query-object subqueries
        q = Bar.query.in_foo_id(Foo.query.select_pk())
        self.assertEqual(count, len(q.get()))
| 28.832447 | 91 | 0.555791 |
ace502a9144ad45a039db13c218d96fa41482d0c | 2,759 | py | Python | tests/moreutils_tests/test_csvxfind.py | dannguyen/csvkitcat | c0ab288047887c10614838042a01e1514e0a26c7 | [
"MIT"
] | 3 | 2020-08-18T17:27:31.000Z | 2020-09-24T02:58:41.000Z | tests/moreutils_tests/test_csvxfind.py | dannguyen/csvkat | c0ab288047887c10614838042a01e1514e0a26c7 | [
"MIT"
] | null | null | null | tests/moreutils_tests/test_csvxfind.py | dannguyen/csvkat | c0ab288047887c10614838042a01e1514e0a26c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import six
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkitcat.exceptions import ArgumentErrorTK, ColumnIdentifierError
from csvkitcat.moreutils.csvxfind import CSVXfind, launch_new_instance
from tests.utils import CSVKitTestCase, stdin_as_string
from unittest import skip as skiptest
class TestCSVXfind(CSVKitTestCase):
    """End-to-end CLI tests for the csvxfind utility: it searches a column
    with a regex and appends a `<col>_xfind` column of the matches."""
    Utility = CSVXfind
    # default CLI args for the harness: search column "a", pattern "|"
    default_args = ["a", "|"]
    def test_launch_new_instance(self):
        # smoke test: the console entry point runs with a patched argv
        with patch.object(
            sys,
            "argv",
            [self.Utility.__name__.lower(), *self.default_args, "examples/dummy.csv"],
        ):
            launch_new_instance()
    def test_basic_dummy_match(self):
        """
        creates 1 xfind col
        """
        self.assertLines(
            ["a", r"\d", "examples/dummy.csv"], ["a,b,c,a_xfind", "1,2,3,1",]
        )
    def test_basic_dummy_no_match(self):
        """
        creates 1 xfind col even when there's no match
        """
        self.assertLines(["a", ";", "examples/dummy.csv"], ["a,b,c,a_xfind", "1,2,3,",])
    def test_basic_mentions(self):
        # multiple matches are joined with the default ";" delimiter
        self.assertLines(
            ["text", r"@\w+", "examples/mentions.csv"],
            [
                "id,text,text_xfind",
                "1,hey,",
                "2,hello @world,@world",
                '3,"Just like @a_prayer, your @Voice can take me @there!",@a_prayer;@Voice;@there',
            ],
        )
    def test_basic_mentions_specify_delimiter(self):
        # -D overrides the join delimiter between matches
        self.assertLines(
            ["-D", ", ", "text", r"@\w+", "examples/mentions.csv"],
            [
                "id,text,text_xfind",
                "1,hey,",
                "2,hello @world,@world",
                '3,"Just like @a_prayer, your @Voice can take me @there!","@a_prayer, @Voice, @there"',
            ],
        )
    def test_basic_mentions_limit_matches(self):
        # -n caps the number of matches collected per row
        self.assertLines(
            ["-n", "2", "text", r"@\w+", "examples/mentions.csv"],
            [
                "id,text,text_xfind",
                "1,hey,",
                "2,hello @world,@world",
                '3,"Just like @a_prayer, your @Voice can take me @there!",@a_prayer;@Voice',
            ],
        )
    ### error stuff
    def test_error_when_invalid_column_given_as_argument(self):
        with self.assertRaises(ColumnIdentifierError) as c:
            self.get_output(["WTF", ";", "examples/dummy.csv"])
    def test_error_when_multiple_columns_given_as_argument(self):
        # only a single column identifier is accepted
        with self.assertRaises(ArgumentErrorTK) as c:
            self.get_output(["a,b,c", ";", "examples/dummy.csv"])
        assert "argument expects exactly one column identifier" in str(c.exception)
ace5034b0150c21fdabf2b26d4889e9ad55f56e3 | 558 | py | Python | tests/audit/test_mds_audit.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | 1 | 2021-02-08T16:51:57.000Z | 2021-02-08T16:51:57.000Z | tests/audit/test_mds_audit.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | null | null | null | tests/audit/test_mds_audit.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | null | null | null | import os
from chroma_agent.device_plugins.audit.lustre import MdsAudit
from tests.test_utils import PatchedContextTestCase
class TestMdsAudit(PatchedContextTestCase):
def test_mdd_obd_skipped(self):
"""Test that the mdd_obd device is skipped for 2.x audits (HYD-437)"""
tests = os.path.join(os.path.dirname(__file__), "..")
self.test_root = os.path.join(
tests, "data/lustre_versions/2.9.58_86_g2383a62/mds_mgs"
)
super(TestMdsAudit, self).setUp()
self.assertFalse(MdsAudit.is_available())
| 34.875 | 78 | 0.704301 |
ace5038dd51b06a5c35c040ae766baab9d0bf6f2 | 1,862 | py | Python | google-apac/Google-APAC-2017-University-Test/Practice-Round-APAC-test-2017/D.Sums-of-Sums.py | sogapalag/problems | 0ea7d65448e1177f8b3f81124a82d187980d659c | [
"MIT"
] | 1 | 2020-04-04T14:56:12.000Z | 2020-04-04T14:56:12.000Z | google-apac/Google-APAC-2017-University-Test/Practice-Round-APAC-test-2017/D.Sums-of-Sums.py | sogapalag/problems | 0ea7d65448e1177f8b3f81124a82d187980d659c | [
"MIT"
] | null | null | null | google-apac/Google-APAC-2017-University-Test/Practice-Round-APAC-test-2017/D.Sums-of-Sums.py | sogapalag/problems | 0ea7d65448e1177f8b3f81124a82d187980d659c | [
"MIT"
] | null | null | null | import numpy as np
def solution():
T = int(input())
for t in range(1, T+1):
solve(t)
def solve(t):
    """Read one test case (array of n ints plus q rank-range queries) from
    stdin and print, per query, the sum of the subarray sums ranked l..r."""
    n, q = map(int, input().split(' '))
    a = list(map(int, input().split(' ')))
    # x = prefix sums of a; y = prefix sums of x ("sums of sums")
    x = np.cumsum(a)
    y = np.cumsum(x)
    x = x.tolist()
    y = y.tolist()
    # prepend a 0 sentinel so x[i] is the sum of the first i elements (1-based)
    x.insert(0, 0)
    y.insert(0, 0)
    #print(x)
    #print(y)
    print('Case #%d:' % (t))
    for i in range(q):
        l, r = map(int, input().split(' '))
        # answer over ranks [l, r] is cum(r) - cum(l-1)
        ans = cum(a, x, y, r) - cum(a, x, y, l-1)
        print(ans)
def cum(a, x, y, k):
    """Return the sum of the k smallest contiguous-subarray sums of `a`.

    `x` is the 0-prepended prefix-sum list of `a` and `y` the 0-prepended
    prefix-sum list of `x`, so the subarray sum over (j, i] is x[i] - x[j].
    """
    n = len(x)-1
    # Binary-search `left`, the value of the k-th smallest subarray sum.
    left, right = min(a), y[-1]
    aless, aequal = 0, 0
    while left < right:
        mid = (left+right)>>1
        # Count subarray sums strictly less than mid (`less`) and equal to
        # mid (`equal`) with a two-pointer sweep over the prefix sums.
        less, equal = 0, 0
        j = 0
        for i in range(1, n+1):
            while j<i and x[i]-x[j]>mid:
                j += 1
            if i!=j:
                if x[i]-x[j]==mid:
                    less += i-j-1
                else:
                    less += i-j
                if x[i]-x[j]==mid:
                    equal += 1
        if less+equal < k:
            left = mid+1
        else:
            right = mid
            # remember the counts for the final answer below
            aless = less
            aequal = equal
    # (k - aless) of the selected sums equal `left` exactly.
    res = (k-aless) * left
    #print(res, k, aless, left)
    j = 0
    #print(x)
    #print(y)
    # Add every subarray sum strictly below `left` once, again via the
    # two-pointer sweep; the (i-j)*x[i] - (y[i-1]-y[j-1]) term is the closed
    # form of sum over j' in [j, i) of (x[i] - x[j']).
    for i in range(1, n+1):
        while j<i and x[i]-x[j]>left:
            j += 1
        #print(i, j, res)
        if i!=j:
            if x[i]-x[j]<left:
                #tricky here notice j is 0
                if j!=0:
                    addi = ((i-j)*x[i] - (y[i-1]-y[j-1]))
                else:
                    addi = (i-j)*x[i] - y[i-1]
                res += addi
                #print(res, addi, '<')
            else:
                # boundary case: x[i]-x[j] == left, exclude that one sum
                addi = ((i-j-1)*x[i] - (y[i-1]-y[j]))
                res += addi
                #print(res, addi, '=')
        #print(i, res)
    #print(res)
    return res
solution()
| 25.506849 | 57 | 0.373792 |
ace5047e90492c243747b3f980fb7e71675959cd | 13,825 | py | Python | test/unit3/test_dictviews.py | timmartin/skulpt | 2e3a3fbbaccc12baa29094a717ceec491a8a6750 | [
"MIT"
] | 2,671 | 2015-01-03T08:23:25.000Z | 2022-03-31T06:15:48.000Z | test/unit3/test_dictviews.py | timmartin/skulpt | 2e3a3fbbaccc12baa29094a717ceec491a8a6750 | [
"MIT"
] | 972 | 2015-01-05T08:11:00.000Z | 2022-03-29T13:47:15.000Z | test/unit3/test_dictviews.py | timmartin/skulpt | 2e3a3fbbaccc12baa29094a717ceec491a8a6750 | [
"MIT"
] | 845 | 2015-01-03T19:53:36.000Z | 2022-03-29T18:34:22.000Z | import copy
import unittest
class DictSetTest(unittest.TestCase):
def test_constructors_not_callable(self):
kt = type({}.keys())
self.assertRaises(TypeError, kt, {})
self.assertRaises(TypeError, kt)
it = type({}.items())
self.assertRaises(TypeError, it, {})
self.assertRaises(TypeError, it)
vt = type({}.values())
self.assertRaises(TypeError, vt, {})
self.assertRaises(TypeError, vt)
def test_dict_keys(self):
d = {1: 10, "a": "ABC"}
keys = d.keys()
self.assertEqual(len(keys), 2)
self.assertEqual(set(keys), {1, "a"})
self.assertEqual(keys, {1, "a"})
self.assertNotEqual(keys, {1, "a", "b"})
self.assertNotEqual(keys, {1, "b"})
self.assertNotEqual(keys, {1})
self.assertNotEqual(keys, 42)
self.assertIn(1, keys)
self.assertIn("a", keys)
self.assertNotIn(10, keys)
self.assertNotIn("Z", keys)
self.assertEqual(d.keys(), d.keys())
e = {1: 11, "a": "def"}
self.assertEqual(d.keys(), e.keys())
del e["a"]
self.assertNotEqual(d.keys(), e.keys())
def test_dict_items(self):
d = {1: 10, "a": "ABC"}
items = d.items()
self.assertEqual(len(items), 2)
self.assertEqual(set(items), {(1, 10), ("a", "ABC")})
self.assertEqual(items, {(1, 10), ("a", "ABC")})
self.assertNotEqual(items, {(1, 10), ("a", "ABC"), "junk"})
self.assertNotEqual(items, {(1, 10), ("a", "def")})
self.assertNotEqual(items, {(1, 10)})
self.assertNotEqual(items, 42)
self.assertIn((1, 10), items)
self.assertIn(("a", "ABC"), items)
self.assertNotIn((1, 11), items)
self.assertNotIn(1, items)
self.assertNotIn((), items)
self.assertNotIn((1,), items)
self.assertNotIn((1, 2, 3), items)
self.assertEqual(d.items(), d.items())
e = d.copy()
self.assertEqual(d.items(), e.items())
e["a"] = "def"
self.assertNotEqual(d.items(), e.items())
def test_dict_mixed_keys_items(self):
d = {(1, 1): 11, (2, 2): 22}
e = {1: 1, 2: 2}
self.assertEqual(d.keys(), e.items())
self.assertNotEqual(d.items(), e.keys())
def test_dict_values(self):
d = {1: 10, "a": "ABC"}
values = d.values()
self.assertEqual(set(values), {10, "ABC"})
self.assertEqual(len(values), 2)
def test_dict_repr(self):
d = {1: 10, "a": "ABC"}
self.assertIsInstance(repr(d), str)
r = repr(d.items())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
r == "dict_items([(1, 10), ('a', 'ABC')])")
r = repr(d.keys())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_keys(['a', 1])" or
r == "dict_keys([1, 'a'])")
r = repr(d.values())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_values(['ABC', 10])" or
r == "dict_values([10, 'ABC'])")
def test_keys_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'b': 3, 'c': 2}
d3 = {'d': 4, 'e': 5}
d4 = {'d': 4}
class CustomSet(set):
def intersection(self, other):
return CustomSet(super().intersection(other))
self.assertEqual(d1.keys() & d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() & d2.keys(), {'b'})
self.assertEqual(d1.keys() & d3.keys(), set())
self.assertEqual(d1.keys() & set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() & set(d2.keys()), {'b'})
self.assertEqual(d1.keys() & set(d3.keys()), set())
self.assertEqual(d1.keys() & tuple(d1.keys()), {'a', 'b'})
self.assertEqual(d3.keys() & d4.keys(), {'d'})
self.assertEqual(d4.keys() & d3.keys(), {'d'})
self.assertEqual(d4.keys() & set(d3.keys()), {'d'})
self.assertIsInstance(d4.keys() & frozenset(d3.keys()), set)
self.assertIsInstance(frozenset(d3.keys()) & d4.keys(), set)
self.assertIs(type(d4.keys() & CustomSet(d3.keys())), set)
self.assertIs(type(d1.keys() & []), set)
self.assertIs(type([] & d1.keys()), set)
self.assertEqual(d1.keys() | d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() | d2.keys(), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | set(d2.keys()), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | (1, 2), {'a', 'b', 1, 2})
self.assertEqual(d1.keys() ^ d1.keys(), set())
self.assertEqual(d1.keys() ^ d2.keys(), {'a', 'c'})
self.assertEqual(d1.keys() ^ d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ set(d1.keys()), set())
self.assertEqual(d1.keys() ^ set(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() ^ set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ tuple(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() - d1.keys(), set())
self.assertEqual(d1.keys() - d2.keys(), {'a'})
self.assertEqual(d1.keys() - d3.keys(), {'a', 'b'})
self.assertEqual(d1.keys() - set(d1.keys()), set())
self.assertEqual(d1.keys() - set(d2.keys()), {'a'})
self.assertEqual(d1.keys() - set(d3.keys()), {'a', 'b'})
self.assertEqual(d1.keys() - (0, 1), {'a', 'b'})
self.assertFalse(d1.keys().isdisjoint(d1.keys()))
self.assertFalse(d1.keys().isdisjoint(d2.keys()))
self.assertFalse(d1.keys().isdisjoint(list(d2.keys())))
self.assertFalse(d1.keys().isdisjoint(set(d2.keys())))
self.assertTrue(d1.keys().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.keys().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.keys().isdisjoint(['x', 'y']))
self.assertTrue(d1.keys().isdisjoint({}))
self.assertTrue(d1.keys().isdisjoint(d3.keys()))
de = {}
self.assertTrue(de.keys().isdisjoint(set()))
self.assertTrue(de.keys().isdisjoint([]))
self.assertTrue(de.keys().isdisjoint(de.keys()))
self.assertTrue(de.keys().isdisjoint([1]))
def test_items_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'a': 2, 'b': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(
d1.items() & d1.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() & d2.items(), {('b', 2)})
self.assertEqual(d1.items() & d3.items(), set())
self.assertEqual(d1.items() & set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() & set(d2.items()), {('b', 2)})
self.assertEqual(d1.items() & set(d3.items()), set())
self.assertEqual(d1.items() | d1.items(),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | d2.items(),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() | set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | set(d2.items()),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | set(d3.items()),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() ^ d1.items(), set())
self.assertEqual(d1.items() ^ d2.items(),
{('a', 1), ('a', 2)})
self.assertEqual(d1.items() ^ d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() - d1.items(), set())
self.assertEqual(d1.items() - d2.items(), {('a', 1)})
self.assertEqual(d1.items() - d3.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() - set(d1.items()), set())
self.assertEqual(d1.items() - set(d2.items()), {('a', 1)})
self.assertEqual(d1.items() - set(d3.items()), {('a', 1), ('b', 2)})
self.assertFalse(d1.items().isdisjoint(d1.items()))
self.assertFalse(d1.items().isdisjoint(d2.items()))
self.assertFalse(d1.items().isdisjoint(list(d2.items())))
self.assertFalse(d1.items().isdisjoint(set(d2.items())))
self.assertTrue(d1.items().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.items().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.items().isdisjoint({}))
self.assertTrue(d1.items().isdisjoint(d3.items()))
de = {}
self.assertTrue(de.items().isdisjoint(set()))
self.assertTrue(de.items().isdisjoint([]))
self.assertTrue(de.items().isdisjoint(de.items()))
self.assertTrue(de.items().isdisjoint([1]))
def test_set_operations_with_iterator(self):
origin = {1: 2, 3: 4}
self.assertEqual(origin.keys() & iter([1, 2]), {1})
self.assertEqual(origin.keys() | iter([1, 2]), {1, 2, 3})
self.assertEqual(origin.keys() ^ iter([1, 2]), {2, 3})
self.assertEqual(origin.keys() - iter([1, 2]), {3})
items = origin.items()
self.assertEqual(items & iter([(1, 2)]), {(1, 2)})
self.assertEqual(items ^ iter([(1, 2)]), {(3, 4)})
self.assertEqual(items | iter([(1, 2)]), {(1, 2), (3, 4)})
self.assertEqual(items - iter([(1, 2)]), {(3, 4)})
def test_set_operations_with_noniterable(self):
with self.assertRaises(TypeError):
{}.keys() & 1
with self.assertRaises(TypeError):
{}.keys() | 1
with self.assertRaises(TypeError):
{}.keys() ^ 1
with self.assertRaises(TypeError):
{}.keys() - 1
with self.assertRaises(TypeError):
{}.items() & 1
with self.assertRaises(TypeError):
{}.items() | 1
with self.assertRaises(TypeError):
{}.items() ^ 1
with self.assertRaises(TypeError):
{}.items() - 1
def test_recursive_repr(self):
d = {}
d[42] = d.values()
r = repr(d)
# Cannot perform a stronger test, as the contents of the repr
# are implementation-dependent. All we can say is that we
# want a str result, not an exception of any sort.
self.assertIsInstance(r, str)
d[42] = d.items()
r = repr(d)
# Again.
self.assertIsInstance(r, str)
# def test_deeply_nested_repr(self):
# d = {}
# for i in range(sys.getrecursionlimit() + 100):
# d = {42: d.values()}
# self.assertRaises(RecursionError, repr, d)
def test_copy(self):
d = {1: 10, "a": "ABC"}
self.assertRaises(TypeError, copy.copy, d.keys())
self.assertRaises(TypeError, copy.copy, d.values())
self.assertRaises(TypeError, copy.copy, d.items())
def test_compare_error(self):
class Exc(Exception):
pass
class BadEq:
def __hash__(self):
return 7
def __eq__(self, other):
raise Exc
k1, k2 = BadEq(), BadEq()
v1, v2 = BadEq(), BadEq()
d = {k1: v1}
self.assertIn(k1, d)
self.assertIn(k1, d.keys())
self.assertIn(v1, d.values())
self.assertIn((k1, v1), d.items())
self.assertRaises(Exc, d.__contains__, k2)
self.assertRaises(Exc, d.keys().__contains__, k2)
self.assertRaises(Exc, d.items().__contains__, (k2, v1))
self.assertRaises(Exc, d.items().__contains__, (k1, v2))
with self.assertRaises(Exc):
v2 in d.values()
# def test_pickle(self):
# d = {1: 10, "a": "ABC"}
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# self.assertRaises((TypeError, pickle.PicklingError),
# pickle.dumps, d.keys(), proto)
# self.assertRaises((TypeError, pickle.PicklingError),
# pickle.dumps, d.values(), proto)
# self.assertRaises((TypeError, pickle.PicklingError),
# pickle.dumps, d.items(), proto)
# def test_abc_registry(self):
# d = dict(a=1)
# self.assertIsInstance(d.keys(), collections.abc.KeysView)
# self.assertIsInstance(d.keys(), collections.abc.MappingView)
# self.assertIsInstance(d.keys(), collections.abc.Set)
# self.assertIsInstance(d.keys(), collections.abc.Sized)
# self.assertIsInstance(d.keys(), collections.abc.Iterable)
# self.assertIsInstance(d.keys(), collections.abc.Container)
# self.assertIsInstance(d.values(), collections.abc.ValuesView)
# self.assertIsInstance(d.values(), collections.abc.MappingView)
# self.assertIsInstance(d.values(), collections.abc.Sized)
# self.assertIsInstance(d.items(), collections.abc.ItemsView)
# self.assertIsInstance(d.items(), collections.abc.MappingView)
# self.assertIsInstance(d.items(), collections.abc.Set)
# self.assertIsInstance(d.items(), collections.abc.Sized)
# self.assertIsInstance(d.items(), collections.abc.Iterable)
# self.assertIsInstance(d.items(), collections.abc.Container)
if __name__ == "__main__":
    # run the suite when executed directly
    unittest.main()
| 41.767372 | 76 | 0.525497 |
ace5056cf3730b15fd5c7d37f60431a79176fdc1 | 2,773 | py | Python | webStorm-APICloud/python_tools/Lib/distutils/tests/test_config.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/distutils/tests/test_config.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/distutils/tests/test_config.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.tests import support
PYPIRC = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
class PyPIRCCommandTestCase(support.TempdirManager, unittest.TestCase):
    """Tests for PyPIRCCommand._read_pypirc().

    NOTE: Python 2 era code (os.environ.has_key, list-returning dict.items()
    with in-place sort, assertEquals).
    """
    def setUp(self):
        """Patches the environment."""
        # redirect HOME to this test's directory so _read_pypirc() picks up
        # the .pypirc we write there; remember the previous value (if any)
        if os.environ.has_key('HOME'):
            self._old_home = os.environ['HOME']
        else:
            self._old_home = None
        curdir = os.path.dirname(__file__)
        os.environ['HOME'] = curdir
        self.rc = os.path.join(curdir, '.pypirc')
        self.dist = Distribution()
        # minimal concrete PyPIRCCommand subclass: the base requires
        # initialize_options/finalize_options to be provided
        class command(PyPIRCCommand):
            def __init__(self, dist):
                PyPIRCCommand.__init__(self, dist)
            def initialize_options(self):
                pass
            finalize_options = initialize_options
        self._cmd = command
    def tearDown(self):
        """Removes the patch."""
        # restore (or delete) HOME and remove the temporary .pypirc
        if self._old_home is None:
            del os.environ['HOME']
        else:
            os.environ['HOME'] = self._old_home
        if os.path.exists(self.rc):
            os.remove(self.rc)
    def test_server_registration(self):
        # This test makes sure PyPIRCCommand knows how to:
        # 1. handle several sections in .pypirc
        # 2. handle the old format
        # new format
        f = open(self.rc, 'w')
        try:
            f.write(PYPIRC)
        finally:
            f.close()
        cmd = self._cmd(self.dist)
        config = cmd._read_pypirc()
        config = config.items()
        config.sort()
        # the first configured index server is selected, realm/repository
        # fall back to the pypi defaults
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'http://pypi.python.org/pypi'),
                  ('server', 'server1'), ('username', 'me')]
        self.assertEquals(config, waited)
        # old format
        f = open(self.rc, 'w')
        f.write(PYPIRC_OLD)
        f.close()
        config = cmd._read_pypirc()
        config = config.items()
        config.sort()
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'http://pypi.python.org/pypi'),
                  ('server', 'server-login'), ('username', 'tarek')]
        self.assertEquals(config, waited)
def test_suite():
    """Return the suite for this module (unittest discovery hook)."""
    return unittest.makeSuite(PyPIRCCommandTestCase)
if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
| 26.160377 | 72 | 0.561486 |
ace50587d107ac6e28eafc7cf619da2c14a56ac8 | 2,644 | py | Python | model.py | fengredrum/Batch_D3PG | b1128db2b22ce6ba94665a066b1cc401f33145b5 | [
"MIT"
] | 2 | 2020-04-07T02:40:21.000Z | 2020-04-22T06:00:12.000Z | model.py | fengredrum/Batch_D3PG | b1128db2b22ce6ba94665a066b1cc401f33145b5 | [
"MIT"
] | 3 | 2021-04-30T21:14:28.000Z | 2021-09-08T01:55:24.000Z | model.py | fengredrum/Batch_D3PG | b1128db2b22ce6ba94665a066b1cc401f33145b5 | [
"MIT"
] | 1 | 2020-04-22T12:12:26.000Z | 2020-04-22T12:12:26.000Z | import torch
import torch.nn as nn
import numpy as np
from utils import init
class DDPGActor(nn.Module):
    """Deterministic policy network for DDPG/D3PG.

    Maps an observation batch to actions in [-1, 1]. Gaussian exploration
    noise scaled by ``epsilon`` is always added in :meth:`forward`.

    Fix: the original defined an ``init_`` lambda (orthogonal init helper)
    that was never applied to any layer — dead code that only dragged in
    ``utils.init``/numpy. It has been removed; layers keep PyTorch's
    default initialization exactly as before.
    """
    __constants__ = ['epsilon']

    def __init__(self, obs_size, act_size, hidden_size_1=128, hidden_size_2=128, epsilon=0.3):
        super(DDPGActor, self).__init__()
        self.epsilon = epsilon  # stddev multiplier for exploration noise
        self.act_size = act_size
        self.fc = nn.Sequential(
            nn.Linear(obs_size, hidden_size_1),
            nn.Tanh(),
            nn.Linear(hidden_size_1, hidden_size_2),
            nn.Tanh(),
            nn.Linear(hidden_size_2, act_size),
        )

    def forward(self, x):
        """Return noisy actions clamped to [-1, 1] for observation batch x."""
        action = self.fc(x)
        # exploration noise; clamping (rather than tanh) keeps actions bounded
        action = action + self.epsilon * torch.randn(action.size(), device=action.device)
        action = action.clamp(-1., 1.)
        return action

    def sync_param(self, model):
        """Hard-copy all parameters/buffers from ``model`` into this network."""
        self.load_state_dict(model.state_dict())

    def alpha_sync_param(self, model, alpha=1e-3):
        """Polyak (soft) update: target <- (1 - alpha) * target + alpha * model."""
        assert isinstance(alpha, float)
        assert 0.0 < alpha <= 1.0
        model_state = model.state_dict()
        target_net_state = self.state_dict()
        for key, value in model_state.items():
            target_net_state[key] = (1. - alpha) * target_net_state[key] + alpha * value
        self.load_state_dict(target_net_state)
class DDPGCritic(nn.Module):
    """Q-value network for DDPG/D3PG: Q(obs, action) -> one scalar per sample.

    The observation is embedded first; the action is concatenated after that
    embedding and fed through the output head.

    Fix: the original defined an ``init_`` lambda (orthogonal init helper)
    that was never applied to any layer — dead code removed; layers keep
    PyTorch's default initialization exactly as before.
    """

    def __init__(self, obs_size, act_size, hidden_size_1=128, hidden_size_2=128):
        super(DDPGCritic, self).__init__()
        self.obs_fc = nn.Sequential(
            nn.Linear(obs_size, hidden_size_1),
            nn.ReLU(),
        )
        self.out_fc = nn.Sequential(
            nn.Linear(hidden_size_1 + act_size, hidden_size_2),
            nn.ReLU(),
            nn.Linear(hidden_size_2, 1),
        )

    def forward(self, x, a):
        """Return Q-values of shape (batch, 1) for observations x and actions a."""
        obs_out = self.obs_fc(x)
        return self.out_fc(torch.cat([obs_out, a], dim=1))

    def sync_param(self, model):
        """Hard-copy all parameters/buffers from ``model`` into this network."""
        self.load_state_dict(model.state_dict())

    def alpha_sync_param(self, model, alpha=1e-3):
        """Polyak (soft) update: target <- (1 - alpha) * target + alpha * model."""
        assert isinstance(alpha, float)
        assert 0.0 < alpha <= 1.0
        model_state = model.state_dict()
        target_net_state = self.state_dict()
        for key, value in model_state.items():
            target_net_state[key] = (1. - alpha) * target_net_state[key] + alpha * value
        self.load_state_dict(target_net_state)
| 32.243902 | 94 | 0.598336 |
ace505aeafb3f0e7a8442a1bcc4bfced9b80cfae | 661 | py | Python | pdf.py | AlexNecro/NBarcoder | 73377b146fd500a9099badb4d3cd2f56938fecce | [
"Apache-2.0"
] | 1 | 2021-08-16T08:16:57.000Z | 2021-08-16T08:16:57.000Z | pdf.py | AlexNecro/NBarcoder | 73377b146fd500a9099badb4d3cd2f56938fecce | [
"Apache-2.0"
] | null | null | null | pdf.py | AlexNecro/NBarcoder | 73377b146fd500a9099badb4d3cd2f56938fecce | [
"Apache-2.0"
] | null | null | null | from pdf2image import convert_from_path, convert_from_bytes, pdfinfo_from_path, pdfinfo_from_bytes
def get_page_filename(filename, pagenum):
page = get_page(filename, pagenum)
newfilename = filename + "_" + str(pagenum) + ".png"
page.save(newfilename, "PNG")
return newfilename
def get_page(filename, pagenum):
pages = convert_from_path(filename, dpi=300, first_page=pagenum, last_page=pagenum)
return pages[0]
def get_pdfinfo_fromfiles(filenames):
result = []
for filename in filenames:
print(filename)
info = pdfinfo_from_path(filename)
result.append(info)
return result
| 28.73913 | 99 | 0.697428 |
ace5065cbf6b274a6dee294ee78836f7c489b674 | 255 | py | Python | gerrypy/routes.py | GerryPy/GerryPy | 285873e0993fae6657eaa1a5aa9803b87e7ec6bb | [
"MIT"
] | 3 | 2016-12-22T07:32:26.000Z | 2021-02-14T03:08:47.000Z | gerrypy/routes.py | GerryPy/GerryPy | 285873e0993fae6657eaa1a5aa9803b87e7ec6bb | [
"MIT"
] | 24 | 2017-01-04T00:11:23.000Z | 2017-01-12T02:04:01.000Z | gerrypy/routes.py | GerryPy/GerryPy | 285873e0993fae6657eaa1a5aa9803b87e7ec6bb | [
"MIT"
] | 2 | 2017-02-06T23:58:50.000Z | 2017-07-10T18:14:55.000Z | def includeme(config):
config.add_static_view(name='static', path='gerrypy:static')
config.add_static_view('json', path='gerrypy:views')
config.add_route('home', '/')
config.add_route('map', '/map')
config.add_route('about', '/about')
| 36.428571 | 64 | 0.67451 |
ace506f31ab50b1247b1d801160d0326137fe53e | 8,446 | py | Python | graphene_django_cud/mutations/batch_create.py | goderecho/graphene-django-cud | 690dcbd989d857575b77961d5dc749b1ed4aca82 | [
"MIT"
] | null | null | null | graphene_django_cud/mutations/batch_create.py | goderecho/graphene-django-cud | 690dcbd989d857575b77961d5dc749b1ed4aca82 | [
"MIT"
] | null | null | null | graphene_django_cud/mutations/batch_create.py | goderecho/graphene-django-cud | 690dcbd989d857575b77961d5dc749b1ed4aca82 | [
"MIT"
] | null | null | null | import warnings
from collections import OrderedDict
from typing import Iterable
import graphene
from django.db import transaction
from graphene import InputObjectType
from graphene.types.utils import yank_fields_from_attrs
from graphene.utils.str_converters import to_snake_case
from graphene_django.registry import get_global_registry
from graphql import GraphQLError
from graphene_django_cud.mutations.core import DjangoCudBase, DjangoCudBaseOptions
from graphene_django_cud.registry import get_type_meta_registry
from graphene_django_cud.util import get_input_fields_for_model
class DjangoBatchCreateMutationOptions(DjangoCudBaseOptions):
    # Name of an already-registered input type to reuse instead of generating
    # a new one (set via the `use_type_name` Meta argument).
    use_type_name = None
class DjangoBatchCreateMutation(DjangoCudBase):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
_meta=None,
model=None,
permissions=None,
login_required=None,
fields=(),
only_fields=(), # Deprecated in favor of `fields`
exclude=(),
exclude_fields=(), # Deprecated in favor of `exclude`
optional_fields=(),
required_fields=(),
auto_context_fields={},
return_field_name=None,
many_to_many_extras=None,
foreign_key_extras=None,
many_to_one_extras=None,
one_to_one_extras=None,
type_name=None,
use_type_name=None,
field_types=None,
custom_fields=None,
**kwargs,
):
registry = get_global_registry()
meta_registry = get_type_meta_registry()
model_type = registry.get_type_for_model(model)
if auto_context_fields is None:
auto_context_fields = {}
if many_to_one_extras is None:
many_to_one_extras = {}
if foreign_key_extras is None:
foreign_key_extras = {}
if many_to_many_extras is None:
many_to_many_extras = {}
if one_to_one_extras is None:
one_to_one_extras = {}
if custom_fields is None:
custom_fields = {}
assert model_type, f"Model type must be registered for model {model}"
if not return_field_name:
# Pluralize
return_field_name = to_snake_case(model.__name__) + "s"
if fields and only_fields:
raise Exception("Cannot set both `fields` and `only_fields` on a mutation")
if exclude and exclude_fields:
raise Exception(
"Cannot set both `exclude` and `exclude_fields` on a mutation"
)
if only_fields:
fields = only_fields
warnings.warn(
"`only_fields` is deprecated in favor of `fields`", DeprecationWarning
)
if exclude_fields:
exclude = exclude_fields
warnings.warn(
"`exclude_fields` is deprecated in favor of `exclude`",
DeprecationWarning,
)
if use_type_name:
input_type_name = use_type_name
InputType = registry.get_converted_field(input_type_name)
if not InputType:
raise GraphQLError(
f"Could not find input type with name {input_type_name}"
)
else:
input_type_name = type_name or f"BatchCreate{model.__name__}Input"
model_fields = get_input_fields_for_model(
model,
fields,
exclude,
tuple(auto_context_fields.keys()) + optional_fields,
required_fields,
many_to_many_extras,
foreign_key_extras,
many_to_one_extras,
one_to_one_extras=one_to_one_extras,
parent_type_name=input_type_name,
field_types=field_types,
)
for name, field in custom_fields.items():
model_fields[name] = field
InputType = type(input_type_name, (InputObjectType,), model_fields)
# Register meta-data
meta_registry.register(
input_type_name,
{
"auto_context_fields": auto_context_fields or {},
"optional_fields": optional_fields,
"required_fields": required_fields,
"many_to_many_extras": many_to_many_extras,
"many_to_one_extras": many_to_one_extras,
"foreign_key_extras": foreign_key_extras,
"one_to_one_extras": one_to_one_extras,
"field_types": field_types or {},
},
)
registry.register_converted_field(input_type_name, InputType)
arguments = OrderedDict(input=graphene.List(InputType, required=True))
output_fields = OrderedDict()
output_fields[return_field_name] = graphene.List(model_type)
if _meta is None:
_meta = DjangoBatchCreateMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.optional_fields = optional_fields
_meta.required_fields = required_fields
_meta.permissions = permissions
_meta.auto_context_fields = auto_context_fields
_meta.many_to_many_extras = many_to_many_extras
_meta.foreign_key_extras = foreign_key_extras
_meta.many_to_one_extras = many_to_one_extras
_meta.one_to_one_extras = one_to_one_extras
_meta.field_types = field_types or {}
_meta.InputType = InputType
_meta.input_type_name = input_type_name
_meta.login_required = login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
@classmethod
def get_permissions(cls, root, info, input) -> Iterable[str]:
return super().get_permissions(root, info, input)
@classmethod
def check_permissions(cls, root, info, input) -> None:
return super().check_permissions(root, info, input)
@classmethod
def before_mutate(cls, root, info, input):
return super().before_mutate(root, info, input)
@classmethod
def before_save(cls, root, info, input, created_objects):
return super().before_save(root, info, input, created_objects)
@classmethod
def after_mutate(cls, root, info, input, created_objs, return_data):
    # Hook run after the return payload is assembled, just before mutate()
    # returns. Delegates to the base implementation.
    return super().after_mutate(root, info, input, created_objs, return_data)
@classmethod
def after_create_obj(cls, root, info, input, obj, full_input):
    # Per-object hook run right after each instance is created inside
    # mutate(); return a replacement object, or None to keep ``obj``.
    return None
@classmethod
def validate(cls, root, info, input, full_input):
    # Hook: validate one entry of the batch (``input``) against the whole
    # batch (``full_input``); raise to abort the transaction.
    return super().validate(root, info, input, full_input)
@classmethod
def mutate(cls, root, info, input):
    """Create one model instance per entry in ``input`` atomically.

    Runs the before/after hooks around the batch, enforces login and
    permission checks, and returns the created objects under
    ``cls._meta.return_field_name``.
    """
    # Allow subclasses to rewrite the raw input before anything else runs.
    updated_input = cls.before_mutate(root, info, input)
    if updated_input:
        input = updated_input
    if cls._meta.login_required and not info.context.user.is_authenticated:
        raise GraphQLError("Must be logged in to access this mutation.")
    cls.check_permissions(root, info, input)
    Model = cls._meta.model
    model_field_values = {}  # NOTE(review): never used below — presumably leftover; confirm
    auto_context_fields = cls._meta.auto_context_fields or {}
    created_objs = []
    # All objects are created in one transaction so a failure on any single
    # entry rolls back the entire batch.
    with transaction.atomic():
        for data in input:
            cls.validate(root, info, data, input)
            obj = cls.create_obj(
                data,
                info,
                auto_context_fields,
                cls._meta.many_to_many_extras,
                cls._meta.foreign_key_extras,
                cls._meta.many_to_one_extras,
                cls._meta.one_to_one_extras,
                Model,
            )
            # Per-object hook may replace the freshly created object.
            new_obj = cls.after_create_obj(root, info, data, obj, input)
            if new_obj is not None:
                obj = new_obj
            created_objs.append(obj)
        # Batch-level hook may replace the whole list before commit.
        updated_objs = cls.before_save(root, info, input, created_objs)
        if updated_objs:
            created_objs = updated_objs
    return_data = {cls._meta.return_field_name: created_objs}
    cls.after_mutate(root, info, input, created_objs, return_data)
    return cls(**return_data)
| 34.056452 | 87 | 0.623017 |
ace508881b2498768582a7cc2b916c3c6d79b4f3 | 86 | py | Python | tests/test_version.py | cariad/cfp | 43336c45cf56abb7e037fa2f139ea86e679e77b0 | [
"MIT"
] | null | null | null | tests/test_version.py | cariad/cfp | 43336c45cf56abb7e037fa2f139ea86e679e77b0 | [
"MIT"
] | 6 | 2021-11-08T08:42:10.000Z | 2021-12-16T14:17:58.000Z | tests/test_version.py | cariad/cfp | 43336c45cf56abb7e037fa2f139ea86e679e77b0 | [
"MIT"
] | null | null | null | import cfp
def test_get_version() -> None:
    """The package exposes its placeholder version string as ``__version__``."""
    expected = "-1.-1.-1"
    assert cfp.__version__ == expected
| 14.333333 | 40 | 0.639535 |
ace509bee03341621f6fb91f2f66df1ee46b1f36 | 4,550 | py | Python | research/cv/mae/src/models/metric.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/mae/src/models/metric.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/mae/src/models/metric.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""metric"""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.communication.management import GlobalComm
class ClassifyCorrectWithCache(nn.Cell):
    """Eval cell that caches the whole eval dataset on-device.

    The dataset is materialized once in ``__init__`` into non-trainable
    Parameters, so ``construct`` can iterate over it without host-side data
    feeding; the local correct-prediction count is summed across devices
    with AllReduce.
    """

    def __init__(self, network, eval_dataset):
        super(ClassifyCorrectWithCache, self).__init__(auto_prefix=False)
        self._network = network
        self.argmax = P.Argmax()
        self.equal = P.Equal()
        self.cast = P.Cast()
        self.reduce_sum = P.ReduceSum()
        self.allreduce = P.AllReduce(P.ReduceOp.SUM, GlobalComm.WORLD_COMM_GROUP)
        self.assign_add = P.AssignAdd()
        self.assign = P.Assign()
        # Running count of correct predictions; reset at the top of construct.
        self._correct_num = Parameter(Tensor(0.0, mstype.float32), name="correct_num", requires_grad=False)
        # save data to parameter: pull every batch to host arrays first...
        pdata = []
        plabel = []
        step_num = 0
        for batch in eval_dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
            pdata.append(batch["image"])
            plabel.append(batch["label"])
            step_num = step_num + 1
        # ...then store them as device-resident, non-trainable Parameters.
        pdata = Tensor(np.array(pdata), mstype.float32)
        plabel = Tensor(np.array(plabel), mstype.int32)
        self._data = Parameter(pdata, name="pdata", requires_grad=False)
        self._label = Parameter(plabel, name="plabel", requires_grad=False)
        self._step_num = Tensor(step_num, mstype.int32)

    def construct(self, index):
        # NOTE(review): plain assignment rebinds the attribute (a Parameter
        # above) to the int 0 — presumably intended as a counter reset;
        # confirm this behaves as expected under graph compilation.
        self._correct_num = 0
        # Walk the cached batches starting at ``index``.
        while index < self._step_num:
            data = self._data[index]
            label = self._label[index]
            outputs = self._network(data)
            y_pred = self.argmax(outputs)
            y_pred = self.cast(y_pred, mstype.int32)
            y_correct = self.equal(y_pred, label)
            y_correct = self.cast(y_correct, mstype.float32)
            y_correct_sum = self.reduce_sum(y_correct)
            self._correct_num += y_correct_sum #self.assign(self._correct_num, y_correct_sum)
            index = index + 1
        # Sum the local correct count over every device in the group.
        total_correct = self.allreduce(self._correct_num)
        return total_correct
class ClassifyCorrectCell(nn.Cell):
    """Counts correct top-1 predictions for one batch, summed over devices."""

    def __init__(self, network):
        super(ClassifyCorrectCell, self).__init__(auto_prefix=False)
        self._network = network
        self.cast = P.Cast()
        self.equal = P.Equal()
        self.argmax = P.Argmax()
        self.reduce_sum = P.ReduceSum()
        self.allreduce = P.AllReduce(P.ReduceOp.SUM, GlobalComm.WORLD_COMM_GROUP)

    def construct(self, data, label):
        # Forward pass, then compare the arg-max class against the label.
        logits = self._network(data)
        predictions = self.cast(self.argmax(logits), mstype.int32)
        hits = self.cast(self.equal(predictions, label), mstype.float32)
        local_correct = self.reduce_sum(hits)
        # Aggregate the per-device counts across the whole group.
        group_correct = self.allreduce(local_correct)
        return (group_correct,)
class DistAccuracy(nn.Metric):
    """Accuracy metric fed by the all-reduced correct counts.

    ``update`` receives the single tensor produced by ClassifyCorrectCell
    (the group-wide correct count for one step).
    """

    def __init__(self, batch_size, device_num):
        super(DistAccuracy, self).__init__()
        self.clear()
        self.batch_size = batch_size
        self.device_num = device_num

    def clear(self):
        # Reset the accumulated counts between evaluations.
        self._correct_num = 0
        self._total_num = 0

    def update(self, *inputs):
        if len(inputs) != 1:
            raise ValueError('Distribute accuracy needs 1 input (y_correct), but got {}'.format(len(inputs)))
        y_correct = self._convert_data(inputs[0])
        self._correct_num += y_correct
        # Each step covers batch_size samples on each of device_num devices.
        self._total_num += self.batch_size * self.device_num

    def eval(self):
        if self._total_num == 0:
            raise RuntimeError('Accuracy can not be calculated, because the number of samples is 0.')
        # NOTE(review): divides by a hard-coded 50000 (the ImageNet-1k
        # validation-set size) instead of the accumulated self._total_num —
        # confirm this is intentional before reusing on another dataset.
        return self._correct_num / 50000
| 39.224138 | 109 | 0.654066 |
ace50b4140ed521c50e71a02898233f1e39887e4 | 253 | py | Python | models/__init__.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | null | null | null | models/__init__.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | null | null | null | models/__init__.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | 1 | 2021-07-28T11:18:00.000Z | 2021-07-28T11:18:00.000Z | from .callbacks import CheckpointCallback
from .mlp import get_simple_classifier_mlp, get_regularised_bn_dropout_classifier_mlp, get_simple_mutli_regressor_mlp, get_regularised_bn_dropout_mutli_regressor_mlp
from .process_dataset import process_dataset
| 63.25 | 165 | 0.916996 |
ace50be24164a92a539231fc5b28d8705828d87e | 4,578 | py | Python | keras/integration_test/preprocessing_test_utils.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 37,222 | 2017-12-13T00:52:55.000Z | 2022-03-31T22:34:35.000Z | keras/integration_test/preprocessing_test_utils.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 7,624 | 2017-12-13T01:03:40.000Z | 2022-03-31T23:57:24.000Z | keras/integration_test/preprocessing_test_utils.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 14,914 | 2017-12-13T02:30:46.000Z | 2022-03-30T14:49:16.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities for our Keras preprocessing integration tests."""
import os
import tensorflow.compat.v2 as tf
# Short alias: the preprocessing layers are referenced repeatedly below.
preprocessing = tf.keras.layers
# Dataset sizing for the integration tests.
BATCH_SIZE = 64
DS_SIZE = BATCH_SIZE * 16  # 1024 samples total
# NOTE(review): true division yields a float (16.0); confirm callers accept
# a float step count.
STEPS = DS_SIZE / BATCH_SIZE
VOCAB_SIZE = 100  # number of distinct int IDs / string terms generated
def make_dataset():
    """Build a small structured dataset with three feature columns.

    Columns:
      float_col: unnormalized numeric values in [0, 10).
      int_col: integer IDs spread over a wide range (multiples of 1000),
        showcasing a typical IntegerLookup use case.
      string_col: stringified integers "0".."99", a fixed vocabulary that
        can be loaded from a file.

    Returns:
      A tf.data.Dataset of (features dict, binary label) pairs.
    """
    tf.random.set_seed(197011)
    float_col = tf.random.uniform((DS_SIZE, 1), maxval=10, dtype="float32")
    # 100 unique IDs, scaled so the values span a wide range.
    int_col = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64") * 1000
    string_col = tf.strings.as_string(
        tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64"))
    features = {
        "float_col": float_col,
        "int_col": int_col,
        "string_col": string_col,
    }
    # Random binary label.
    labels = tf.random.uniform((DS_SIZE, 1), maxval=2, dtype="int64")
    return tf.data.Dataset.from_tensor_slices((features, labels))
def make_preprocessing_model(file_dir):
    """Build a standalone model that adapts and applies all preprocessing.

    Args:
      file_dir: Directory in which the string vocabulary file is written.

    Returns:
      A Keras model mapping the raw (float, int, string) inputs to their
      normalized / index-looked-up forms.
    """
    # Input names must match the feature keys produced by make_dataset().
    raw_float = tf.keras.Input(shape=(1,), dtype="float32", name="float_col")
    raw_int = tf.keras.Input(shape=(1,), dtype="int64", name="int_col")
    raw_string = tf.keras.Input(shape=(1,), dtype="string", name="string_col")

    # Adapting requires a batched dataset.
    adapt_ds = make_dataset().batch(BATCH_SIZE)

    # Learn mean/variance of the float column and normalize it.
    normalization = preprocessing.Normalization()
    normalization.adapt(adapt_ds.map(lambda features, labels: features["float_col"]))
    normalized_float = normalization(raw_float)

    # Learn the integer ID vocabulary by adapting over the int column.
    int_lookup = preprocessing.IntegerLookup()
    int_lookup.adapt(adapt_ds.map(lambda features, labels: features["int_col"]))
    indexed_int = int_lookup(raw_int)

    # Write a fixed vocabulary to disk and look strings up from the file.
    string_vocab = list(str(i) for i in range(VOCAB_SIZE))
    vocab_file = os.path.join(file_dir, "vocab_file.txt")
    with open(vocab_file, "w") as f:
        f.write("\n".join(string_vocab))
    string_lookup = preprocessing.StringLookup(vocabulary=vocab_file)
    indexed_string = string_lookup(raw_string)

    return tf.keras.Model(
        inputs=(raw_float, raw_int, raw_string),
        outputs=(normalized_float, indexed_int, indexed_string))
def make_training_model():
    """Make a trainable model for the preprocessed inputs.

    Returns:
      A Keras model that embeds the looked-up int/string features, joins
      them with the normalized float feature, and predicts the binary label.
    """
    float_in = tf.keras.Input(shape=(1,), dtype="float32", name="float_col")
    # After preprocessing, both the string and int column are integer ready for
    # embedding.
    int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col")
    string_in = tf.keras.Input(shape=(1,), dtype="int64", name="string_col")

    # Feed the lookup outputs into embeddings (+1 slot for the OOV index).
    int_embedding = tf.keras.layers.Embedding(VOCAB_SIZE + 1, 8, input_length=1)
    int_out = int_embedding(int_in)
    int_out = tf.keras.layers.Flatten()(int_out)
    string_embedding = tf.keras.layers.Embedding(
        VOCAB_SIZE + 1, 8, input_length=1)
    string_out = string_embedding(string_in)
    string_out = tf.keras.layers.Flatten()(string_out)

    # Concatenate all features and feed them into a simple MLP.
    concatenate = tf.keras.layers.Concatenate()
    x = concatenate((float_in, int_out, string_out))
    x = tf.keras.layers.Dense(32, activation="relu")(x)
    x = tf.keras.layers.Dense(32, activation="relu")(x)
    # Bug fix: the original applied softmax to a single unit, which always
    # outputs 1.0 regardless of input; sigmoid is the correct activation for
    # a single binary-probability output.
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
    return tf.keras.Model(inputs=(float_in, int_in, string_in), outputs=outputs)
| 41.243243 | 80 | 0.718654 |
ace50cfc3b79055ec361a2f09aa7f33212b609df | 3,692 | py | Python | selfdrive/controls/lib/latcontrol_lqr.py | gomtings/for_NEXO | 8274e4569d96d67d18d458fba48a3254772ea8e0 | [
"MIT"
] | 28 | 2021-05-02T07:38:23.000Z | 2022-03-23T16:10:55.000Z | selfdrive/controls/lib/latcontrol_lqr.py | gomtings/for_NEXO | 8274e4569d96d67d18d458fba48a3254772ea8e0 | [
"MIT"
] | 51 | 2021-05-08T10:10:05.000Z | 2022-02-13T14:11:09.000Z | selfdrive/controls/lib/latcontrol_lqr.py | gomtings/for_NEXO | 8274e4569d96d67d18d458fba48a3254772ea8e0 | [
"MIT"
] | 188 | 2021-04-19T14:01:14.000Z | 2022-03-21T02:32:34.000Z | import math
import numpy as np
from common.numpy_fast import clip
from common.realtime import DT_CTRL
from cereal import log
from selfdrive.controls.lib.drive_helpers import get_steer_max
from selfdrive.ntune import nTune
class LatControlLQR():
    """LQR-based lateral (steering) controller with an integral term.

    A two-state steering model (matrices A, B, C) is advanced every step and
    corrected toward the measured steering angle via the observer gain L;
    the LQR feedback gain K plus a slow anti-windup integrator produce the
    commanded steer torque.
    """

    def __init__(self, CP):
        # Controller/observer matrices come from the car-specific LQR tuning.
        self.scale = CP.lateralTuning.lqr.scale
        self.ki = CP.lateralTuning.lqr.ki

        self.A = np.array(CP.lateralTuning.lqr.a).reshape((2, 2))
        self.B = np.array(CP.lateralTuning.lqr.b).reshape((2, 1))
        self.C = np.array(CP.lateralTuning.lqr.c).reshape((1, 2))
        self.K = np.array(CP.lateralTuning.lqr.k).reshape((1, 2))
        self.L = np.array(CP.lateralTuning.lqr.l).reshape((2, 1))
        self.dc_gain = CP.lateralTuning.lqr.dcGain

        self.x_hat = np.array([[0], [0]])  # observer state estimate
        # Per-step (DT_CTRL) rates for integrator unwind/accumulation and
        # the saturation debounce counter.
        self.i_unwind_rate = 0.3 * DT_CTRL
        self.i_rate = 1.0 * DT_CTRL
        self.sat_count_rate = 1.0 * DT_CTRL
        self.sat_limit = CP.steerLimitTimer

        self.reset()
        self.tune = nTune(CP, self)

    def reset(self):
        """Zero the integrator and the saturation counter."""
        self.i_lqr = 0.0
        self.sat_count = 0.0

    def _check_saturation(self, control, check_saturation, limit):
        # Debounce saturation: only report saturated after the output has
        # been pinned at the torque limit longer than sat_limit.
        saturated = abs(control) == limit

        if saturated and check_saturation:
            self.sat_count += self.sat_count_rate
        else:
            self.sat_count -= self.sat_count_rate

        self.sat_count = clip(self.sat_count, 0.0, 1.0)

        return self.sat_count > self.sat_limit

    def update(self, active, CS, CP, VM, params, last_actuators, desired_curvature, desired_curvature_rate):
        """Run one control step; returns (steer torque, desired angle deg, log)."""
        self.tune.check()
        lqr_log = log.ControlsState.LateralLQRState.new_message()

        steers_max = get_steer_max(CP, CS.vEgo)
        #torque_scale = (0.45 + CS.vEgo / 60.0)**2  # Scale actuator model with speed
        # Speed-dependent actuator scaling (retuned from the form above).
        torque_scale = (0.13 + CS.vEgo / 60.0)**0.8

        # Subtract offset. Zero angle should correspond to zero torque
        steering_angle_no_offset = CS.steeringAngleDeg - params.angleOffsetAverageDeg

        desired_angle = math.degrees(VM.get_steer_from_curvature(-desired_curvature, CS.vEgo, params.roll))

        instant_offset = params.angleOffsetDeg - params.angleOffsetAverageDeg
        desired_angle += instant_offset  # Only add offset that originates from vehicle model errors
        lqr_log.steeringAngleDesiredDeg = desired_angle

        # Update Kalman filter: propagate the model with the applied torque,
        # then correct with the measurement error via the observer gain L.
        angle_steers_k = float(self.C.dot(self.x_hat))
        e = steering_angle_no_offset - angle_steers_k
        self.x_hat = self.A.dot(self.x_hat) + self.B.dot(CS.steeringTorqueEps / torque_scale) + self.L.dot(e)

        if CS.vEgo < 0.3 or not active:
            # Too slow or controls disengaged: zero output and reset state.
            lqr_log.active = False
            lqr_output = 0.
            output_steer = 0.
            self.reset()
        else:
            lqr_log.active = True

            # LQR state feedback plus dc-gain feedforward on the setpoint.
            u_lqr = float(desired_angle / self.dc_gain - self.K.dot(self.x_hat))
            lqr_output = torque_scale * u_lqr / self.scale

            # Integrator
            if CS.steeringPressed:
                # Driver override: bleed the integrator toward zero.
                self.i_lqr -= self.i_unwind_rate * float(np.sign(self.i_lqr))
            else:
                error = desired_angle - angle_steers_k
                i = self.i_lqr + self.ki * self.i_rate * error
                control = lqr_output + i

                # Anti-windup: only accept the new integrator value when it
                # does not push the output further past the torque limit.
                if (error >= 0 and (control <= steers_max or i < 0.0)) or \
                   (error <= 0 and (control >= -steers_max or i > 0.0)):
                    self.i_lqr = i

            output_steer = lqr_output + self.i_lqr
            output_steer = clip(output_steer, -steers_max, steers_max)

        # Saturation only counts at speed with no driver/rate limiting.
        check_saturation = (CS.vEgo > 10) and not CS.steeringRateLimited and not CS.steeringPressed
        saturated = self._check_saturation(output_steer, check_saturation, steers_max)

        lqr_log.steeringAngleDeg = angle_steers_k
        lqr_log.i = self.i_lqr
        lqr_log.output = output_steer
        lqr_log.lqrOutput = lqr_output
        lqr_log.saturated = saturated
        return output_steer, desired_angle, lqr_log
ace50d638aeb1a8f3eb7163e0642f34ac3c48e69 | 1,987 | py | Python | server/contests/auth/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 15 | 2019-02-27T19:28:23.000Z | 2019-07-20T17:54:46.000Z | server/contests/auth/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 9 | 2020-09-04T18:30:56.000Z | 2022-03-25T18:41:11.000Z | server/contests/auth/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 2 | 2019-03-29T14:15:47.000Z | 2019-04-12T06:08:11.000Z | from typing import Iterable, NamedTuple
from contests.models import User
from .core import sign_in, \
sign_up, \
forgot_password, \
reset_password, \
get_me, \
get_user_by_username, \
get_user_by_email, \
get_user_by_id, \
get_all_users
class SignInResult(NamedTuple):
    """Result of a successful sign-in: the user plus an auth token."""
    user: User
    token: str
class UserInput(NamedTuple):
    """Sign-up form payload for a new user account."""
    username: str
    name: str
    password: str
class SignUpResult(NamedTuple):
    """Result of a successful sign-up: the new user plus an auth token."""
    user: User
    token: str
def user_permissions_resolver(root: User, _info) -> Iterable[str]:
    """Resolve a user's permissions to their permission-code strings.

    Idiom fix: a generator expression replaces ``map`` with a lambda
    (PEP 289 style); the result is still a lazy iterable of codes.
    """
    return (permission.code for permission in root.permissions.all())
def sign_in_mutate(_self, _info, user_id: int, password: str) -> SignInResult:
    """Authenticate a user and return the user together with an auth token."""
    authenticated_user, auth_token = sign_in(user_id, password)
    return SignInResult(user=authenticated_user, token=auth_token)
def sign_up_mutate(_self, _info,
                   user_id: int,
                   user: UserInput,
                   signup_code: str) -> SignUpResult:
    """Register a new account and return the created user plus an auth token."""
    created_user, auth_token = sign_up(
        user_id, user.username, user.name, user.password, signup_code)
    return SignUpResult(user=created_user, token=auth_token)
def forgot_password_mutate(_self, _info, user_id: int) -> User:
    # Start the forgot-password flow for the user; presumably issues the
    # one-time code later consumed by reset_passwod_mutate — confirm in core.
    return forgot_password(user_id)
def reset_passwod_mutate(_self, _info,
                         user_id: int,
                         reset_password_otc: str,
                         new_password: str) -> User:
    # Reset a user's password using a one-time code (otc).
    # NOTE(review): the name is misspelled ("passwod"); renaming would break
    # existing references to this resolver, so it is left as-is.
    return reset_password(user_id, reset_password_otc, new_password)
def me_resolver(_root, info) -> User:
    # Resolve the currently authenticated user from the request context.
    return get_me(info.context)
def user_resolver(_root, _info, user_id: int) -> User:
    # Look up a single user by primary key.
    return get_user_by_id(user_id)
def users_resolver(_root, _info) -> Iterable[User]:
    # Resolve the full user list.
    return get_all_users()
def user_by_username_resolver(_root, _info, contest_id: int, username: str) -> User:
    # Look up a single user within a contest by username.
    return get_user_by_username(contest_id, username)
def user_by_email_resolver(_root, _info, contest_id: int, email: str) -> User:
    # Look up a single user within a contest by email address.
    return get_user_by_email(contest_id, email)
| 25.474359 | 84 | 0.673377 |
ace50df4a0804a84aba04359b33c21b694b9a509 | 2,133 | py | Python | guestbook.py | AppScale/guestbook | 35da0b652d213eba7b1ea18773a0e63cacfb03bf | [
"Apache-2.0"
] | null | null | null | guestbook.py | AppScale/guestbook | 35da0b652d213eba7b1ea18773a0e63cacfb03bf | [
"Apache-2.0"
] | null | null | null | guestbook.py | AppScale/guestbook | 35da0b652d213eba7b1ea18773a0e63cacfb03bf | [
"Apache-2.0"
] | 1 | 2021-06-08T09:46:09.000Z | 2021-06-08T09:46:09.000Z | import datetime
import jinja2
import os
import time
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
# We set a parent key on the 'Greetings' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def guestbook_key(guestbook_name='default_guestbook'):
    """Return the ancestor ndb.Key for the given guestbook name."""
    return ndb.Key('Guestbook', guestbook_name)
# Module-level Jinja2 environment: templates load from this file's directory,
# with autoescaping enabled to guard against HTML injection.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class Greeting(ndb.Model):
    """Datastore model for a single guestbook entry."""
    author = ndb.UserProperty()  # set only when the poster is signed in
    content = ndb.StringProperty(indexed=False)  # free-form message text
    date = ndb.DateTimeProperty(auto_now_add=True)  # set once on creation
class MainPage(webapp2.RequestHandler):
    """Renders the ten newest greetings plus a login/logout link."""

    def get(self):
        # Ancestor query keeps results strongly consistent within the group.
        query = Greeting.query(ancestor=guestbook_key()).order(-Greeting.date)
        recent_greetings = query.fetch(10)

        if users.get_current_user():
            auth_url = users.create_logout_url(self.request.uri)
            auth_linktext = 'Logout'
        else:
            auth_url = users.create_login_url(self.request.uri)
            auth_linktext = 'Login'

        template = jinja_environment.get_template('index.html')
        rendered = template.render(greetings=recent_greetings,
                                   url=auth_url,
                                   url_linktext=auth_linktext)
        self.response.out.write(rendered)
class WaitHandler(webapp2.RequestHandler):
    """Blocks for 30 seconds, then responds with 'done'."""

    def get(self):
        # Deliberate delay — presumably used to exercise long-running request
        # handling; confirm before changing the duration.
        time.sleep(30)
        self.response.out.write('done')
class Guestbook(webapp2.RequestHandler):
    """Accepts a greeting submission and redirects back to the main page."""

    def post(self):
        new_greeting = Greeting(parent=guestbook_key())
        current_user = users.get_current_user()
        if current_user:
            # Record the author only for signed-in posters.
            new_greeting.author = current_user
        new_greeting.content = self.request.get('content')
        new_greeting.put()
        self.redirect('/')
# URL routing table for the WSGI application; debug=True surfaces stack
# traces in HTTP responses.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/wait', WaitHandler),
    ('/sign', Guestbook),
], debug=True)
| 31.367647 | 89 | 0.644632 |
ace50eb9e1b835af185bbef8e0e1241023af6d2b | 62 | py | Python | HSTB/kluster/__version__.py | billshi-NOAA/kluster | d01da2427b28717a197091bb4dd7aee87b5bd49d | [
"CC0-1.0"
] | null | null | null | HSTB/kluster/__version__.py | billshi-NOAA/kluster | d01da2427b28717a197091bb4dd7aee87b5bd49d | [
"CC0-1.0"
] | null | null | null | HSTB/kluster/__version__.py | billshi-NOAA/kluster | d01da2427b28717a197091bb4dd7aee87b5bd49d | [
"CC0-1.0"
] | null | null | null | VERSION = (0, 5, 2)
__version__ = '.'.join(map(str, VERSION))
| 20.666667 | 41 | 0.612903 |
ace50eed1267520b13f49450b2e4bbdd2c53327c | 2,077 | py | Python | cride/circles/migrations/0001_initial.py | AlexisLoya/cride-django | 04a8617093bea5de07aa6398d650116e2e6683ab | [
"MIT"
] | null | null | null | cride/circles/migrations/0001_initial.py | AlexisLoya/cride-django | 04a8617093bea5de07aa6398d650116e2e6683ab | [
"MIT"
] | 3 | 2021-05-24T18:17:14.000Z | 2021-05-24T18:18:44.000Z | cride/circles/migrations/0001_initial.py | AlexisLoya/cride-django | 04a8617093bea5de07aa6398d650116e2e6683ab | [
"MIT"
] | null | null | null | # Generated by Django 2.0.10 on 2021-05-07 19:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``Circle`` model.

    Do not hand-edit generated field definitions; create a follow-up
    migration for schema changes instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Circle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
                ('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
                ('name', models.CharField(max_length=140, verbose_name='circle name')),
                ('slug_name', models.SlugField(max_length=40, unique=True)),
                ('about', models.CharField(max_length=255, verbose_name='circle desscription')),
                ('picture', models.ImageField(blank=True, null=True, upload_to='circles/pictures')),
                ('rides_offered', models.PositiveIntegerField(default=0)),
                ('rides_taken', models.PositiveIntegerField(default=0)),
                ('verified', models.BooleanField(default=False, help_text='Verified circles are also known as official communities.', verbose_name='verified circle')),
                ('is_public', models.BooleanField(default=True, help_text='Public circle are listed in the main page so everyone know about their existence.')),
                ('is_limited', models.BooleanField(default=True, help_text='Limited circles can grow up to a fixed number of members.', verbose_name='limited')),
                ('members_limit', models.PositiveIntegerField(default=0, help_text='If circle is limited, the will be the limit on the number of members.')),
            ],
            options={
                'ordering': ['-rides_taken', '-rides_offered'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
    ]
| 54.657895 | 167 | 0.63168 |
ace50f306fe42dbcec0122052db5b5116b682ef8 | 405 | py | Python | myspider/exceptions.py | Ilcyb/MySpider | a9894f5e80720bd747f59c1e82dc88b5b234d903 | [
"0BSD"
] | null | null | null | myspider/exceptions.py | Ilcyb/MySpider | a9894f5e80720bd747f59c1e82dc88b5b234d903 | [
"0BSD"
] | 1 | 2021-06-01T22:21:12.000Z | 2021-06-01T22:21:12.000Z | myspider/exceptions.py | Ilcyb/MySpider | a9894f5e80720bd747f59c1e82dc88b5b234d903 | [
"0BSD"
] | null | null | null | class SpiderError(Exception):
pass
class SaveCookiesError(SpiderError):
    """A save cookie error occurred"""
    # Idiom fix: docstring alone is a valid body; redundant ``pass`` removed.
class LoadCookiesError(SpiderError):
    """A load cookie error occurred"""
    # Idiom fix: docstring alone is a valid body; redundant ``pass`` removed.
class NotDirectoryError(SpiderError):
    """A not directory error occurred"""
    # Idiom fix: docstring alone is a valid body; redundant ``pass`` removed.
class DirectoryNotExistsError(SpiderError):
    """A directory not exists error occurred"""
    # Idiom fix: docstring alone is a valid body; redundant ``pass`` removed.
| 18.409091 | 47 | 0.708642 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.