blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68128ddef976c370dc87279299e61abaac08163a | 3f0410647ec6f7f597bb2adcc169b30c85c154bf | /dataduct/pipeline/default_object.py | 78d7251db70eb9c26c751ed787d2229b1fd9ebf8 | [
"Apache-2.0"
] | permissive | EverFi/dataduct | 5485239cc72a6aee4145634bf95a1dc5e67b28cd | 797cb719e6c2abeda0751ada3339c72bfb19c8f2 | refs/heads/staging | 2023-07-20T14:59:00.342480 | 2023-03-21T21:11:56 | 2023-03-21T21:11:56 | 96,341,718 | 3 | 0 | NOASSERTION | 2023-07-12T20:29:12 | 2017-07-05T16:54:48 | Python | UTF-8 | Python | false | false | 1,403 | py | """
Pipeline object class for default metadata
"""
from ..config import Config
from ..utils import constants as const
from .pipeline_object import PipelineObject
config = Config()
ROLE = config.etl['ROLE']
RESOURCE_ROLE = config.etl['RESOURCE_ROLE']
MAX_ACTIVE_INSTANCES = config.etl.get('MAX_ACTIVE_INSTANCES', const.ONE)
class DefaultObject(PipelineObject):
    """Default object attached to every pipeline.

    Wraps the AWS Data Pipeline ``Default`` object, pulling IAM roles and
    the max-active-instances limit from the project configuration.
    """

    def __init__(self, id, pipeline_log_uri, sns=None, scheduleType='cron',
                 failureAndRerunMode='CASCADE', **kwargs):
        """Constructor for the DefaultObject class

        Args:
            id(str): accepted for interface compatibility; the object id
                is always forced to 'Default'
            pipeline_log_uri(str): S3 URI where pipeline logs are written
            sns(sns): notify on failure
            scheduleType(str): frequency type for the pipeline
            failureAndRerunMode(str): aws input argument for failure mode
            **kwargs(optional): accepted but not forwarded to the base class

        Note:
            id must be Default for this object
        """
        default_fields = {
            'id': 'Default',  # This should always have the default id
            'scheduleType': scheduleType,
            'failureAndRerunMode': failureAndRerunMode,
            'role': ROLE,
            'resourceRole': RESOURCE_ROLE,
            'maxActiveInstances': MAX_ACTIVE_INSTANCES,
            'pipelineLogUri': pipeline_log_uri,
            'onFail': sns,
        }
        super(DefaultObject, self).__init__(**default_fields)
| [
"sb2nov@gmail.com"
] | sb2nov@gmail.com |
03e6521dbdeeba4a2d45ddaf701bf0485213ebac | d4df3c14cea021ab95dc208e915e88383f3c7371 | /Payload_Type/poseidon/mythic/agent_functions/jsimport.py | 755a354107c0d6348d572365a4a32d263f0633cf | [] | no_license | a32567901/poseidon | 79537ac7f082698137c7f77c746ecdc42ddc89f9 | 6c7d0bf52b67a952fa35a821d7c2d3e5a35cafd7 | refs/heads/master | 2023-06-18T06:25:17.575943 | 2021-07-12T20:44:03 | 2021-07-12T20:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from mythic_payloadtype_container.MythicCommandBase import *
import base64
import sys
import json
from mythic_payloadtype_container.MythicRPC import *
class JsImportArguments(TaskArguments):
    """Argument container for the ``jsimport`` command.

    Exposes a single file parameter holding the JXA script the agent
    should load into memory.
    """

    def __init__(self, command_line):
        super(JsImportArguments, self).__init__(command_line)
        script_param = CommandParameter(
            name="JXA Script to Load",
            type=ParameterType.File,
            description="Select the JXA Script to load into memory",
            ui_position=1
        )
        self.args = {"file_id": script_param}

    async def parse_arguments(self):
        # Tasking always arrives as a JSON blob from the Mythic UI.
        self.load_args_from_json_string(self.command_line)
class JsImportCommand(CommandBase):
    """Mythic command definition for ``jsimport``.

    Registers an uploaded JXA script as a Mythic file object so that a
    later ``jsimport_call`` tasking can execute it from memory on the
    macOS agent.
    """
    cmd = "jsimport"
    needs_admin = False
    help_cmd = "jsimport"
    description = "Upload a script into memory for use with jsimport_call"
    version = 1
    author = "@its_a_feature_"
    argument_class = JsImportArguments
    attributes = CommandAttributes(
        # uncomment when poseidon can dynamically compile commands
        supported_os=[SupportedOS.MacOS]
    )
    attackmapping = []

    async def create_tasking(self, task: MythicTask) -> MythicTask:
        """Register the uploaded script with Mythic and rewrite the task args.

        On success the raw file bytes in ``file_id`` are replaced with the
        server-side agent file id; on failure the RPC error is re-raised.
        """
        # File name as submitted through the UI, before the argument is
        # rewritten with the registered agent file id below.
        original_file_name = json.loads(task.original_params)["JXA Script to Load"]
        response = await MythicRPC().execute("create_file", task_id=task.id,
                                             file=base64.b64encode(task.args.get_arg("file_id")).decode(),
                                             saved_file_name=original_file_name,
                                             delete_after_fetch=True,
                                             )
        if response.status == MythicStatus.Success:
            # Swap raw file contents for the file id the agent will fetch.
            task.args.add_arg("file_id", response.response["agent_file_id"])
            task.display_params = "script " + original_file_name
        else:
            raise Exception("Error from Mythic: " + response.error)
        return task
async def process_response(self, response: AgentResponse):
pass | [
"codybthomas@gmail.com"
] | codybthomas@gmail.com |
9ffe4224c93a6d260380414f96e07e3b4b1def64 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pygments/lexers/smv.py | a4cbf9455e3d5cfbee0f1fbdae8b7da6a7f39a21 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 2,773 | py | """
pygments.lexers.smv
~~~~~~~~~~~~~~~~~~~
Lexers for the SMV languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, Text
__all__ = ['NuSMVLexer']
class NuSMVLexer(RegexLexer):
    """
    Lexer for the NuSMV language.

    Token rules are tried top to bottom, so keyword/operator word lists
    must appear before the generic identifier rule.

    .. versionadded:: 2.2
    """

    name = 'NuSMV'
    aliases = ['nusmv']
    filenames = ['*.smv']
    mimetypes = []

    tokens = {
        'root': [
            # Comments
            (r'(?s)\/\-\-.*?\-\-/', Comment),
            (r'--.*\n', Comment),

            # Reserved
            # NuSMV identifiers may contain $, # and -, hence the
            # (?![\w$#-]) suffix guarding whole-word matches throughout.
            (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
                    'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
                    'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
                    'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
                    'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
                    'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
                    'PREDICATES'), suffix=r'(?![\w$#-])'),
             Keyword.Declaration),
            (r'process(?![\w$#-])', Keyword),
            (words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
                   suffix=r'(?![\w$#-])'), Keyword.Type),
            (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
            # Built-in functions and conversion operators
            (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
                    'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
                    'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
             Name.Builtin),
            # Temporal (CTL/LTL) and word-level operators
            (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
                    'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
                    'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
                    'xnor'), suffix=r'(?![\w$#-])'),
             Operator.Word),
            (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),

            # Names
            (r'[a-zA-Z_][\w$#-]*', Name.Variable),

            # Operators
            (r':=', Operator),
            (r'[-&|+*/<>!=]', Operator),

            # Literals (sized word constants like 0ub8_11010001)
            (r'\-?\d+\b', Number.Integer),
            (r'0[su][bB]\d*_[01_]+', Number.Bin),
            (r'0[su][oO]\d*_[0-7_]+', Number.Oct),
            (r'0[su][dD]\d*_[\d_]+', Number.Decimal),
            (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),

            # Whitespace, punctuation and the rest
            (r'\s+', Text.Whitespace),
            (r'[()\[\]{};?:.,]', Punctuation),
        ],
    }
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
d6a19c14ed21fd7fff372aff3356a947433b7b86 | f543f74749ff6aa7731438cb1c33f01c7c6296b2 | /ZenPacks/community/zenSiebelCRM/routers.py | 5eda913bc6c7fa4119561975d408e0a962def296 | [
"Apache-2.0"
] | permissive | j053ph4/ZenPacks.community.zenSiebelCRM | 5ff2438cbf778a53b47dcf1d23fd068412232e41 | fcddf900ff0290fa646722060a40e315e857e439 | refs/heads/master | 2021-01-01T16:39:24.706506 | 2015-03-20T16:29:19 | 2015-03-20T16:29:19 | 2,608,202 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from ZenPacks.community.ConstructionKit.ClassHelper import *
class zenSiebelCRMRouter(ClassHelper.zenSiebelCRMRouter):
    """Ext.Direct router for the Siebel CRM ZenPack.

    Thin subclass of the router generated by ConstructionKit's
    ClassHelper; adds no behavior of its own.
    """
| [
"janderson@agero.com"
] | janderson@agero.com |
eca07f6edf533d18f3e673a87f6e9048c8363109 | f39f870107ebd13914220b862a62709f22cd778d | /src/runrex/schema.py | 375806ac12b85718f53d37e288fd3d4851050c2d | [
"MIT"
] | permissive | kpwhri/runrex | 36efd549009d4c3cc77a498934cdcb5f92748d8e | 68f7e67419cd6b87ed86d755a760b6c5fcbfb07d | refs/heads/master | 2023-04-02T07:06:59.226692 | 2023-03-24T18:51:46 | 2023-03-24T18:51:46 | 224,006,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,624 | py | import json
import jsonschema
try:
from ruamel import yaml
except ModuleNotFoundError:
yaml = False
JSON_SCHEMA = {
'type': 'object',
'properties': {
'corpus': {
'type': 'object',
'properties': {
'directory': {'type': 'string'},
'directories': {
'type': 'array',
'items': {'type': 'string'}
},
'version': {'type': 'string'}, # text or lemma
'connections': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'}, # database name; path to CSV file
'encoding': {'type': 'string'}, # for CSV file
'driver': {'type': 'string'},
'server': {'type': 'string'},
'database': {'type': 'string'},
'name_col': {'type': 'string'},
'text_col': {'type': 'string'}
}
}
},
}
},
'annotation': {
'type': 'object',
'properties': {
'file': {'type': 'string'}
}
},
'annotations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'file': {'type': 'string'}
}
}
},
'output': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'kind': {'type': 'string'}, # sql, csv, etc.
'path': {'type': 'string'},
'driver': {'type': 'string'},
'server': {'type': 'string'},
'database': {'type': 'string'},
'ignore': {'type': 'boolean'},
'encoding': {'type': 'string'},
}
},
'select': {
'type': 'object',
'properties': {
'start': {'type': 'number'},
'end': {'type': 'number'},
'encoding': {'type': 'string'},
'filenames': {
'type': 'array',
'items': {'type': 'string'}
}
}
},
'algorithm': {
'type': 'object',
'names': {
'type': 'array',
'items': {'type': 'string'}
}
},
'loginfo': {
'type': 'object',
'properties': {
'directory': {'type': 'string'},
'ignore': {'type': 'boolean'},
'encoding': {'type': 'string'},
'kind': {'type': 'string'},
}
},
'skipinfo': {
'type': 'object',
'properties': {
'path': {'type': 'string'},
'rebuild': {'type': 'boolean'},
'ignore': {'type': 'boolean'},
}
},
'logger': {
'type': 'object',
'properties': {
'verbose': {'type': 'boolean'}
}
}
}
}
def myexec(code):
import warnings
warnings.warn('Executing python external file: only do this if you trust it')
import sys
from io import StringIO
temp_stdout = sys.stdout
sys.stdout = StringIO()
try:
# try if this is a expression
ret = eval(code)
result = sys.stdout.getvalue()
if ret:
result = result + ret
except:
try:
exec(code)
except:
# you can use <traceback> module here
import traceback
buf = StringIO()
traceback.print_exc(file=buf)
error = buf.getvalue()
raise ValueError(error)
else:
result = sys.stdout.getvalue()
sys.stdout = temp_stdout
return result
def get_config(path):
with open(path) as fh:
if path.endswith('json'):
return json.load(fh)
elif path.endswith('yaml') and yaml:
return yaml.load(fh)
elif path.endswith('py'):
return eval(myexec(fh.read()))
else:
raise ValueError('Unrecognized configuration file type: {}'.format(path.split('.')[-1]))
def validate_config(path):
conf = get_config(path)
jsonschema.validate(conf, JSON_SCHEMA)
return conf
| [
"dcronkite@gmail.com"
] | dcronkite@gmail.com |
fae05f2b90f2dbd3d408b281c80207daa203395c | 8ed1430279ae52fd950dd0afe88549a100001e26 | /qa/rpc-tests/test_framework/key.py | a36d6d71e08f3b53918ffad15038dbf82f0d3830 | [
"MIT"
] | permissive | mirzaei-ce/core-najafbit | 9fb70dbd4e17ec1635d7b886db17f8aab3f592bb | 6de34210a9ba9cc3f21fee631bc1a1f4d12d445d | refs/heads/master | 2021-08-11T08:53:58.165742 | 2017-11-13T13:00:14 | 2017-11-13T13:00:14 | 110,548,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,367 | py | # Copyright (c) 2011 Sam Rushing
#
# key.py - OpenSSL wrapper
#
# This file is modified from python-najafbitlib.
#
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
# Attach the errcheck hook so a NULL EC_KEY* from OpenSSL raises ValueError
# instead of silently propagating a null handle into every later call.
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
    """Wrapper around OpenSSL's EC_KEY (secp256k1 sign/verify/ECDH).

    Owns a single ``EC_KEY*`` handle for the lifetime of the instance;
    the handle is released in ``__del__``.
    """
    POINT_CONVERSION_COMPRESSED = 2
    POINT_CONVERSION_UNCOMPRESSED = 4

    def __init__(self):
        self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)

    def __del__(self):
        # The ssl module-level handle may already be torn down at interpreter
        # exit; guard before freeing.
        if ssl:
            ssl.EC_KEY_free(self.k)
        self.k = None

    def set_secretbytes(self, secret):
        """Set the private key from a 32-byte secret and derive the public key.

        Args:
            secret: 32 raw bytes interpreted as the private scalar.

        Returns:
            The underlying EC_KEY handle.

        Raises:
            ValueError: if the public point could not be derived.
        """
        priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
        group = ssl.EC_KEY_get0_group(self.k)
        pub_key = ssl.EC_POINT_new(group)
        ctx = ssl.BN_CTX_new()
        try:
            # pub = priv * G. The original code repeated this multiplication
            # unconditionally after the checked call; the redundant second
            # EC_POINT_mul has been removed.
            if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
                raise ValueError("Could not derive public key from the supplied secret.")
            ssl.EC_KEY_set_private_key(self.k, priv_key)
            ssl.EC_KEY_set_public_key(self.k, pub_key)
        finally:
            # Free temporaries on both success and failure paths (the
            # original leaked pub_key/ctx when derivation failed).
            ssl.EC_POINT_free(pub_key)
            ssl.BN_CTX_free(ctx)
        return self.k

    def set_privkey(self, key):
        """Load a DER-encoded private key; returns nonzero on success."""
        self.mb = ctypes.create_string_buffer(key)
        return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))

    def set_pubkey(self, key):
        """Load a serialized public key; returns nonzero on success."""
        self.mb = ctypes.create_string_buffer(key)
        return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))

    def get_privkey(self):
        """Return the private key serialized in DER form."""
        size = ssl.i2d_ECPrivateKey(self.k, 0)
        mb_pri = ctypes.create_string_buffer(size)
        ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
        return mb_pri.raw

    def get_pubkey(self):
        """Return the public key in serialized octet form."""
        size = ssl.i2o_ECPublicKey(self.k, 0)
        mb = ctypes.create_string_buffer(size)
        ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
        return mb.raw

    def get_raw_ecdh_key(self, other_pubkey):
        """Compute the raw 32-byte ECDH shared secret with another key."""
        ecdh_keybuffer = ctypes.create_string_buffer(32)
        r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
                                 ssl.EC_KEY_get0_public_key(other_pubkey.k),
                                 self.k, 0)
        if r != 32:
            raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
        return ecdh_keybuffer.raw

    def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
        """Return the ECDH shared secret passed through a KDF (default SHA-256)."""
        # FIXME: be warned it's not clear what the kdf should be as a default
        r = self.get_raw_ecdh_key(other_pubkey)
        return kdf(r)

    def sign(self, hash):
        """Produce a DER-encoded ECDSA signature over a 32-byte digest.

        Raises:
            TypeError: if `hash` is not a bytes instance.
            ValueError: if `hash` is not exactly 32 bytes long.
        """
        # FIXME: need unit tests for below cases
        if not isinstance(hash, bytes):
            raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
        if len(hash) != 32:
            raise ValueError('Hash must be exactly 32 bytes long')
        sig_size0 = ctypes.c_uint32()
        sig_size0.value = ssl.ECDSA_size(self.k)
        mb_sig = ctypes.create_string_buffer(sig_size0.value)
        result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
        assert 1 == result
        # ECDSA_sign updates sig_size0 to the actual signature length.
        return mb_sig.raw[:sig_size0.value]

    def verify(self, hash, sig):
        """Verify a DER signature; True iff ECDSA_verify returns 1."""
        return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1

    def set_compressed(self, compressed):
        """Choose compressed or uncompressed point serialization."""
        if compressed:
            form = self.POINT_CONVERSION_COMPRESSED
        else:
            form = self.POINT_CONVERSION_UNCOMPRESSED
        ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
    """An encapsulated public key

    Immutable: the raw serialized key bytes are the object itself.

    Attributes:
        is_valid - Corresponds to CPubKey.IsValid()
        is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
        is_compressed - Corresponds to CPubKey.IsCompressed()
    """

    def __new__(cls, buf, _cec_key=None):
        self = super(CPubKey, cls).__new__(cls, buf)
        if _cec_key is None:
            _cec_key = CECKey()
        self._cec_key = _cec_key
        # OpenSSL accepts the buffer only if it decodes to a valid key,
        # so a nonzero return marks the key fully valid.
        self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
        return self

    @property
    def is_valid(self):
        # Any non-empty buffer counts as "valid" in this weaker sense.
        return len(self) > 0

    @property
    def is_compressed(self):
        # 33-byte serializations are the compressed form.
        return len(self) == 33

    def verify(self, hash, sig):
        """Verify a DER-encoded ECDSA signature over `hash`."""
        return self._cec_key.verify(hash, sig)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Always have represent as b'<secret>' so test cases don't have to
        # change for py2/3
        if sys.version > '3':
            return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
        else:
            return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
4a8ff182ff64cd8354550c0be5e3c6d332c9e65e | fb5b258968d361e652e31a753b7729acea776470 | /tracker/extensions.py | 32059628cfe55f696c80043be6c0188722f973aa | [] | no_license | OpenDataServices/aid-transparency-tracker | ddbac46406dccd71b5b441e543e67d1819377da3 | d3d12dc3d038c24825374eb7aa74ed6e51266747 | refs/heads/master | 2020-06-14T00:53:28.939473 | 2019-06-30T15:18:55 | 2019-06-30T15:18:55 | 194,842,277 | 1 | 1 | null | 2019-07-02T10:30:35 | 2019-07-02T10:30:34 | null | UTF-8 | Python | false | false | 528 | py | """Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_webpack import Webpack
from flask_wtf.csrf import CSRFProtect
from flask_security import Security
# Flask extension singletons; each is bound to the app in the application
# factory (app.py) via its init_app() hook.
csrf_protect = CSRFProtect()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
webpack = Webpack()
security = Security()
| [
"a.lulham@gmail.com"
] | a.lulham@gmail.com |
cd3649ce6be2f14862c05d40998542e059230fe7 | bcf88b912b9443c3326466c226f68a7e7ad5aa9d | /bdbag/fetch/transports/fetch_http.py | f55b0cd33f99c76fdd5b2177276a166c30c9865e | [
"Apache-2.0"
] | permissive | mvdbeek/bdbag | 33bc7e0275c720104af77654b0016024cb6ab012 | fe67b5bffc68b7dac823ce03d450ede3affccbef | refs/heads/master | 2020-03-25T05:17:09.646537 | 2018-07-12T03:58:06 | 2018-07-12T03:58:06 | 143,438,809 | 0 | 0 | null | 2018-08-03T14:42:27 | 2018-08-03T14:42:27 | null | UTF-8 | Python | false | false | 6,656 | py | import os
import datetime
import logging
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import certifi
from bdbag import urlsplit, get_typed_exception
import bdbag.fetch.auth.keychain as keychain
logger = logging.getLogger(__name__)
Kilobyte = 1024
Megabyte = 1024 ** 2
CHUNK_SIZE = 1024 * 10240  # 10 MiB read size for streamed downloads
SESSIONS = dict()  # cache of requests.Session objects, keyed by keychain uri or scheme://netloc
HEADERS = {'Connection': 'keep-alive'}  # merged into every outgoing request's headers
def validate_auth_config(auth):
    """Return True when a keychain entry carries every required auth attribute."""
    required_attrs = ('auth_type', 'auth_params')
    return all(keychain.has_auth_attr(auth, attr) for attr in required_attrs)
def get_session(url, auth_config):
    """Return a (possibly cached, possibly authenticated) session for `url`.

    Scans `auth_config` for keychain entries whose uri is a substring of
    `url` and performs cookie / http-basic / http-form authentication as
    configured. Successful sessions are cached in SESSIONS keyed by the
    keychain uri; with no matching entry, an unauthenticated session cached
    per scheme://netloc is returned.
    """
    session = None
    response = None
    for auth in list((entry for entry in auth_config if hasattr(entry, 'uri') and (entry.uri.lower() in url.lower()))):
        try:
            if not validate_auth_config(auth):
                continue
            if auth.uri in SESSIONS:
                # Already authenticated earlier in this process; reuse it.
                session = SESSIONS[auth.uri]
                break
            else:
                session = get_new_session()
            if auth.auth_type == 'cookie':
                if auth.auth_params and hasattr(auth.auth_params, 'cookies'):
                    cookies = auth.auth_params.cookies
                    for cookie in cookies:
                        # Each cookie is a "name=value" string; split once so
                        # values containing '=' survive intact.
                        name, value = cookie.split('=', 1)
                        session.cookies.set(name, value, domain=urlsplit(auth.uri).hostname, path='/')
                    SESSIONS[auth.uri] = session
                break
            # if we get here the assumption is that the auth_type is either http-basic or http-form
            auth_uri = auth.uri
            if keychain.has_auth_attr(auth, 'auth_uri'):
                auth_uri = auth.auth_uri
            if not (keychain.has_auth_attr(auth.auth_params, 'username') and
                    keychain.has_auth_attr(auth.auth_params, 'password')):
                logging.warning(
                    "Missing required parameters [username, password] for auth_type [%s] for keychain entry [%s]" %
                    (auth.auth_type, auth.uri))
                continue
            if auth.auth_type == 'http-basic':
                session.auth = (auth.auth_params.username, auth.auth_params.password)
                auth_method = "post"
                if keychain.has_auth_attr(auth.auth_params, 'auth_method'):
                    auth_method = auth.auth_params.auth_method.lower()
                if auth_method == 'post':
                    response = session.post(auth_uri, auth=session.auth)
                elif auth_method == 'get':
                    response = session.get(auth_uri, auth=session.auth)
                else:
                    logging.warning("Unsupported auth_method [%s] for auth_type [%s] for keychain entry [%s]" %
                                    (auth_method, auth.auth_type, auth.uri))
            elif auth.auth_type == 'http-form':
                response = session.post(auth_uri,
                                        {auth.auth_params.username_field or "username": auth.auth_params.username,
                                         auth.auth_params.password_field or "password": auth.auth_params.password})
            if response.status_code > 203:
                # NOTE(review): on auth failure the session is still returned,
                # just not cached — requests proceed unauthenticated.
                logger.warning(
                    'Authentication failed with Status Code: %s %s\n' % (response.status_code, response.text))
            else:
                logger.info("Session established: %s", auth.uri)
                SESSIONS[auth.uri] = session
            break
        except Exception as e:
            logger.warning("Unhandled exception during HTTP(S) authentication: %s" % get_typed_exception(e))
    if not session:
        # No keychain match: fall back to an anonymous session cached per host.
        url_parts = urlsplit(url)
        base_url = str("%s://%s" % (url_parts.scheme, url_parts.netloc))
        session = SESSIONS.get(base_url, None)
        if not session:
            session = get_new_session()
            SESSIONS[base_url] = session
    return session
def get_new_session():
    """Build a requests session whose HTTP and HTTPS adapters retry
    transient failures (5 connect/read retries, exponential backoff,
    retried on 500/502/503/504)."""
    retry_policy = Retry(connect=5,
                         read=5,
                         backoff_factor=1.0,
                         status_forcelist=[500, 502, 503, 504])
    session = requests.session()
    for scheme in ('http://', 'https://'):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session
def get_file(url, output_path, auth_config, headers=None, session=None):
    """Stream `url` to `output_path` over HTTP(S), creating parent dirs.

    Retries once with a freshly authenticated session on a 401. Returns
    True on success, False on a requests exception.

    NOTE(review): a non-200 response (other than the retried 401) logs the
    failure but falls through without a return statement, so the caller
    receives None (falsy) rather than an explicit False — confirm callers
    only rely on truthiness.
    """
    try:
        if not session:
            session = get_session(url, auth_config)
        output_dir = os.path.dirname(os.path.abspath(output_path))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Always merge in the keep-alive header; caller headers take a copy hit.
        if not headers:
            headers = HEADERS
        else:
            headers.update(HEADERS)
        logger.info("Attempting GET from URL: %s" % url)
        r = session.get(url, headers=headers, stream=True, verify=certifi.where())
        if r.status_code == 401:
            # Credentials may have expired; re-authenticate once and retry.
            session = get_session(url, auth_config)
            r = session.get(url, headers=headers, stream=True, verify=certifi.where())
        if r.status_code != 200:
            logger.error('HTTP GET Failed for URL: %s' % url)
            logger.error("Host %s responded:\n\n%s" % (urlsplit(url).netloc, r.text))
            logger.warning('File transfer failed: [%s]' % output_path)
        else:
            total = 0
            start = datetime.datetime.now()
            logger.debug("Transferring file %s to %s" % (url, output_path))
            with open(output_path, 'wb') as data_file:
                for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
                    data_file.write(chunk)
                    total += len(chunk)
            elapsed_time = datetime.datetime.now() - start
            total_secs = elapsed_time.total_seconds()
            # Report KB below one MB, MB otherwise; throughput only when the
            # transfer took at least a second (avoids divide-by-near-zero).
            transferred = \
                float(total) / float(Kilobyte) if total < Megabyte else float(total) / float(Megabyte)
            throughput = str(" at %.2f MB/second" % (transferred / total_secs)) if (total_secs >= 1) else ""
            elapsed = str("Elapsed time: %s." % elapsed_time) if (total_secs > 0) else ""
            summary = "%.3f %s transferred%s. %s" % \
                (transferred, "KB" if total < Megabyte else "MB", throughput, elapsed)
            logger.info('File [%s] transfer successful. %s' % (output_path, summary))
            return True
    except requests.exceptions.RequestException as e:
        logger.error('HTTP Request Exception: %s' % (get_typed_exception(e)))
        return False
def cleanup():
    """Close every cached HTTP session and empty the session cache."""
    while SESSIONS:
        _, session = SESSIONS.popitem()
        session.close()
| [
"mikedarcy@users.noreply.github.com"
] | mikedarcy@users.noreply.github.com |
fb95f7509a541b20d0c87ac002317fa791d06f7b | 4c1c2e3a8882f58a895285232eddae337ddc1a3a | /tests/test_general.py | f0721fd9fa1cb43874e25eda16877495aa4b11b9 | [
"MIT"
] | permissive | ArtellaPipe/artellapipe-tools-renamer | b00472d94de48771b3de69d2706f2e37235736d2 | 2456f418e2b4cf853d228fcce27af94b4fe3185a | refs/heads/master | 2020-11-27T15:15:59.673296 | 2020-08-24T18:26:45 | 2020-08-24T18:26:45 | 229,507,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains general tests for artellapipe-tools-renamer
"""
import pytest
from artellapipe.tools.renamer import __version__
def test_version():
    # Smoke test: the packaged version string must be retrievable and truthy.
    assert __version__.get_version()
| [
"tpovedatd@gmail.com"
] | tpovedatd@gmail.com |
54b05755dc514b5b8bb3ffd4c7da7e3f34989eb3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/l69.py | 920450ca9c83d396aca07b6664717ada6b99d6a4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('GellWPT2.py', 1), ('GodwWLN.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
d917ef627509e70c36c83c7acd82f2010593541f | 959da8ee241cf4b525b46a7a75f752fdb588b7a7 | /finegan/model.py | c6e7062dbd63094efa62b5061610ea4f3103f707 | [
"MIT",
"Apache-2.0"
] | permissive | abdelrahmanzied/GSoC-TensorFlow-2020 | b1b2349e24fe3f1249bf79d708c88ba0daee574a | 8e1cb3591dff10a74e2adcf52940be3ed03f9be7 | refs/heads/master | 2023-02-05T09:00:57.889824 | 2020-12-29T07:24:37 | 2020-12-29T07:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,773 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unsupervised Hierarchical Disentanglement for Fine Grained Object Generation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import pickle
import random
import numpy as np
import pandas as pd
import tensorflow as tf
assert tf.version.VERSION.startswith('2.')
from tensorflow.keras import Input, Model, Sequential
from tensorflow.keras.layers import LeakyReLU, BatchNormalization, ReLU, Activation, LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D, Concatenate, Dense, concatenate, Conv2DTranspose
from tensorflow.keras.layers import Flatten, Lambda, Reshape, ZeroPadding2D, add, dot, ELU
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
class GLU(tf.keras.layers.Layer):
    """Gated Linear Unit: splits the channel axis in half and multiplies
    the first half by a sigmoid gate computed from the second half, so the
    output has half the input's channels."""

    def __init__(self):
        super(GLU, self).__init__()

    def call(self, inputs):
        channels = inputs.shape[-1]
        assert channels % 2 == 0, 'Channels are not divisible by 2.'
        half = int(channels / 2)
        if len(inputs.shape) == 2:
            gate = tf.math.sigmoid(inputs[:, half:])
            return inputs[:, :half] * gate
        gate = tf.math.sigmoid(inputs[:, :, :, half:])
        return inputs[:, :, :, :half] * gate
class ParentChildEncoder(tf.keras.layers.Layer):
    """Encoder for parent and child images.

    Four zero-padded 4x4 stride-2 conv stages (16x spatial downsampling);
    channels grow from num_disc_features to num_disc_features*8. Every
    stage uses LeakyReLU(0.2); all but the first are batch-normalized.
    """
    def __init__(self, num_disc_features, **kwargs):
        super(ParentChildEncoder, self).__init__(**kwargs)
        self.num_disc_features = num_disc_features  # base channel width
        self.conv1 = Conv2D(self.num_disc_features, 4, 2, use_bias=False)
        self.conv2 = Conv2D(self.num_disc_features*2, 4, 2, use_bias=False)
        self.batchnorm1 = BatchNormalization()
        self.conv3 = Conv2D(self.num_disc_features*4, 4, 2, use_bias=False)
        self.batchnorm2 = BatchNormalization()
        self.conv4 = Conv2D(self.num_disc_features*8, 4, 2, use_bias=False)
        self.batchnorm3 = BatchNormalization()

    def call(self, inputs):
        # Stage 1: pad -> conv -> LeakyReLU (no batch norm on the first stage)
        x = ZeroPadding2D(1)(inputs)
        x = self.conv1(x)
        x = LeakyReLU(alpha=0.2)(x)
        # Stage 2
        x = ZeroPadding2D(1)(x)
        x = self.conv2(x)
        x = self.batchnorm1(x)
        x = LeakyReLU(alpha=0.2)(x)
        # Stage 3
        x = ZeroPadding2D(1)(x)
        x = self.conv3(x)
        x = self.batchnorm2(x)
        x = LeakyReLU(alpha=0.2)(x)
        # Stage 4
        x = ZeroPadding2D(1)(x)
        x = self.conv4(x)
        x = self.batchnorm3(x)
        return LeakyReLU(alpha=0.2)(x)
class BackgroundEncoder(tf.keras.layers.Layer):
    """Encoder for the background image: three unpadded 4x4 conv stages
    (strides 2, 2, 1), each followed by LeakyReLU(0.2), no batch norm."""

    def __init__(self, num_disc_features, **kwargs):
        super(BackgroundEncoder, self).__init__(**kwargs)
        self.num_disc_features = num_disc_features
        self.conv1 = Conv2D(self.num_disc_features, 4, 2, use_bias=False)
        self.conv2 = Conv2D(self.num_disc_features*2, 4, 2, use_bias=False)
        self.conv3 = Conv2D(self.num_disc_features*4, 4, 1, use_bias=False)

    def call(self, inputs):
        features = inputs
        # Apply each conv stage followed by its activation, in order.
        for conv in (self.conv1, self.conv2, self.conv3):
            features = LeakyReLU(alpha=0.2)(conv(features))
        return features
class UpSampleBlock(tf.keras.layers.Layer):
    """Doubles spatial resolution with a stride-2 transposed conv producing
    2*filters channels, then batch norm and a GLU gate (which halves the
    channel count back to `filters`)."""

    def __init__(self, filters=16, **kwargs):
        super(UpSampleBlock, self).__init__(**kwargs)
        self.filters = filters
        self.upsample1 = Conv2DTranspose(self.filters*2, 3, strides=2, padding='same',
                                         kernel_initializer="orthogonal", use_bias=False)
        self.batchnorm1 = BatchNormalization()

    def call(self, inputs):
        features = self.batchnorm1(self.upsample1(inputs))
        return GLU()(features)
class DownSampleBlock(tf.keras.layers.Layer):
    """Halves spatial resolution: zero-pad(1) -> 4x4 stride-2 conv ->
    batch norm -> LeakyReLU(0.2)."""
    def __init__(self, filters=16, **kwargs):
        super(DownSampleBlock, self).__init__(**kwargs)
        self.filters = filters  # output channel count
        self.conv1 = Conv2D(self.filters, 4, 2, use_bias=False)
        self.batchnorm1 = BatchNormalization()

    def call(self, inputs):
        x = ZeroPadding2D(1)(inputs)
        x = self.conv1(x)
        x = self.batchnorm1(x)
        return LeakyReLU(alpha=0.2)(x)
class KeepDimsBlock(tf.keras.layers.Layer):
    """Keeps spatial resolution: zero-pad(1) -> 3x3 stride-1 conv producing
    2*filters channels -> batch norm -> GLU (gates and halves the channels
    back down to `filters`)."""
    def __init__(self, filters=16, **kwargs):
        super(KeepDimsBlock, self).__init__(**kwargs)
        self.filters = filters
        self.conv1 = Conv2D(self.filters*2, 3, kernel_initializer='orthogonal', use_bias=False)
        self.batchnorm1 = BatchNormalization()

    def call(self, inputs):
        x = ZeroPadding2D(1)(inputs)
        x = self.conv1(x)
        x = self.batchnorm1(x)
        return GLU()(x)
class ResidualBlock(tf.keras.layers.Layer):
def __init__(self, channels=16, **kwargs):
super(ResidualBlock, self).__init__(**kwargs)
self.channels = channels
self.conv1 = Conv2D(filters=self.channels * 2, kernel_size=3, strides=1, kernel_initializer='orthogonal',
use_bias=False)
self.batchnorm1 = BatchNormalization()
self.conv2 = Conv2D(filters=self.channels, kernel_size=3, strides=1, kernel_initializer='orthogonal',
use_bias=False)
self.batchnorm2 = BatchNormalization()
def call(self, inputs):
residual = inputs
x = ZeroPadding2D(1)(inputs)
x = self.conv1(x)
x = self.batchnorm1(x)
x = GLU()(x)
x = ZeroPadding2D(1)(x)
x = self.conv2(x)
x = self.batchnorm2(x)
return tf.keras.layers.Add()([x, residual])
class InitGenerator(tf.keras.Model):
def __init__(self, cfg, gen_dims, condition_flag, **kwargs):
super(InitGenerator, self).__init__(**kwargs)
self.gf_dim = gen_dims
self.condition_flag = condition_flag
if self.condition_flag==1 :
self.input_dims = cfg.GAN['Z_DIM'] + cfg.SUPER_CATEGORIES
elif self.condition_flag==2:
self.input_dims = cfg.GAN['Z_DIM'] + cfg.FINE_GRAINED_CATEGORIES
self.layer1 = UpSampleBlock(self.gf_dim // 2)
self.layer2 = UpSampleBlock(self.gf_dim // 4)
self.layer3 = UpSampleBlock(self.gf_dim // 8)
self.layer4 = UpSampleBlock(self.gf_dim // 16)
self.layer5 = UpSampleBlock(self.gf_dim // 16)
self.dense1 = Dense(self.gf_dim*4*4*2, kernel_initializer='orthogonal', use_bias=False)
self.batchnorm1 = BatchNormalization()
def call(self, z_code, h_code):
z_code = tf.cast(z_code, dtype=tf.float32)
h_code = tf.cast(h_code, dtype=tf.float32)
x = Concatenate()([z_code, h_code])
x = self.dense1(x)
x = self.batchnorm1(x)
x = GLU()(x)
x = Reshape((4, 4, self.gf_dim))(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return self.layer5(x)
class IntermediateGenerator(tf.keras.Model):
def __init__(self, cfg, gen_dims, hrc=1, num_residual=cfg.GAN['R_NUM'], **kwargs):
super(IntermediateGenerator, self).__init__(**kwargs)
self.gf_dim = gen_dims
self.res = num_residual
if hrc == 1:
self.ef_dim = cfg.SUPER_CATEGORIES
else:
self.ef_dim = cfg.FINE_GRAINED_CATEGORIES
self.convblock = Sequential([
ZeroPadding2D(1),
Conv2D(self.gf_dim*2, 3, 1, kernel_initializer='orthogonal', use_bias=False),
BatchNormalization(),
GLU()
])
self.residual = self.make_layer(ResidualBlock, self.gf_dim)
self.keepdims = KeepDimsBlock(self.gf_dim // 2)
def make_layer(self, block, channel_num):
return Sequential([block(channel_num),
block(channel_num)])
def call(self, h_code, code):
s_size = h_code.shape[1]
code = Reshape([1, 1, self.ef_dim])(code)
code = tf.tile(code, tf.constant([1, s_size, s_size, 1]))
h_code = tf.cast(h_code, dtype=tf.float32)
code = tf.cast(code, dtype=tf.float32)
x = Concatenate(axis=-1)([code, h_code])
x = self.convblock(x)
x = self.residual(x)
return self.keepdims(x)
class GetImage(tf.keras.Model):
def __init__(self, **kwargs):
super(GetImage, self).__init__(**kwargs)
self.out_image = Sequential([
ZeroPadding2D(1),
Conv2D(filters=3, kernel_size=3, strides=1, kernel_initializer='orthogonal',
use_bias=False),
Activation('tanh')
])
def call(self, inputs):
return self.out_image(inputs)
class GetMask(tf.keras.Model):
def __init__(self, **kwargs):
super(GetMask, self).__init__(**kwargs)
self.out_mask = Sequential([
ZeroPadding2D(1),
Conv2D(filters=1, kernel_size=3, strides=1, kernel_initializer='orthogonal',
use_bias=False),
Activation('sigmoid')
])
def call(self, inputs):
return self.out_mask(inputs)
class GeneratorArchitecture(tf.keras.Model):
def __init__(self, cfg, **kwargs):
super(GeneratorArchitecture, self).__init__(**kwargs)
self.cfg = cfg
self.gen_dims = cfg.GAN['GF_DIM']
# Background Stage
self.background_gen = InitGenerator(cfg, self.gen_dims*16, 2)
self.image_bg = GetImage() # Background Image
# Parent Stage
self.parent_gen1 = InitGenerator(cfg, self.gen_dims*16, 1)
self.parent_gen2 = IntermediateGenerator(cfg, self.gen_dims, 1)
self.image_gen2 = GetImage() # Parent Foreground
self.mask_gen2 = GetMask() # Parent Mask
# Child Stage
self.child_gen = IntermediateGenerator(cfg, self.gen_dims // 2, 0)
self.image_child = GetImage() # Child Foreground
self.mask_child = GetMask() # Child Mask
def call(self, z_code, c_code, p_code=None, bg_code=None):
fake_images = [] # [Background images, Parent images, Child images]
foreground_images = [] # [Parent foreground, Child foreground]
masks = [] # [Parent masks, Child masks]
foreground_masks = [] # [Parent foreground mask, Child foreground mask]
# Set only during training
bg_code = tf.cast(c_code, dtype=tf.float32)
# Background Stage
bg_stage_code = self.background_gen(z_code, bg_code) # Upsampled Background
fake_bg = self.image_bg(bg_stage_code)
fake_img_126 = tf.image.resize(fake_bg,(126, 126))
fake_images.append(fake_img_126)
# Parent Stage
fp_dims = self.parent_gen1(z_code, p_code)
p_dims = self.parent_gen2(fp_dims, p_code) # Feature Representation (F_p)
fake_parent_fg = self.image_gen2(p_dims) # Parent Foreground (P_f)
fake_parent_mask = self.mask_gen2(p_dims) # Parent Mask (P_m)
inverse_ones = tf.ones_like(fake_parent_mask)
inverse_mask = inverse_ones - fake_parent_mask # (1-P_m)
parent_foreground_mask = tf.math.multiply(fake_parent_fg, fake_parent_mask) # Parent Foreground Mask (P_fm)
background_mask = tf.math.multiply(fake_bg, inverse_mask) # Background Mask (B_m)
fake_parent_image = parent_foreground_mask + background_mask # Parent Image (P)
fake_images.append(fake_parent_image)
foreground_images.append(fake_parent_fg)
masks.append(fake_parent_mask)
foreground_masks.append(parent_foreground_mask)
# Child Stage
# TODO: Test whether inclusion of the ResidualGen is necessary
fc_dims = self.child_gen(p_dims, c_code)
fake_child_fg = self.image_child(fc_dims) # Child Foreground (C_f)
fake_child_mask = self.mask_child(fc_dims) # Child Mask (C_m)
inverse_ones = tf.ones_like(fake_child_mask)
inverse_mask = inverse_ones - fake_child_mask # (1-C_m)
child_foreground_mask = tf.math.multiply(fake_child_fg, fake_child_mask) # Child Foreground mask (C_fm)
child_parent_mask = tf.math.multiply(fake_parent_image, inverse_mask) # Parent Mask (P_m)
fake_child_image = child_foreground_mask + child_parent_mask # Child Image (C)
fake_images.append(fake_child_image)
foreground_images.append(fake_child_fg)
masks.append(fake_child_mask)
foreground_masks.append(child_foreground_mask)
return fake_images, foreground_images, masks, foreground_masks
class DiscriminatorArchitecture(tf.keras.Model):
def __init__(self, cfg, stage_num, **kwargs):
super(DiscriminatorArchitecture, self).__init__(**kwargs)
self.disc_dims = cfg.GAN['DF_DIM']
self.stage_num = stage_num
if self.stage_num == 0:
self.encoder_dims = 1
elif self.stage_num == 1:
self.encoder_dims = cfg.SUPER_CATEGORIES
elif self.stage_num == 2:
self.encoder_dims = cfg.FINE_GRAINED_CATEGORIES
if self.stage_num == 0:
# Background Stage
self.patchgan_16 = BackgroundEncoder(self.disc_dims)
self.logits1 = Sequential([
Conv2D(1, 4, 1),
Activation('sigmoid')
])
self.logits2 = Sequential([
Conv2D(1, 4, 1),
Activation('sigmoid')
])
else:
self.code_16 = ParentChildEncoder(self.disc_dims)
self.code_32 = DownSampleBlock(self.disc_dims*16)
self.code = Sequential([
ZeroPadding2D(1),
Conv2D(self.disc_dims*8, 3, kernel_initializer='orthogonal', use_bias=False),
BatchNormalization(),
LeakyReLU(alpha=0.2)
])
# Pass gradients through
self.logits_pc = Sequential([
Conv2D(self.encoder_dims, 4, 4, name=f'logits_pc_{self.stage_num}')
])
# Pass gradients through
self.jointConv = Sequential([
ZeroPadding2D(1),
Conv2D(self.disc_dims*8, 3, kernel_initializer='orthogonal', use_bias=False, name=f'joint_conv_{self.stage_num}'),
BatchNormalization(),
LeakyReLU(alpha=0.2)
])
self.logits_pc1 = Sequential([
Conv2D(1, 4, 4, use_bias=False),
Activation('sigmoid')
])
def call(self, inputs):
if self.stage_num == 0:
x = self.patchgan_16(inputs)
back_fore = self.logits1(x) # Background/Foreground classification (D_aux)
real_fake = self.logits2(x) # Real/Fake classification (D_adv)
return [back_fore, real_fake]
else:
x = self.code_16(inputs)
x = self.code_32(x)
x = self.code(x)
x = self.jointConv(x)
p_c = self.logits_pc(x) # Information maximising code (D_pinfo or D_cinfo)
real_fake_child = self.logits_pc1(x) # Real/Fake classification - child (D_adv)
return [Reshape([self.encoder_dims])(p_c), Reshape([-1])(real_fake_child)]
| [
"vishal114186@gmail.com"
] | vishal114186@gmail.com |
2ccbe098d647f4aa3758b9746cb05213b6096eb2 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/2545. Sort the Students by Their Kth Score.py | 1938cef18d0c6419bf112383400d38432dc59634 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | '''
There is a class with m students and n exams. You are given a 0-indexed m x n integer matrix score, where each row represents one student and score[i][j] denotes the score the ith student got in the jth exam. The matrix score contains distinct integers only.
You are also given an integer k. Sort the students (i.e., the rows of the matrix) by their scores in the kth (0-indexed) exam from the highest to the lowest.
Return the matrix after sorting it.
Example 1:
Input: score = [[10,6,9,1],[7,5,11,2],[4,8,3,15]], k = 2
Output: [[7,5,11,2],[10,6,9,1],[4,8,3,15]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 11 in exam 2, which is the highest score, so they got first place.
- The student with index 0 scored 9 in exam 2, which is the second highest score, so they got second place.
- The student with index 2 scored 3 in exam 2, which is the lowest score, so they got third place.
Example 2:
Input: score = [[3,4],[5,6]], k = 0
Output: [[5,6],[3,4]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 5 in exam 0, which is the highest score, so they got first place.
- The student with index 0 scored 3 in exam 0, which is the lowest score, so they got second place.
Constraints:
m == score.length
n == score[i].length
1 <= m, n <= 250
1 <= score[i][j] <= 105
score consists of distinct integers.
0 <= k < n
'''
from typing import *
import unittest
class Solution:
def sortTheStudents(self, score: List[List[int]], k: int) -> List[List[int]]:
return sorted(score, key=lambda x: -x[k])
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
(([[10,6,9,1],[7,5,11,2],[4,8,3,15]],2),[[7,5,11,2],[10,6,9,1],[4,8,3,15]]),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().sortTheStudents(*first), second,
msg="first: {}; second: {}".format(first, second))
unittest.main() | [
"xiaohuanlin1993@gmail.com"
] | xiaohuanlin1993@gmail.com |
20742f51da8d73b241b268b3e8ad34a2a7ec71a6 | e8215b98dcf46417e720cc6ef4a0329474ae9b82 | /PHYS304/Transcendental.py | f3540b28e7fbf87080a8c19cdb8799a835c8501e | [] | no_license | rgkaufmann/PythonCodes | 2d47bab84ec851fc962598f613b1e666a14c8efd | a5d5cd993beabdb79897a05b35420ad82f438f51 | refs/heads/master | 2021-06-13T23:19:09.109162 | 2021-03-03T06:00:04 | 2021-03-03T06:00:04 | 162,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | import numpy as np
import matplotlib.pyplot as plt
def tangent(z):
return np.tan(z)
def transient(z):
return np.sqrt((8/z)**2-1)
zVals = np.linspace(np.pi, 8, 1000)
tanVals = tangent(zVals)
tranVals = transient(zVals)
zTransients = [4.16483091, 6.83067433]
tanTransients = tangent(zTransients)
plt.plot(zVals, tanVals, label='Tanget')
plt.plot(zVals, tranVals, label='Square Root')
plt.plot(zVals, np.abs(tanVals-tranVals), label='Absolute Value Difference')
plt.scatter(zTransients, tanTransients, label='Numerical Solutions')
plt.ylim(ymin=0, ymax=5)
plt.xlim(np.pi, 8)
plt.legend(loc='best')
plt.title('Graphical Representation of the Transcendental Equation')
plt.xlabel('z Values')
plt.show()
z0 = 4.16483091
z1 = 6.83067433
hbar = 1.0545718e-34
mass = 9.70938356e-31
#a = (8*hbar)/(np.sqrt(2*mass))
a=0.1
kappa0 = np.sqrt(8**2-z0**2)/a
kappa1 = np.sqrt(8**2-z1**2)/a
l0 = z0/a
l1 = z1/a
def HOWavefunction0(x):
# constant = ((mass)/(5*np.pi*hbar))**(1/4)
exponential = np.exp(-(mass)/(10*hbar)*x**2)
return exponential
def HOWavefunction1(x):
constant = ((mass)/(5*np.pi*hbar))**(1/4)
constant *= np.sqrt((2*mass)/(5*hbar))
exponential = np.exp(-(mass)/(10*hbar)*x**2)
return constant*x*exponential
def FSWWavefunction0Even(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa0*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.cos(l0*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = np.cos(l0*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = np.exp(kappa0*x[np.where(x<-a)])
return results
def FSWWavefunction0Odd(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa0*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.sin(l0*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = -1*np.sin(l0*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = -1*np.exp(kappa0*x[np.where(x<-a)])
return results
def FSWWavefunction1Even(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa1*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.cos(l1*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = np.cos(l1*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = np.exp(kappa1*x[np.where(x<-a)])
return results
def FSWWavefunction1Odd(x):
results = np.zeros(x.shape)
results[np.where(x>a)] = np.exp(-kappa1*x[np.where(x>a)])
results[np.where(np.logical_and(0<x, x<a))] = np.sin(l1*x[np.where(np.logical_and(0<x, x<a))])
results[np.where(np.logical_and(0>x, x>-a))] = -1*np.sin(l1*-1*x[np.where(np.logical_and(0>x, x>-a))])
results[np.where(x<-a)] = -1*np.exp(kappa1*x[np.where(x<-a)])
return results
xValues = np.linspace(-0.1, 0.1, 1000)
HO0 = HOWavefunction0(xValues)
HO1 = HOWavefunction1(xValues)
FSW0E = FSWWavefunction0Even(xValues)
FSW0O = FSWWavefunction0Odd(xValues)
FSW1E = FSWWavefunction1Even(xValues)
FSW1O = FSWWavefunction1Odd(xValues)
plt.plot(xValues, HO0)
plt.plot(xValues, FSW0E)
plt.plot(xValues, FSW0O)
plt.plot(xValues, np.abs(FSW0E+FSW0O))
plt.show()
plt.plot(xValues, HO1)
plt.plot(xValues, FSW1E)
plt.plot(xValues, FSW1O)
plt.plot(xValues, np.abs(FSW1E+FSW1O))
plt.show() | [
"ryankaufmannprof@gmail.com"
] | ryankaufmannprof@gmail.com |
817f9f6f9798e25518e5c410d73fac5f146c2faa | 84e4149b3571ff4abe5c27a66ecbde03c5afec3c | /chapter_09/section_3_3/test.py | 8de472d0bbe2646b49941b995e27abd2d498cb45 | [] | no_license | zhanlu-wm/Python-Crash-Course | 6efa04bd5c03e37394b3602d20e7ae57688836e7 | 043fe97b4acdf0008351fd0fdb045888e9bdd44d | refs/heads/master | 2021-07-18T18:34:32.435763 | 2017-10-23T15:27:17 | 2017-10-23T15:27:17 | 103,259,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from chapter_09.section_3_3.electric_car import ElectricCar
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
my_tesla.describe_battery() | [
"ncu09wangming@163.com"
] | ncu09wangming@163.com |
c35ed578aa88d6f303bd3df648ce69e2bf6172a4 | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/frame/methods/test_nlargest.py | 7f9912e61e6e168237191bd9c0e95df2fc24b6ce | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ad76d46aae04dbd1db10b0a2b2882fd55b14de9a1307caf45c549f0b3d316f1a
size 6942
| [
"chuksajeh1@gmail.com"
] | chuksajeh1@gmail.com |
271db093eab9eef6514cf71cd5cdc33b0ebbebbe | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/PostProcessing/Effects/__init__.py | 4712566578e17fcdf1f5316527e1b33ae470f9e3 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,600 | py | # 2016.11.19 19:54:27 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/PostProcessing/Effects/__init__.py
"""PostProcessing.Effects python module
This module imports all Effects for ease-of-use by script programmers.
"""
s_effectFactories = {}
class implementEffectFactory:
def __init__(self, name, desc, *defaultArgs):
self.name = name
self.desc = desc
self.defaultArgs = defaultArgs
def __call__(self, f):
def callFn(*args):
if len(args) > 0:
return f(*args)
else:
return f(*self.defaultArgs)
fn = callFn
s_effectFactories[self.name] = [self.desc, fn]
return fn
def getEffectNames():
"""
This method returns a list of effect (names, descriptions)
used by the World Editor.
"""
ret = []
for key in sorted(s_effectFactories.iterkeys()):
desc = s_effectFactories[key][0]
ret.append((key, desc))
return ret
def effectFactory(name):
"""
This method builds a effect, given the corresponding factory name.
"""
return s_effectFactories[name][1]()
@implementEffectFactory('<new empty effect>', 'Create a new, empty effect.')
def empty():
e = Effect()
e.name = 'unnamed effect'
e.phases = []
return e
from DepthOfField import *
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\PostProcessing\Effects\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:54:27 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e7e338dfe35947ecf54e68e2c21b2af79339e7a0 | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/mnist/mutants/mutant28.py | 8c15391a262bd2fab6f30bcef4af3ed76cab9317 | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
(img_rows, img_cols) = (28, 28)
def train(x_train, y_train, x_test, y_test, model_name):
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation=\
'relu', input_shape=\
input_shape))
model.add(Conv2D(64, (3, 4), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=\
keras.optimizers.Adadelta(), metrics=\
['accuracy'])
model.fit(x_train, y_train, batch_size=\
batch_size, epochs=\
epochs, verbose=\
1, validation_data=\
(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
return (score[0], score[1]) | [
"gunel71@gmail.com"
] | gunel71@gmail.com |
3f2754e1b1a8db1b55c2e9557da13f86471dabad | a7ab35ff204d7c4679ce8b1bf7da8fa363a7a708 | /algo_problems/q861-880/q873.py | 1015c2b49ca79f3a6b3e6bba1765132f41499876 | [] | no_license | lin13k/practice | c68e7270be2694cb9737c35af8beb19db7e30d65 | c3efe1a5839c3ff1c320c0fcfc7b65a9462f7b52 | refs/heads/master | 2021-01-19T11:05:43.521468 | 2018-11-12T09:42:29 | 2018-11-12T09:42:29 | 87,928,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | import collections
class Solution:
def lenLongestFibSubseq(self, A):
"""
:type A: List[int]
:rtype: int
"""
table = collections.defaultdict(lambda: 2)
indexDict = {v: k for k, v in enumerate(A)}
result = 0
for i, v in enumerate(A):
for j in range(i):
t = v - A[j]
k = indexDict.get(t, None)
if k is not None and j < k:
cand = table[k, i] = table[j, k] + 1
result = max(result, cand)
return result
if __name__ == '__main__':
print(Solution().lenLongestFibSubseq([1, 2, 3, 4, 5, 6, 7, 8]))
| [
"lin.13k@gmail.com"
] | lin.13k@gmail.com |
55054610ae4183039b1acb0e4e418f03195a81c4 | 9d862dd68f8b4ea4e7de9397fef8592824c77449 | /app/top/api/rest/DeliveryTemplateDeleteRequest.py | b9f87c34c48381a9fc9ce9798903f521d14d4bb9 | [] | no_license | hi-noikiy/tmall-sku-outer_id | ffaca630dfb288ca33d962b8a050932d1047b9c8 | 1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad | refs/heads/master | 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | '''
Created by auto_sdk on 2016.04.11
'''
from app.top.api.base import RestApi
class DeliveryTemplateDeleteRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.template_id = None
def getapiname(self):
return 'taobao.delivery.template.delete'
| [
"1037096435@qq.com"
] | 1037096435@qq.com |
dfbc3f3b2c03f19ca6779867eebebe3f141653c2 | 5a7b15eb2a3453475ee70bb56e19a7bb2751db89 | /code/create_models/np/yWithMorphologySequentialStreamDropoutDev_BaselineLanguage_Fast_SaveLast_NoFinePOS_OnlyWordForms_BoundedVocab_NP.py | 29ffb30c71fa5bba15f2d545cf4370749b87e909 | [] | no_license | m-hahn/memory-surprisal | 8db19bc86ada9c352feb66859f718749623700b6 | 1b3d680836ba87fb9186741a8d4f184fda35b122 | refs/heads/master | 2022-04-30T16:01:39.323884 | 2022-03-25T04:10:12 | 2022-03-25T04:10:12 | 156,466,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,261 | py | import random
import sys
objectiveName = "LM"
language = sys.argv[1]
languageCode = sys.argv[2]
dropout_rate = float(sys.argv[3]) if len(sys.argv) > 3 else 0.33
emb_dim = int(sys.argv[4]) if len(sys.argv) > 4 else 100
rnn_dim = int(sys.argv[5]) if len(sys.argv) > 5 else 512
rnn_layers = int(sys.argv[6]) if len(sys.argv) > 6 else 2
lr_lm = float(sys.argv[7]) if len(sys.argv) > 7 else 0.1
model = sys.argv[8]
input_dropoutRate = float(sys.argv[9]) # 0.33
batchSize = int(sys.argv[10])
replaceWordsProbability = float(sys.argv[11])
horizon = int(sys.argv[12]) if len(sys.argv) > 12 else 20
prescripedID = sys.argv[13] if len(sys.argv)> 13 else None
gpuNumber = sys.argv[14] if len(sys.argv) > 14 else "GPU0"
assert gpuNumber.startswith("GPU")
gpuNumber = int(gpuNumber[3:])
DOING_PARAMETER_SEARCH = (sys.argv[15] == "True") if len(sys.argv) > 15 else False
assert model.startswith("GROUND")
#########################################
assert len(sys.argv) in [12,13,14, 15]
assert dropout_rate <= 0.5
assert input_dropoutRate <= 0.5
#########################################
devSurprisalTable = [None] * horizon
if prescripedID is not None:
myID = int(prescripedID)
else:
myID = random.randint(0,10000000)
import sys
print >> sys.stderr, ("DOING PARAMETER SEARCH?", DOING_PARAMETER_SEARCH)
assert not DOING_PARAMETER_SEARCH
TARGET_DIR = "/u/scr/mhahn/deps/memory-need-neural-wordforms-np/"
posUni = set()
from math import log, exp
from random import random, shuffle, randint
from corpusIterator import CorpusIterator
originalDistanceWeights = {}
def initializeOrderTable():
    """Collect ordering statistics over the train and dev portions of the corpus.

    Side effects: adds every observed universal POS tag to the module-level
    `posUni` set, and stores the mean observed head-dependent distance for
    every dependency key in the module-level `originalDistanceWeights` dict.

    Returns a 4-tuple:
      dhLogits  -- {(headPOS, dep, depPOS): log(#dependent-first / #head-first)}
                   with add-one smoothing on both counts
      vocab     -- {word form: corpus frequency}
      keys      -- set of all observed (headPOS, dep, depPOS) triples
      depsVocab -- set of all observed dependency labels
    """
    directionCounts = {}   # (headPOS, dep, depPOS, direction) -> count
    dependencyKeys = set()
    wordFrequencies = {}
    distanceTotals = {}    # summed |head index - dependent index| per key
    occurrenceCounts = {}  # number of occurrences per key
    dependencyLabels = set()
    for partition in ["train", "dev"]:
        for sentence in CorpusIterator(language, partition, storeMorph=True).iterator():
            for token in sentence:
                wordFrequencies[token["word"]] = wordFrequencies.get(token["word"], 0) + 1
                dependencyLabels.add(token["dep"])
                posUni.add(token["posUni"])
                if token["dep"] == "root":
                    continue
                headPos = sentence[token["head"] - 1]["posUni"]
                key = (headPos, token["dep"], token["posUni"])
                # "HD" = head precedes dependent, "DH" = dependent precedes head.
                direction = "HD" if token["head"] < token["index"] else "DH"
                directedKey = key + (direction,)
                directionCounts[directedKey] = directionCounts.get(directedKey, 0) + 1
                dependencyKeys.add(key)
                occurrenceCounts[key] = occurrenceCounts.get(key, 0.0) + 1.0
                distanceTotals[key] = distanceTotals.get(key, 0.0) + abs(token["index"] - token["head"])
    dhLogits = {}
    for key in dependencyKeys:
        headFirst = directionCounts.get(key + ("HD",), 0) + 1.0
        dependentFirst = directionCounts.get(key + ("DH",), 0) + 1.0
        dhLogits[key] = log(dependentFirst) - log(headFirst)
        originalDistanceWeights[key] = distanceTotals[key] / occurrenceCounts[key]
    return dhLogits, wordFrequencies, dependencyKeys, dependencyLabels
import torch.nn as nn
import torch
from torch.autograd import Variable
def recursivelyLinearize(sentence, position, result):
    """Append the subtree rooted at `position` (1-based) to `result` in order:
    left-side ("children_DH") subtrees first, then the node, then right-side
    ("children_HD") subtrees."""
    node = sentence[position - 1]
    for childIndex in node.get("children_DH", []):
        recursivelyLinearize(sentence, childIndex, result)
    result.append(node)
    for childIndex in node.get("children_HD", []):
        recursivelyLinearize(sentence, childIndex, result)
import numpy.random
# Shared softmax modules, constructed once and reused.
# NOTE(review): built without an explicit `dim` argument -- relies on this
# torch version's implicit-dimension behavior; confirm against the pinned
# torch version before upgrading.
softmax_layer = torch.nn.Softmax()
logsoftmax = torch.nn.LogSoftmax()
def orderChildrenRelative(sentence, remainingChildren, reverseSoftmax):
    """Order a head's children by the distance weights of their dependencies.

    Under the "REAL" model the given (observed) order is returned unchanged.
    Otherwise children are sorted by the distance weight of their
    `dependency_key`: descending when reverseSoftmax is False, ascending when
    it is True (the two sides of the head mirror each other).
    """
    global model
    if model == "REAL":
        return remainingChildren
    weighted = [(child, distanceWeights[stoi_deps[sentence[child - 1]["dependency_key"]]])
                for child in remainingChildren]
    weighted.sort(key=lambda pair: pair[1], reverse=(not reverseSoftmax))
    return [pair[0] for pair in weighted]
def orderSentence(sentence, dhLogits, printThings):
   """Linearize a dependency-parsed sentence according to the global `model`.

   Attaches each non-root, non-punctuation token to its head's "DH"
   (dependent-before-head) or "HD" child list, orders siblings via
   orderChildrenRelative, and returns (linearized token list, logits).
   NOTE(review): `logits` is initialized to [None]*len(sentence) and never
   filled in this variant, and the `dhLogits` argument is unused here
   (directions come from the module-level dhWeights instead).
   """
   global model
   root = None
   logits = [None]*len(sentence)
   logProbabilityGradient = 0
   if model == "REAL_REAL":
      # Collect tokens to be removed (i.e., punctuation)
      eliminated = []
   for line in sentence:
      if line["dep"] == "root":
          root = line["index"]
          continue
      # Exclude Punctuation
      if line["dep"].startswith("punct"):
         if model == "REAL_REAL":
            eliminated.append(line)
         continue
      # Determine ordering relative to head
      key = (sentence[line["head"]-1]["posUni"], line["dep"], line["posUni"])
      line["dependency_key"] = key
      dhLogit = dhWeights[stoi_deps[key]]
      # "REAL": keep the observed side of the head; otherwise the grammar's
      # direction logit decides (positive -> dependent precedes head).
      if model == "REAL":
          dhSampled = (line["head"] > line["index"])
      else:
          dhSampled = (dhLogit > 0)
      direction = "DH" if dhSampled else "HD"
      if printThings: 
         print "\t".join(map(str,["ORD", line["index"], ("->".join(list(key)) + " ")[:22], line["head"], dhLogit, dhSampled, direction]))
      # Record this token as a child of its head on the chosen side.
      headIndex = line["head"]-1
      sentence[headIndex]["children_"+direction] = (sentence[headIndex].get("children_"+direction, []) + [line["index"]])
   if model != "REAL_REAL":
    # Reorder every head's children on each side by their distance weights.
    for line in sentence:
      if "children_DH" in line:
         childrenLinearized = orderChildrenRelative(sentence, line["children_DH"][:], False)
         line["children_DH"] = childrenLinearized
      if "children_HD" in line:
         childrenLinearized = orderChildrenRelative(sentence, line["children_HD"][:], True)
         line["children_HD"] = childrenLinearized
   elif model == "REAL_REAL":
       # Transitively mark punctuation tokens and all their dependents as
       # removed, using `eliminated` as a work queue.
       while len(eliminated) > 0:
          line = eliminated[0]
          del eliminated[0]
          if "removed" in line:
             continue
          line["removed"] = True
          if "children_DH" in line:
            assert 0 not in line["children_DH"]
            eliminated = eliminated + [sentence[x-1] for x in line["children_DH"]]
          if "children_HD" in line:
            assert 0 not in line["children_HD"]
            eliminated = eliminated + [sentence[x-1] for x in line["children_HD"]]
   linearized = []
   recursivelyLinearize(sentence, root, linearized)
   if model == "REAL_REAL":
      # Observed order: keep the original sequence minus removed tokens.
      linearized = filter(lambda x:"removed" not in x, sentence)
   if printThings or len(linearized) == 0:
     print " ".join(map(lambda x:x["word"], sentence))
     print " ".join(map(lambda x:x["word"], linearized))
   return linearized, logits
# Gather ordering statistics from the corpus; this also populates the
# module-level `posUni` and `originalDistanceWeights`.
dhLogits, vocab, vocab_deps, depsVocab = initializeOrderTable()
# Fixed enumeration of universal POS tags (index <-> tag).
posUni = list(posUni)
itos_pos_uni = posUni
stoi_pos_uni = dict(zip(posUni, range(len(posUni))))
# Enumeration of bare dependency labels (e.g. "amod", "det").
itos_pure_deps = sorted(list(depsVocab)) 
stoi_pure_deps = dict(zip(itos_pure_deps, range(len(itos_pure_deps))))
# Enumeration of full (headPOS, dep, depPOS) dependency keys.
itos_deps = sorted(vocab_deps)
stoi_deps = dict(zip(itos_deps, range(len(itos_deps))))
# Per-dependency grammar parameters: direction logits (dependent-before-head
# if > 0) and relative distance weights; filled in below depending on `model`.
dhWeights = [0.0] * len(itos_deps)
distanceWeights = [0.0] * len(itos_deps)
import os
if model == "REAL" or model == "REAL_REAL":
  # Observed corpus order is used directly; no grammar parameters needed.
  originalCounter = "NA"
elif model == "RANDOM_BY_TYPE":
  # Sample one random direction and one random distance weight per bare
  # dependency label, shared across all (headPOS, dep, depPOS) keys.
  dhByType = {}
  distByType = {}
  for dep in itos_pure_deps:
    dhByType[dep.split(":")[0]] = random() - 0.5
    distByType[dep.split(":")[0]] = random()
  for key in range(len(itos_deps)):
     dhWeights[key] = dhByType[itos_deps[key][1].split(":")[0]]
     distanceWeights[key] = distByType[itos_deps[key][1].split(":")[0]]
  originalCounter = "NA"
elif model.startswith("GROUND"):
  # Load per-dependency direction/distance parameters inferred from the real
  # ("ground") orderings, produced by a separate inference run.
  groundPath = "/u/scr/mhahn/deps/manual_output_ground_coarse/"
  import os
  files = [x for x in os.listdir(groundPath) if x.startswith(language+"_infer")]
  print(files)
  assert len(files) > 0
  # The grammar file is TSV with a header line naming the columns.
  with open(groundPath+files[0], "r") as inFile:
     headerGrammar = next(inFile).strip().split("\t")
     print(headerGrammar)
     dhByDependency = {}
     distByDependency = {}
     for line in inFile:
         line = line.strip().split("\t")
         # Sanity check: the grammar must come from a sufficiently long run.
         assert int(line[headerGrammar.index("Counter")]) >= 1000000
         dependency = line[headerGrammar.index("Dependency")]
         dhHere = float(line[headerGrammar.index("DH_Mean_NoPunct")])
         distHere = float(line[headerGrammar.index("Distance_Mean_NoPunct")])
         print(dependency, dhHere, distHere)
         dhByDependency[dependency] = dhHere
         distByDependency[dependency] = distHere
  # Broadcast the per-label parameters to all full dependency keys.
  for key in range(len(itos_deps)):
     dhWeights[key] = dhByDependency[itos_deps[key][1].split(":")[0]]
     distanceWeights[key] = distByDependency[itos_deps[key][1].split(":")[0]]
  originalCounter = "NA"
else:
  assert False, model
print zip(itos_deps,distanceWeights)
# Log the direction logits of the three NP modifier dependencies of interest.
print dhWeights[stoi_deps[("NOUN", "amod", "ADJ")]]
print dhWeights[stoi_deps[("NOUN", "nummod", "NUM")]]
print dhWeights[stoi_deps[("NOUN", "det", "DET")]]
# Ground distance weights of adjectives, numerals, and determiners relative
# to their noun head; used below to construct counterfactual NP orderings.
AMOD = distanceWeights[stoi_deps[("NOUN", "amod", "ADJ")]]
NUMMOD = distanceWeights[stoi_deps[("NOUN", "nummod", "NUM")]]
DET = distanceWeights[stoi_deps[("NOUN", "det", "DET")]]
if model== "GROUND_AND":
for x in range(len(itos_deps)):
if itos_deps[x][1] == "amod":
distanceWeights[x] = DET
if itos_deps[x][1] == "det":
distanceWeights[x] = AMOD
elif model== "GROUND_DAN":
for x in range(len(itos_deps)):
if itos_deps[x][1] == "amod":
distanceWeights[x] = NUMMOD
if itos_deps[x][1] == "nummod":
distanceWeights[x] = AMOD
elif model== "GROUND_ADN":
for x in range(len(itos_deps)):
if itos_deps[x][1] == "amod":
distanceWeights[x] = DET
if itos_deps[x][1] == "det":
distanceWeights[x] = NUMMOD
if itos_deps[x][1] == "det":
distanceWeights[x] = AMOD
print distanceWeights[stoi_deps[("NOUN", "amod", "ADJ")]]
print distanceWeights[stoi_deps[("NOUN", "nummod", "NUM")]]
print distanceWeights[stoi_deps[("NOUN", "det", "DET")]]
# Vocabulary sorted by corpus frequency (most frequent first).
words = list(vocab.iteritems())
words = sorted(words, key = lambda x:x[1], reverse=True)
itos = map(lambda x:x[0], words)
stoi = dict(zip(itos, range(len(itos))))
# Sanity check that the index <-> word mappings are mutually consistent.
if len(itos) > 6:
   assert stoi[itos[5]] == 5
# Cap the modeled vocabulary at the 10000 most frequent word forms.
vocab_size = 10000
vocab_size = min(len(itos),vocab_size)
torch.cuda.set_device(gpuNumber)
###########################################
# Initialize neural language model
# Joint input embedding over the 3 special symbols, POS tags, and word forms
# (indices: 0 = EOS, 1 = EOW, 2 = SOS/padding; see itos_total below).
word_pos_morph_embeddings = torch.nn.Embedding(num_embeddings = len(itos_pos_uni)+vocab_size+3, embedding_dim=emb_dim).cuda()
print posUni
print "VOCABULARY "+str(vocab_size+3)
# Output vocabulary: 3 special symbols, then POS tags, then word forms.
outVocabSize = len(itos_pos_uni)+vocab_size+3
itos_total = ["EOS", "EOW", "SOS"] + itos_pos_uni + itos[:vocab_size]
assert len(itos_total) == outVocabSize
dropout = nn.Dropout(dropout_rate).cuda()
rnn = nn.LSTM(emb_dim, rnn_dim, rnn_layers).cuda()
# Zero-initialize LSTM biases; Xavier-initialize weight matrices.
# NOTE(review): nn.init.constant/xavier_normal are the old unsuffixed torch
# initializer names; newer torch uses constant_/xavier_normal_.
for name, param in rnn.named_parameters():
  if 'bias' in name:
     nn.init.constant(param, 0.0)
  elif 'weight' in name:
     nn.init.xavier_normal(param)
decoder = nn.Linear(rnn_dim,outVocabSize).cuda()
# All trainable modules of the model, iterated by parameters() below.
components = [rnn, decoder, word_pos_morph_embeddings]
def parameters():
for c in components:
for param in c.parameters():
yield param
initrange = 0.1
word_pos_morph_embeddings.weight.data.uniform_(-initrange, initrange)
decoder.bias.data.fill_(0)
decoder.weight.data.uniform_(-initrange, initrange)
import torch.cuda
import torch.nn.functional
inputDropout = torch.nn.Dropout2d(p=input_dropoutRate)
lossModule = nn.NLLLoss()
lossModuleTest = nn.NLLLoss(size_average=False, reduce=False, ignore_index=2)
#####################################################3
crossEntropy = 10.0
counter = 0
lastDevLoss = None
failedDevRuns = 0
devLosses = []
def doForwardPass(input_indices, wordStartIndices, surprisalTable=None, doDropout=True, batchSizeHere=1, annotations=None, surprisalTableByAnnotation=None, devCounterByAnnotation=None):
global counter
global crossEntropy
global printHere
global devLosses
hidden = None
loss = 0
wordNum = 0
lossWords = 0
policyGradientLoss = 0
baselineLoss = 0
for c in components:
c.zero_grad()
totalQuality = 0.0
if True:
sequenceLength = max(map(len, input_indices))
for i in range(batchSizeHere):
input_indices[i] = input_indices[i][:]
while len(input_indices[i]) < sequenceLength:
input_indices[i].append(2)
inputTensor = Variable(torch.LongTensor(input_indices).transpose(0,1).contiguous()).cuda() # so it will be sequence_length x batchSizeHere
inputTensorIn = inputTensor[:-1]
inputTensorOut = inputTensor[1:]
inputEmbeddings = word_pos_morph_embeddings(inputTensorIn.view(sequenceLength-1, batchSizeHere))
if doDropout:
inputEmbeddings = inputDropout(inputEmbeddings)
if dropout_rate > 0:
inputEmbeddings = dropout(inputEmbeddings)
output, hidden = rnn(inputEmbeddings, hidden)
if doDropout:
output = dropout(output)
word_logits = decoder(output)
word_logits = word_logits.view((sequenceLength-1)*batchSizeHere, outVocabSize)
word_softmax = logsoftmax(word_logits)
lossesWord = lossModuleTest(word_softmax, inputTensorOut.view((sequenceLength-1)*batchSizeHere))
loss = lossesWord.sum()
if surprisalTable is not None or printHere:
lossesCPU = lossesWord.data.cpu().view((sequenceLength-1), batchSizeHere).numpy()
if printHere:
for i in range(0,len(input_indices[0])-1):
j = 0
print (i, itos_total[input_indices[j][i+1]], lossesCPU[i][j])
if surprisalTable is not None:
if printHere:
print surprisalTable
for j in range(batchSizeHere):
for r in range(horizon):
assert wordStartIndices[j][r]< wordStartIndices[j][r+1]
assert wordStartIndices[j][r] < len(lossesWord)+1, (wordStartIndices[j][r],wordStartIndices[j][r+1], len(lossesWord))
assert input_indices[j][wordStartIndices[j][r+1]-1] != 2
if r == horizon-1:
assert wordStartIndices[j][r+1] == len(input_indices[j]) or input_indices[j][wordStartIndices[j][r+1]] == 2
surprisalSum = sum(lossesCPU[wordStartIndices[j][r]-1:wordStartIndices[j][r+1]-1,j])
surprisalTable[r] += surprisalSum #.data.cpu().numpy()[0]
anno = annotations[j][wordStartIndices[j][r+1]-1][0]
# print(anno)
if anno not in ["NOUN", "DET", "ADJ"]:
anno = "OTHER"
# print(anno)
surprisalTableByAnnotation[anno][r] += surprisalSum
devCounterByAnnotation[anno][r] += 1
# surprisalTableByAnnotation = {x : [z/devCounterByAnnotation[x] for z in y] for x, y in surprisalTableByAnnotation.iteritems()}
wordNum = (len(wordStartIndices[0]) - 1)*batchSizeHere
assert len(wordStartIndices[0]) == horizon+1, map(len, wordStartIndices)
if wordNum == 0:
print input_words
print batchOrdered
return 0,0,0,0,0
if printHere:
print loss/wordNum
print lossWords/wordNum
print ["CROSS ENTROPY", crossEntropy, exp(crossEntropy)]
crossEntropy = 0.99 * crossEntropy + 0.01 * (loss/wordNum).data.cpu().numpy()
totalQuality = loss.data.cpu().numpy() # consists of lossesWord + lossesPOS
numberOfWords = wordNum
return loss, None, None, totalQuality, numberOfWords
parameterList = list(parameters())
def doBackwardPass(loss, baselineLoss, policy_related_loss):
global lastDevLoss
global failedDevRuns
loss.backward()
if printHere:
print "BACKWARD "+__file__+" "+language+" "+str(myID)+" "+str(counter)+" "+str(lastDevLoss)+" "+str(failedDevRuns)+" "+(" ".join(map(str,["Dropout (real)", dropout_rate, "Emb_dim", emb_dim, "rnn_dim", rnn_dim, "rnn_layers", rnn_layers, "MODEL", model])))
print devLosses
torch.nn.utils.clip_grad_norm(parameterList, 5.0, norm_type='inf')
for param in parameters():
if param.grad is None:
print "WARNING: None gradient"
continue
param.data.sub_(lr_lm * param.grad.data)
def createStream(corpus):
global crossEntropy
global printHere
global devLosses
input_indices = [2] # Start of Segment
wordStartIndices = []
sentCount = 0
for sentence in corpus:
sentCount += 1
ordered, _ = orderSentence(sentence, dhLogits, printHere)
for line in ordered+["EOS"]:
wordStartIndices.append(len(input_indices))
if line == "EOS":
input_indices.append(0)
else:
if random() < replaceWordsProbability:
targetWord = randint(0,vocab_size-1)
else:
targetWord = stoi[line["word"]]
if targetWord >= vocab_size:
input_indices.append(stoi_pos_uni[line["posUni"]]+3)
else:
input_indices.append(targetWord+3+len(itos_pos_uni))
if len(wordStartIndices) == horizon:
yield input_indices, wordStartIndices+[len(input_indices)]
input_indices = [2] # Start of Segment (makes sure that first word can be predicted from this token)
wordStartIndices = []
def createStreamContinuous(corpus):
# global counter
global crossEntropy
global printHere
global devLosses
input_indices = [2] # Start of Segment
annotations = [("EOS",)]
wordStartIndices = []
sentCount = 0
for sentence in corpus:
sentCount += 1
if sentCount % 10 == 0:
print ["DEV SENTENCES", sentCount]
ordered, _ = orderSentence(sentence, dhLogits, printHere)
for line in ordered+["EOS"]:
wordStartIndices.append(len(input_indices))
if line == "EOS":
input_indices.append(0)
annotations.append(("EOS",))
else:
targetWord = stoi[line["word"]]
if targetWord >= vocab_size:
input_indices.append(stoi_pos_uni[line["posUni"]]+3)
annotations.append((line["posUni"]+"_"+"OOV",))
else:
input_indices.append(targetWord+3+len(itos_pos_uni))
annotations.append((line["posUni"],))
if len(wordStartIndices) == horizon:
assert len(input_indices) == len(annotations)
yield input_indices, wordStartIndices+[len(input_indices)], annotations
if DOING_PARAMETER_SEARCH:
input_indices = [2] # Start of Segment (makes sure that first word can be predicted from this token)
annotations = [("EOS",)]
wordStartIndices = []
else:
input_indices = [2]+input_indices[wordStartIndices[1]:] # Start of Segment (makes sure that first word can be predicted from this token)
annotations = [("EOS",)]+annotations[wordStartIndices[1]:] # Start of Segment (makes sure that first word can be predicted from this token)
wordStartIndices = [x-wordStartIndices[1]+1 for x in wordStartIndices[1:]]
assert wordStartIndices[0] == 1
def computeDevLoss():
devBatchSize = 512
global printHere
global horizon
devLoss = 0.0
devWords = 0
corpusDev = CorpusIterator(language,"dev", storeMorph=True).iterator(rejectShortSentences = False)
stream = createStreamContinuous(corpusDev)
surprisalTable = [0 for _ in range(horizon)]
surprisalTableByAnnotation = {x : [0 for _ in range(horizon)] for x in ["NOUN", "ADJ", "DET", "OTHER"]}
devCounterByAnnotation = {x : [0 for _ in range(horizon)] for x in ["NOUN", "ADJ", "DET", "OTHER"]}
devCounter = 0
devCounterTimesBatchSize = 0
while True:
try:
input_indices_list = []
wordStartIndices_list = []
annotations_list = []
for _ in range(devBatchSize):
input_indices, wordStartIndices, annotations = next(stream)
input_indices_list.append(input_indices)
wordStartIndices_list.append(wordStartIndices)
annotations_list.append(annotations)
except StopIteration:
devBatchSize = len(input_indices_list)
if devBatchSize == 0:
break
devCounter += 1
printHere = (devCounter % 100 == 0)
_, _, _, newLoss, newWords = doForwardPass(input_indices_list, wordStartIndices_list, surprisalTable = surprisalTable, doDropout=False, batchSizeHere=devBatchSize, annotations=annotations_list, surprisalTableByAnnotation=surprisalTableByAnnotation, devCounterByAnnotation=devCounterByAnnotation)
devLoss += newLoss
devWords += newWords
if printHere:
print "Dev examples "+str(devCounter)
devCounterTimesBatchSize += devBatchSize
devSurprisalTableHere = [surp/(devCounterTimesBatchSize) for surp in surprisalTable]
# print(devCounterByAnnotation)
surprisalTableByAnnotation = {x : [z/(u+1e-10) for z, u in zip(y, devCounterByAnnotation[x])] for x, y in surprisalTableByAnnotation.iteritems()}
# quit()
return devLoss/devWords, devSurprisalTableHere, surprisalTableByAnnotation
surprisalTableByAnnotation = None
DEV_PERIOD = 5000
epochCount = 0
corpusBase = CorpusIterator(language, storeMorph=True)
while failedDevRuns == 0:
epochCount += 1
print "Starting new epoch, permuting corpus"
corpusBase.permute()
corpus = corpusBase.iterator(rejectShortSentences = False)
stream = createStream(corpus)
if counter > 5:
newDevLoss, devSurprisalTableHere, surprisalTableByAnnotation = computeDevLoss()
devLosses.append(newDevLoss)
print "New dev loss "+str(newDevLoss)+". previous was: "+str(lastDevLoss)
if newDevLoss > 15 or len(devLosses) > 99:
print "Abort, training too slow?"
devLosses.append(newDevLoss+0.001)
if lastDevLoss is None or newDevLoss < lastDevLoss:
devSurprisalTable = devSurprisalTableHere
print(devSurprisalTable)
if False:
with open(TARGET_DIR+"/estimates-"+language+"_"+__file__+"_model_"+str(myID)+"_"+model+".txt", "w") as outFile:
print >> outFile, " ".join(sys.argv)
print >> outFile, " ".join(map(str,devLosses))
print >> outFile, " ".join(map(str,devSurprisalTable))
print >> outFile, "PARAMETER_SEARCH" if DOING_PARAMETER_SEARCH else "RUNNING"
if newDevLoss > 15 or len(devLosses) > 100:
print "Abort, training too slow?"
failedDevRuns = 1
break
if lastDevLoss is None or newDevLoss < lastDevLoss:
lastDevLoss = newDevLoss
failedDevRuns = 0
else:
failedDevRuns += 1
print "Skip saving, hoping for better model"
print devLosses
print "Epoch "+str(epochCount)+" "+str(counter)
print zip(range(1,horizon+1), devSurprisalTable)
print surprisalTableByAnnotation
break
while True:
counter += 1
printHere = (counter % 100 == 0)
try:
input_indices_list = []
wordStartIndices_list = []
for _ in range(batchSize):
input_indices, wordStartIndices = next(stream)
input_indices_list.append(input_indices)
wordStartIndices_list.append(wordStartIndices)
except StopIteration:
break
loss, baselineLoss, policy_related_loss, _, wordNumInPass = doForwardPass(input_indices_list, wordStartIndices_list, batchSizeHere=batchSize)
if wordNumInPass > 0:
doBackwardPass(loss, baselineLoss, policy_related_loss)
else:
print "No words, skipped backward"
if printHere:
print "Epoch "+str(epochCount)+" "+str(counter)
print zip(range(1,horizon+1), devSurprisalTable)
print surprisalTableByAnnotation
for x in sorted(list(surprisalTableByAnnotation)):
print x
print "\t".join([str(round(y,4)) for y in surprisalTableByAnnotation[x]])
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
81db18bcfb68e90a27adf52c75673c14410207b8 | b5f4dad59698f3f311a98db7932fddab2c3d0957 | /tests/extension/thread_/stream_constant/test_thread_stream_constant.py | c1daa3cb7d8af2ec8e38cb7787fdf122b830739f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | charudatta10/veriloggen | 84f5cd27d996ddf48750914e3bd4e74f7b4c9df7 | dd0d029f323a6510c07392e00ca6ba270bd407d2 | refs/heads/master | 2021-04-26T14:43:02.549058 | 2018-02-11T03:14:40 | 2018-02-11T03:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89,069 | py | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_stream_constant
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] myaxi_awaddr;
wire [8-1:0] myaxi_awlen;
wire myaxi_awvalid;
reg myaxi_awready;
wire [32-1:0] myaxi_wdata;
wire [4-1:0] myaxi_wstrb;
wire myaxi_wlast;
wire myaxi_wvalid;
reg myaxi_wready;
wire [32-1:0] myaxi_araddr;
wire [8-1:0] myaxi_arlen;
wire myaxi_arvalid;
reg myaxi_arready;
reg [32-1:0] myaxi_rdata;
reg myaxi_rlast;
reg myaxi_rvalid;
wire myaxi_rready;
wire [32-1:0] memory_awaddr;
wire [8-1:0] memory_awlen;
wire memory_awvalid;
reg memory_awready;
wire [32-1:0] memory_wdata;
wire [4-1:0] memory_wstrb;
wire memory_wlast;
wire memory_wvalid;
reg memory_wready;
wire [32-1:0] memory_araddr;
wire [8-1:0] memory_arlen;
wire memory_arvalid;
reg memory_arready;
reg [32-1:0] memory_rdata;
reg memory_rlast;
reg memory_rvalid;
wire memory_rready;
reg [8-1:0] _memory_mem [0:2**20-1];
initial begin
$readmemh("_memory_memimg_.out", _memory_mem);
end
reg [32-1:0] _memory_fsm;
localparam _memory_fsm_init = 0;
reg [33-1:0] _write_count;
reg [32-1:0] _write_addr;
reg [33-1:0] _read_count;
reg [32-1:0] _read_addr;
reg [33-1:0] _sleep_count;
reg [32-1:0] _d1__memory_fsm;
reg __memory_fsm_cond_100_0_1;
reg __memory_fsm_cond_200_1_1;
reg __memory_fsm_cond_211_2_1;
assign memory_awaddr = myaxi_awaddr;
assign memory_awlen = myaxi_awlen;
assign memory_awvalid = myaxi_awvalid;
wire _tmp_0;
assign _tmp_0 = memory_awready;
always @(*) begin
myaxi_awready = _tmp_0;
end
assign memory_wdata = myaxi_wdata;
assign memory_wstrb = myaxi_wstrb;
assign memory_wlast = myaxi_wlast;
assign memory_wvalid = myaxi_wvalid;
wire _tmp_1;
assign _tmp_1 = memory_wready;
always @(*) begin
myaxi_wready = _tmp_1;
end
assign memory_araddr = myaxi_araddr;
assign memory_arlen = myaxi_arlen;
assign memory_arvalid = myaxi_arvalid;
wire _tmp_2;
assign _tmp_2 = memory_arready;
always @(*) begin
myaxi_arready = _tmp_2;
end
always @(*) begin
myaxi_rdata <= memory_rdata;
end
wire _tmp_3;
assign _tmp_3 = memory_rlast;
always @(*) begin
myaxi_rlast = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = memory_rvalid;
always @(*) begin
myaxi_rvalid = _tmp_4;
end
assign memory_rready = myaxi_rready;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awlen(myaxi_awlen),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wlast(myaxi_wlast),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arlen(myaxi_arlen),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rlast(myaxi_rlast),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
memory_awready = 0;
memory_wready = 0;
memory_arready = 0;
memory_rdata = 0;
memory_rlast = 0;
memory_rvalid = 0;
_memory_fsm = _memory_fsm_init;
_write_count = 0;
_write_addr = 0;
_read_count = 0;
_read_addr = 0;
_sleep_count = 0;
_d1__memory_fsm = _memory_fsm_init;
__memory_fsm_cond_100_0_1 = 0;
__memory_fsm_cond_200_1_1 = 0;
__memory_fsm_cond_211_2_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
localparam _memory_fsm_200 = 200;
localparam _memory_fsm_201 = 201;
localparam _memory_fsm_202 = 202;
localparam _memory_fsm_203 = 203;
localparam _memory_fsm_204 = 204;
localparam _memory_fsm_205 = 205;
localparam _memory_fsm_206 = 206;
localparam _memory_fsm_207 = 207;
localparam _memory_fsm_208 = 208;
localparam _memory_fsm_209 = 209;
localparam _memory_fsm_210 = 210;
localparam _memory_fsm_211 = 211;
localparam _memory_fsm_100 = 100;
localparam _memory_fsm_101 = 101;
localparam _memory_fsm_102 = 102;
localparam _memory_fsm_103 = 103;
localparam _memory_fsm_104 = 104;
localparam _memory_fsm_105 = 105;
localparam _memory_fsm_106 = 106;
localparam _memory_fsm_107 = 107;
localparam _memory_fsm_108 = 108;
localparam _memory_fsm_109 = 109;
localparam _memory_fsm_110 = 110;
localparam _memory_fsm_111 = 111;
localparam _memory_fsm_112 = 112;
always @(posedge CLK) begin
if(RST) begin
_memory_fsm <= _memory_fsm_init;
_d1__memory_fsm <= _memory_fsm_init;
memory_awready <= 0;
_write_addr <= 0;
_write_count <= 0;
__memory_fsm_cond_100_0_1 <= 0;
memory_wready <= 0;
memory_arready <= 0;
_read_addr <= 0;
_read_count <= 0;
__memory_fsm_cond_200_1_1 <= 0;
memory_rdata[7:0] <= (0 >> 0) & { 8{ 1'd1 } };
memory_rdata[15:8] <= (0 >> 8) & { 8{ 1'd1 } };
memory_rdata[23:16] <= (0 >> 16) & { 8{ 1'd1 } };
memory_rdata[31:24] <= (0 >> 24) & { 8{ 1'd1 } };
memory_rvalid <= 0;
memory_rlast <= 0;
__memory_fsm_cond_211_2_1 <= 0;
memory_rdata <= 0;
_sleep_count <= 0;
end else begin
_sleep_count <= _sleep_count + 1;
if(_sleep_count == 3) begin
_sleep_count <= 0;
end
_d1__memory_fsm <= _memory_fsm;
case(_d1__memory_fsm)
_memory_fsm_100: begin
if(__memory_fsm_cond_100_0_1) begin
memory_awready <= 0;
end
end
_memory_fsm_200: begin
if(__memory_fsm_cond_200_1_1) begin
memory_arready <= 0;
end
end
_memory_fsm_211: begin
if(__memory_fsm_cond_211_2_1) begin
memory_rvalid <= 0;
memory_rlast <= 0;
end
end
endcase
case(_memory_fsm)
_memory_fsm_init: begin
if(memory_awvalid) begin
_memory_fsm <= _memory_fsm_100;
end
if(memory_arvalid) begin
_memory_fsm <= _memory_fsm_200;
end
end
_memory_fsm_100: begin
if(memory_awvalid) begin
memory_awready <= 1;
_write_addr <= memory_awaddr;
_write_count <= memory_awlen + 1;
end
__memory_fsm_cond_100_0_1 <= 1;
if(!memory_awvalid) begin
_memory_fsm <= _memory_fsm_init;
end
if(memory_awvalid) begin
_memory_fsm <= _memory_fsm_101;
end
end
_memory_fsm_101: begin
_memory_fsm <= _memory_fsm_102;
end
_memory_fsm_102: begin
_memory_fsm <= _memory_fsm_103;
end
_memory_fsm_103: begin
_memory_fsm <= _memory_fsm_104;
end
_memory_fsm_104: begin
_memory_fsm <= _memory_fsm_105;
end
_memory_fsm_105: begin
_memory_fsm <= _memory_fsm_106;
end
_memory_fsm_106: begin
_memory_fsm <= _memory_fsm_107;
end
_memory_fsm_107: begin
_memory_fsm <= _memory_fsm_108;
end
_memory_fsm_108: begin
_memory_fsm <= _memory_fsm_109;
end
_memory_fsm_109: begin
_memory_fsm <= _memory_fsm_110;
end
_memory_fsm_110: begin
_memory_fsm <= _memory_fsm_111;
end
_memory_fsm_111: begin
memory_wready <= 1;
_memory_fsm <= _memory_fsm_112;
end
_memory_fsm_112: begin
if(memory_wvalid && memory_wstrb[0]) begin
_memory_mem[_write_addr + 0] <= memory_wdata[7:0];
end
if(memory_wvalid && memory_wstrb[1]) begin
_memory_mem[_write_addr + 1] <= memory_wdata[15:8];
end
if(memory_wvalid && memory_wstrb[2]) begin
_memory_mem[_write_addr + 2] <= memory_wdata[23:16];
end
if(memory_wvalid && memory_wstrb[3]) begin
_memory_mem[_write_addr + 3] <= memory_wdata[31:24];
end
if(memory_wvalid && memory_wready) begin
_write_addr <= _write_addr + 4;
_write_count <= _write_count - 1;
end
if(_sleep_count == 3) begin
memory_wready <= 0;
end else begin
memory_wready <= 1;
end
if(memory_wvalid && memory_wready && (_write_count == 1)) begin
memory_wready <= 0;
end
if(memory_wvalid && memory_wready && (_write_count == 1)) begin
_memory_fsm <= _memory_fsm_init;
end
end
_memory_fsm_200: begin
if(memory_arvalid) begin
memory_arready <= 1;
_read_addr <= memory_araddr;
_read_count <= memory_arlen + 1;
end
__memory_fsm_cond_200_1_1 <= 1;
if(!memory_arvalid) begin
_memory_fsm <= _memory_fsm_init;
end
if(memory_arvalid) begin
_memory_fsm <= _memory_fsm_201;
end
end
_memory_fsm_201: begin
_memory_fsm <= _memory_fsm_202;
end
_memory_fsm_202: begin
_memory_fsm <= _memory_fsm_203;
end
_memory_fsm_203: begin
_memory_fsm <= _memory_fsm_204;
end
_memory_fsm_204: begin
_memory_fsm <= _memory_fsm_205;
end
_memory_fsm_205: begin
_memory_fsm <= _memory_fsm_206;
end
_memory_fsm_206: begin
_memory_fsm <= _memory_fsm_207;
end
_memory_fsm_207: begin
_memory_fsm <= _memory_fsm_208;
end
_memory_fsm_208: begin
_memory_fsm <= _memory_fsm_209;
end
_memory_fsm_209: begin
_memory_fsm <= _memory_fsm_210;
end
_memory_fsm_210: begin
_memory_fsm <= _memory_fsm_211;
end
_memory_fsm_211: begin
if(memory_rready | !memory_rvalid) begin
memory_rdata[7:0] <= _memory_mem[_read_addr + 0];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[15:8] <= _memory_mem[_read_addr + 1];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[23:16] <= _memory_mem[_read_addr + 2];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[31:24] <= _memory_mem[_read_addr + 3];
end
if((_sleep_count < 3) && (_read_count > 0) && memory_rready | !memory_rvalid) begin
memory_rvalid <= 1;
_read_addr <= _read_addr + 4;
_read_count <= _read_count - 1;
end
if((_sleep_count < 3) && (_read_count == 1) && memory_rready | !memory_rvalid) begin
memory_rlast <= 1;
end
__memory_fsm_cond_211_2_1 <= 1;
if(memory_rvalid && !memory_rready) begin
memory_rvalid <= memory_rvalid;
memory_rdata <= memory_rdata;
memory_rlast <= memory_rlast;
end
if(memory_rvalid && memory_rready && (_read_count == 0)) begin
_memory_fsm <= _memory_fsm_init;
end
end
endcase
end
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [32-1:0] myaxi_awaddr,
output reg [8-1:0] myaxi_awlen,
output reg myaxi_awvalid,
input myaxi_awready,
output reg [32-1:0] myaxi_wdata,
output reg [4-1:0] myaxi_wstrb,
output reg myaxi_wlast,
output reg myaxi_wvalid,
input myaxi_wready,
output reg [32-1:0] myaxi_araddr,
output reg [8-1:0] myaxi_arlen,
output reg myaxi_arvalid,
input myaxi_arready,
input [32-1:0] myaxi_rdata,
input myaxi_rlast,
input myaxi_rvalid,
output myaxi_rready
);
reg [10-1:0] ram_a_0_addr;
wire [32-1:0] ram_a_0_rdata;
reg [32-1:0] ram_a_0_wdata;
reg ram_a_0_wenable;
ram_a
inst_ram_a
(
.CLK(CLK),
.ram_a_0_addr(ram_a_0_addr),
.ram_a_0_rdata(ram_a_0_rdata),
.ram_a_0_wdata(ram_a_0_wdata),
.ram_a_0_wenable(ram_a_0_wenable)
);
reg [10-1:0] ram_b_0_addr;
wire [32-1:0] ram_b_0_rdata;
reg [32-1:0] ram_b_0_wdata;
reg ram_b_0_wenable;
ram_b
inst_ram_b
(
.CLK(CLK),
.ram_b_0_addr(ram_b_0_addr),
.ram_b_0_rdata(ram_b_0_rdata),
.ram_b_0_wdata(ram_b_0_wdata),
.ram_b_0_wenable(ram_b_0_wenable)
);
reg [10-1:0] ram_c_0_addr;
wire [32-1:0] ram_c_0_rdata;
reg [32-1:0] ram_c_0_wdata;
reg ram_c_0_wenable;
ram_c
inst_ram_c
(
.CLK(CLK),
.ram_c_0_addr(ram_c_0_addr),
.ram_c_0_rdata(ram_c_0_rdata),
.ram_c_0_wdata(ram_c_0_wdata),
.ram_c_0_wenable(ram_c_0_wenable)
);
reg [32-1:0] _mystream_fsm;
localparam _mystream_fsm_init = 0;
reg _mystream_start;
reg _mystream_busy;
reg [16-1:0] _mystream_a_fsm_sel;
reg _mystream_a_idle;
reg [16-1:0] _mystream_b_fsm_sel;
reg _mystream_b_idle;
reg [16-1:0] _mystream_bias_fsm_sel;
reg [16-1:0] _mystream_c_fsm_sel;
reg [32-1:0] th_comp;
localparam th_comp_init = 0;
reg signed [32-1:0] _th_comp_size_0;
reg signed [32-1:0] _th_comp_double_size_1;
reg signed [32-1:0] _th_comp_offset_2;
reg [10-1:0] _tmp_0;
reg [32-1:0] _tmp_1;
reg [32-1:0] _tmp_2;
reg [32-1:0] _tmp_fsm_0;
localparam _tmp_fsm_0_init = 0;
reg [32-1:0] _tmp_3;
reg [33-1:0] _tmp_4;
reg [33-1:0] _tmp_5;
reg [32-1:0] _tmp_6;
reg _tmp_7;
reg [33-1:0] _tmp_8;
reg _tmp_9;
wire [32-1:0] __variable_data_10;
wire __variable_valid_10;
wire __variable_ready_10;
assign __variable_ready_10 = (_tmp_8 > 0) && !_tmp_9;
reg _ram_a_cond_0_1;
reg [9-1:0] _tmp_11;
reg _myaxi_cond_0_1;
reg [32-1:0] _d1__tmp_fsm_0;
reg __tmp_fsm_0_cond_4_0_1;
reg _tmp_12;
reg __tmp_fsm_0_cond_5_1_1;
reg [10-1:0] _tmp_13;
reg [32-1:0] _tmp_14;
reg [32-1:0] _tmp_15;
reg [32-1:0] _tmp_fsm_1;
localparam _tmp_fsm_1_init = 0;
reg [32-1:0] _tmp_16;
reg [33-1:0] _tmp_17;
reg [33-1:0] _tmp_18;
reg [32-1:0] _tmp_19;
reg _tmp_20;
reg [33-1:0] _tmp_21;
reg _tmp_22;
wire [32-1:0] __variable_data_23;
wire __variable_valid_23;
wire __variable_ready_23;
assign __variable_ready_23 = (_tmp_21 > 0) && !_tmp_22;
reg _ram_b_cond_0_1;
reg [9-1:0] _tmp_24;
reg _myaxi_cond_1_1;
reg [32-1:0] _d1__tmp_fsm_1;
reg __tmp_fsm_1_cond_4_0_1;
reg _tmp_25;
reg __tmp_fsm_1_cond_5_1_1;
reg signed [32-1:0] _th_comp_size_3;
reg signed [32-1:0] _th_comp_offset_4;
reg signed [32-1:0] _th_comp_bias_5;
wire signed [32-1:0] mystream_a_data;
wire signed [32-1:0] mystream_b_data;
wire signed [32-1:0] mystream_bias_data;
reg signed [32-1:0] _plus_data_3;
reg signed [32-1:0] _plus_data_4;
wire signed [32-1:0] mystream_c_data;
assign mystream_c_data = _plus_data_4;
reg [32-1:0] _mystream_a_fsm_1;
localparam _mystream_a_fsm_1_init = 0;
reg [10-1:0] _mystream_a_offset_1;
reg [11-1:0] _mystream_a_size_1;
reg [10-1:0] _mystream_a_stride_1;
reg [11-1:0] _mystream_a_count_1;
reg [10-1:0] _mystream_a_raddr_1;
reg _mystream_a_renable_1;
reg _tmp_26;
reg _ram_a_cond_1_1;
reg _ram_a_cond_2_1;
reg _ram_a_cond_2_2;
reg signed [32-1:0] __variable_wdata_0;
assign mystream_a_data = __variable_wdata_0;
reg [32-1:0] _d1__mystream_a_fsm_1;
reg __mystream_a_fsm_1_cond_1_0_1;
reg __mystream_a_fsm_1_cond_2_1_1;
reg [32-1:0] _mystream_b_fsm_2;
localparam _mystream_b_fsm_2_init = 0;
reg [10-1:0] _mystream_b_offset_2;
reg [11-1:0] _mystream_b_size_2;
reg [10-1:0] _mystream_b_stride_2;
reg [11-1:0] _mystream_b_count_2;
reg [10-1:0] _mystream_b_raddr_2;
reg _mystream_b_renable_2;
reg _tmp_27;
reg _ram_b_cond_1_1;
reg _ram_b_cond_2_1;
reg _ram_b_cond_2_2;
reg signed [32-1:0] __variable_wdata_1;
assign mystream_b_data = __variable_wdata_1;
reg [32-1:0] _d1__mystream_b_fsm_2;
reg __mystream_b_fsm_2_cond_1_0_1;
reg __mystream_b_fsm_2_cond_2_1_1;
reg signed [32-1:0] __parametervariable_wdata_2;
assign mystream_bias_data = __parametervariable_wdata_2;
reg [32-1:0] _mystream_c_fsm_3;
localparam _mystream_c_fsm_3_init = 0;
reg [10-1:0] _mystream_c_offset_3;
reg [11-1:0] _mystream_c_size_3;
reg [10-1:0] _mystream_c_stride_3;
reg [11-1:0] _mystream_c_count_3;
reg [10-1:0] _mystream_c_waddr_3;
reg _mystream_c_wenable_3;
reg signed [32-1:0] _mystream_c_wdata_3;
reg _ram_c_cond_0_1;
reg [32-1:0] _d1__mystream_c_fsm_3;
reg __mystream_c_fsm_3_cond_7_0_1;
reg __mystream_c_fsm_3_cond_8_1_1;
reg [32-1:0] _d1__mystream_fsm;
reg __mystream_fsm_cond_0_0_1;
wire _mystream_done;
assign _mystream_done = _mystream_a_idle && _mystream_b_idle;
reg signed [32-1:0] _th_comp_size_6;
reg signed [32-1:0] _th_comp_offset_7;
reg signed [32-1:0] _th_comp_bias_8;
reg [32-1:0] _mystream_a_fsm_4;
localparam _mystream_a_fsm_4_init = 0;
reg [10-1:0] _mystream_a_offset_4;
reg [11-1:0] _mystream_a_size_4;
reg [10-1:0] _mystream_a_stride_4;
reg [11-1:0] _mystream_a_count_4;
reg [10-1:0] _mystream_a_raddr_4;
reg _mystream_a_renable_4;
reg _tmp_28;
reg _ram_a_cond_3_1;
reg _ram_a_cond_4_1;
reg _ram_a_cond_4_2;
reg [32-1:0] _d1__mystream_a_fsm_4;
reg __mystream_a_fsm_4_cond_1_0_1;
reg __mystream_a_fsm_4_cond_2_1_1;
reg [32-1:0] _mystream_b_fsm_5;
localparam _mystream_b_fsm_5_init = 0;
reg [10-1:0] _mystream_b_offset_5;
reg [11-1:0] _mystream_b_size_5;
reg [10-1:0] _mystream_b_stride_5;
reg [11-1:0] _mystream_b_count_5;
reg [10-1:0] _mystream_b_raddr_5;
reg _mystream_b_renable_5;
reg _tmp_29;
reg _ram_b_cond_3_1;
reg _ram_b_cond_4_1;
reg _ram_b_cond_4_2;
reg [32-1:0] _d1__mystream_b_fsm_5;
reg __mystream_b_fsm_5_cond_1_0_1;
reg __mystream_b_fsm_5_cond_2_1_1;
reg [32-1:0] _mystream_c_fsm_6;
localparam _mystream_c_fsm_6_init = 0;
reg [10-1:0] _mystream_c_offset_6;
reg [11-1:0] _mystream_c_size_6;
reg [10-1:0] _mystream_c_stride_6;
reg [11-1:0] _mystream_c_count_6;
reg [10-1:0] _mystream_c_waddr_6;
reg _mystream_c_wenable_6;
reg signed [32-1:0] _mystream_c_wdata_6;
reg _ram_c_cond_1_1;
reg [32-1:0] _d1__mystream_c_fsm_6;
reg __mystream_c_fsm_6_cond_7_0_1;
reg __mystream_c_fsm_6_cond_8_1_1;
reg __mystream_fsm_cond_0_1_1;
reg [10-1:0] _tmp_30;
reg [32-1:0] _tmp_31;
reg [32-1:0] _tmp_32;
reg [32-1:0] _tmp_fsm_2;
localparam _tmp_fsm_2_init = 0;
reg [32-1:0] _tmp_33;
reg [33-1:0] _tmp_34;
reg [33-1:0] _tmp_35;
reg _tmp_36;
reg _tmp_37;
wire _tmp_38;
wire _tmp_39;
assign _tmp_39 = 1;
localparam _tmp_40 = 1;
wire [_tmp_40-1:0] _tmp_41;
assign _tmp_41 = (_tmp_38 || !_tmp_36) && (_tmp_39 || !_tmp_37);
reg [_tmp_40-1:0] __tmp_41_1;
wire signed [32-1:0] _tmp_42;
reg signed [32-1:0] __tmp_42_1;
assign _tmp_42 = (__tmp_41_1)? ram_c_0_rdata : __tmp_42_1;
reg _tmp_43;
reg _tmp_44;
reg _tmp_45;
reg _tmp_46;
reg [33-1:0] _tmp_47;
reg [9-1:0] _tmp_48;
reg _myaxi_cond_2_1;
reg _tmp_49;
wire [32-1:0] __variable_data_50;
wire __variable_valid_50;
wire __variable_ready_50;
assign __variable_ready_50 = (_tmp_fsm_2 == 4) && ((_tmp_48 > 0) && (myaxi_wready || !myaxi_wvalid));
reg _myaxi_cond_3_1;
reg _tmp_51;
reg [32-1:0] _d1__tmp_fsm_2;
reg __tmp_fsm_2_cond_5_0_1;
reg [10-1:0] _tmp_52;
reg [32-1:0] _tmp_53;
reg [32-1:0] _tmp_54;
reg [32-1:0] _tmp_fsm_3;
localparam _tmp_fsm_3_init = 0;
reg [32-1:0] _tmp_55;
reg [33-1:0] _tmp_56;
reg [33-1:0] _tmp_57;
reg [32-1:0] _tmp_58;
reg _tmp_59;
reg [33-1:0] _tmp_60;
reg _tmp_61;
wire [32-1:0] __variable_data_62;
wire __variable_valid_62;
wire __variable_ready_62;
assign __variable_ready_62 = (_tmp_60 > 0) && !_tmp_61;
reg _ram_a_cond_5_1;
reg [9-1:0] _tmp_63;
reg _myaxi_cond_4_1;
reg [32-1:0] _d1__tmp_fsm_3;
reg __tmp_fsm_3_cond_4_0_1;
reg _tmp_64;
reg __tmp_fsm_3_cond_5_1_1;
reg [10-1:0] _tmp_65;
reg [32-1:0] _tmp_66;
reg [32-1:0] _tmp_67;
reg [32-1:0] _tmp_fsm_4;
localparam _tmp_fsm_4_init = 0;
reg [32-1:0] _tmp_68;
reg [33-1:0] _tmp_69;
reg [33-1:0] _tmp_70;
reg [32-1:0] _tmp_71;
reg _tmp_72;
reg [33-1:0] _tmp_73;
reg _tmp_74;
wire [32-1:0] __variable_data_75;
wire __variable_valid_75;
wire __variable_ready_75;
assign __variable_ready_75 = (_tmp_73 > 0) && !_tmp_74;
reg _ram_b_cond_5_1;
reg [9-1:0] _tmp_76;
reg _myaxi_cond_5_1;
assign myaxi_rready = (_tmp_fsm_0 == 4) || (_tmp_fsm_1 == 4) || (_tmp_fsm_3 == 4) || (_tmp_fsm_4 == 4);
reg [32-1:0] _d1__tmp_fsm_4;
reg __tmp_fsm_4_cond_4_0_1;
reg _tmp_77;
reg __tmp_fsm_4_cond_5_1_1;
reg signed [32-1:0] _th_comp_size_9;
reg signed [32-1:0] _th_comp_offset_10;
reg signed [32-1:0] _th_comp_bias_11;
reg signed [32-1:0] _th_comp_sum_12;
reg signed [32-1:0] _th_comp_i_13;
reg _tmp_78;
reg _ram_a_cond_6_1;
reg _ram_a_cond_7_1;
reg _ram_a_cond_7_2;
reg signed [32-1:0] _tmp_79;
reg signed [32-1:0] _th_comp_a_14;
reg _tmp_80;
reg _ram_b_cond_6_1;
reg _ram_b_cond_7_1;
reg _ram_b_cond_7_2;
reg signed [32-1:0] _tmp_81;
reg signed [32-1:0] _th_comp_b_15;
reg _ram_c_cond_2_1;
reg [10-1:0] _tmp_82;
reg [32-1:0] _tmp_83;
reg [32-1:0] _tmp_84;
reg [32-1:0] _tmp_fsm_5;
localparam _tmp_fsm_5_init = 0;
reg [32-1:0] _tmp_85;
reg [33-1:0] _tmp_86;
reg [33-1:0] _tmp_87;
reg _tmp_88;
reg _tmp_89;
wire _tmp_90;
wire _tmp_91;
assign _tmp_91 = 1;
localparam _tmp_92 = 1;
wire [_tmp_92-1:0] _tmp_93;
assign _tmp_93 = (_tmp_90 || !_tmp_88) && (_tmp_91 || !_tmp_89);
reg [_tmp_92-1:0] __tmp_93_1;
wire signed [32-1:0] _tmp_94;
reg signed [32-1:0] __tmp_94_1;
assign _tmp_94 = (__tmp_93_1)? ram_c_0_rdata : __tmp_94_1;
reg _tmp_95;
reg _tmp_96;
reg _tmp_97;
reg _tmp_98;
reg [33-1:0] _tmp_99;
reg [9-1:0] _tmp_100;
reg _myaxi_cond_6_1;
reg _tmp_101;
wire [32-1:0] __variable_data_102;
wire __variable_valid_102;
wire __variable_ready_102;
assign __variable_ready_102 = (_tmp_fsm_5 == 4) && ((_tmp_100 > 0) && (myaxi_wready || !myaxi_wvalid));
reg _myaxi_cond_7_1;
reg _tmp_103;
reg [32-1:0] _d1__tmp_fsm_5;
reg __tmp_fsm_5_cond_5_0_1;
reg signed [32-1:0] _th_comp_size_16;
reg signed [32-1:0] _th_comp_offset_stream_17;
reg signed [32-1:0] _th_comp_offset_seq_18;
reg signed [32-1:0] _th_comp_all_ok_19;
reg signed [32-1:0] _th_comp_i_20;
reg _tmp_104;
reg _ram_c_cond_3_1;
reg _ram_c_cond_4_1;
reg _ram_c_cond_4_2;
reg signed [32-1:0] _tmp_105;
reg signed [32-1:0] _th_comp_st_21;
reg _tmp_106;
reg _ram_c_cond_5_1;
reg _ram_c_cond_6_1;
reg _ram_c_cond_6_2;
reg signed [32-1:0] _tmp_107;
reg signed [32-1:0] _th_comp_sq_22;
// AXI master (myaxi) AR/AW/W channel driver.
// Shared by six DMA transfers (_tmp_fsm_0/1/3/4 reads, _tmp_fsm_2/5 writes).
// NOTE: ordering inside this block is significant — later non-blocking
// assignments to the same register override earlier ones in the same cycle
// (e.g. the "hold while !ready" blocks at the end win over the clears above).
always @(posedge CLK) begin
  if(RST) begin
    myaxi_araddr <= 0;
    myaxi_arlen <= 0;
    myaxi_arvalid <= 0;
    _tmp_11 <= 0;
    _myaxi_cond_0_1 <= 0;
    _tmp_24 <= 0;
    _myaxi_cond_1_1 <= 0;
    myaxi_awaddr <= 0;
    myaxi_awlen <= 0;
    myaxi_awvalid <= 0;
    _tmp_48 <= 0;
    _myaxi_cond_2_1 <= 0;
    myaxi_wdata <= 0;
    myaxi_wvalid <= 0;
    myaxi_wlast <= 0;
    myaxi_wstrb <= 0;
    _tmp_49 <= 0;
    _myaxi_cond_3_1 <= 0;
    _tmp_63 <= 0;
    _myaxi_cond_4_1 <= 0;
    _tmp_76 <= 0;
    _myaxi_cond_5_1 <= 0;
    _tmp_100 <= 0;
    _myaxi_cond_6_1 <= 0;
    _tmp_101 <= 0;
    _myaxi_cond_7_1 <= 0;
  end else begin
    // One-cycle-delayed deasserts of valid/last after a handshake attempt.
    if(_myaxi_cond_0_1) begin
      myaxi_arvalid <= 0;
    end
    if(_myaxi_cond_1_1) begin
      myaxi_arvalid <= 0;
    end
    if(_myaxi_cond_2_1) begin
      myaxi_awvalid <= 0;
    end
    if(_myaxi_cond_3_1) begin
      myaxi_wvalid <= 0;
      myaxi_wlast <= 0;
      _tmp_49 <= 0;
    end
    if(_myaxi_cond_4_1) begin
      myaxi_arvalid <= 0;
    end
    if(_myaxi_cond_5_1) begin
      myaxi_arvalid <= 0;
    end
    if(_myaxi_cond_6_1) begin
      myaxi_awvalid <= 0;
    end
    if(_myaxi_cond_7_1) begin
      myaxi_wvalid <= 0;
      myaxi_wlast <= 0;
      _tmp_101 <= 0;
    end
    // Read transfer 0: issue AR when _tmp_fsm_0 reaches state 3; _tmp_11 counts beats.
    if((_tmp_fsm_0 == 3) && ((myaxi_arready || !myaxi_arvalid) && (_tmp_11 == 0))) begin
      myaxi_araddr <= _tmp_3;
      myaxi_arlen <= _tmp_4 - 1;
      myaxi_arvalid <= 1;
      _tmp_11 <= _tmp_4;
    end
    _myaxi_cond_0_1 <= 1;
    if(myaxi_arvalid && !myaxi_arready) begin
      myaxi_arvalid <= myaxi_arvalid;
    end
    if(myaxi_rready && myaxi_rvalid && (_tmp_11 > 0)) begin
      _tmp_11 <= _tmp_11 - 1;
    end
    // Read transfer 1 (same pattern, counter _tmp_24).
    if((_tmp_fsm_1 == 3) && ((myaxi_arready || !myaxi_arvalid) && (_tmp_24 == 0))) begin
      myaxi_araddr <= _tmp_16;
      myaxi_arlen <= _tmp_17 - 1;
      myaxi_arvalid <= 1;
      _tmp_24 <= _tmp_17;
    end
    _myaxi_cond_1_1 <= 1;
    if(myaxi_arvalid && !myaxi_arready) begin
      myaxi_arvalid <= myaxi_arvalid;
    end
    if(myaxi_rready && myaxi_rvalid && (_tmp_24 > 0)) begin
      _tmp_24 <= _tmp_24 - 1;
    end
    // Write transfer 2: AW issue; W data comes from stream __variable_*_50.
    if((_tmp_fsm_2 == 3) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_48 == 0))) begin
      myaxi_awaddr <= _tmp_33;
      myaxi_awlen <= _tmp_34 - 1;
      myaxi_awvalid <= 1;
      _tmp_48 <= _tmp_34;
    end
    if((_tmp_fsm_2 == 3) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_48 == 0)) && (_tmp_34 == 0)) begin
      myaxi_awvalid <= 0;
    end
    _myaxi_cond_2_1 <= 1;
    if(myaxi_awvalid && !myaxi_awready) begin
      myaxi_awvalid <= myaxi_awvalid;
    end
    if(__variable_valid_50 && ((_tmp_fsm_2 == 4) && ((_tmp_48 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_48 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_48 > 0))) begin
      myaxi_wdata <= __variable_data_50;
      myaxi_wvalid <= 1;
      myaxi_wlast <= 0;
      myaxi_wstrb <= { 4{ 1'd1 } };
      _tmp_48 <= _tmp_48 - 1;
    end
    if(__variable_valid_50 && ((_tmp_fsm_2 == 4) && ((_tmp_48 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_48 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_48 > 0)) && (_tmp_48 == 1)) begin
      myaxi_wlast <= 1;
      _tmp_49 <= 1;
    end
    _myaxi_cond_3_1 <= 1;
    if(myaxi_wvalid && !myaxi_wready) begin
      myaxi_wvalid <= myaxi_wvalid;
      myaxi_wlast <= myaxi_wlast;
      _tmp_49 <= _tmp_49;
    end
    // Read transfer 3 (counter _tmp_63).
    if((_tmp_fsm_3 == 3) && ((myaxi_arready || !myaxi_arvalid) && (_tmp_63 == 0))) begin
      myaxi_araddr <= _tmp_55;
      myaxi_arlen <= _tmp_56 - 1;
      myaxi_arvalid <= 1;
      _tmp_63 <= _tmp_56;
    end
    _myaxi_cond_4_1 <= 1;
    if(myaxi_arvalid && !myaxi_arready) begin
      myaxi_arvalid <= myaxi_arvalid;
    end
    if(myaxi_rready && myaxi_rvalid && (_tmp_63 > 0)) begin
      _tmp_63 <= _tmp_63 - 1;
    end
    // Read transfer 4 (counter _tmp_76).
    if((_tmp_fsm_4 == 3) && ((myaxi_arready || !myaxi_arvalid) && (_tmp_76 == 0))) begin
      myaxi_araddr <= _tmp_68;
      myaxi_arlen <= _tmp_69 - 1;
      myaxi_arvalid <= 1;
      _tmp_76 <= _tmp_69;
    end
    _myaxi_cond_5_1 <= 1;
    if(myaxi_arvalid && !myaxi_arready) begin
      myaxi_arvalid <= myaxi_arvalid;
    end
    if(myaxi_rready && myaxi_rvalid && (_tmp_76 > 0)) begin
      _tmp_76 <= _tmp_76 - 1;
    end
    // Write transfer 5: AW issue; W data from stream __variable_*_102.
    if((_tmp_fsm_5 == 3) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_100 == 0))) begin
      myaxi_awaddr <= _tmp_85;
      myaxi_awlen <= _tmp_86 - 1;
      myaxi_awvalid <= 1;
      _tmp_100 <= _tmp_86;
    end
    if((_tmp_fsm_5 == 3) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_100 == 0)) && (_tmp_86 == 0)) begin
      myaxi_awvalid <= 0;
    end
    _myaxi_cond_6_1 <= 1;
    if(myaxi_awvalid && !myaxi_awready) begin
      myaxi_awvalid <= myaxi_awvalid;
    end
    if(__variable_valid_102 && ((_tmp_fsm_5 == 4) && ((_tmp_100 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_100 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_100 > 0))) begin
      myaxi_wdata <= __variable_data_102;
      myaxi_wvalid <= 1;
      myaxi_wlast <= 0;
      myaxi_wstrb <= { 4{ 1'd1 } };
      _tmp_100 <= _tmp_100 - 1;
    end
    if(__variable_valid_102 && ((_tmp_fsm_5 == 4) && ((_tmp_100 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_100 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_100 > 0)) && (_tmp_100 == 1)) begin
      myaxi_wlast <= 1;
      _tmp_101 <= 1;
    end
    _myaxi_cond_7_1 <= 1;
    // Hold W signals stable until the slave accepts them (wins over clears above).
    if(myaxi_wvalid && !myaxi_wready) begin
      myaxi_wvalid <= myaxi_wvalid;
      myaxi_wlast <= myaxi_wlast;
      _tmp_101 <= _tmp_101;
    end
  end
end
// Wire the four DMA read streams (AXI R channel capture regs _tmp_6/19/58/71
// with their valid flags) onto the stream-variable buses consumed by the RAM writers.
assign __variable_data_10 = _tmp_6;
assign __variable_valid_10 = _tmp_7;
assign __variable_data_23 = _tmp_19;
assign __variable_valid_23 = _tmp_20;
assign __variable_data_62 = _tmp_58;
assign __variable_valid_62 = _tmp_59;
assign __variable_data_75 = _tmp_71;
assign __variable_valid_75 = _tmp_72;
// ram_a port-0 arbitration. Users of the single port, in increasing priority
// (later assignments in this block win within a cycle):
//   - DMA write from read transfer 0 (__variable_*_10, counter _tmp_8)
//   - stream source reads via _mystream_a_renable_1 / _mystream_a_renable_4
//   - DMA write from read transfer 3 (__variable_*_62, counter _tmp_60)
//   - sequential thread read at th_comp state 32 (_tmp_78 flags data-ready)
always @(posedge CLK) begin
  if(RST) begin
    ram_a_0_addr <= 0;
    _tmp_8 <= 0;
    ram_a_0_wdata <= 0;
    ram_a_0_wenable <= 0;
    _tmp_9 <= 0;
    _ram_a_cond_0_1 <= 0;
    _ram_a_cond_1_1 <= 0;
    _tmp_26 <= 0;
    _ram_a_cond_2_1 <= 0;
    _ram_a_cond_2_2 <= 0;
    _ram_a_cond_3_1 <= 0;
    _tmp_28 <= 0;
    _ram_a_cond_4_1 <= 0;
    _ram_a_cond_4_2 <= 0;
    _tmp_60 <= 0;
    _tmp_61 <= 0;
    _ram_a_cond_5_1 <= 0;
    _ram_a_cond_6_1 <= 0;
    _tmp_78 <= 0;
    _ram_a_cond_7_1 <= 0;
    _ram_a_cond_7_2 <= 0;
  end else begin
    // Two-cycle-delayed clears of read-data-valid pulses.
    if(_ram_a_cond_2_2) begin
      _tmp_26 <= 0;
    end
    if(_ram_a_cond_4_2) begin
      _tmp_28 <= 0;
    end
    if(_ram_a_cond_7_2) begin
      _tmp_78 <= 0;
    end
    if(_ram_a_cond_0_1) begin
      ram_a_0_wenable <= 0;
      _tmp_9 <= 0;
    end
    if(_ram_a_cond_1_1) begin
      _tmp_26 <= 1;
    end
    _ram_a_cond_2_2 <= _ram_a_cond_2_1;
    if(_ram_a_cond_3_1) begin
      _tmp_28 <= 1;
    end
    _ram_a_cond_4_2 <= _ram_a_cond_4_1;
    if(_ram_a_cond_5_1) begin
      ram_a_0_wenable <= 0;
      _tmp_61 <= 0;
    end
    if(_ram_a_cond_6_1) begin
      _tmp_78 <= 1;
    end
    _ram_a_cond_7_2 <= _ram_a_cond_7_1;
    // DMA burst 0 -> ram_a: _tmp_8 counts remaining words; addr pre-decremented.
    if((_tmp_fsm_0 == 1) && (_tmp_8 == 0)) begin
      ram_a_0_addr <= _tmp_0 - 1;
      _tmp_8 <= _tmp_2;
    end
    if(__variable_valid_10 && ((_tmp_8 > 0) && !_tmp_9) && (_tmp_8 > 0)) begin
      ram_a_0_addr <= ram_a_0_addr + 1;
      ram_a_0_wdata <= __variable_data_10;
      ram_a_0_wenable <= 1;
      _tmp_8 <= _tmp_8 - 1;
    end
    if(__variable_valid_10 && ((_tmp_8 > 0) && !_tmp_9) && (_tmp_8 == 1)) begin
      _tmp_9 <= 1;
    end
    _ram_a_cond_0_1 <= 1;
    // Stream source 1 read-address drive.
    if(_mystream_a_renable_1) begin
      ram_a_0_addr <= _mystream_a_raddr_1;
    end
    _ram_a_cond_1_1 <= _mystream_a_renable_1;
    _ram_a_cond_2_1 <= _mystream_a_renable_1;
    // Stream source 4 read-address drive.
    if(_mystream_a_renable_4) begin
      ram_a_0_addr <= _mystream_a_raddr_4;
    end
    _ram_a_cond_3_1 <= _mystream_a_renable_4;
    _ram_a_cond_4_1 <= _mystream_a_renable_4;
    // DMA burst 3 -> ram_a (counter _tmp_60).
    if((_tmp_fsm_3 == 1) && (_tmp_60 == 0)) begin
      ram_a_0_addr <= _tmp_52 - 1;
      _tmp_60 <= _tmp_54;
    end
    if(__variable_valid_62 && ((_tmp_60 > 0) && !_tmp_61) && (_tmp_60 > 0)) begin
      ram_a_0_addr <= ram_a_0_addr + 1;
      ram_a_0_wdata <= __variable_data_62;
      ram_a_0_wenable <= 1;
      _tmp_60 <= _tmp_60 - 1;
    end
    if(__variable_valid_62 && ((_tmp_60 > 0) && !_tmp_61) && (_tmp_60 == 1)) begin
      _tmp_61 <= 1;
    end
    _ram_a_cond_5_1 <= 1;
    // Thread read at th_comp state 32 (sequential check loop).
    if(th_comp == 32) begin
      ram_a_0_addr <= _th_comp_i_13 + _th_comp_offset_10;
    end
    _ram_a_cond_6_1 <= th_comp == 32;
    _ram_a_cond_7_1 <= th_comp == 32;
  end
end
// ram_b port-0 arbitration — mirrors the ram_a block:
//   - DMA write from read transfer 1 (__variable_*_23, counter _tmp_21)
//   - stream source reads via _mystream_b_renable_2 / _mystream_b_renable_5
//   - DMA write from read transfer 4 (__variable_*_75, counter _tmp_73)
//   - sequential thread read at th_comp state 34 (_tmp_80 flags data-ready)
always @(posedge CLK) begin
  if(RST) begin
    ram_b_0_addr <= 0;
    _tmp_21 <= 0;
    ram_b_0_wdata <= 0;
    ram_b_0_wenable <= 0;
    _tmp_22 <= 0;
    _ram_b_cond_0_1 <= 0;
    _ram_b_cond_1_1 <= 0;
    _tmp_27 <= 0;
    _ram_b_cond_2_1 <= 0;
    _ram_b_cond_2_2 <= 0;
    _ram_b_cond_3_1 <= 0;
    _tmp_29 <= 0;
    _ram_b_cond_4_1 <= 0;
    _ram_b_cond_4_2 <= 0;
    _tmp_73 <= 0;
    _tmp_74 <= 0;
    _ram_b_cond_5_1 <= 0;
    _ram_b_cond_6_1 <= 0;
    _tmp_80 <= 0;
    _ram_b_cond_7_1 <= 0;
    _ram_b_cond_7_2 <= 0;
  end else begin
    // Delayed clears of read-data-valid pulses.
    if(_ram_b_cond_2_2) begin
      _tmp_27 <= 0;
    end
    if(_ram_b_cond_4_2) begin
      _tmp_29 <= 0;
    end
    if(_ram_b_cond_7_2) begin
      _tmp_80 <= 0;
    end
    if(_ram_b_cond_0_1) begin
      ram_b_0_wenable <= 0;
      _tmp_22 <= 0;
    end
    if(_ram_b_cond_1_1) begin
      _tmp_27 <= 1;
    end
    _ram_b_cond_2_2 <= _ram_b_cond_2_1;
    if(_ram_b_cond_3_1) begin
      _tmp_29 <= 1;
    end
    _ram_b_cond_4_2 <= _ram_b_cond_4_1;
    if(_ram_b_cond_5_1) begin
      ram_b_0_wenable <= 0;
      _tmp_74 <= 0;
    end
    if(_ram_b_cond_6_1) begin
      _tmp_80 <= 1;
    end
    _ram_b_cond_7_2 <= _ram_b_cond_7_1;
    // DMA burst 1 -> ram_b.
    if((_tmp_fsm_1 == 1) && (_tmp_21 == 0)) begin
      ram_b_0_addr <= _tmp_13 - 1;
      _tmp_21 <= _tmp_15;
    end
    if(__variable_valid_23 && ((_tmp_21 > 0) && !_tmp_22) && (_tmp_21 > 0)) begin
      ram_b_0_addr <= ram_b_0_addr + 1;
      ram_b_0_wdata <= __variable_data_23;
      ram_b_0_wenable <= 1;
      _tmp_21 <= _tmp_21 - 1;
    end
    if(__variable_valid_23 && ((_tmp_21 > 0) && !_tmp_22) && (_tmp_21 == 1)) begin
      _tmp_22 <= 1;
    end
    _ram_b_cond_0_1 <= 1;
    // Stream source 2 read-address drive.
    if(_mystream_b_renable_2) begin
      ram_b_0_addr <= _mystream_b_raddr_2;
    end
    _ram_b_cond_1_1 <= _mystream_b_renable_2;
    _ram_b_cond_2_1 <= _mystream_b_renable_2;
    // Stream source 5 read-address drive.
    if(_mystream_b_renable_5) begin
      ram_b_0_addr <= _mystream_b_raddr_5;
    end
    _ram_b_cond_3_1 <= _mystream_b_renable_5;
    _ram_b_cond_4_1 <= _mystream_b_renable_5;
    // DMA burst 4 -> ram_b.
    if((_tmp_fsm_4 == 1) && (_tmp_73 == 0)) begin
      ram_b_0_addr <= _tmp_65 - 1;
      _tmp_73 <= _tmp_67;
    end
    if(__variable_valid_75 && ((_tmp_73 > 0) && !_tmp_74) && (_tmp_73 > 0)) begin
      ram_b_0_addr <= ram_b_0_addr + 1;
      ram_b_0_wdata <= __variable_data_75;
      ram_b_0_wenable <= 1;
      _tmp_73 <= _tmp_73 - 1;
    end
    if(__variable_valid_75 && ((_tmp_73 > 0) && !_tmp_74) && (_tmp_73 == 1)) begin
      _tmp_74 <= 1;
    end
    _ram_b_cond_5_1 <= 1;
    // Thread read at th_comp state 34 (sequential check loop).
    if(th_comp == 34) begin
      ram_b_0_addr <= _th_comp_i_13 + _th_comp_offset_10;
    end
    _ram_b_cond_6_1 <= th_comp == 34;
    _ram_b_cond_7_1 <= th_comp == 34;
  end
end
// ram_c port-0 arbitration. Users of the single port:
//   - stream sink writes (_mystream_c_wenable_3 / _mystream_c_wenable_6)
//   - DMA-source read pipeline for write transfer 2 (_tmp_36.._47 handshake regs)
//   - thread write at th_comp state 37 (sequential compute loop)
//   - DMA-source read pipeline for write transfer 5 (_tmp_88.._99 handshake regs)
//   - thread reads at th_comp states 45/47 (verification loop)
// The two read pipelines are two-stage skid buffers feeding the AXI W channel;
// ordering of the if-blocks encodes the per-cycle priority.
always @(posedge CLK) begin
  if(RST) begin
    ram_c_0_addr <= 0;
    ram_c_0_wdata <= 0;
    ram_c_0_wenable <= 0;
    _ram_c_cond_0_1 <= 0;
    _ram_c_cond_1_1 <= 0;
    __tmp_41_1 <= 0;
    __tmp_42_1 <= 0;
    _tmp_46 <= 0;
    _tmp_36 <= 0;
    _tmp_37 <= 0;
    _tmp_44 <= 0;
    _tmp_45 <= 0;
    _tmp_43 <= 0;
    _tmp_47 <= 0;
    _ram_c_cond_2_1 <= 0;
    __tmp_93_1 <= 0;
    __tmp_94_1 <= 0;
    _tmp_98 <= 0;
    _tmp_88 <= 0;
    _tmp_89 <= 0;
    _tmp_96 <= 0;
    _tmp_97 <= 0;
    _tmp_95 <= 0;
    _tmp_99 <= 0;
    _ram_c_cond_3_1 <= 0;
    _tmp_104 <= 0;
    _ram_c_cond_4_1 <= 0;
    _ram_c_cond_4_2 <= 0;
    _ram_c_cond_5_1 <= 0;
    _tmp_106 <= 0;
    _ram_c_cond_6_1 <= 0;
    _ram_c_cond_6_2 <= 0;
  end else begin
    // Delayed clears of thread read-data-valid pulses.
    if(_ram_c_cond_4_2) begin
      _tmp_104 <= 0;
    end
    if(_ram_c_cond_6_2) begin
      _tmp_106 <= 0;
    end
    if(_ram_c_cond_0_1) begin
      ram_c_0_wenable <= 0;
    end
    if(_ram_c_cond_1_1) begin
      ram_c_0_wenable <= 0;
    end
    if(_ram_c_cond_2_1) begin
      ram_c_0_wenable <= 0;
    end
    if(_ram_c_cond_3_1) begin
      _tmp_104 <= 1;
    end
    _ram_c_cond_4_2 <= _ram_c_cond_4_1;
    if(_ram_c_cond_5_1) begin
      _tmp_106 <= 1;
    end
    _ram_c_cond_6_2 <= _ram_c_cond_6_1;
    // Stream sink 3 write.
    if(_mystream_c_wenable_3) begin
      ram_c_0_addr <= _mystream_c_waddr_3;
      ram_c_0_wdata <= _mystream_c_wdata_3;
      ram_c_0_wenable <= 1;
    end
    _ram_c_cond_0_1 <= _mystream_c_wenable_3;
    // Stream sink 6 write.
    if(_mystream_c_wenable_6) begin
      ram_c_0_addr <= _mystream_c_waddr_6;
      ram_c_0_wdata <= _mystream_c_wdata_6;
      ram_c_0_wenable <= 1;
    end
    _ram_c_cond_1_1 <= _mystream_c_wenable_6;
    // Read pipeline for DMA write transfer 2 (_tmp_fsm_2): _tmp_47 counts
    // remaining addresses; _tmp_43/_tmp_45 stage the valid/last flags.
    __tmp_41_1 <= _tmp_41;
    __tmp_42_1 <= _tmp_42;
    if((_tmp_38 || !_tmp_36) && (_tmp_39 || !_tmp_37) && _tmp_44) begin
      _tmp_46 <= 0;
      _tmp_36 <= 0;
      _tmp_37 <= 0;
      _tmp_44 <= 0;
    end
    if((_tmp_38 || !_tmp_36) && (_tmp_39 || !_tmp_37) && _tmp_43) begin
      _tmp_36 <= 1;
      _tmp_37 <= 1;
      _tmp_46 <= _tmp_45;
      _tmp_45 <= 0;
      _tmp_43 <= 0;
      _tmp_44 <= 1;
    end
    if((_tmp_fsm_2 == 1) && (_tmp_47 == 0) && !_tmp_45 && !_tmp_46) begin
      ram_c_0_addr <= _tmp_30;
      _tmp_47 <= _tmp_32 - 1;
      _tmp_43 <= 1;
      _tmp_45 <= _tmp_32 == 1;
    end
    if((_tmp_38 || !_tmp_36) && (_tmp_39 || !_tmp_37) && (_tmp_47 > 0)) begin
      ram_c_0_addr <= ram_c_0_addr + 1;
      _tmp_47 <= _tmp_47 - 1;
      _tmp_43 <= 1;
      _tmp_45 <= 0;
    end
    if((_tmp_38 || !_tmp_36) && (_tmp_39 || !_tmp_37) && (_tmp_47 == 1)) begin
      _tmp_45 <= 1;
    end
    // Thread write at th_comp state 37 (sequential sum result).
    if(th_comp == 37) begin
      ram_c_0_addr <= _th_comp_i_13 + _th_comp_offset_10;
      ram_c_0_wdata <= _th_comp_sum_12;
      ram_c_0_wenable <= 1;
    end
    _ram_c_cond_2_1 <= th_comp == 37;
    // Read pipeline for DMA write transfer 5 (_tmp_fsm_5) — same structure,
    // registers _tmp_88.._99.
    __tmp_93_1 <= _tmp_93;
    __tmp_94_1 <= _tmp_94;
    if((_tmp_90 || !_tmp_88) && (_tmp_91 || !_tmp_89) && _tmp_96) begin
      _tmp_98 <= 0;
      _tmp_88 <= 0;
      _tmp_89 <= 0;
      _tmp_96 <= 0;
    end
    if((_tmp_90 || !_tmp_88) && (_tmp_91 || !_tmp_89) && _tmp_95) begin
      _tmp_88 <= 1;
      _tmp_89 <= 1;
      _tmp_98 <= _tmp_97;
      _tmp_97 <= 0;
      _tmp_95 <= 0;
      _tmp_96 <= 1;
    end
    if((_tmp_fsm_5 == 1) && (_tmp_99 == 0) && !_tmp_97 && !_tmp_98) begin
      ram_c_0_addr <= _tmp_82;
      _tmp_99 <= _tmp_84 - 1;
      _tmp_95 <= 1;
      _tmp_97 <= _tmp_84 == 1;
    end
    if((_tmp_90 || !_tmp_88) && (_tmp_91 || !_tmp_89) && (_tmp_99 > 0)) begin
      ram_c_0_addr <= ram_c_0_addr + 1;
      _tmp_99 <= _tmp_99 - 1;
      _tmp_95 <= 1;
      _tmp_97 <= 0;
    end
    if((_tmp_90 || !_tmp_88) && (_tmp_91 || !_tmp_89) && (_tmp_99 == 1)) begin
      _tmp_97 <= 1;
    end
    // Thread reads at th_comp states 45/47 (compare stream vs sequential result).
    if(th_comp == 45) begin
      ram_c_0_addr <= _th_comp_i_20 + _th_comp_offset_stream_17;
    end
    _ram_c_cond_3_1 <= th_comp == 45;
    _ram_c_cond_4_1 <= th_comp == 45;
    if(th_comp == 47) begin
      ram_c_0_addr <= _th_comp_i_20 + _th_comp_offset_seq_18;
    end
    _ram_c_cond_5_1 <= th_comp == 47;
    _ram_c_cond_6_1 <= th_comp == 47;
  end
end
// Hook the two ram_c read pipelines onto the AXI W-channel stream variables:
// data/valid from the pipeline's second stage, ready fed back from the
// myaxi block's acceptance condition.
assign __variable_data_50 = _tmp_42;
assign __variable_valid_50 = _tmp_36;
assign _tmp_38 = 1 && __variable_ready_50;
assign __variable_data_102 = _tmp_94;
assign __variable_valid_102 = _tmp_88;
assign _tmp_90 = 1 && __variable_ready_102;
// Stream datapath and source/sink selection.
// Two-stage pipeline: _plus_data_3 = a + b, _plus_data_4 = (a + b) + bias.
// th_comp states 8..11 configure the first stream run (sel 1/2/3),
// states 15..18 configure the second (sel 4/5/6); _tmp_26.._29 latch
// RAM read data into the stream input variables one cycle after the read.
always @(posedge CLK) begin
  if(RST) begin
    _plus_data_3 <= 0;
    _plus_data_4 <= 0;
    _mystream_a_fsm_sel <= 0;
    _mystream_a_idle <= 1;
    __variable_wdata_0 <= 0;
    _mystream_b_fsm_sel <= 0;
    _mystream_b_idle <= 1;
    __variable_wdata_1 <= 0;
    __parametervariable_wdata_2 <= 0;
    _mystream_c_fsm_sel <= 0;
  end else begin
    // Compute pipeline (always running; valid is tracked elsewhere).
    _plus_data_3 <= mystream_a_data + mystream_b_data;
    _plus_data_4 <= _plus_data_3 + mystream_bias_data;
    // --- First stream run configuration ---
    if(th_comp == 8) begin
      _mystream_a_fsm_sel <= 1;
    end
    if(_mystream_start) begin
      _mystream_a_idle <= 0;
    end
    if(_tmp_26) begin
      __variable_wdata_0 <= ram_a_0_rdata;
    end
    if((_mystream_a_fsm_1 == 1) && (_mystream_a_count_1 == 1)) begin
      _mystream_a_idle <= 1;
    end
    if((_mystream_a_fsm_1 == 2) && (_mystream_a_count_1 == 1)) begin
      _mystream_a_idle <= 1;
    end
    if(th_comp == 9) begin
      _mystream_b_fsm_sel <= 2;
    end
    if(_mystream_start) begin
      _mystream_b_idle <= 0;
    end
    if(_tmp_27) begin
      __variable_wdata_1 <= ram_b_0_rdata;
    end
    if((_mystream_b_fsm_2 == 1) && (_mystream_b_count_2 == 1)) begin
      _mystream_b_idle <= 1;
    end
    if((_mystream_b_fsm_2 == 2) && (_mystream_b_count_2 == 1)) begin
      _mystream_b_idle <= 1;
    end
    if(th_comp == 10) begin
      __parametervariable_wdata_2 <= _th_comp_bias_5;
    end
    if(th_comp == 11) begin
      _mystream_c_fsm_sel <= 3;
    end
    // --- Second stream run configuration ---
    if(th_comp == 15) begin
      _mystream_a_fsm_sel <= 4;
    end
    if(_mystream_start) begin
      _mystream_a_idle <= 0;
    end
    if(_tmp_28) begin
      __variable_wdata_0 <= ram_a_0_rdata;
    end
    if((_mystream_a_fsm_4 == 1) && (_mystream_a_count_4 == 1)) begin
      _mystream_a_idle <= 1;
    end
    if((_mystream_a_fsm_4 == 2) && (_mystream_a_count_4 == 1)) begin
      _mystream_a_idle <= 1;
    end
    if(th_comp == 16) begin
      _mystream_b_fsm_sel <= 5;
    end
    if(_mystream_start) begin
      _mystream_b_idle <= 0;
    end
    if(_tmp_29) begin
      __variable_wdata_1 <= ram_b_0_rdata;
    end
    if((_mystream_b_fsm_5 == 1) && (_mystream_b_count_5 == 1)) begin
      _mystream_b_idle <= 1;
    end
    if((_mystream_b_fsm_5 == 2) && (_mystream_b_count_5 == 1)) begin
      _mystream_b_idle <= 1;
    end
    if(th_comp == 17) begin
      __parametervariable_wdata_2 <= _th_comp_bias_8;
    end
    if(th_comp == 18) begin
      _mystream_c_fsm_sel <= 6;
    end
  end
end
// Stream control FSM: pulses _mystream_start for one cycle when th_comp
// reaches state 12 or 19, then runs through nine fixed latency states
// (drain of the datapath pipeline) before clearing _mystream_busy.
localparam _mystream_fsm_1 = 1;
localparam _mystream_fsm_2 = 2;
localparam _mystream_fsm_3 = 3;
localparam _mystream_fsm_4 = 4;
localparam _mystream_fsm_5 = 5;
localparam _mystream_fsm_6 = 6;
localparam _mystream_fsm_7 = 7;
localparam _mystream_fsm_8 = 8;
localparam _mystream_fsm_9 = 9;
always @(posedge CLK) begin
  if(RST) begin
    _mystream_fsm <= _mystream_fsm_init;
    _d1__mystream_fsm <= _mystream_fsm_init;
    _mystream_start <= 0;
    _mystream_busy <= 0;
    __mystream_fsm_cond_0_0_1 <= 0;
    __mystream_fsm_cond_0_1_1 <= 0;
  end else begin
    _d1__mystream_fsm <= _mystream_fsm;
    // Delayed clear of the one-cycle start pulse.
    case(_d1__mystream_fsm)
      _mystream_fsm_init: begin
        if(__mystream_fsm_cond_0_0_1) begin
          _mystream_start <= 0;
        end
        if(__mystream_fsm_cond_0_1_1) begin
          _mystream_start <= 0;
        end
      end
    endcase
    case(_mystream_fsm)
      _mystream_fsm_init: begin
        if(th_comp == 12) begin
          _mystream_start <= 1;
          _mystream_busy <= 1;
        end
        __mystream_fsm_cond_0_0_1 <= th_comp == 12;
        if(th_comp == 19) begin
          _mystream_start <= 1;
          _mystream_busy <= 1;
        end
        __mystream_fsm_cond_0_1_1 <= th_comp == 19;
        if(th_comp == 12) begin
          _mystream_fsm <= _mystream_fsm_1;
        end
        if(th_comp == 19) begin
          _mystream_fsm <= _mystream_fsm_1;
        end
      end
      _mystream_fsm_1: begin
        _mystream_fsm <= _mystream_fsm_2;
      end
      // State 2 waits for source/sink completion; 3..8 are drain cycles.
      _mystream_fsm_2: begin
        if(_mystream_done) begin
          _mystream_fsm <= _mystream_fsm_3;
        end
      end
      _mystream_fsm_3: begin
        _mystream_fsm <= _mystream_fsm_4;
      end
      _mystream_fsm_4: begin
        _mystream_fsm <= _mystream_fsm_5;
      end
      _mystream_fsm_5: begin
        _mystream_fsm <= _mystream_fsm_6;
      end
      _mystream_fsm_6: begin
        _mystream_fsm <= _mystream_fsm_7;
      end
      _mystream_fsm_7: begin
        _mystream_fsm <= _mystream_fsm_8;
      end
      _mystream_fsm_8: begin
        _mystream_fsm <= _mystream_fsm_9;
      end
      _mystream_fsm_9: begin
        _mystream_busy <= 0;
        _mystream_fsm <= _mystream_fsm_init;
      end
    endcase
  end
end
// th_comp: the main thread FSM (compiled from a software-style thread).
// Overall program, as visible from the state actions:
//   0-2   : size = 32; double_size = size + size; offset = 0
//   3-6   : DMA-read a[] (src 0) and b[] (src 512) into ram_a / ram_b
//   7-13  : run stream #1 (c = a + b + bias, bias=100) and wait !busy
//   14-20 : run stream #2 on the second half (offset + size)
//   21-22 : DMA-write ram_c to 1024
//   23-27 : offset = double_size; DMA-read a[] and b[] again
//   28-38 : sequential loop: c[i] = a[i] + b[i] + 100 (reference result)
//   39-40 : DMA-write ram_c to 2048
//   41-51 : verification loop comparing stream result vs sequential result
//   52-56 : print "OK" or "NG" and stop
localparam th_comp_1 = 1;
localparam th_comp_2 = 2;
localparam th_comp_3 = 3;
localparam th_comp_4 = 4;
localparam th_comp_5 = 5;
localparam th_comp_6 = 6;
localparam th_comp_7 = 7;
localparam th_comp_8 = 8;
localparam th_comp_9 = 9;
localparam th_comp_10 = 10;
localparam th_comp_11 = 11;
localparam th_comp_12 = 12;
localparam th_comp_13 = 13;
localparam th_comp_14 = 14;
localparam th_comp_15 = 15;
localparam th_comp_16 = 16;
localparam th_comp_17 = 17;
localparam th_comp_18 = 18;
localparam th_comp_19 = 19;
localparam th_comp_20 = 20;
localparam th_comp_21 = 21;
localparam th_comp_22 = 22;
localparam th_comp_23 = 23;
localparam th_comp_24 = 24;
localparam th_comp_25 = 25;
localparam th_comp_26 = 26;
localparam th_comp_27 = 27;
localparam th_comp_28 = 28;
localparam th_comp_29 = 29;
localparam th_comp_30 = 30;
localparam th_comp_31 = 31;
localparam th_comp_32 = 32;
localparam th_comp_33 = 33;
localparam th_comp_34 = 34;
localparam th_comp_35 = 35;
localparam th_comp_36 = 36;
localparam th_comp_37 = 37;
localparam th_comp_38 = 38;
localparam th_comp_39 = 39;
localparam th_comp_40 = 40;
localparam th_comp_41 = 41;
localparam th_comp_42 = 42;
localparam th_comp_43 = 43;
localparam th_comp_44 = 44;
localparam th_comp_45 = 45;
localparam th_comp_46 = 46;
localparam th_comp_47 = 47;
localparam th_comp_48 = 48;
localparam th_comp_49 = 49;
localparam th_comp_50 = 50;
localparam th_comp_51 = 51;
localparam th_comp_52 = 52;
localparam th_comp_53 = 53;
localparam th_comp_54 = 54;
localparam th_comp_55 = 55;
localparam th_comp_56 = 56;
always @(posedge CLK) begin
  if(RST) begin
    th_comp <= th_comp_init;
    _th_comp_size_0 <= 0;
    _th_comp_double_size_1 <= 0;
    _th_comp_offset_2 <= 0;
    _tmp_0 <= 0;
    _tmp_1 <= 0;
    _tmp_2 <= 0;
    _tmp_13 <= 0;
    _tmp_14 <= 0;
    _tmp_15 <= 0;
    _th_comp_size_3 <= 0;
    _th_comp_offset_4 <= 0;
    _th_comp_bias_5 <= 0;
    _th_comp_size_6 <= 0;
    _th_comp_offset_7 <= 0;
    _th_comp_bias_8 <= 0;
    _tmp_30 <= 0;
    _tmp_31 <= 0;
    _tmp_32 <= 0;
    _tmp_52 <= 0;
    _tmp_53 <= 0;
    _tmp_54 <= 0;
    _tmp_65 <= 0;
    _tmp_66 <= 0;
    _tmp_67 <= 0;
    _th_comp_size_9 <= 0;
    _th_comp_offset_10 <= 0;
    _th_comp_bias_11 <= 0;
    _th_comp_sum_12 <= 0;
    _th_comp_i_13 <= 0;
    _tmp_79 <= 0;
    _th_comp_a_14 <= 0;
    _tmp_81 <= 0;
    _th_comp_b_15 <= 0;
    _tmp_82 <= 0;
    _tmp_83 <= 0;
    _tmp_84 <= 0;
    _th_comp_size_16 <= 0;
    _th_comp_offset_stream_17 <= 0;
    _th_comp_offset_seq_18 <= 0;
    _th_comp_all_ok_19 <= 0;
    _th_comp_i_20 <= 0;
    _tmp_105 <= 0;
    _th_comp_st_21 <= 0;
    _tmp_107 <= 0;
    _th_comp_sq_22 <= 0;
  end else begin
    case(th_comp)
      th_comp_init: begin
        _th_comp_size_0 <= 32;
        th_comp <= th_comp_1;
      end
      th_comp_1: begin
        _th_comp_double_size_1 <= _th_comp_size_0 + _th_comp_size_0;
        th_comp <= th_comp_2;
      end
      th_comp_2: begin
        _th_comp_offset_2 <= 0;
        th_comp <= th_comp_3;
      end
      // DMA-read into ram_a: (local addr, global addr, length), wait _tmp_12.
      th_comp_3: begin
        _tmp_0 <= _th_comp_offset_2;
        _tmp_1 <= 0;
        _tmp_2 <= _th_comp_double_size_1;
        th_comp <= th_comp_4;
      end
      th_comp_4: begin
        if(_tmp_12) begin
          th_comp <= th_comp_5;
        end
      end
      // DMA-read into ram_b from global address 512, wait _tmp_25.
      th_comp_5: begin
        _tmp_13 <= _th_comp_offset_2;
        _tmp_14 <= 512;
        _tmp_15 <= _th_comp_double_size_1;
        th_comp <= th_comp_6;
      end
      th_comp_6: begin
        if(_tmp_25) begin
          th_comp <= th_comp_7;
        end
      end
      // Stream run #1: set args, let states 8..12 configure and start.
      th_comp_7: begin
        _th_comp_size_3 <= _th_comp_size_0;
        _th_comp_offset_4 <= _th_comp_offset_2;
        _th_comp_bias_5 <= 100;
        th_comp <= th_comp_8;
      end
      th_comp_8: begin
        th_comp <= th_comp_9;
      end
      th_comp_9: begin
        th_comp <= th_comp_10;
      end
      th_comp_10: begin
        th_comp <= th_comp_11;
      end
      th_comp_11: begin
        th_comp <= th_comp_12;
      end
      th_comp_12: begin
        th_comp <= th_comp_13;
      end
      th_comp_13: begin
        if(!_mystream_busy) begin
          th_comp <= th_comp_14;
        end
      end
      // Stream run #2 on the second half of the buffers.
      th_comp_14: begin
        _th_comp_size_6 <= _th_comp_size_0;
        _th_comp_offset_7 <= _th_comp_offset_2 + _th_comp_size_0;
        _th_comp_bias_8 <= 100;
        th_comp <= th_comp_15;
      end
      th_comp_15: begin
        th_comp <= th_comp_16;
      end
      th_comp_16: begin
        th_comp <= th_comp_17;
      end
      th_comp_17: begin
        th_comp <= th_comp_18;
      end
      th_comp_18: begin
        th_comp <= th_comp_19;
      end
      th_comp_19: begin
        th_comp <= th_comp_20;
      end
      th_comp_20: begin
        if(!_mystream_busy) begin
          th_comp <= th_comp_21;
        end
      end
      // DMA-write ram_c (stream result) to global address 1024, wait _tmp_51.
      th_comp_21: begin
        _tmp_30 <= _th_comp_offset_2;
        _tmp_31 <= 1024;
        _tmp_32 <= _th_comp_double_size_1;
        th_comp <= th_comp_22;
      end
      th_comp_22: begin
        if(_tmp_51) begin
          th_comp <= th_comp_23;
        end
      end
      // Second pass: move offset past the stream results, reload a[] and b[].
      th_comp_23: begin
        _th_comp_offset_2 <= _th_comp_double_size_1;
        th_comp <= th_comp_24;
      end
      th_comp_24: begin
        _tmp_52 <= _th_comp_offset_2;
        _tmp_53 <= 0;
        _tmp_54 <= _th_comp_double_size_1;
        th_comp <= th_comp_25;
      end
      th_comp_25: begin
        if(_tmp_64) begin
          th_comp <= th_comp_26;
        end
      end
      th_comp_26: begin
        _tmp_65 <= _th_comp_offset_2;
        _tmp_66 <= 512;
        _tmp_67 <= _th_comp_double_size_1;
        th_comp <= th_comp_27;
      end
      th_comp_27: begin
        if(_tmp_77) begin
          th_comp <= th_comp_28;
        end
      end
      // Sequential reference computation: c[i] = a[i] + b[i] + bias.
      th_comp_28: begin
        _th_comp_size_9 <= _th_comp_double_size_1;
        _th_comp_offset_10 <= _th_comp_offset_2;
        _th_comp_bias_11 <= 100;
        th_comp <= th_comp_29;
      end
      th_comp_29: begin
        _th_comp_sum_12 <= 0;
        th_comp <= th_comp_30;
      end
      th_comp_30: begin
        _th_comp_i_13 <= 0;
        th_comp <= th_comp_31;
      end
      th_comp_31: begin
        if(_th_comp_i_13 < _th_comp_size_9) begin
          th_comp <= th_comp_32;
        end else begin
          th_comp <= th_comp_39;
        end
      end
      th_comp_32: begin
        if(_tmp_78) begin
          _tmp_79 <= ram_a_0_rdata;
        end
        if(_tmp_78) begin
          th_comp <= th_comp_33;
        end
      end
      th_comp_33: begin
        _th_comp_a_14 <= _tmp_79;
        th_comp <= th_comp_34;
      end
      th_comp_34: begin
        if(_tmp_80) begin
          _tmp_81 <= ram_b_0_rdata;
        end
        if(_tmp_80) begin
          th_comp <= th_comp_35;
        end
      end
      th_comp_35: begin
        _th_comp_b_15 <= _tmp_81;
        th_comp <= th_comp_36;
      end
      th_comp_36: begin
        _th_comp_sum_12 <= _th_comp_a_14 + _th_comp_b_15 + _th_comp_bias_11;
        th_comp <= th_comp_37;
      end
      th_comp_37: begin
        th_comp <= th_comp_38;
      end
      th_comp_38: begin
        _th_comp_i_13 <= _th_comp_i_13 + 1;
        th_comp <= th_comp_31;
      end
      // DMA-write sequential result to global address 2048, wait _tmp_103.
      th_comp_39: begin
        _tmp_82 <= _th_comp_offset_2;
        _tmp_83 <= 2048;
        _tmp_84 <= _th_comp_double_size_1;
        th_comp <= th_comp_40;
      end
      th_comp_40: begin
        if(_tmp_103) begin
          th_comp <= th_comp_41;
        end
      end
      // Verification: compare ram_c[stream offset + i] vs ram_c[seq offset + i].
      th_comp_41: begin
        _th_comp_size_16 <= _th_comp_double_size_1;
        _th_comp_offset_stream_17 <= 0;
        _th_comp_offset_seq_18 <= _th_comp_offset_2;
        th_comp <= th_comp_42;
      end
      th_comp_42: begin
        _th_comp_all_ok_19 <= 1;
        th_comp <= th_comp_43;
      end
      th_comp_43: begin
        _th_comp_i_20 <= 0;
        th_comp <= th_comp_44;
      end
      th_comp_44: begin
        if(_th_comp_i_20 < _th_comp_size_16) begin
          th_comp <= th_comp_45;
        end else begin
          th_comp <= th_comp_52;
        end
      end
      th_comp_45: begin
        if(_tmp_104) begin
          _tmp_105 <= ram_c_0_rdata;
        end
        if(_tmp_104) begin
          th_comp <= th_comp_46;
        end
      end
      th_comp_46: begin
        _th_comp_st_21 <= _tmp_105;
        th_comp <= th_comp_47;
      end
      th_comp_47: begin
        if(_tmp_106) begin
          _tmp_107 <= ram_c_0_rdata;
        end
        if(_tmp_106) begin
          th_comp <= th_comp_48;
        end
      end
      th_comp_48: begin
        _th_comp_sq_22 <= _tmp_107;
        th_comp <= th_comp_49;
      end
      th_comp_49: begin
        // Case-inequality (!==) — also treats x/z as a mismatch in simulation.
        if(_th_comp_st_21 !== _th_comp_sq_22) begin
          th_comp <= th_comp_50;
        end else begin
          th_comp <= th_comp_51;
        end
      end
      th_comp_50: begin
        _th_comp_all_ok_19 <= 0;
        th_comp <= th_comp_51;
      end
      th_comp_51: begin
        _th_comp_i_20 <= _th_comp_i_20 + 1;
        th_comp <= th_comp_44;
      end
      th_comp_52: begin
        if(_th_comp_all_ok_19) begin
          th_comp <= th_comp_53;
        end else begin
          th_comp <= th_comp_55;
        end
      end
      th_comp_53: begin
        $display("OK");
        th_comp <= th_comp_54;
      end
      th_comp_54: begin
        th_comp <= th_comp_56;
      end
      th_comp_55: begin
        $display("NG");
        th_comp <= th_comp_56;
      end
    endcase
  end
end
// DMA read FSM 0 (global memory -> ram_a, first load).
// States: 1 latch word-aligned address/length; 2 split the request into
// bursts that respect the 256-beat cap and 4 KB address boundaries;
// 3 wait for AR acceptance; 4 capture R beats (_tmp_6/_tmp_7) and loop to 2
// while words remain; 5 pulse the done flag _tmp_12; 6 return to idle.
localparam _tmp_fsm_0_1 = 1;
localparam _tmp_fsm_0_2 = 2;
localparam _tmp_fsm_0_3 = 3;
localparam _tmp_fsm_0_4 = 4;
localparam _tmp_fsm_0_5 = 5;
localparam _tmp_fsm_0_6 = 6;
always @(posedge CLK) begin
  if(RST) begin
    _tmp_fsm_0 <= _tmp_fsm_0_init;
    _d1__tmp_fsm_0 <= _tmp_fsm_0_init;
    _tmp_3 <= 0;
    _tmp_5 <= 0;
    _tmp_4 <= 0;
    __tmp_fsm_0_cond_4_0_1 <= 0;
    _tmp_7 <= 0;
    _tmp_6 <= 0;
    _tmp_12 <= 0;
    __tmp_fsm_0_cond_5_1_1 <= 0;
  end else begin
    _d1__tmp_fsm_0 <= _tmp_fsm_0;
    // Delayed clears of the per-beat valid and the done pulse.
    case(_d1__tmp_fsm_0)
      _tmp_fsm_0_4: begin
        if(__tmp_fsm_0_cond_4_0_1) begin
          _tmp_7 <= 0;
        end
      end
      _tmp_fsm_0_5: begin
        if(__tmp_fsm_0_cond_5_1_1) begin
          _tmp_12 <= 0;
        end
      end
    endcase
    case(_tmp_fsm_0)
      _tmp_fsm_0_init: begin
        if(th_comp == 4) begin
          _tmp_fsm_0 <= _tmp_fsm_0_1;
        end
      end
      _tmp_fsm_0_1: begin
        // Byte address aligned down to a 4-byte boundary; _tmp_5 = words left.
        _tmp_3 <= (_tmp_1 >> 2) << 2;
        _tmp_5 <= _tmp_2;
        _tmp_fsm_0 <= _tmp_fsm_0_2;
      end
      _tmp_fsm_0_2: begin
        // Choose next burst length (_tmp_4): <=256 beats, never crossing 4 KB.
        if((_tmp_5 <= 256) && ((_tmp_3 & 4095) + (_tmp_5 << 2) >= 4096)) begin
          _tmp_4 <= 4096 - (_tmp_3 & 4095) >> 2;
          _tmp_5 <= _tmp_5 - (4096 - (_tmp_3 & 4095) >> 2);
        end else if(_tmp_5 <= 256) begin
          _tmp_4 <= _tmp_5;
          _tmp_5 <= 0;
        end else if((_tmp_3 & 4095) + 1024 >= 4096) begin
          _tmp_4 <= 4096 - (_tmp_3 & 4095) >> 2;
          _tmp_5 <= _tmp_5 - (4096 - (_tmp_3 & 4095) >> 2);
        end else begin
          _tmp_4 <= 256;
          _tmp_5 <= _tmp_5 - 256;
        end
        _tmp_fsm_0 <= _tmp_fsm_0_3;
      end
      _tmp_fsm_0_3: begin
        if(myaxi_arready || !myaxi_arvalid) begin
          _tmp_fsm_0 <= _tmp_fsm_0_4;
        end
      end
      _tmp_fsm_0_4: begin
        __tmp_fsm_0_cond_4_0_1 <= 1;
        if(myaxi_rready && myaxi_rvalid) begin
          _tmp_6 <= myaxi_rdata;
          _tmp_7 <= 1;
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast) begin
          _tmp_3 <= _tmp_3 + (_tmp_4 << 2);
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_5 > 0)) begin
          _tmp_fsm_0 <= _tmp_fsm_0_2;
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_5 == 0)) begin
          _tmp_fsm_0 <= _tmp_fsm_0_5;
        end
      end
      _tmp_fsm_0_5: begin
        _tmp_12 <= 1;
        __tmp_fsm_0_cond_5_1_1 <= 1;
        _tmp_fsm_0 <= _tmp_fsm_0_6;
      end
      _tmp_fsm_0_6: begin
        _tmp_fsm_0 <= _tmp_fsm_0_init;
      end
    endcase
  end
end
// DMA read FSM 1 (global memory -> ram_b, first load).
// Identical structure to _tmp_fsm_0: burst-split with 256-beat / 4 KB limits,
// capture R beats into _tmp_19/_tmp_20, pulse done flag _tmp_25.
// Started by th_comp state 6.
localparam _tmp_fsm_1_1 = 1;
localparam _tmp_fsm_1_2 = 2;
localparam _tmp_fsm_1_3 = 3;
localparam _tmp_fsm_1_4 = 4;
localparam _tmp_fsm_1_5 = 5;
localparam _tmp_fsm_1_6 = 6;
always @(posedge CLK) begin
  if(RST) begin
    _tmp_fsm_1 <= _tmp_fsm_1_init;
    _d1__tmp_fsm_1 <= _tmp_fsm_1_init;
    _tmp_16 <= 0;
    _tmp_18 <= 0;
    _tmp_17 <= 0;
    __tmp_fsm_1_cond_4_0_1 <= 0;
    _tmp_20 <= 0;
    _tmp_19 <= 0;
    _tmp_25 <= 0;
    __tmp_fsm_1_cond_5_1_1 <= 0;
  end else begin
    _d1__tmp_fsm_1 <= _tmp_fsm_1;
    case(_d1__tmp_fsm_1)
      _tmp_fsm_1_4: begin
        if(__tmp_fsm_1_cond_4_0_1) begin
          _tmp_20 <= 0;
        end
      end
      _tmp_fsm_1_5: begin
        if(__tmp_fsm_1_cond_5_1_1) begin
          _tmp_25 <= 0;
        end
      end
    endcase
    case(_tmp_fsm_1)
      _tmp_fsm_1_init: begin
        if(th_comp == 6) begin
          _tmp_fsm_1 <= _tmp_fsm_1_1;
        end
      end
      _tmp_fsm_1_1: begin
        _tmp_16 <= (_tmp_14 >> 2) << 2;
        _tmp_18 <= _tmp_15;
        _tmp_fsm_1 <= _tmp_fsm_1_2;
      end
      _tmp_fsm_1_2: begin
        // Burst sizing: <=256 beats and no 4 KB boundary crossing.
        if((_tmp_18 <= 256) && ((_tmp_16 & 4095) + (_tmp_18 << 2) >= 4096)) begin
          _tmp_17 <= 4096 - (_tmp_16 & 4095) >> 2;
          _tmp_18 <= _tmp_18 - (4096 - (_tmp_16 & 4095) >> 2);
        end else if(_tmp_18 <= 256) begin
          _tmp_17 <= _tmp_18;
          _tmp_18 <= 0;
        end else if((_tmp_16 & 4095) + 1024 >= 4096) begin
          _tmp_17 <= 4096 - (_tmp_16 & 4095) >> 2;
          _tmp_18 <= _tmp_18 - (4096 - (_tmp_16 & 4095) >> 2);
        end else begin
          _tmp_17 <= 256;
          _tmp_18 <= _tmp_18 - 256;
        end
        _tmp_fsm_1 <= _tmp_fsm_1_3;
      end
      _tmp_fsm_1_3: begin
        if(myaxi_arready || !myaxi_arvalid) begin
          _tmp_fsm_1 <= _tmp_fsm_1_4;
        end
      end
      _tmp_fsm_1_4: begin
        __tmp_fsm_1_cond_4_0_1 <= 1;
        if(myaxi_rready && myaxi_rvalid) begin
          _tmp_19 <= myaxi_rdata;
          _tmp_20 <= 1;
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast) begin
          _tmp_16 <= _tmp_16 + (_tmp_17 << 2);
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_18 > 0)) begin
          _tmp_fsm_1 <= _tmp_fsm_1_2;
        end
        if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_18 == 0)) begin
          _tmp_fsm_1 <= _tmp_fsm_1_5;
        end
      end
      _tmp_fsm_1_5: begin
        _tmp_25 <= 1;
        __tmp_fsm_1_cond_5_1_1 <= 1;
        _tmp_fsm_1 <= _tmp_fsm_1_6;
      end
      _tmp_fsm_1_6: begin
        _tmp_fsm_1 <= _tmp_fsm_1_init;
      end
    endcase
  end
end
// Stream source FSM for ram_a, run #1 (selected when _mystream_a_fsm_sel == 1).
// Generates a sequence of read addresses: state 1 issues offset, state 2
// steps by stride; _mystream_a_count_1 counts remaining reads.
// Parameters are latched from thread locals at th_comp state 8.
localparam _mystream_a_fsm_1_1 = 1;
localparam _mystream_a_fsm_1_2 = 2;
always @(posedge CLK) begin
  if(RST) begin
    _mystream_a_fsm_1 <= _mystream_a_fsm_1_init;
    _d1__mystream_a_fsm_1 <= _mystream_a_fsm_1_init;
    _mystream_a_offset_1 <= 0;
    _mystream_a_size_1 <= 0;
    _mystream_a_stride_1 <= 0;
    _mystream_a_count_1 <= 0;
    _mystream_a_raddr_1 <= 0;
    _mystream_a_renable_1 <= 0;
    __mystream_a_fsm_1_cond_1_0_1 <= 0;
    __mystream_a_fsm_1_cond_2_1_1 <= 0;
  end else begin
    _d1__mystream_a_fsm_1 <= _mystream_a_fsm_1;
    // Delayed deassert of the one-cycle read enable.
    case(_d1__mystream_a_fsm_1)
      _mystream_a_fsm_1_1: begin
        if(__mystream_a_fsm_1_cond_1_0_1) begin
          _mystream_a_renable_1 <= 0;
        end
      end
      _mystream_a_fsm_1_2: begin
        if(__mystream_a_fsm_1_cond_2_1_1) begin
          _mystream_a_renable_1 <= 0;
        end
      end
    endcase
    case(_mystream_a_fsm_1)
      _mystream_a_fsm_1_init: begin
        if(th_comp == 8) begin
          _mystream_a_offset_1 <= _th_comp_offset_4;
          _mystream_a_size_1 <= _th_comp_size_3;
          _mystream_a_stride_1 <= 1;
        end
        if(_mystream_start && (_mystream_a_fsm_sel == 1) && (_mystream_a_size_1 > 0)) begin
          _mystream_a_count_1 <= _mystream_a_size_1;
        end
        if(_mystream_start && (_mystream_a_fsm_sel == 1) && (_mystream_a_size_1 > 0)) begin
          _mystream_a_fsm_1 <= _mystream_a_fsm_1_1;
        end
      end
      _mystream_a_fsm_1_1: begin
        // First read at the base offset.
        _mystream_a_raddr_1 <= _mystream_a_offset_1;
        _mystream_a_renable_1 <= 1;
        _mystream_a_count_1 <= _mystream_a_count_1 - 1;
        __mystream_a_fsm_1_cond_1_0_1 <= 1;
        if(_mystream_a_count_1 == 1) begin
          _mystream_a_fsm_1 <= _mystream_a_fsm_1_init;
        end
        if(_mystream_a_count_1 > 1) begin
          _mystream_a_fsm_1 <= _mystream_a_fsm_1_2;
        end
      end
      _mystream_a_fsm_1_2: begin
        // Subsequent reads: advance by stride until count reaches zero.
        _mystream_a_raddr_1 <= _mystream_a_raddr_1 + _mystream_a_stride_1;
        _mystream_a_renable_1 <= 1;
        _mystream_a_count_1 <= _mystream_a_count_1 - 1;
        __mystream_a_fsm_1_cond_2_1_1 <= 1;
        if(_mystream_a_count_1 == 1) begin
          _mystream_a_fsm_1 <= _mystream_a_fsm_1_init;
        end
      end
    endcase
  end
end
// Stream source FSM for ram_b, run #1 (selected when _mystream_b_fsm_sel == 2).
// Same address-generator pattern as _mystream_a_fsm_1; parameters latched
// at th_comp state 9.
localparam _mystream_b_fsm_2_1 = 1;
localparam _mystream_b_fsm_2_2 = 2;
always @(posedge CLK) begin
  if(RST) begin
    _mystream_b_fsm_2 <= _mystream_b_fsm_2_init;
    _d1__mystream_b_fsm_2 <= _mystream_b_fsm_2_init;
    _mystream_b_offset_2 <= 0;
    _mystream_b_size_2 <= 0;
    _mystream_b_stride_2 <= 0;
    _mystream_b_count_2 <= 0;
    _mystream_b_raddr_2 <= 0;
    _mystream_b_renable_2 <= 0;
    __mystream_b_fsm_2_cond_1_0_1 <= 0;
    __mystream_b_fsm_2_cond_2_1_1 <= 0;
  end else begin
    _d1__mystream_b_fsm_2 <= _mystream_b_fsm_2;
    case(_d1__mystream_b_fsm_2)
      _mystream_b_fsm_2_1: begin
        if(__mystream_b_fsm_2_cond_1_0_1) begin
          _mystream_b_renable_2 <= 0;
        end
      end
      _mystream_b_fsm_2_2: begin
        if(__mystream_b_fsm_2_cond_2_1_1) begin
          _mystream_b_renable_2 <= 0;
        end
      end
    endcase
    case(_mystream_b_fsm_2)
      _mystream_b_fsm_2_init: begin
        if(th_comp == 9) begin
          _mystream_b_offset_2 <= _th_comp_offset_4;
          _mystream_b_size_2 <= _th_comp_size_3;
          _mystream_b_stride_2 <= 1;
        end
        if(_mystream_start && (_mystream_b_fsm_sel == 2) && (_mystream_b_size_2 > 0)) begin
          _mystream_b_count_2 <= _mystream_b_size_2;
        end
        if(_mystream_start && (_mystream_b_fsm_sel == 2) && (_mystream_b_size_2 > 0)) begin
          _mystream_b_fsm_2 <= _mystream_b_fsm_2_1;
        end
      end
      _mystream_b_fsm_2_1: begin
        _mystream_b_raddr_2 <= _mystream_b_offset_2;
        _mystream_b_renable_2 <= 1;
        _mystream_b_count_2 <= _mystream_b_count_2 - 1;
        __mystream_b_fsm_2_cond_1_0_1 <= 1;
        if(_mystream_b_count_2 == 1) begin
          _mystream_b_fsm_2 <= _mystream_b_fsm_2_init;
        end
        if(_mystream_b_count_2 > 1) begin
          _mystream_b_fsm_2 <= _mystream_b_fsm_2_2;
        end
      end
      _mystream_b_fsm_2_2: begin
        _mystream_b_raddr_2 <= _mystream_b_raddr_2 + _mystream_b_stride_2;
        _mystream_b_renable_2 <= 1;
        _mystream_b_count_2 <= _mystream_b_count_2 - 1;
        __mystream_b_fsm_2_cond_2_1_1 <= 1;
        if(_mystream_b_count_2 == 1) begin
          _mystream_b_fsm_2 <= _mystream_b_fsm_2_init;
        end
      end
    endcase
  end
end
// Stream sink FSM for ram_c, run #1 (selected when _mystream_c_fsm_sel == 3).
// States 1..6 are pure latency (waiting for the datapath pipeline to fill);
// states 7/8 write mystream_c_data to consecutive addresses, mirroring the
// offset/stride pattern of the source FSMs. Parameters latched at
// th_comp state 11.
localparam _mystream_c_fsm_3_1 = 1;
localparam _mystream_c_fsm_3_2 = 2;
localparam _mystream_c_fsm_3_3 = 3;
localparam _mystream_c_fsm_3_4 = 4;
localparam _mystream_c_fsm_3_5 = 5;
localparam _mystream_c_fsm_3_6 = 6;
localparam _mystream_c_fsm_3_7 = 7;
localparam _mystream_c_fsm_3_8 = 8;
always @(posedge CLK) begin
  if(RST) begin
    _mystream_c_fsm_3 <= _mystream_c_fsm_3_init;
    _d1__mystream_c_fsm_3 <= _mystream_c_fsm_3_init;
    _mystream_c_offset_3 <= 0;
    _mystream_c_size_3 <= 0;
    _mystream_c_stride_3 <= 0;
    _mystream_c_count_3 <= 0;
    _mystream_c_waddr_3 <= 0;
    _mystream_c_wdata_3 <= 0;
    _mystream_c_wenable_3 <= 0;
    __mystream_c_fsm_3_cond_7_0_1 <= 0;
    __mystream_c_fsm_3_cond_8_1_1 <= 0;
  end else begin
    _d1__mystream_c_fsm_3 <= _mystream_c_fsm_3;
    // Delayed deassert of the one-cycle write enable.
    case(_d1__mystream_c_fsm_3)
      _mystream_c_fsm_3_7: begin
        if(__mystream_c_fsm_3_cond_7_0_1) begin
          _mystream_c_wenable_3 <= 0;
        end
      end
      _mystream_c_fsm_3_8: begin
        if(__mystream_c_fsm_3_cond_8_1_1) begin
          _mystream_c_wenable_3 <= 0;
        end
      end
    endcase
    case(_mystream_c_fsm_3)
      _mystream_c_fsm_3_init: begin
        if(th_comp == 11) begin
          _mystream_c_offset_3 <= _th_comp_offset_4;
          _mystream_c_size_3 <= _th_comp_size_3;
          _mystream_c_stride_3 <= 1;
        end
        if(_mystream_start && (_mystream_c_fsm_sel == 3) && (_mystream_c_size_3 > 0)) begin
          _mystream_c_count_3 <= _mystream_c_size_3;
        end
        if(_mystream_start && (_mystream_c_fsm_sel == 3) && (_mystream_c_size_3 > 0)) begin
          _mystream_c_fsm_3 <= _mystream_c_fsm_3_1;
        end
      end
      // Pipeline-fill delay: six idle transitions before the first write.
      _mystream_c_fsm_3_1: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_2;
      end
      _mystream_c_fsm_3_2: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_3;
      end
      _mystream_c_fsm_3_3: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_4;
      end
      _mystream_c_fsm_3_4: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_5;
      end
      _mystream_c_fsm_3_5: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_6;
      end
      _mystream_c_fsm_3_6: begin
        _mystream_c_fsm_3 <= _mystream_c_fsm_3_7;
      end
      _mystream_c_fsm_3_7: begin
        // First write at the base offset.
        _mystream_c_waddr_3 <= _mystream_c_offset_3;
        _mystream_c_wdata_3 <= mystream_c_data;
        _mystream_c_wenable_3 <= 1;
        _mystream_c_count_3 <= _mystream_c_count_3 - 1;
        __mystream_c_fsm_3_cond_7_0_1 <= 1;
        if(_mystream_c_count_3 == 1) begin
          _mystream_c_fsm_3 <= _mystream_c_fsm_3_init;
        end
        if(_mystream_c_count_3 > 1) begin
          _mystream_c_fsm_3 <= _mystream_c_fsm_3_8;
        end
      end
      _mystream_c_fsm_3_8: begin
        // Subsequent writes: advance by stride until count reaches zero.
        _mystream_c_waddr_3 <= _mystream_c_waddr_3 + _mystream_c_stride_3;
        _mystream_c_wdata_3 <= mystream_c_data;
        _mystream_c_wenable_3 <= 1;
        _mystream_c_count_3 <= _mystream_c_count_3 - 1;
        __mystream_c_fsm_3_cond_8_1_1 <= 1;
        if(_mystream_c_count_3 == 1) begin
          _mystream_c_fsm_3 <= _mystream_c_fsm_3_init;
        end
      end
    endcase
  end
end
localparam _mystream_a_fsm_4_1 = 1;
localparam _mystream_a_fsm_4_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_a_fsm_4 <= _mystream_a_fsm_4_init;
_d1__mystream_a_fsm_4 <= _mystream_a_fsm_4_init;
_mystream_a_offset_4 <= 0;
_mystream_a_size_4 <= 0;
_mystream_a_stride_4 <= 0;
_mystream_a_count_4 <= 0;
_mystream_a_raddr_4 <= 0;
_mystream_a_renable_4 <= 0;
__mystream_a_fsm_4_cond_1_0_1 <= 0;
__mystream_a_fsm_4_cond_2_1_1 <= 0;
end else begin
_d1__mystream_a_fsm_4 <= _mystream_a_fsm_4;
case(_d1__mystream_a_fsm_4)
_mystream_a_fsm_4_1: begin
if(__mystream_a_fsm_4_cond_1_0_1) begin
_mystream_a_renable_4 <= 0;
end
end
_mystream_a_fsm_4_2: begin
if(__mystream_a_fsm_4_cond_2_1_1) begin
_mystream_a_renable_4 <= 0;
end
end
endcase
case(_mystream_a_fsm_4)
_mystream_a_fsm_4_init: begin
if(th_comp == 15) begin
_mystream_a_offset_4 <= _th_comp_offset_7;
_mystream_a_size_4 <= _th_comp_size_6;
_mystream_a_stride_4 <= 1;
end
if(_mystream_start && (_mystream_a_fsm_sel == 4) && (_mystream_a_size_4 > 0)) begin
_mystream_a_count_4 <= _mystream_a_size_4;
end
if(_mystream_start && (_mystream_a_fsm_sel == 4) && (_mystream_a_size_4 > 0)) begin
_mystream_a_fsm_4 <= _mystream_a_fsm_4_1;
end
end
_mystream_a_fsm_4_1: begin
_mystream_a_raddr_4 <= _mystream_a_offset_4;
_mystream_a_renable_4 <= 1;
_mystream_a_count_4 <= _mystream_a_count_4 - 1;
__mystream_a_fsm_4_cond_1_0_1 <= 1;
if(_mystream_a_count_4 == 1) begin
_mystream_a_fsm_4 <= _mystream_a_fsm_4_init;
end
if(_mystream_a_count_4 > 1) begin
_mystream_a_fsm_4 <= _mystream_a_fsm_4_2;
end
end
_mystream_a_fsm_4_2: begin
_mystream_a_raddr_4 <= _mystream_a_raddr_4 + _mystream_a_stride_4;
_mystream_a_renable_4 <= 1;
_mystream_a_count_4 <= _mystream_a_count_4 - 1;
__mystream_a_fsm_4_cond_2_1_1 <= 1;
if(_mystream_a_count_4 == 1) begin
_mystream_a_fsm_4 <= _mystream_a_fsm_4_init;
end
end
endcase
end
end
localparam _mystream_b_fsm_5_1 = 1;
localparam _mystream_b_fsm_5_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_b_fsm_5 <= _mystream_b_fsm_5_init;
_d1__mystream_b_fsm_5 <= _mystream_b_fsm_5_init;
_mystream_b_offset_5 <= 0;
_mystream_b_size_5 <= 0;
_mystream_b_stride_5 <= 0;
_mystream_b_count_5 <= 0;
_mystream_b_raddr_5 <= 0;
_mystream_b_renable_5 <= 0;
__mystream_b_fsm_5_cond_1_0_1 <= 0;
__mystream_b_fsm_5_cond_2_1_1 <= 0;
end else begin
_d1__mystream_b_fsm_5 <= _mystream_b_fsm_5;
case(_d1__mystream_b_fsm_5)
_mystream_b_fsm_5_1: begin
if(__mystream_b_fsm_5_cond_1_0_1) begin
_mystream_b_renable_5 <= 0;
end
end
_mystream_b_fsm_5_2: begin
if(__mystream_b_fsm_5_cond_2_1_1) begin
_mystream_b_renable_5 <= 0;
end
end
endcase
case(_mystream_b_fsm_5)
_mystream_b_fsm_5_init: begin
if(th_comp == 16) begin
_mystream_b_offset_5 <= _th_comp_offset_7;
_mystream_b_size_5 <= _th_comp_size_6;
_mystream_b_stride_5 <= 1;
end
if(_mystream_start && (_mystream_b_fsm_sel == 5) && (_mystream_b_size_5 > 0)) begin
_mystream_b_count_5 <= _mystream_b_size_5;
end
if(_mystream_start && (_mystream_b_fsm_sel == 5) && (_mystream_b_size_5 > 0)) begin
_mystream_b_fsm_5 <= _mystream_b_fsm_5_1;
end
end
_mystream_b_fsm_5_1: begin
_mystream_b_raddr_5 <= _mystream_b_offset_5;
_mystream_b_renable_5 <= 1;
_mystream_b_count_5 <= _mystream_b_count_5 - 1;
__mystream_b_fsm_5_cond_1_0_1 <= 1;
if(_mystream_b_count_5 == 1) begin
_mystream_b_fsm_5 <= _mystream_b_fsm_5_init;
end
if(_mystream_b_count_5 > 1) begin
_mystream_b_fsm_5 <= _mystream_b_fsm_5_2;
end
end
_mystream_b_fsm_5_2: begin
_mystream_b_raddr_5 <= _mystream_b_raddr_5 + _mystream_b_stride_5;
_mystream_b_renable_5 <= 1;
_mystream_b_count_5 <= _mystream_b_count_5 - 1;
__mystream_b_fsm_5_cond_2_1_1 <= 1;
if(_mystream_b_count_5 == 1) begin
_mystream_b_fsm_5 <= _mystream_b_fsm_5_init;
end
end
endcase
end
end
localparam _mystream_c_fsm_6_1 = 1;
localparam _mystream_c_fsm_6_2 = 2;
localparam _mystream_c_fsm_6_3 = 3;
localparam _mystream_c_fsm_6_4 = 4;
localparam _mystream_c_fsm_6_5 = 5;
localparam _mystream_c_fsm_6_6 = 6;
localparam _mystream_c_fsm_6_7 = 7;
localparam _mystream_c_fsm_6_8 = 8;
always @(posedge CLK) begin
if(RST) begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_init;
_d1__mystream_c_fsm_6 <= _mystream_c_fsm_6_init;
_mystream_c_offset_6 <= 0;
_mystream_c_size_6 <= 0;
_mystream_c_stride_6 <= 0;
_mystream_c_count_6 <= 0;
_mystream_c_waddr_6 <= 0;
_mystream_c_wdata_6 <= 0;
_mystream_c_wenable_6 <= 0;
__mystream_c_fsm_6_cond_7_0_1 <= 0;
__mystream_c_fsm_6_cond_8_1_1 <= 0;
end else begin
_d1__mystream_c_fsm_6 <= _mystream_c_fsm_6;
case(_d1__mystream_c_fsm_6)
_mystream_c_fsm_6_7: begin
if(__mystream_c_fsm_6_cond_7_0_1) begin
_mystream_c_wenable_6 <= 0;
end
end
_mystream_c_fsm_6_8: begin
if(__mystream_c_fsm_6_cond_8_1_1) begin
_mystream_c_wenable_6 <= 0;
end
end
endcase
case(_mystream_c_fsm_6)
_mystream_c_fsm_6_init: begin
if(th_comp == 18) begin
_mystream_c_offset_6 <= _th_comp_offset_7;
_mystream_c_size_6 <= _th_comp_size_6;
_mystream_c_stride_6 <= 1;
end
if(_mystream_start && (_mystream_c_fsm_sel == 6) && (_mystream_c_size_6 > 0)) begin
_mystream_c_count_6 <= _mystream_c_size_6;
end
if(_mystream_start && (_mystream_c_fsm_sel == 6) && (_mystream_c_size_6 > 0)) begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_1;
end
end
_mystream_c_fsm_6_1: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_2;
end
_mystream_c_fsm_6_2: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_3;
end
_mystream_c_fsm_6_3: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_4;
end
_mystream_c_fsm_6_4: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_5;
end
_mystream_c_fsm_6_5: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_6;
end
_mystream_c_fsm_6_6: begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_7;
end
_mystream_c_fsm_6_7: begin
_mystream_c_waddr_6 <= _mystream_c_offset_6;
_mystream_c_wdata_6 <= mystream_c_data;
_mystream_c_wenable_6 <= 1;
_mystream_c_count_6 <= _mystream_c_count_6 - 1;
__mystream_c_fsm_6_cond_7_0_1 <= 1;
if(_mystream_c_count_6 == 1) begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_init;
end
if(_mystream_c_count_6 > 1) begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_8;
end
end
_mystream_c_fsm_6_8: begin
_mystream_c_waddr_6 <= _mystream_c_waddr_6 + _mystream_c_stride_6;
_mystream_c_wdata_6 <= mystream_c_data;
_mystream_c_wenable_6 <= 1;
_mystream_c_count_6 <= _mystream_c_count_6 - 1;
__mystream_c_fsm_6_cond_8_1_1 <= 1;
if(_mystream_c_count_6 == 1) begin
_mystream_c_fsm_6 <= _mystream_c_fsm_6_init;
end
end
endcase
end
end
localparam _tmp_fsm_2_1 = 1;
localparam _tmp_fsm_2_2 = 2;
localparam _tmp_fsm_2_3 = 3;
localparam _tmp_fsm_2_4 = 4;
localparam _tmp_fsm_2_5 = 5;
localparam _tmp_fsm_2_6 = 6;
always @(posedge CLK) begin
if(RST) begin
_tmp_fsm_2 <= _tmp_fsm_2_init;
_d1__tmp_fsm_2 <= _tmp_fsm_2_init;
_tmp_33 <= 0;
_tmp_35 <= 0;
_tmp_34 <= 0;
_tmp_51 <= 0;
__tmp_fsm_2_cond_5_0_1 <= 0;
end else begin
_d1__tmp_fsm_2 <= _tmp_fsm_2;
case(_d1__tmp_fsm_2)
_tmp_fsm_2_5: begin
if(__tmp_fsm_2_cond_5_0_1) begin
_tmp_51 <= 0;
end
end
endcase
case(_tmp_fsm_2)
_tmp_fsm_2_init: begin
if(th_comp == 22) begin
_tmp_fsm_2 <= _tmp_fsm_2_1;
end
end
_tmp_fsm_2_1: begin
_tmp_33 <= (_tmp_31 >> 2) << 2;
_tmp_35 <= _tmp_32;
_tmp_fsm_2 <= _tmp_fsm_2_2;
end
_tmp_fsm_2_2: begin
if((_tmp_35 <= 256) && ((_tmp_33 & 4095) + (_tmp_35 << 2) >= 4096)) begin
_tmp_34 <= 4096 - (_tmp_33 & 4095) >> 2;
_tmp_35 <= _tmp_35 - (4096 - (_tmp_33 & 4095) >> 2);
end else if(_tmp_35 <= 256) begin
_tmp_34 <= _tmp_35;
_tmp_35 <= 0;
end else if((_tmp_33 & 4095) + 1024 >= 4096) begin
_tmp_34 <= 4096 - (_tmp_33 & 4095) >> 2;
_tmp_35 <= _tmp_35 - (4096 - (_tmp_33 & 4095) >> 2);
end else begin
_tmp_34 <= 256;
_tmp_35 <= _tmp_35 - 256;
end
_tmp_fsm_2 <= _tmp_fsm_2_3;
end
_tmp_fsm_2_3: begin
if(myaxi_awready || !myaxi_awvalid) begin
_tmp_fsm_2 <= _tmp_fsm_2_4;
end
end
_tmp_fsm_2_4: begin
if(_tmp_49 && myaxi_wvalid && myaxi_wready) begin
_tmp_33 <= _tmp_33 + (_tmp_34 << 2);
end
if(_tmp_49 && myaxi_wvalid && myaxi_wready && (_tmp_35 > 0)) begin
_tmp_fsm_2 <= _tmp_fsm_2_2;
end
if(_tmp_49 && myaxi_wvalid && myaxi_wready && (_tmp_35 == 0)) begin
_tmp_fsm_2 <= _tmp_fsm_2_5;
end
end
_tmp_fsm_2_5: begin
_tmp_51 <= 1;
__tmp_fsm_2_cond_5_0_1 <= 1;
_tmp_fsm_2 <= _tmp_fsm_2_6;
end
_tmp_fsm_2_6: begin
_tmp_fsm_2 <= _tmp_fsm_2_init;
end
endcase
end
end
localparam _tmp_fsm_3_1 = 1;
localparam _tmp_fsm_3_2 = 2;
localparam _tmp_fsm_3_3 = 3;
localparam _tmp_fsm_3_4 = 4;
localparam _tmp_fsm_3_5 = 5;
localparam _tmp_fsm_3_6 = 6;
always @(posedge CLK) begin
if(RST) begin
_tmp_fsm_3 <= _tmp_fsm_3_init;
_d1__tmp_fsm_3 <= _tmp_fsm_3_init;
_tmp_55 <= 0;
_tmp_57 <= 0;
_tmp_56 <= 0;
__tmp_fsm_3_cond_4_0_1 <= 0;
_tmp_59 <= 0;
_tmp_58 <= 0;
_tmp_64 <= 0;
__tmp_fsm_3_cond_5_1_1 <= 0;
end else begin
_d1__tmp_fsm_3 <= _tmp_fsm_3;
case(_d1__tmp_fsm_3)
_tmp_fsm_3_4: begin
if(__tmp_fsm_3_cond_4_0_1) begin
_tmp_59 <= 0;
end
end
_tmp_fsm_3_5: begin
if(__tmp_fsm_3_cond_5_1_1) begin
_tmp_64 <= 0;
end
end
endcase
case(_tmp_fsm_3)
_tmp_fsm_3_init: begin
if(th_comp == 25) begin
_tmp_fsm_3 <= _tmp_fsm_3_1;
end
end
_tmp_fsm_3_1: begin
_tmp_55 <= (_tmp_53 >> 2) << 2;
_tmp_57 <= _tmp_54;
_tmp_fsm_3 <= _tmp_fsm_3_2;
end
_tmp_fsm_3_2: begin
if((_tmp_57 <= 256) && ((_tmp_55 & 4095) + (_tmp_57 << 2) >= 4096)) begin
_tmp_56 <= 4096 - (_tmp_55 & 4095) >> 2;
_tmp_57 <= _tmp_57 - (4096 - (_tmp_55 & 4095) >> 2);
end else if(_tmp_57 <= 256) begin
_tmp_56 <= _tmp_57;
_tmp_57 <= 0;
end else if((_tmp_55 & 4095) + 1024 >= 4096) begin
_tmp_56 <= 4096 - (_tmp_55 & 4095) >> 2;
_tmp_57 <= _tmp_57 - (4096 - (_tmp_55 & 4095) >> 2);
end else begin
_tmp_56 <= 256;
_tmp_57 <= _tmp_57 - 256;
end
_tmp_fsm_3 <= _tmp_fsm_3_3;
end
_tmp_fsm_3_3: begin
if(myaxi_arready || !myaxi_arvalid) begin
_tmp_fsm_3 <= _tmp_fsm_3_4;
end
end
_tmp_fsm_3_4: begin
__tmp_fsm_3_cond_4_0_1 <= 1;
if(myaxi_rready && myaxi_rvalid) begin
_tmp_58 <= myaxi_rdata;
_tmp_59 <= 1;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast) begin
_tmp_55 <= _tmp_55 + (_tmp_56 << 2);
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_57 > 0)) begin
_tmp_fsm_3 <= _tmp_fsm_3_2;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_57 == 0)) begin
_tmp_fsm_3 <= _tmp_fsm_3_5;
end
end
_tmp_fsm_3_5: begin
_tmp_64 <= 1;
__tmp_fsm_3_cond_5_1_1 <= 1;
_tmp_fsm_3 <= _tmp_fsm_3_6;
end
_tmp_fsm_3_6: begin
_tmp_fsm_3 <= _tmp_fsm_3_init;
end
endcase
end
end
localparam _tmp_fsm_4_1 = 1;
localparam _tmp_fsm_4_2 = 2;
localparam _tmp_fsm_4_3 = 3;
localparam _tmp_fsm_4_4 = 4;
localparam _tmp_fsm_4_5 = 5;
localparam _tmp_fsm_4_6 = 6;
always @(posedge CLK) begin
if(RST) begin
_tmp_fsm_4 <= _tmp_fsm_4_init;
_d1__tmp_fsm_4 <= _tmp_fsm_4_init;
_tmp_68 <= 0;
_tmp_70 <= 0;
_tmp_69 <= 0;
__tmp_fsm_4_cond_4_0_1 <= 0;
_tmp_72 <= 0;
_tmp_71 <= 0;
_tmp_77 <= 0;
__tmp_fsm_4_cond_5_1_1 <= 0;
end else begin
_d1__tmp_fsm_4 <= _tmp_fsm_4;
case(_d1__tmp_fsm_4)
_tmp_fsm_4_4: begin
if(__tmp_fsm_4_cond_4_0_1) begin
_tmp_72 <= 0;
end
end
_tmp_fsm_4_5: begin
if(__tmp_fsm_4_cond_5_1_1) begin
_tmp_77 <= 0;
end
end
endcase
case(_tmp_fsm_4)
_tmp_fsm_4_init: begin
if(th_comp == 27) begin
_tmp_fsm_4 <= _tmp_fsm_4_1;
end
end
_tmp_fsm_4_1: begin
_tmp_68 <= (_tmp_66 >> 2) << 2;
_tmp_70 <= _tmp_67;
_tmp_fsm_4 <= _tmp_fsm_4_2;
end
_tmp_fsm_4_2: begin
if((_tmp_70 <= 256) && ((_tmp_68 & 4095) + (_tmp_70 << 2) >= 4096)) begin
_tmp_69 <= 4096 - (_tmp_68 & 4095) >> 2;
_tmp_70 <= _tmp_70 - (4096 - (_tmp_68 & 4095) >> 2);
end else if(_tmp_70 <= 256) begin
_tmp_69 <= _tmp_70;
_tmp_70 <= 0;
end else if((_tmp_68 & 4095) + 1024 >= 4096) begin
_tmp_69 <= 4096 - (_tmp_68 & 4095) >> 2;
_tmp_70 <= _tmp_70 - (4096 - (_tmp_68 & 4095) >> 2);
end else begin
_tmp_69 <= 256;
_tmp_70 <= _tmp_70 - 256;
end
_tmp_fsm_4 <= _tmp_fsm_4_3;
end
_tmp_fsm_4_3: begin
if(myaxi_arready || !myaxi_arvalid) begin
_tmp_fsm_4 <= _tmp_fsm_4_4;
end
end
_tmp_fsm_4_4: begin
__tmp_fsm_4_cond_4_0_1 <= 1;
if(myaxi_rready && myaxi_rvalid) begin
_tmp_71 <= myaxi_rdata;
_tmp_72 <= 1;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast) begin
_tmp_68 <= _tmp_68 + (_tmp_69 << 2);
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_70 > 0)) begin
_tmp_fsm_4 <= _tmp_fsm_4_2;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_tmp_70 == 0)) begin
_tmp_fsm_4 <= _tmp_fsm_4_5;
end
end
_tmp_fsm_4_5: begin
_tmp_77 <= 1;
__tmp_fsm_4_cond_5_1_1 <= 1;
_tmp_fsm_4 <= _tmp_fsm_4_6;
end
_tmp_fsm_4_6: begin
_tmp_fsm_4 <= _tmp_fsm_4_init;
end
endcase
end
end
localparam _tmp_fsm_5_1 = 1;
localparam _tmp_fsm_5_2 = 2;
localparam _tmp_fsm_5_3 = 3;
localparam _tmp_fsm_5_4 = 4;
localparam _tmp_fsm_5_5 = 5;
localparam _tmp_fsm_5_6 = 6;
always @(posedge CLK) begin
if(RST) begin
_tmp_fsm_5 <= _tmp_fsm_5_init;
_d1__tmp_fsm_5 <= _tmp_fsm_5_init;
_tmp_85 <= 0;
_tmp_87 <= 0;
_tmp_86 <= 0;
_tmp_103 <= 0;
__tmp_fsm_5_cond_5_0_1 <= 0;
end else begin
_d1__tmp_fsm_5 <= _tmp_fsm_5;
case(_d1__tmp_fsm_5)
_tmp_fsm_5_5: begin
if(__tmp_fsm_5_cond_5_0_1) begin
_tmp_103 <= 0;
end
end
endcase
case(_tmp_fsm_5)
_tmp_fsm_5_init: begin
if(th_comp == 40) begin
_tmp_fsm_5 <= _tmp_fsm_5_1;
end
end
_tmp_fsm_5_1: begin
_tmp_85 <= (_tmp_83 >> 2) << 2;
_tmp_87 <= _tmp_84;
_tmp_fsm_5 <= _tmp_fsm_5_2;
end
_tmp_fsm_5_2: begin
if((_tmp_87 <= 256) && ((_tmp_85 & 4095) + (_tmp_87 << 2) >= 4096)) begin
_tmp_86 <= 4096 - (_tmp_85 & 4095) >> 2;
_tmp_87 <= _tmp_87 - (4096 - (_tmp_85 & 4095) >> 2);
end else if(_tmp_87 <= 256) begin
_tmp_86 <= _tmp_87;
_tmp_87 <= 0;
end else if((_tmp_85 & 4095) + 1024 >= 4096) begin
_tmp_86 <= 4096 - (_tmp_85 & 4095) >> 2;
_tmp_87 <= _tmp_87 - (4096 - (_tmp_85 & 4095) >> 2);
end else begin
_tmp_86 <= 256;
_tmp_87 <= _tmp_87 - 256;
end
_tmp_fsm_5 <= _tmp_fsm_5_3;
end
_tmp_fsm_5_3: begin
if(myaxi_awready || !myaxi_awvalid) begin
_tmp_fsm_5 <= _tmp_fsm_5_4;
end
end
_tmp_fsm_5_4: begin
if(_tmp_101 && myaxi_wvalid && myaxi_wready) begin
_tmp_85 <= _tmp_85 + (_tmp_86 << 2);
end
if(_tmp_101 && myaxi_wvalid && myaxi_wready && (_tmp_87 > 0)) begin
_tmp_fsm_5 <= _tmp_fsm_5_2;
end
if(_tmp_101 && myaxi_wvalid && myaxi_wready && (_tmp_87 == 0)) begin
_tmp_fsm_5 <= _tmp_fsm_5_5;
end
end
_tmp_fsm_5_5: begin
_tmp_103 <= 1;
__tmp_fsm_5_cond_5_0_1 <= 1;
_tmp_fsm_5 <= _tmp_fsm_5_6;
end
_tmp_fsm_5_6: begin
_tmp_fsm_5 <= _tmp_fsm_5_init;
end
endcase
end
end
endmodule
module ram_a
(
input CLK,
input [10-1:0] ram_a_0_addr,
output [32-1:0] ram_a_0_rdata,
input [32-1:0] ram_a_0_wdata,
input ram_a_0_wenable
);
reg [10-1:0] ram_a_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_a_0_wenable) begin
mem[ram_a_0_addr] <= ram_a_0_wdata;
end
ram_a_0_daddr <= ram_a_0_addr;
end
assign ram_a_0_rdata = mem[ram_a_0_daddr];
endmodule
module ram_b
(
input CLK,
input [10-1:0] ram_b_0_addr,
output [32-1:0] ram_b_0_rdata,
input [32-1:0] ram_b_0_wdata,
input ram_b_0_wenable
);
reg [10-1:0] ram_b_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_b_0_wenable) begin
mem[ram_b_0_addr] <= ram_b_0_wdata;
end
ram_b_0_daddr <= ram_b_0_addr;
end
assign ram_b_0_rdata = mem[ram_b_0_daddr];
endmodule
module ram_c
(
input CLK,
input [10-1:0] ram_c_0_addr,
output [32-1:0] ram_c_0_rdata,
input [32-1:0] ram_c_0_wdata,
input ram_c_0_wenable
);
reg [10-1:0] ram_c_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_c_0_wenable) begin
mem[ram_c_0_addr] <= ram_c_0_wdata;
end
ram_c_0_daddr <= ram_c_0_addr;
end
assign ram_c_0_rdata = mem[ram_c_0_daddr];
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_stream_constant.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
| [
"shta.ky1018@gmail.com"
] | shta.ky1018@gmail.com |
7aa16390fac2a04b1be129c306ea0507fc300de1 | 6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4 | /tests/python/gpu/test_numpy_fallback.py | dc367b03139c0fcb872418d3db2dada7314dd41b | [
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib"
] | permissive | yajiedesign/mxnet | 5a495fd06dd1730c17d2d27d7e46c8a770847f17 | 8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51 | refs/heads/master | 2021-03-30T22:37:18.603396 | 2020-10-23T06:40:17 | 2020-10-23T06:40:17 | 43,763,550 | 214 | 59 | Apache-2.0 | 2020-06-01T23:31:15 | 2015-10-06T16:36:40 | C++ | UTF-8 | Python | false | false | 4,728 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import
from distutils.version import StrictVersion
import sys
import pytest
import itertools
import numpy as _np
import platform
import mxnet as mx
import scipy.stats as ss
import scipy.special as scipy_special
from mxnet import np, npx
from mxnet.base import MXNetError
from mxnet.test_utils import assert_almost_equal, use_np, set_default_context
import os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import assertRaises
import random
from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf
from mxnet.numpy_op_signature import _get_builtin_op
from mxnet.util import numpy_fallback
set_default_context(mx.gpu(0))
@use_np
@pytest.mark.serial
def test_np_fallback_decorator():
@numpy_fallback
def dnp_func(a, b=None, split_inputs=(), ret_type=list):
"""
Dummy Doc:
dnp_func is using the same np.xxx operators
"""
ret_lst = []
# unsupported indexing case
ret_lst.append(a[:,a[1,:]>0])
# unsupported operator
ret_lst.append(np.nonzero(b))
# unsupported operator case
ret_lst.append(tuple(np.split(split_inputs[0], split_inputs[1])))
return ret_type(ret_lst)
def onp_func(a, b=None, split_inputs=(), ret_type=list):
ret_lst = []
ret_lst.append(a[:,a[1,:]>0])
ret_lst.append(_np.nonzero(b))
ret_lst.append(tuple(_np.split(split_inputs[0], split_inputs[1])))
return ret_type(ret_lst)
def get_indices(axis_size):
if axis_size is 0:
axis_size = random.randint(3, 6)
samples = random.randint(1, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
return indices
ret_type = list if random.uniform(0.0, 1.0) > 0.5 else tuple
mx_a = np.array([[1,2,3],[3,4,5]])
np_b = _np.random.uniform(size=(3, 4)) > 0.5
mx_b = np.array(np_b, dtype=np_b.dtype)
mx_c_len = random.randint(5, 20)
mx_c = np.random.uniform(size=(mx_c_len,))
mx_indices = np.array(get_indices(mx_c_len), dtype=np.int64)
assert dnp_func.__doc__ is not None
assert 'onp' not in dnp_func.__doc__
fallback_ret = dnp_func(mx_a, b=mx_b, split_inputs=(mx_c, mx_indices), ret_type=ret_type)
onp_ret = onp_func(mx_a.asnumpy(), b=mx_b.asnumpy(), split_inputs=(mx_c.asnumpy(), mx_indices.asnumpy()), ret_type=ret_type)
for fallback_out, onp_out in zip(fallback_ret, onp_ret):
if isinstance(fallback_out, (list, tuple)):
for fallback_item, onp_item in zip(fallback_out, onp_out):
assert fallback_item.ctx == mx.context.current_context(), "incorrect output context %s vs desired %s" % (str(fallback_item.ctx), str(mx.context.current_context()))
assert isinstance(fallback_item, np.ndarray)
assert_almost_equal(fallback_item.asnumpy(), onp_item, rtol=1e-3, atol=1e-5, equal_nan=False)
else:
assert fallback_out.ctx == mx.context.current_context(), "incorrect output context %s vs desired %s" % (str(fallback_out.ctx), str(mx.context.current_context()))
assert isinstance(fallback_out, np.ndarray)
assert_almost_equal(fallback_out.asnumpy(), onp_out, rtol=1e-3, atol=1e-5, equal_nan=False)
# does not support mixed-context inputs
assertRaises(AssertionError, dnp_func, mx_a.as_in_ctx(npx.cpu(0)), b=mx_b, split_inputs=(mx_c, mx_indices), ret_type=ret_type)
assertRaises(AssertionError, dnp_func, mx_a, b=mx_b,
split_inputs=(mx_c.as_in_ctx(npx.cpu(0)), mx_indices.as_in_ctx(npx.gpu(0))), ret_type=ret_type)
@numpy_fallback
def empty_ret_func():
return
# does not support functions with no return values
assertRaises(ValueError, empty_ret_func)
| [
"noreply@github.com"
] | yajiedesign.noreply@github.com |
6c72aa4b88d463eb65057a9ae6b0380975d34e05 | 5a169ab7b1a9eb419a0f46a2fbdc169b0dac8ecb | /cdhweb/blog/apps.py | f751cb8f1fd4d700d8c238e70a78c5f09a78ce3e | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | Princeton-CDH/cdh-web | b1db47f7c11c6eced71aaee8ddf078eeb84a19d8 | b065d69644147c5b88d7dafb3d7180c3da3ef45f | refs/heads/main | 2023-09-01T09:17:17.336396 | 2023-08-03T20:31:05 | 2023-08-03T20:31:05 | 71,808,328 | 1 | 5 | Apache-2.0 | 2023-09-14T14:04:47 | 2016-10-24T16:23:57 | Python | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class BlogConfig(AppConfig):
name = "cdhweb.blog"
| [
"rebecca.s.koeser@princeton.edu"
] | rebecca.s.koeser@princeton.edu |
4bf22539537a8f1f789ed31e664cd633e8de6836 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02897/s654715745.py | abe3e4e772cdeb9a4c7fc71fa90cc9c995ff8bf6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | n=float(input())
if int(n)%2==0:
print(1/2)
else:
print(float(((n+1)/(2*n)))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4d30ab38156924fdc3bad48d9760d204c8fdebdc | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/bugs/core_4165_test.py | d2422778b5f53e4956446b0f1a4dd1f69a715d89 | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 1,993 | py | #coding:utf-8
"""
ID: issue-4492
ISSUE: 4492
TITLE: Replace the hierarchical union execution with the plain one
DESCRIPTION:
JIRA: CORE-4165
FBTEST: bugs.core_4165
"""
import pytest
from firebird.qa import *
init_script = """
recreate table t1(id int);
recreate table t2(id int);
recreate table t3(id int);
commit;
insert into t1 select rand()*100 from rdb$types,rdb$types;
commit;
insert into t2 select * from t1;
insert into t3 select * from t1;
commit;
"""
db = db_factory(init=init_script)
test_script = """
set planonly;
set explain on;
select 0 i from t1
union all
select 1 from t1
union all
select 2 from t1
;
select 0 i from t2
union
select 1 from t2
union
select 2 from t2
;
select 0 i from t3
union distinct
select 1 from t3
union all
select 2 from t3
;
-- Note: values in 'record length' and 'key length' should be suppressed
-- because they contain not only size of field(s) but also db_key.
"""
act = isql_act('db', test_script, substitutions=[('record length.*', ''), ('key length.*', '')])
expected_stdout = """
Select Expression
-> Union
-> Table "T1" Full Scan
-> Table "T1" Full Scan
-> Table "T1" Full Scan
Select Expression
-> Unique Sort (record length: 52, key length: 8)
-> Union
-> Table "T2" Full Scan
-> Table "T2" Full Scan
-> Table "T2" Full Scan
Select Expression
-> Union
-> Unique Sort (record length: 44, key length: 8)
-> Union
-> Table "T3" Full Scan
-> Table "T3" Full Scan
-> Table "T3" Full Scan
"""
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
5d25234ef3941728dcf320cbda2aeea1d13e6d35 | a672ac356faa8743a78703812ce41eb48fc0f99f | /tests/contrib/falcon/test_middleware.py | 68a9614099575934169190628353ac591579082e | [] | permissive | dailymotion/dd-trace-py | 2dd0b23aac89b60d7b40a74692e210a9b8778e94 | 47ecf1d805bbdff3579a7d644595ac083af04c70 | refs/heads/master | 2023-04-13T19:56:00.888553 | 2018-12-05T21:27:46 | 2018-12-05T21:27:46 | 84,096,497 | 0 | 1 | BSD-3-Clause | 2023-04-07T00:17:31 | 2017-03-06T16:38:57 | Python | UTF-8 | Python | false | false | 543 | py | from falcon import testing
from tests.test_tracer import get_dummy_tracer
from .app import get_app
from .test_suite import FalconTestCase
class MiddlewareTestCase(testing.TestCase, FalconTestCase):
"""Executes tests using the manual instrumentation so a middleware
is explicitly added.
"""
def setUp(self):
super(MiddlewareTestCase, self).setUp()
# build a test app with a dummy tracer
self._service = 'falcon'
self.tracer = get_dummy_tracer()
self.api = get_app(tracer=self.tracer)
| [
"emanuele.palazzetti@datadoghq.com"
] | emanuele.palazzetti@datadoghq.com |
e614b59b4db80db0b7fa97d34be01f68a4243409 | d4fa331d7d8a00865f99ee2c05ec8efc0468fb63 | /alg/unique_path.py | b28469b75a95f6b64b6d410929fd04451d54d2e0 | [] | no_license | nyannko/leetcode-python | 5342620c789a02c7ae3478d7ecf149b640779932 | f234bd7b62cb7bc2150faa764bf05a9095e19192 | refs/heads/master | 2021-08-11T04:11:00.715244 | 2019-02-05T15:26:43 | 2019-02-05T15:26:43 | 145,757,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
table = [[-1] * n for _ in range(m)]
table[0][0] = 1
def dfs(m, n):
if m < 0 or n < 0:
return 0
if m == 0 and n == 0:
return table[0][0]
if table[m][n] != -1:
return table[m][n]
else:
table[m][n] = dfs(m - 1, n) + dfs(m, n - 1)
return table[m][n]
def dfs1(m, n):
if table[m][n] == -1:
if m < 0 or n < 0:
return 0
if m == 0 and n == 0:
return table[0][0]
table[m][n] = dfs(m - 1, n) + dfs(m, n - 1)
return table[m][n]
return dfs1(m - 1, n - 1)
a = Solution()
print(a.uniquePaths(3, 2))
| [
"9638293+nyannko@users.noreply.github.com"
] | 9638293+nyannko@users.noreply.github.com |
4e3b0e3d395a1dc1c02f2d1d43fa390d3354c17a | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/core/arrays/period.py | fe78481d99d30290ce2cd487ce98a865332146dd | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 32,396 | py | from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Type, Union
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Timedelta,
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
period as libperiod,
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import Tick, delta_to_tick
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
PeriodMixin,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._typing import AnyArrayLike
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
def _field_accessor(name: str, docstring=None):
def f(self):
base = self.freq._period_dtype_code
result = get_period_field_arr(name, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`period_array` to create new instances.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
period_array : Create a new PeriodArray.
PeriodIndex : Immutable Index for period data.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
# Names others delegate to us
_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"dayofyear",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
# --------------------------------------------------------------------
# Constructors
def __init__(self, values, freq=None, dtype=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._data, values.freq
values = np.array(values, dtype="int64", copy=copy)
self._data = values
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
self._dtype = PeriodDtype(freq)
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray":
# alias for PeriodArray.__init__
assertion_msg = "Should be numpy array of type i8"
assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
return cls(values, freq=freq, **kwargs)
@classmethod
def _from_sequence(
cls: Type["PeriodArray"],
scalars: Union[Sequence[Optional[Period]], AnyArrayLike],
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> "PeriodArray":
if dtype:
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
if copy:
periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(
cls, strings, dtype=None, copy=False
) -> "PeriodArray":
return cls._from_sequence(strings, dtype, copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray":
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value: Union[Period, NaTType]) -> int:
if value is NaT:
return value.value
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value)
return value.ordinal
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
if self.freqstr != other.freqstr:
raise raise_on_incompatible(self, other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self) -> PeriodDtype:
return self._dtype
# error: Read-only property cannot override read-write property [misc]
@property # type: ignore
def freq(self) -> BaseOffset:
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype=None) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
return ~self._isnan
# This will raise TypeError for non-object dtypes
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._data, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
"""
The week ordinal of the year.
""",
)
week = weekofyear
dayofweek = _field_accessor(
"weekday",
"""
The day of the week with Monday=0, Sunday=6.
""",
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"day_of_year",
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear")
days_in_month = _field_accessor(
"days_in_month",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self) -> np.ndarray:
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how="start")
@property
def end_time(self):
return self.to_timestamp(how="end")
def to_timestamp(self, freq=None, how="start"):
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod.validate_end_alias(how)
end = how == "E"
if end:
if freq == "B" or self.freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
freq = self._get_to_timestamp_base()
base = freq
else:
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
return DatetimeArray(new_data)._with_freq("infer")
# --------------------------------------------------------------------
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasnans:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def asfreq(self, freq=None, how: str = "E") -> "PeriodArray":
"""
Convert the Period Array/Index to the specified frequency `freq`.
Parameters
----------
freq : str
A frequency.
how : str {'E', 'S'}
Whether the elements should be aligned to the end
or start within pa period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
Period Array/Index
Constructed with the new frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]', freq='A-DEC')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]', freq='M')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]', freq='M')
"""
how = libperiod.validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1 = self.freq._period_dtype_code
base2 = freq._period_dtype_code
asi8 = self.asi8
# self.freq.n can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + self.freq.n - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasnans:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed: bool = False):
if boxed:
return str
return "'{}'".format
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy: bool = True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasnans:
new_data[self._isnan] = NaT
return new_data
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = algos.checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any],
) -> "PeriodArray":
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
res_values[self._isnan] = iNaT
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other: BaseOffset):
assert not isinstance(other, Tick)
if other.base != self.freq.base:
raise raise_on_incompatible(self, other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
return super()._add_timedeltalike_scalar(other)
def _add_timedelta_arraylike(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise TypeError(
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
)
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
ordinals = self._addsub_int_array(delta, operator.add).asi8
return type(self)(ordinals, dtype=self.dtype)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
base_nanos = self.freq.base.nanos
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != TD64NS_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(TD64NS_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
raise raise_on_incompatible(self, other)
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
other_freq = right.freqstr
else:
other_freq = delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Union[Sequence[Optional[Period]], AnyArrayLike],
freq: Optional[Union[str, Tick]] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
<PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
data_dtype = getattr(data, "dtype", None)
if is_datetime64_dtype(data_dtype):
return PeriodArray._from_datetime64(data, freq)
if is_period_dtype(data_dtype):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
data = list(data)
data = np.asarray(data)
dtype: Optional[PeriodDtype]
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq
def dt64arr_to_periodarr(data, freq, tz=None):
"""
Convert an datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype("M8[ns]"):
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndexClass):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
freq = Period._maybe_convert_freq(freq)
if isinstance(data, (ABCIndexClass, ABCSeries)):
data = data._values
base = freq._period_dtype_code
return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is not None:
freq = to_offset(freq)
mult = freq.n
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
if start is NaT or end is NaT:
raise ValueError("start and end must not be NaT")
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError("Could not infer freq from start/end")
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
)
else:
data = np.arange(
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(
year=None,
month=None,
quarter=None,
day=None,
hour=None,
minute=None,
second=None,
freq=None,
):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = to_offset("Q")
base = FreqGroup.FR_QTR
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
if base != FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = libperiod.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError("Mismatched Period array lengths")
elif length is None:
length = len(x)
arrays = [
np.asarray(x)
if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length)
for x in fields
]
return arrays
| [
"kd619@ic.ac.uk"
] | kd619@ic.ac.uk |
1bd9fba50c8d65cd8d1f749fbeb322b3d5990748 | 12a21462d6cdb37ff7336d498a75f578a8ec7959 | /lib/public/load_cases.py | 15ebcfc8940fd78877551e1317ae4af284186e82 | [
"MIT",
"Python-2.0"
] | permissive | bushidosds/MeteorTears | b1c23331aed6158662e6d544dbf71df2b10ef78d | cde3151b42e9ccae3c58e45233b637808c152571 | refs/heads/master | 2020-05-25T02:16:25.171245 | 2019-05-17T10:32:17 | 2019-05-17T10:32:17 | 187,575,106 | 1 | 0 | MIT | 2019-05-20T05:47:01 | 2019-05-20T05:47:01 | null | UTF-8 | Python | false | false | 1,611 | py | # -*- coding:utf-8 -*-
import yaml
from lib.utils import fp
from lib.public import logger
from lib.utils import exceptions
class LoadCase(object):
def __init__(self, path: str = None):
self.path = path
def get_all_files(self) -> list:
"""
返回文件目录路径下全部文件列表
:Usage:
get_all_files()
"""
return fp.iter_files(self.path)
@property
def __get_files_name(self) -> list:
"""
返回文件目录下的文件名
:Usage:
__get_files_name
"""
return fp.iter_files(self.path, otype='name')
def load_files(self) -> list:
"""
加载文件
:Usage:
load_files()
"""
files_list = []
for index, file in enumerate(self.get_all_files()):
class_name = self.__get_files_name[index].split('.')[0].title().replace('_', '')
try:
with open(file, encoding='utf-8') as f:
files_list.append({class_name: yaml.safe_load(f)})
except exceptions.JsonLoadingError as err:
logger.log_error(
"Json file parsing error, error file: {0}, error message: {1}".format(
file, err))
return files_list
class Containers(object):
def __init__(self, crop: dict):
self.crop = crop
def __repr__(self):
return "Containers <{}->{}>".format(
self.crop.get('class_name'),
self.crop.get('func_name')
)
if __name__ == '__main__':
pass | [
"546464268@qq.com"
] | 546464268@qq.com |
fa21ef31448dccd7a96df6c42c7e27d93203474f | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/426A/cdf_426A.py | acb2d5c61e1ef8068bbd8310cc8e909a4c400db3 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class CodeforcesTask426ASolution:
def __init__(self):
self.result = ''
self.n_s = []
self.mugs = []
def read_input(self):
self.n_s = [int(x) for x in input().split(" ")]
self.mugs = [int(x) for x in input().split(" ")]
def process_task(self):
self.mugs.sort()
self.result = "YES" if sum(self.mugs[:-1]) <= self.n_s[1] else "NO"
def get_result(self):
return self.result
if __name__ == "__main__":
Solution = CodeforcesTask426ASolution()
Solution.read_input()
Solution.process_task()
print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
95d20cf93f789370811e7bf8b5254408503d39e9 | d95cfcee3fc5825d10d9c930baac94ebe7b9fa13 | /tests/test_pysubstringsearch.py | ece0467bba081ffd33ddde85efc22cb0720d05f8 | [
"MIT"
] | permissive | Intsights/PySubstringSearch | 191723bcd3ed699fe91f552d75f52a3a24f7a61f | 1f027986472c5b3e5d1d0e12e0cf7259def8df9a | refs/heads/master | 2023-01-22T12:18:29.351840 | 2023-01-10T06:41:03 | 2023-01-10T06:41:03 | 231,457,596 | 34 | 3 | MIT | 2023-01-10T06:41:05 | 2020-01-02T20:51:12 | C | UTF-8 | Python | false | false | 8,338 | py | import os
import tempfile
import unittest
import pysubstringsearch
class PySubstringSearchTestCase(
unittest.TestCase,
):
def assert_substring_search(
self,
strings,
substring,
expected_results,
):
try:
with tempfile.TemporaryDirectory() as tmp_directory:
index_file_path = f'{tmp_directory}/output.idx'
writer = pysubstringsearch.Writer(
index_file_path=index_file_path,
)
for string in strings:
writer.add_entry(
text=string,
)
writer.finalize()
reader = pysubstringsearch.Reader(
index_file_path=index_file_path,
)
self.assertCountEqual(
first=reader.search(
substring=substring,
),
second=expected_results,
)
try:
os.unlink(
path=index_file_path,
)
except Exception:
pass
except PermissionError:
pass
def test_file_not_found(
self,
):
with self.assertRaises(
expected_exception=FileNotFoundError,
):
pysubstringsearch.Reader(
index_file_path='missing_index_file_path',
)
def test_sanity(
self,
):
strings = [
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
]
self.assert_substring_search(
strings=strings,
substring='four',
expected_results=[
'four',
],
)
self.assert_substring_search(
strings=strings,
substring='f',
expected_results=[
'four',
'five',
],
)
self.assert_substring_search(
strings=strings,
substring='our',
expected_results=[
'four',
],
)
self.assert_substring_search(
strings=strings,
substring='aaa',
expected_results=[],
)
def test_edgecases(
self,
):
strings = [
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'tenten',
]
self.assert_substring_search(
strings=strings,
substring='none',
expected_results=[],
)
self.assert_substring_search(
strings=strings,
substring='one',
expected_results=[
'one',
],
)
self.assert_substring_search(
strings=strings,
substring='onet',
expected_results=[],
)
self.assert_substring_search(
strings=strings,
substring='ten',
expected_results=[
'ten',
'tenten',
],
)
def test_unicode(
self,
):
strings = [
'رجعوني عنيك لأيامي اللي راحوا',
'علموني أندم على الماضي وجراحه',
'اللي شفته قبل ما تشوفك عنيه',
'عمر ضايع يحسبوه إزاي عليّ',
'انت عمري اللي ابتدي بنورك صباحه',
'قد ايه من عمري قبلك راح وعدّى',
'يا حبيبي قد ايه من عمري راح',
'ولا شاف القلب قبلك فرحة واحدة',
'ولا داق في الدنيا غير طعم الجراح',
'ابتديت دلوقت بس أحب عمري',
'ابتديت دلوقت اخاف لا العمر يجري',
'كل فرحه اشتاقها من قبلك خيالي',
'التقاها في نور عنيك قلبي وفكري',
'يا حياة قلبي يا أغلى من حياتي',
'ليه ما قابلتش هواك يا حبيبي بدري',
'اللي شفته قبل ما تشوفك عنيه',
'عمر ضايع يحسبوه إزاي عليّ',
'انت عمري اللي ابتدي بنورك صباحه',
'الليالي الحلوه والشوق والمحبة',
'من زمان والقلب شايلهم عشانك',
'دوق معايا الحب دوق حبه بحبه',
'من حنان قلبي اللي طال شوقه لحنانك',
'هات عنيك تسرح في دنيتهم عنيه',
'هات ايديك ترتاح للمستهم ايديه',
]
self.assert_substring_search(
strings=strings,
substring='زمان',
expected_results=[
'من زمان والقلب شايلهم عشانك',
],
)
self.assert_substring_search(
strings=strings,
substring='في',
expected_results=[
'هات عنيك تسرح في دنيتهم عنيه',
'التقاها في نور عنيك قلبي وفكري',
'ولا داق في الدنيا غير طعم الجراح',
],
)
self.assert_substring_search(
strings=strings,
substring='حنان',
expected_results=[
'من حنان قلبي اللي طال شوقه لحنانك',
],
)
self.assert_substring_search(
strings=strings,
substring='none',
expected_results=[],
)
def test_multiple_words_string(
self,
):
strings = [
'some short string',
'another but now a longer string',
'more text to add',
]
self.assert_substring_search(
strings=strings,
substring='short',
expected_results=[
'some short string',
],
)
def test_short_string(
self,
):
strings = [
'ab',
]
self.assert_substring_search(
strings=strings,
substring='a',
expected_results=[
'ab',
],
)
def test_multiple_strings(
self,
):
try:
with tempfile.TemporaryDirectory() as tmp_directory:
index_file_path = f'{tmp_directory}/output.idx'
writer = pysubstringsearch.Writer(
index_file_path=index_file_path,
)
for string in [
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'tenten',
]:
writer.add_entry(
text=string,
)
writer.finalize()
reader = pysubstringsearch.Reader(
index_file_path=index_file_path,
)
self.assertCountEqual(
first=reader.search_multiple(
substrings=[
'ee',
'ven',
],
),
second=[
'three',
'seven',
],
)
try:
os.unlink(
path=index_file_path,
)
except Exception:
pass
except PermissionError:
pass
| [
"gal@intsights.com"
] | gal@intsights.com |
633e92496b35946000f6bd921841af0b78776164 | 0e8b6f94467c25dd2440f7e2ea1519244e689620 | /MokkaJobs/MokkaGridJobs.py | b46bd21da2de9d0ba74c112d8fce0236cb7084ce | [] | no_license | StevenGreen1/HighEnergyPhotonAnalysis | 97a661eaca2efd00472f1969855c724c9d505369 | 8a82ac57f56aad5bdbe99d4a5afb771592bc1725 | refs/heads/master | 2021-01-10T14:08:50.550184 | 2015-10-12T12:43:47 | 2015-10-12T12:43:47 | 43,491,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,854 | py | # Submit Mokka jobs to the grid: MokkaGridJobs.py
import re
import os
import sys
### ----------------------------------------------------------------------------------------------------
def setGearFile(mokkaSteeringTemplate,gearFile):
mokkaSteeringTemplate = re.sub('GEAR_FILE_XXXX',gearFile,mokkaSteeringTemplate)
return mokkaSteeringTemplate
### ----------------------------------------------------------------------------------------------------
def setStartNumber(mokkaSteeringTemplate,startNumber):
mokkaSteeringTemplate = re.sub('START_EVENT_NUMBER_XXXX',str(startNumber),mokkaSteeringTemplate)
return mokkaSteeringTemplate
### ----------------------------------------------------------------------------------------------------
def setOutputFile(mokkaSteeringTemplate,outputFile):
mokkaSteeringTemplate = re.sub('OUTPUT_FILE_NAME_XXXX',outputFile,mokkaSteeringTemplate)
return mokkaSteeringTemplate
### ----------------------------------------------------------------------------------------------------
def getMokkaVersion(detectorConfigFile):
config = {}
execfile(detectorConfigFile, config)
return config['MokkaVersion']
### ----------------------------------------------------------------------------------------------------
def getMokkaSteeringFileTemplate(baseFileName, detectorConfigFile):
    """Load the Mokka steering template and fill in the detector settings.

    The detector config file is executed (Python-2 execfile) to obtain a
    dict of settings; each XXXX placeholder in the template is then
    replaced with the corresponding value.

    Uses str.replace rather than re.sub: placeholders are literal strings,
    and re.sub would misinterpret backslashes in string-valued settings.
    """
    config = {}
    execfile(detectorConfigFile, config)
    with open(baseFileName, 'r') as baseFile:
        mokkaSteeringTemplate = baseFile.read()
    # (placeholder, substitution) pairs; numeric settings are stringified.
    substitutions = [
        ('DETECTOR_MODEL_XXXX', config['DetectorModel']),
        ('PHYSICS_LIST_XXXX', config['PhysicsList']),
        ('HCAL_ABSORBER_MATERIAL_XXXX', str(config['HCalAbsorberMaterial'])),
        ('HCAL_CELL_SIZE_XXXX', str(config['HCalCellSize'])),
        ('HCAL_ABSORBER_LAYER_THICKNESS_XXXX', str(config['HCalAbsorberLayerThickness'])),
        ('HCAL_SCINTILLATOR_LAYER_THICKNESS_XXXX', str(config['HCalScintillatorThickness'])),
        ('HCAL_NUMBER_OF_LAYERS_XXXX', str(config['NumberHCalLayers'])),
        ('COIL_EXTRA_SIZE_XXXX', str(config['CoilExtraSize'])),
        ('BFIELD_XXXX', str(config['BField'])),
        ('TPC_OUTER_RADIUS_XXXX', str(config['TPCOuterRadius'])),
        ('DETAILED_SHOWER_MODE_XXXX', config['DetailedShowerMode']),
    ]
    for placeholder, value in substitutions:
        mokkaSteeringTemplate = mokkaSteeringTemplate.replace(placeholder, value)
    return mokkaSteeringTemplate
### ----------------------------------------------------------------------------------------------------
def getHEPEvtFiles(eventType, energy):
    """Query the DIRAC file catalogue for HEPEvt files of the given type/energy.

    Returns the list of logical file names printed by dirac-ilc-find-in-FC,
    one per line, stripped of surrounding whitespace.

    NOTE(review): eventType/energy are interpolated into a shell command
    with no escaping (os.system); callers must pass trusted values.
    """
    os.system('dirac-ilc-find-in-FC /ilc JobDescription="HEPEvt" Energy=' + str(energy) + ' EvtType="' + eventType + '" > tmp.txt')
    with open('tmp.txt') as f:
        hepevtFiles = [line.strip() for line in f]
    # os.remove is portable and avoids a second shell invocation.
    os.remove('tmp.txt')
    return hepevtFiles
### ----------------------------------------------------------------------------------------------------
| [
"sg1sg2sg3@hotmail.co.uk"
] | sg1sg2sg3@hotmail.co.uk |
ad1d6246f54fb89f7cf4d0bd153800016048e5b1 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/devices/v20170701/list_iot_hub_resource_keys.py | a406e6b6c9a9cbd4caa7847dbb84ed9eee6cb173 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,753 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListIotHubResourceKeysResult',
'AwaitableListIotHubResourceKeysResult',
'list_iot_hub_resource_keys',
]
@pulumi.output_type
class ListIotHubResourceKeysResult:
    """
    The list of shared access policies with a next link.
    """
    # NOTE: auto-generated by the Pulumi SDK Generator; regenerate rather
    # than hand-edit.
    def __init__(__self__, next_link=None, value=None):
        # The isinstance checks only run for truthy arguments, so a falsy
        # value of the wrong type would pass through silently.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> str:
        """
        The next link.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
        """
        The list of shared access policies.
        """
        return pulumi.get(self, "value")
class AwaitableListIotHubResourceKeysResult(ListIotHubResourceKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, as
        # required by the await protocol; the value is returned immediately
        # without ever suspending.
        if False:
            yield self
        return ListIotHubResourceKeysResult(
            next_link=self.next_link,
            value=self.value)
def list_iot_hub_resource_keys(resource_group_name: Optional[str] = None,
                               resource_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: The name of the resource group that contains the IoT hub.
    :param str resource_name: The name of the IoT hub.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the result is re-wrapped in the
    # Awaitable subclass so callers may also `await` it.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:devices/v20170701:listIotHubResourceKeys', __args__, opts=opts, typ=ListIotHubResourceKeysResult).value

    return AwaitableListIotHubResourceKeysResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
bd0f247b7986d7d12e7110d7fe7257f8dc06b819 | faabe34af6297530617395bcc6811350765da847 | /platforms/leetcode/DeleteNodesAndReturnForest.py | 4c40784b5befdda322e99dfa179d34fc32592d04 | [] | no_license | pqnguyen/CompetitiveProgramming | 44a542aea299bd553dd022a9e737e087285b8b6d | 27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78 | refs/heads/master | 2021-07-21T12:15:47.366599 | 2021-06-27T14:58:48 | 2021-06-27T14:58:48 | 132,837,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Delete every node whose value is in `to_delete` and return the roots
    of the resulting forest (LeetCode 1110)."""

    def delNodes(self, root: "TreeNode", to_delete: List[int]) -> List["TreeNode"]:
        # TreeNode is provided by the judge environment; quoting the
        # annotation avoids a NameError at definition time elsewhere.
        to_delete = set(to_delete)  # O(1) membership tests
        roots = []
        if root and root.val not in to_delete:
            roots.append(root)
        # Pass the set directly (previously a second, redundant copy was made).
        self.delNodesUtil(root, to_delete, roots)
        return roots

    def delNodesUtil(self, root, to_delete, roots):
        """Post-order prune: returns the (possibly None) subtree root and
        appends orphaned children of deleted nodes to `roots`."""
        if not root:
            return None
        need_delete = root.val in to_delete
        # Recurse into children first so deletions deeper in the tree are
        # already resolved before this node is considered.
        root.left = self.delNodesUtil(root.left, to_delete, roots)
        root.right = self.delNodesUtil(root.right, to_delete, roots)
        if need_delete:
            # Surviving children become new roots of the forest.
            if root.left:
                roots.append(root.left)
            if root.right:
                roots.append(root.right)
            root.left = root.right = None
            return None
        return root
| [
"pqnguyen1996@gmail.com"
] | pqnguyen1996@gmail.com |
c8b141e64d1719e48b961907b0984796c4450614 | 4ace4d5a94ab0db79562f1b23edd6011a89148c6 | /src/airflow-stubs/contrib/hooks/dingding_hook.pyi | dab4dfa1201d2ddd2dda0f3a426571174e3d6bae | [
"MIT"
] | permissive | viewthespace/mypy-stubs | 9abebc2eab2b46b2230842f06114673e1a4de052 | 182fa275c4a7011eb5345694b88229adbddcc999 | refs/heads/master | 2023-06-07T18:52:46.739560 | 2023-06-01T22:05:27 | 2023-06-01T22:05:45 | 236,780,299 | 0 | 0 | MIT | 2022-01-11T20:53:55 | 2020-01-28T16:23:07 | Python | UTF-8 | Python | false | false | 532 | pyi | from airflow import AirflowException as AirflowException
from airflow.hooks.http_hook import HttpHook as HttpHook
from typing import Any
class DingdingHook(HttpHook):
message_type: Any
message: Any
at_mobiles: Any
at_all: Any
def __init__(self, dingding_conn_id: str = ..., message_type: str = ..., message: Any | None = ..., at_mobiles: Any | None = ..., at_all: bool = ..., *args, **kwargs) -> None: ...
base_url: Any
def get_conn(self, headers: Any | None = ...): ...
def send(self) -> None: ...
| [
"andrew.marshall@vts.com"
] | andrew.marshall@vts.com |
7919332572d3089ed39adfdcfd8799e6e725cb1d | 9b8ca63a377e6f94cc6a970cc97a6f7f50932811 | /nomitang_affpart/main.py | 3c8900c633842101ee3c2057923d4f1ddb4697b7 | [
"Apache-2.0"
] | permissive | lester-lees/extra_addons_sz | 9b6d2400abe4707b7b18d9e2e9caf2fb366cf3a6 | cddaf972cf4ea64c553bcff0006eb006a115d5ee | refs/heads/master | 2021-01-06T20:43:28.782147 | 2017-08-07T06:51:45 | 2017-08-07T06:51:45 | 99,545,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.addons.website.models.website import slug
class website_offlinestore(http.Controller):
    # Public website controller exposing a static "thanks" page at /thanks.
    @http.route(['/thanks',], type='http', auth="public", website=True)
    def thanks(self, **post):
        # Render the QWeb template; 'url' is passed empty — presumably a
        # placeholder for a redirect/back link (TODO confirm in template).
        return request.website.render("nomitang_affpart.nt_thanks_website_view", {'url':''})
| [
"346994202@qq.com"
] | 346994202@qq.com |
7bb418e716f8d3155d546be5ac11af34f04a71ef | b9b7853bd32e6aa6f17b56befd36181e2349a4bd | /venv/Scripts/pip3-script.py | 1226a1d3d2367a0e0ba8a7b05a6a47ff5ae80feb | [] | no_license | bazhenov4job/Grokking_algorithms | e6eb2c0401b3f836d6907f6d95cd0f85399aa33e | 590347467b7183669e96b6c5f77e89f643118703 | refs/heads/master | 2020-12-28T18:40:10.654108 | 2020-02-12T07:52:40 | 2020-02-12T07:52:40 | 238,443,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!"D:\Programming\Grokking Algorithms\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper for `pip3`; it is
# rewritten on (re)install, so do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py" / ".exe" suffix so argv[0] reads as `pip3`.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and invoke the declared console entry point; exit with its status.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"bazhenov4job@gmail.com"
] | bazhenov4job@gmail.com |
6c54c77da5bee9778fd95a7b143658328e2d1e93 | f9b919ee04754978f739c5516434c581a47c5eec | /music/migrations/0001_initial.py | ac5da28c56ca2e8706d5e74ff2e9a56684a4e237 | [
"MIT"
] | permissive | Hadryan/Music-Genre-Classification-5 | 7f50b9ef6778f96751c4f68391b730603f39a4bc | f67316b0710b2e5ca52e924e7f8254aa7897751b | refs/heads/master | 2022-11-05T12:33:52.208512 | 2020-06-20T14:23:04 | 2020-06-20T14:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Generated by Django 3.0.4 on 2020-05-06 09:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the `music` app: Album and Song tables.
    # NOTE: applied migrations should not be edited by hand; add a new
    # migration for any schema change.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=250)),
                ('album_title', models.CharField(max_length=500)),
                ('genre', models.CharField(max_length=100)),
                # upload_to='' — uploads land directly under MEDIA_ROOT.
                ('album_logo', models.FileField(upload_to='')),
                ('is_favorite', models.BooleanField(default=False)),
                # default=1 assumes a user with pk=1 exists at migration time.
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('song_title', models.CharField(max_length=250)),
                ('audio_file', models.FileField(default='', upload_to='')),
                ('is_favorite', models.BooleanField(default=False)),
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
            ],
        ),
    ]
| [
"noreply@github.com"
] | Hadryan.noreply@github.com |
56defed19b3fac8299dfa81f38dd0acfcfe3c66f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03131/s164720294.py | c6dae1c13c3e44a1cdba7b1c447617b939afe4a9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | K,A,B=map(int,input().split())
# K, A, B are read from stdin on the preceding line.
# After reaching A, each pair of remaining operations can yield B-A; a
# leftover odd operation yields 1. Compare against never exchanging (K+1).
exchanges, leftover = divmod(K - A + 1, 2)
print(max(A + exchanges * (B - A) + leftover, K + 1))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d44c6a98ce3c3643012d4716f2421c191aced214 | 3d61905cb470e4918027d2b6d995246d60aab2b4 | /python/brenpysandbox/fbx/icons/icons.py | 4c4886a31942461edadeecb6010a77a12914b1c2 | [] | no_license | brenainnJordan/brenpy-sandbox | 6e36cfba617c4c9c8989bb36b78c3780b9d0959c | 6dd20d8b7722719742613d2efec2c2992fcfdd9a | refs/heads/master | 2020-08-28T17:16:24.041945 | 2020-06-21T19:58:15 | 2020-06-21T19:58:15 | 217,766,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,821 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sat 9. Jun 20:37:12 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
try:
from PySide import QtCore
except ImportError:
from PySide2 import QtCore
qt_resource_data = "\x00\x00\x05\xbe\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x03\x86iTXtXML:com.adobe.xmp\x00\x00\x00\x00\x00<?xpacket begin=\x22\xef\xbb\xbf\x22 id=\x22W5M0MpCehiHzreSzNTczkc9d\x22?> <x:xmpmeta xmlns:x=\x22adobe:ns:meta/\x22 x:xmptk=\x22Adobe XMP Core 5.6-c111 79.158325, 2015/09/10-01:10:20 \x22> <rdf:RDF xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22> <rdf:Description rdf:about=\x22\x22 xmlns:xmpMM=\x22http://ns.adobe.com/xap/1.0/mm/\x22 xmlns:stRef=\x22http://ns.adobe.com/xap/1.0/sType/ResourceRef#\x22 xmlns:xmp=\x22http://ns.adobe.com/xap/1.0/\x22 xmpMM:OriginalDocumentID=\x22xmp.did:292fc24e-e640-4207-9676-069d307baaca\x22 xmpMM:DocumentID=\x22xmp.did:7FBBEC6C317611E69EEFB31E5056D98A\x22 xmpMM:InstanceID=\x22xmp.iid:7FBBEC6B317611E69EEFB31E5056D98A\x22 xmp:CreatorTool=\x22Adobe Photoshop CC 2015 (Macintosh)\x22> <xmpMM:DerivedFrom stRef:instanceID=\x22xmp.iid:cc3e9a49-904f-46f1-b610-9e9e26a0881a\x22 stRef:documentID=\x22adobe:docid:photoshop:9cce669d-6d53-1179-8ecc-8230e80e3713\x22/> </rdf:Description> </rdf:RDF> </x:xmpmeta> <?xpacket end=\x22r\x22?>\xeb\x95\xa0\xe4\x00\x00\x01\xceIDATx\xdab\xfc\xff\xff?\x03>`mm\x1d\x05\xa42\xa1\xdc\xe9G\x8f\x1e]\x86O=\x0b\x03a\x90\xc9\x1d\x16g\x03b|]\xb5\x08DQl\xa0\x0d\xb3\x94,\x9cMH1\x13\x03\x95\x01\xd5\x0ddA\x8b\x80hh\x04pB#`\x0e\x8e\x88J\x81\xaa\xfb\x0eU\xb7\x14W\x18\x02# 
\xde\x9a\x81\x85\x85\xe1\xc7\xc1]\xb3\x81\x1a\x1d\xb0\x18\xb6\x84YZ.\x9a\xc3\xde\x95\x81\xe1\xcf\x1f`D-\x04\x09\xe34\x90\x9d\x81\x99\x99\x81YL\x82\x81;4\x8e\xe1\xf7\x8d+\xd1?\x0e\xef\x85K2r\xf30p\xd8:G\xb3j\xe8\x80\xf9\x7f_<\x03Q\xac\xc8\x060\x22\xa7C\xa0\xedI@\x03\xe7\xb2\x1b[0\xb0\x99Z30\xb2\xb22\xfc\xff\xf5\x8b\x81\x91\x8d\x0d,\xff\xff7\x90\xcd\xca\x06\xa4\x7f3\xfc:}\x94\xe1\xe7\xd9\x13@S\xff&\x02\xbd\xbc\x00\xab\x81PC\x85\x80T%\x13/_\x09\xbb\x9d\x0b\x03\xab\xaa&\x8a\xfc\xef\xdb\xd7\x19~\x1e\xda\xc3\xf0\xef\xf3\xa7\x1e \xb7\x0dh\xd8{B\xb1\xfc\x1f\x8a\xb1\x83\xff\xf8\xd5\xa1{9\x19\xe8\xe59\x10/[A\xbd\x07\xf1&\xaa\x97\x7f1\xfc:\x05\xf4\xf2\xb9\x93 /'\x00]\xb9\x10\x97\x0b3\xb9\xc3\xe3\x19\xd8\xad\x1c\xc0\x1a\x7f\xdf\xb8\xca\xf0e\xc1t\xb8$\x88\x0d\x12\x03\xc9\xb1[;\x02#.\x96\x01)\x9fc5\xf0'\xd0F\x86\xbf\xaf^0|]\xb3\x98\xe1\xfb\x8e\x0d\xcb\xfe\x7f\xfd\x82\xf0#\x90\x0d\x12\x03\xc9\x81\xd40@|\xf7\x17_^\x9e\xfeu%\xd8\xf5\x1c\xb0\x84\x0d-m\xe0\x00(\x16\x0d\x14;\xf0u\xd9\x5cx\xc2\xc6i P\xf1\x12 \xb5\x84P\xf6\x02\xaa\x9b\x0d\xa4f\x0f\xcd\xc2\x81\x18\x03\x8f\xfc}\xf6\x98\xe1\xef\xb3'`\xdf\x92T\xda\xe0\x00\xd3\xa1%5#z\x04`\x03\x00\x01\x06\x007\xb2\xc2\xd2@\xe1D\x22\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x066\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x03\x86iTXtXML:com.adobe.xmp\x00\x00\x00\x00\x00<?xpacket begin=\x22\xef\xbb\xbf\x22 id=\x22W5M0MpCehiHzreSzNTczkc9d\x22?> <x:xmpmeta xmlns:x=\x22adobe:ns:meta/\x22 x:xmptk=\x22Adobe XMP Core 5.6-c111 79.158325, 2015/09/10-01:10:20 \x22> <rdf:RDF xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22> <rdf:Description rdf:about=\x22\x22 xmlns:xmpMM=\x22http://ns.adobe.com/xap/1.0/mm/\x22 xmlns:stRef=\x22http://ns.adobe.com/xap/1.0/sType/ResourceRef#\x22 xmlns:xmp=\x22http://ns.adobe.com/xap/1.0/\x22 xmpMM:OriginalDocumentID=\x22xmp.did:292fc24e-e640-4207-9676-069d307baaca\x22 
xmpMM:DocumentID=\x22xmp.did:7FBBEC70317611E69EEFB31E5056D98A\x22 xmpMM:InstanceID=\x22xmp.iid:7FBBEC6F317611E69EEFB31E5056D98A\x22 xmp:CreatorTool=\x22Adobe Photoshop CC 2015 (Macintosh)\x22> <xmpMM:DerivedFrom stRef:instanceID=\x22xmp.iid:cc3e9a49-904f-46f1-b610-9e9e26a0881a\x22 stRef:documentID=\x22adobe:docid:photoshop:9cce669d-6d53-1179-8ecc-8230e80e3713\x22/> </rdf:Description> </rdf:RDF> </x:xmpmeta> <?xpacket end=\x22r\x22?>o\x7f\x08X\x00\x00\x02FIDATx\xdab\xfc\xff\xff?\x03\xa9\xc0\xda\xda\xba\x02He\x021H\xf3\xf4\xa3G\x8fv\xc2\xe4\x18I5\x10hX\xad\xb2\x9ca\x93\xb3e4\x98\xbf\xfb\xe8\x22\x86\xfbO.U\x03\x0dm\x03\xf1\x99H0\xc8\x06\x88\x97\x02\x99\xf9n\xd6q\x0c|<\xc2`\xecf\x93\xc0\x00u-\x03\xd1\x06\x02\x0d\x02\xaac<\xec\xed\x90\x1e\xc5\xc4\xc4,\xcc\xc0\xc8\x08\x97c\x84\xb0\x99H2\x10\x08\xba\x0c\xb5\x9c\x184\x95-\x18Lt\xdc\x19v\x1dY\xc8\xf0\xe5\xdb\x07 ~\x0fd/\x00\xc9O':\x0c\x81\xaeK\x16\x12\x90\x9c\x13\xeb\xdf\xc0\xc0\xca\xc2\x06\x16;tf\x0d\xc3\x99\xcb;\x18\xfe\xfd\xfb\xfb\x0a\xc8\x9d\x04\x0c\xbfV\xa2\x5c\x084L\x8e\x99\x89e\x8e\xb7}\x1a\xdc0\x10\xb03\x09fp\xb1\x8c\x0117 \x1b\x06\x02,\x04\xbcZje\x14\xc0 .\xa2\x00\x17\xb8\xfb\xe8\x22\xc3\xb1s\xeb\x19^\xbe}8\x0b\xd9\xab8\x0d\x04\xba\xaa\x1c\x1ak\xfc\xfc\xbc\xa2\x02fz\x9e`\xf1\xfbO.3\x1c\x05\x1a\xf4\xe2\xf5\xfd-\xd0\xb4\xb7\x0d\x9b\x0bP\xc2\x10hX\x95\xa2\x8c^\xab+0Y\x80\xc0\xde\xe3K\x198\xd8\xb9\x19\xde\x7fz\xc1\xf0\xec\xe5\x9d\x1dP\x836\xe1\xf3\x12z\x18f\x82\xd2\x15<\x8d\x01\x0d\xbez\xfb\xc8o\xa0a\x07\x81rM\x84\x0c\xc3f \x13#R\x1a\x03\xa57`\xbac\xf5\xb2O\xb5\x07r\x8e\x01}\xc0H\xaa\x81\xd3w\x1f]\x00N_\xa0t\x06Jo\xa0t\xa7\xa5b\xc5`\xa0\xe9\x00\x92\xef&d F:\x04\xba\xa2\x1aH\xe5\x01]&f\xa2\xeb\x01L\x22!`\xf1\xdf\x7f~1,\xde\xd8\xc0\xf0\xee\xc3\xf3\x14\xa0\xd7\xe7\x12\xebB\x06h\xbaZ\xef\x0cLg\xa0\xf4\x06\x03\xa0t\x08J\x8f\xa0t\x09J\x9fD\x1b\x88\xf0\xfa\xc2\x99\x8b74\x82\xd3\x1d\x0c\x80\xd2#(]\x82\xd2'\xd1^F\xf3\xbe\x17(\xe6%D\x15}\xac\x8d\x02\x19\x14et\x19\xfe\xff\xff\xc7\xb0j[\x17\xc3\xe3\x177+\x90\xcbA\x92\xcaC\xa0\xc1~ 
\x83\xa5\xc4T<\xac\x8d\x03\x19\x04\xf9\xc4\x19\x16\xae\xafc\xf8\xf9\xeb\x9b \xd0\xd0\x0f$\x1b\x88dp\x10\xc8`iqU\x17a\x01)\x86K7\x0f\xa6\x01\x0d\x9cM\xb6\x81\xc8\x85-\x90\xd2D7\x0c\x04\x00\x02\x0c\x00\xa0\x0c\xdfkT\xe1{e\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\xd0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x03\x86iTXtXML:com.adobe.xmp\x00\x00\x00\x00\x00<?xpacket begin=\x22\xef\xbb\xbf\x22 id=\x22W5M0MpCehiHzreSzNTczkc9d\x22?> <x:xmpmeta xmlns:x=\x22adobe:ns:meta/\x22 x:xmptk=\x22Adobe XMP Core 5.6-c111 79.158325, 2015/09/10-01:10:20 \x22> <rdf:RDF xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22> <rdf:Description rdf:about=\x22\x22 xmlns:xmpMM=\x22http://ns.adobe.com/xap/1.0/mm/\x22 xmlns:stRef=\x22http://ns.adobe.com/xap/1.0/sType/ResourceRef#\x22 xmlns:xmp=\x22http://ns.adobe.com/xap/1.0/\x22 xmpMM:OriginalDocumentID=\x22xmp.did:292fc24e-e640-4207-9676-069d307baaca\x22 xmpMM:DocumentID=\x22xmp.did:15E95962946F11E68607E45737FB4601\x22 xmpMM:InstanceID=\x22xmp.iid:15E95961946F11E68607E45737FB4601\x22 xmp:CreatorTool=\x22Adobe Photoshop CC 2015 (Macintosh)\x22> <xmpMM:DerivedFrom stRef:instanceID=\x22xmp.iid:44f008fb-0707-480a-8f6c-606db61f25e5\x22 stRef:documentID=\x22adobe:docid:photoshop:9cce669d-6d53-1179-8ecc-8230e80e3713\x22/> </rdf:Description> </rdf:RDF> </x:xmpmeta> <?xpacket 
end=\x22r\x22?>\xb5YX\x85\x00\x00\x00\xe0IDATx\xdab\xfc\xff\xff?\x035\x01\x13\x03\x95\xc1\xe07\x90\x85\x12\xcd\xd6\xd6\xd6f@*\x13\x88\xa7\x1f=z\xf4\x14\x8a\x81@\xc9\x1c\xa8\xa4\x16\x1e3\xaeB5O\x05\xaa\xcfbee\x9d\xea\xeb\xeb\xcb\xb0n\xdd:\x90\x1c\xd8@FX,\x03\x15\x5c\xcd\xcd\xcd\xd5\x12\x13\x13\xc3i\xda\xabW\xaf\x18&O\x9e\x0c2t\x87\xb8\xb8xqhh(\x03\x90f\xa8\xad\xade\x00Z\xc2\x88\xeee\xbc\x86\x81\x00T^\xdb\xd0\xd0P\x1b\xe42\xa0\x0b\xa9\x13\x86AAA#-\x1d\x82b\xf5\xf7\xef\xdf\x04\x0d\xbc\xf2\xf2\xe5K\xbc\x06\xbdx\xf1\x02D]:\x7f\xfe|\xef\xcc\x993\xc1\xb1\x8e/R\xa6O\x992\x05\x94\x0eu\xf0\x98y\x09\x9a\x0eg\x00\x93\xd9\xfd\x193fL\x01\xc56\x10,\x80)`\xa4\xa4\xb4\xc1\x96S\x18G\x8b\xaf\xc1g @\x80\x01\x00r\xd2Tb\xd4\x95\xd8\x09\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = "\x00\x0f\x0f\xa2?g\x00o\x00u\x00t\x00_\x00l\x00o\x00c\x00a\x00t\x00o\x00r\x00.\x00p\x00n\x00g\x00\x0d\x05\xc3\x94'\x00o\x00u\x00t\x00_\x00j\x00o\x00i\x00n\x00t\x00.\x00p\x00n\x00g\x00\x0e\x05\xcbb\x87\x00o\x00u\x00t\x00_\x00c\x00a\x00m\x00e\x00r\x00a\x00.\x00p\x00n\x00g"
qt_resource_struct = "\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00$\x00\x00\x00\x00\x00\x01\x00\x00\x05\xc2\x00\x00\x00D\x00\x00\x00\x00\x00\x01\x00\x00\x0b\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    # Register the embedded icon payload with Qt's resource system so the
    # images become reachable via ":/..." resource paths.
    QtCore.qRegisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )
def qCleanupResources():
    # Unregister the embedded resource data from Qt's resource system.
    QtCore.qUnregisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )

# Resources are registered as a side effect of importing this module.
qInitResources()
| [
"brenainnjordan@googlemail.com"
] | brenainnjordan@googlemail.com |
a5ff6eb348ef84d8446262dfaac54d71e4608979 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2345/60708/256477.py | 3cb0de279548277ee58180dc06b8c2254444f0e3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | N=eval(input())
# N (number of test cases) is read on the preceding line.
# For each case: read a length `l` and `l` space-separated tokens, then
# report (a) the first value occurring in two adjacent positions and
# (b) the first expected value (1..l) that is out of place — "0" for
# whichever is absent.
for _case in range(N):
    # was eval(input()); int() avoids executing arbitrary input.
    l = int(input())
    tokens = input().split(" ")
    expected = [str(i + 1) for i in range(l)]
    answer = []
    # (a) first adjacent duplicate, if any.
    for i in range(l - 1):
        if tokens[i] == tokens[i + 1]:
            answer.append(tokens[i])
            break
    if len(answer) != 1:
        answer.append("0")
    # (b) first position differing from the 1..l ordering, if any.
    for i in range(l):
        if expected[i] != tokens[i]:
            answer.append(expected[i])
            break
    if len(answer) != 2:
        answer.append("0")
    # Same output as the original element-by-element print loop.
    print(" ".join(answer))
"1069583789@qq.com"
] | 1069583789@qq.com |
578200ca81aacd2162a3509662c44dda5f63f3d6 | 33a50bb13812090a36257078522b798762978c66 | /top/api/rest/LogisticsAddressAddRequest.py | 7d798e9cee0dfaf1051e7c34d3b596dd71dc72e3 | [] | no_license | aa3632840/quanlin | 52ac862073608cd5b977769c14a7f6dcfb556678 | 2890d35fa87367d77e295009f2d911d4b9b56761 | refs/heads/master | 2021-01-10T22:05:14.076949 | 2014-10-25T02:28:15 | 2014-10-25T02:28:15 | 23,178,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | '''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class LogisticsAddressAddRequest(RestApi):
    # Request object for the Taobao TOP API `taobao.logistics.address.add`.
    # NOTE: generated by auto_sdk (2014-09-08); regenerate rather than edit.
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # All request parameters default to None; callers assign the ones
        # they need before submitting the request.
        self.addr = None
        self.cancel_def = None
        self.city = None
        self.contact_name = None
        self.country = None
        self.get_def = None
        self.memo = None
        self.mobile_phone = None
        self.phone = None
        self.province = None
        self.seller_company = None
        self.zip_code = None

    def getapiname(self):
        # API method name used by the RestApi base when dispatching.
        return 'taobao.logistics.address.add'
| [
"262708239@qq.com"
] | 262708239@qq.com |
6a9939922082aade9970368ecb9bd35d3ca06246 | 1297634c6641ec62c31cf30b8fabe1886aa8d9ea | /products_and_services_client/models/loan_interest_rate.py | afbcd7cfed79b47a5e2af0d0a10ef1bcfc3bb79d | [
"MIT"
] | permissive | pitzer42/opbk-br-quickstart | d77f19743fcc264bed7af28a3d956dbc2d20ac1a | b3f86b2e5f82a6090aaefb563614e174a452383c | refs/heads/main | 2023-03-04T13:06:34.205003 | 2021-02-21T23:41:56 | 2021-02-21T23:41:56 | 336,898,721 | 2 | 0 | MIT | 2021-02-07T22:03:15 | 2021-02-07T21:57:06 | null | UTF-8 | Python | false | false | 7,178 | py | # coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from products_and_services_client.models.interest_rate_fee import InterestRateFee # noqa: F401,E501
class LoanInterestRate(InterestRateFee):
    """Swagger model: effective interest-rate information for a loan.

    The class metadata (``swagger_types`` / ``attribute_map``) drives
    (de)serialization; it merges in the parent's metadata when present.
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'applications': 'list[ApplicationRate]',
        'minimum_rate': 'str',
        'maximum_rate': 'str'
    }
    if hasattr(InterestRateFee, "swagger_types"):
        swagger_types.update(InterestRateFee.swagger_types)

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'applications': 'applications',
        'minimum_rate': 'minimumRate',
        'maximum_rate': 'maximumRate'
    }
    if hasattr(InterestRateFee, "attribute_map"):
        attribute_map.update(InterestRateFee.attribute_map)

    def __init__(self, applications=None, minimum_rate=None, maximum_rate=None, *args, **kwargs):  # noqa: E501
        """Build a LoanInterestRate.

        All three fields are effectively required: the property setters
        reject ``None``, so the keyword defaults exist only for signature
        compatibility with the generated code.
        """
        self._applications = None
        self._minimum_rate = None
        self._maximum_rate = None
        self.discriminator = None
        # Assign through the property setters so validation runs.
        self.applications = applications
        self.minimum_rate = minimum_rate
        self.maximum_rate = maximum_rate
        InterestRateFee.__init__(self, *args, **kwargs)

    @property
    def applications(self):
        """Billing bands of the effective rate applied to the loan."""
        return self._applications

    @applications.setter
    def applications(self, applications):
        """Set the billing bands; ``None`` is rejected."""
        if applications is None:
            raise ValueError("Invalid value for `applications`, must not be `None`")  # noqa: E501
        self._applications = applications

    @property
    def minimum_rate(self):
        """Minimum effective rate charged in the reference month.

        Decimal fraction with 4 places, e.g. ``"0.1500"`` means 15%.
        """
        return self._minimum_rate

    @minimum_rate.setter
    def minimum_rate(self, minimum_rate):
        """Set the minimum effective rate; ``None`` is rejected."""
        if minimum_rate is None:
            raise ValueError("Invalid value for `minimum_rate`, must not be `None`")  # noqa: E501
        self._minimum_rate = minimum_rate

    @property
    def maximum_rate(self):
        """Maximum effective rate charged in the reference month.

        Decimal fraction with 4 places, e.g. ``"0.1500"`` means 15%.
        """
        return self._maximum_rate

    @maximum_rate.setter
    def maximum_rate(self, maximum_rate):
        """Set the maximum effective rate; ``None`` is rejected."""
        if maximum_rate is None:
            raise ValueError("Invalid value for `maximum_rate`, must not be `None`")  # noqa: E501
        self._maximum_rate = maximum_rate

    def to_dict(self):
        """Return the model's properties as a dict (one nesting level deep)."""
        def _serialize(obj):
            # Nested swagger models expose to_dict(); anything else passes through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _serialize(val) for key, val in value.items()}
            else:
                result[name] = value
        if issubclass(LoanInterestRate, dict):
            # When the generated base is itself a dict, merge its items too.
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when all their attributes match."""
        return isinstance(other, LoanInterestRate) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"arthurpitzer@id.uff.br"
] | arthurpitzer@id.uff.br |
3a877527b811e3300cf094da3d9842cd84d9119f | 629606ef6e0ce252f74729ac60f57ca8805c3c78 | /hw_001_Django/hw_009_test1/venv/bin/pip | 35101f0af30f654e8feaaf103ad4040a374ec8aa | [] | no_license | LeeXyan/lxgzhw006 | cc31024874725f60b766c9d5d24c2dafc66b8de3 | 621a73544262df7e104806579242deeaa8dbe2c2 | refs/heads/master | 2021-10-10T17:41:52.381843 | 2019-01-15T00:25:08 | 2019-01-15T00:25:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | #!/home/lxgzhw/PythonWork/hw_001_Django/hw_009_test1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script wrapper for `pip`; it is
# rewritten on (re)install, so do not edit by hand.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py" / ".exe" suffix so argv[0] reads as `pip`.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and invoke the declared console entry point; exit with its status.
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"1156956636@qq.com"
] | 1156956636@qq.com | |
0d21f18658bf890b75fc662137cf8b561bcd829b | 059e13f143a56ffe091c3181000c6928a14e2931 | /gen_of_passwords/asgi.py | 4bfcef1cf41b7ba7a6afb7dd84b04e0bb3c4f9e2 | [] | no_license | bhobbs20/Password-Generator | 5b4fea8720c4b3f36bb129e87e3a0312247d17ea | 043be29289e9c217a9c2db2dacfd219f8bed11fc | refs/heads/master | 2022-12-29T05:18:45.905247 | 2020-10-13T19:21:27 | 2020-10-13T19:21:27 | 303,807,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for gen_of_passwords project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gen_of_passwords.settings')

# Module-level ASGI callable imported by ASGI servers (daphne, uvicorn, ...).
application = get_asgi_application()
| [
"brianhobbs216@gmail.com"
] | brianhobbs216@gmail.com |
dcb68361f62778ad7774c7e26665ce66d6246006 | 0378a2f1adad86f439ce214ebfe2a904cda6eb41 | /badball/migrations/0059_auto_20181203_0912.py | f113b7a5add80e8fe4053ec5329a7e873956d941 | [] | no_license | jeremyjbowers/badball | 2035902b5f8d2bc05219af887bd3e1bfcb45192b | 323289ec871e0e7e98e397c9d528d83773c86f85 | refs/heads/master | 2020-12-20T22:08:53.083230 | 2020-01-25T23:09:11 | 2020-01-25T23:09:11 | 236,222,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # Generated by Django 2.0.8 on 2018-12-03 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('badball', '0058_team_championships'),
]
operations = [
migrations.AlterField(
model_name='tradereceipt',
name='picks',
field=models.ManyToManyField(blank=True, null=True, related_name='picks', to='badball.DraftPick'),
),
migrations.AlterField(
model_name='tradereceipt',
name='players',
field=models.ManyToManyField(blank=True, null=True, related_name='players', to='badball.Player'),
),
]
| [
"jeremyjbowers@gmail.com"
] | jeremyjbowers@gmail.com |
c33cd22e723b9f23db21dcc4d74c7254b66bddb4 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Programming_for_Computations/osc_odespy.py | 738ce8a9cd8f905bd423e40bb2c2d69a7eacb3ec | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,100 | py | """Use odespy to solve undamped oscillation ODEs."""
import odespy
from matplotlib.pyplot import \
plot, savefig, legend, xlabel, figure, title, hold, axis, show
def f(u, t, omega=2):
v, u = u
return [-omega**2*u, v]
def compare(odespy_methods,
omega,
X_0,
number_of_periods,
time_intervals_per_period=20):
from numpy import pi, linspace, cos
P = 2*pi/omega # length of one period
dt = P/time_intervals_per_period
T = number_of_periods*P
# If odespy_methods is not a list, but just the name of
# a single Odespy solver, we wrap that name in a list
# so we always have odespy_methods as a list
if type(odespy_methods) != type([]):
odespy_methods = [odespy_methods]
# Make a list of solver objects
solvers = [method(f, f_args=[omega]) for method in
odespy_methods]
for solver in solvers:
solver.set_initial_condition([0, X_0])
# Compute the time points where we want the solution
dt = float(dt) # avoid integer division
N_t = int(round(T/dt))
time_points = linspace(0, N_t*dt, N_t+1)
legends = []
for solver in solvers:
sol, t = solver.solve(time_points)
v = sol[:,0]
u = sol[:,1]
# Plot only the last p periods
p = 6
m = p*time_intervals_per_period # no time steps to plot
plot(t[-m:], u[-m:])
hold('on')
legends.append(solver.name())
xlabel('t')
# Plot exact solution too
plot(t[-m:], X_0*cos(omega*t)[-m:], 'k--')
legends.append('exact')
legend(legends, loc='lower left')
axis([t[-m], t[-1], -2*X_0, 2*X_0])
title('Simulation of %d periods with %d intervals per period'
% (number_of_periods, time_intervals_per_period))
savefig('tmp.pdf'); savefig('tmp.png')
show()
compare(
odespy_methods=[
odespy.EulerCromer,
#odespy.BackwardEuler,
odespy.RKFehlberg,
],
omega=2, X_0=2,
number_of_periods=200,
time_intervals_per_period=240)
| [
"bb@b.om"
] | bb@b.om |
82905322ff92eb35452700ad5dc945091769f7b8 | 64182f24837437f00d2676d0b88f385bff29ecdd | /skflow/ops/__init__.py | bfe55da03ec838382cf75e837869c94b5f529222 | [
"Apache-2.0"
] | permissive | riyazbhat/skflow | b413fb13027074c7bfcb426e401b07e50e04aa28 | e82fccc9cc27b535a60b4e13a0b4251269c858cc | refs/heads/master | 2020-12-29T00:56:39.206565 | 2015-12-06T02:43:41 | 2015-12-06T02:43:41 | 47,548,464 | 2 | 0 | null | 2015-12-07T11:36:47 | 2015-12-07T11:36:46 | null | UTF-8 | Python | false | false | 704 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conv_ops import *
from dnn_ops import *
from embeddings_ops import *
from losses_ops import *
| [
"ilblackdragon@gmail.com"
] | ilblackdragon@gmail.com |
6566b8387a6df52a45b46c0e1a4d58d513a0c009 | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/fb_post_learning/fb_post_v2/tests/storages/test_reply_to_comment_post.py | d4c7d3954ef46369b70fa8885366f43143a54278 | [] | no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,075 | py | {"filter":false,"title":"test_reply_to_comment_post.py","tooltip":"/fb_post_learning/fb_post_v2/tests/storages/test_reply_to_comment_post.py","undoManager":{"mark":0,"position":0,"stack":[[{"start":{"row":0,"column":0},"end":{"row":0,"column":2},"action":"insert","lines":["# "],"id":2},{"start":{"row":1,"column":0},"end":{"row":1,"column":2},"action":"insert","lines":["# "]},{"start":{"row":2,"column":0},"end":{"row":2,"column":2},"action":"insert","lines":["# "]},{"start":{"row":5,"column":0},"end":{"row":5,"column":2},"action":"insert","lines":["# "]},{"start":{"row":7,"column":0},"end":{"row":7,"column":2},"action":"insert","lines":["# "]},{"start":{"row":9,"column":0},"end":{"row":9,"column":2},"action":"insert","lines":["# "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":2},"action":"insert","lines":["# "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":2},"action":"insert","lines":["# "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":2},"action":"insert","lines":["# "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":2},"action":"insert","lines":["# "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":2},"action":"insert","lines":["# "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":2},"action":"insert","lines":["# "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":2},"action":"insert","lines":["# 
"]},{"start":{"row":18,"column":0},"end":{"row":18,"column":2},"action":"insert","lines":["# "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":2},"action":"insert","lines":["# "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":2},"action":"insert","lines":["# "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":2},"action":"insert","lines":["# "]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":22,"column":35},"end":{"row":22,"column":35},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1590076923165,"hash":"4536db45ba765ad5873d92a8b6695b1b0c12e357"} | [
"senammohanakrishna@gmail.com"
] | senammohanakrishna@gmail.com |
82e1ac853cc31945253c155f19edfcdbcc2ff2ce | 71e8bdddd84338bbb2d77934351d76251c2fd77d | /best-time-to-buy-and-sell-stock-iv.py | d811820af9e000057e587a39aa06076967a8a1a7 | [] | no_license | onestarshang/leetcode | 3da20fbec1b42d3565eb95a64ea3f30c29f1e1eb | 0a7aa09a2b95e4caca5b5123fb735ceb5c01e992 | refs/heads/master | 2021-01-09T06:00:06.018037 | 2016-12-17T16:17:49 | 2016-12-17T16:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | '''
https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv/
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
'''
class Solution(object):
def maxProfit(self, k, prices): # O(kn) in worst case
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
n = len(prices)
if n == 0:
return 0
if k >= n / 2:
ans = 0
for i in xrange(1, n):
if prices[i] > prices[i - 1]:
ans += prices[i] - prices[i - 1]
return ans
d = [[0 for j in xrange(n)] for i in xrange(k + 1)]
for t in xrange(1, k + 1):
max_d = -(1 << 31)
for i in xrange(n):
# d[t][i] = max(d[t][i], d[t][i - 1], d[t - 1][j] + prices[i] - prices[j])
d[t][i] = max(d[t][i], d[t][i - 1], max_d + prices[i])
max_d = max(max_d, d[t - 1][i] - prices[i])
return d[k][n - 1]
if __name__ == '__main__':
f = Solution().maxProfit
assert f(2, [1, 4, 2]) == 3
| [
"irachex@gmail.com"
] | irachex@gmail.com |
80593f47310c42501c062cf336ec8a03b7cc05fb | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1576.reorder-routes-to-make-all-paths-lead-to-the-city-zero/1576.reorder-routes-to-make-all-paths-lead-to-the-city-zero.py | 0b9722ea1630b49eec25f9439a0d93b492c11a58 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | class Solution:
def minReorder(self, n: int, connections: List[List[int]]) -> int:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
c87a4af03eb9fa29c6d0bbf0cbccdba9ae574442 | c4d9bdeb5353c6dd014f7c3f8d1f6380a76402af | /pylibviso2/node.py | 322165a8990a261c071e8120c8f71250ff8322d9 | [] | no_license | AtlasBuggy/libviso2-python | 28f390b7f516d7abe5c3acfdff5544d47788f726 | 5ae736a2e7f2bbe362e839c8d1c9dd2340245909 | refs/heads/master | 2021-03-22T01:09:12.302707 | 2017-10-26T06:37:22 | 2017-10-26T06:37:22 | 108,196,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import time
from atlasbuggy.opencv import OpenCVPipeline
from .viso2 import Viso2Mono
class Viso2MonoPipeline(OpenCVPipeline):
def __init__(self, f, cu, cv, width=None, height=None, enabled=True, logger=None):
# self.set_logger(level=20)
super(Viso2MonoPipeline, self).__init__(enabled, logger=logger)
self.viso2 = Viso2Mono(f, cu, cv, width, height)
self.pose_service = "pose"
self.define_service(self.pose_service, message_type=tuple)
def pipeline(self, image):
status, image = self.viso2.update(image)
if status:
self.broadcast_nowait((self.viso2.x, self.viso2.y, self.viso2.z), self.pose_service)
return image
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
878f42c1659a8c2669d666f0fb1f88e53d6df1ec | 894b8a99a3e05dda63ff156d9a2f3ce81f25c3ba | /imix/evaluation/evaluator_imix.py | dda4bcbfe99888f98db7687b73393101f01022ce | [
"Apache-2.0"
] | permissive | jjInsper/iMIX | e5e46c580e2925fb94a2571c25777ce504ffab14 | 99898de97ef8b45462ca1d6bf2542e423a73d769 | refs/heads/master | 2023-08-08T01:24:47.161948 | 2021-09-16T09:35:35 | 2021-09-16T09:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | import itertools
import json
import logging
import os
import pickle as pkl
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from typing import Dict, Optional
import torch
from ..utils import distributed_info as comm
from ..utils.registry import Registry, build_from_cfg
METRICS = Registry('metric')
DATASET_CONVERTER = Registry('DatasetConverter')
POST_PROCESSOR = Registry('PostProcessor')
def build(cfg, registry, default_args=None):
"""Build a object.
Args:
cfg (dict, list[dict]): The config of modules, is is either a dict
or a list of configs.
registry (:obj:`Registry`): A registry the module belongs to.
default_args (dict, optional): Default arguments to build the module.
Defaults to None.
Returns:
obj
"""
if isinstance(cfg, list):
objs = [build_from_cfg(_cfg, registry, default_args) for _cfg in cfg]
return objs
elif isinstance(cfg, Dict):
return build_from_cfg(cfg, registry, default_args)
else:
raise TypeError
def get_predictions_and_labels(func):
def wrapper(self, *args, **kwargs):
if self.distributed is False:
predictions = self._predictions
labels = getattr(self, '_labels') if hasattr(self, '_labels') else None
else:
def get_all_data(data):
data_list = comm.gather(data, dst_rank=0)
all_data = list(itertools.chain(*data_list))
return all_data
comm.synchronize()
predictions = get_all_data(self._predictions)
labels = get_all_data(self._labels) if hasattr(self, '_labels') else None
if not comm.is_main_process():
return {}
kwargs['predictions'] = predictions
kwargs['labels'] = labels
return func(self, *args, **kwargs)
return wrapper
class PostProcessor(metaclass=ABCMeta):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.distributed = comm.get_world_size() > 1
@abstractmethod
def reset(self):
pass
@abstractmethod
def convert(self, batch_data, model_outputs):
pass
@abstractmethod
@get_predictions_and_labels
def process(self, *args, **kwargs):
pass
@staticmethod
def list_to_tensor(list_data: list) -> torch.tensor:
# tensor_size = (len(list_data), list_data[0].shape[1])
if not isinstance(list_data[0], dict):
if isinstance(list_data[0], list) or isinstance(list_data[0], str):
tensor_data = list_data
return tensor_data
if len(list_data[0].shape) == 0:
tensor_size = (len(list_data), 1)
elif len(list_data[0].shape) == 1:
tensor_size = (len(list_data), list_data[0].shape[0])
else:
tensor_size = (len(list_data), list_data[0].shape[1])
tensor_dtype = list_data[0].dtype
tensor_data = torch.zeros(size=tensor_size, dtype=tensor_dtype)
for idx, data in enumerate(list_data):
tensor_data[idx] = data
else:
tensor_data = list_data
return tensor_data
@POST_PROCESSOR.register_module()
class Evaluator(PostProcessor):
def __init__(self, metrics, dataset_converters):
super().__init__()
self._metrics: list = self.build_metrics(metrics)
self._dataset_converters: list = self.build_dataset_converters(
dataset_converters=dataset_converters, default_args={'post_process_type': str(self)})
self._labels: list = []
self._predictions: list = []
def reset(self):
self._labels = []
self._predictions = []
@classmethod
def build_metrics(cls, metrics):
metrics = metrics if isinstance(metrics, list) else [metrics]
return build(metrics, METRICS)
@classmethod
def build_dataset_converters(cls, dataset_converters, default_args: Optional[Dict]):
dataset_converters = dataset_converters if isinstance(dataset_converters, list) else [dataset_converters]
return build(dataset_converters, DATASET_CONVERTER, default_args)
def convert(self, batch_data, model_outputs):
for dataset_obj in self._dataset_converters:
model_outputs, labels = dataset_obj.convert(batch_data, model_outputs)
if labels is not None:
self._labels.extend(labels)
self._predictions.extend(model_outputs)
@get_predictions_and_labels
def process(self, *args, **kwargs):
predictions, labels = kwargs['predictions'], kwargs['labels']
if len(labels) != 0:
labels = self.list_to_tensor(labels)
predictions = self.list_to_tensor(predictions)
eval_results = {}
for metric_obj in self._metrics:
eval_results[str(metric_obj)] = metric_obj.evaluate(predictions, labels)
self.print_eval_results(eval_results)
return eval_results
def print_eval_results(self, eval_results: Dict) -> None:
for metric_name, metric_value in eval_results.items():
self.logger.info('{}: --> {}'.format(metric_name, metric_value))
def __str__(self):
return 'evaluator'
@POST_PROCESSOR.register_module()
class Submitter(PostProcessor):
def __init__(self,
dataset_converters,
*,
output_dir: str = None,
file_name: str = 'submit_result.json',
post_process_type: Dict = None):
super().__init__()
self._predictions: list = []
self._file_name = file_name
self._output_dir = os.path.abspath('./') if output_dir is None else output_dir
if not os.path.exists(self._output_dir):
os.mkdir(self._output_dir)
post_process_type = {'post_process_type': str(self)} if post_process_type is None else post_process_type
self._save_file_name = os.path.join(self._output_dir, self._file_name)
self._dataset_converters: list = Evaluator.build_dataset_converters(
dataset_converters=dataset_converters, default_args=post_process_type)
def reset(self):
self._predictions = []
def convert(self, batch_data, model_outputs):
for dataset_obj in self._dataset_converters:
section_predictions = dataset_obj.convert(batch_data, model_outputs)
self._predictions.extend(section_predictions)
@get_predictions_and_labels
def process(self, *args, **kwargs):
predictions = kwargs['predictions']
assert len(predictions) > 0, ValueError('predictions are empty!')
with open(self._save_file_name, 'w') as f:
f.write(json.dumps(predictions, indent=2))
self.logger.info('The submit file has been saved to {}'.format(self._save_file_name))
return None
def __str__(self):
return 'submitter'
@POST_PROCESSOR.register_module()
class Predictor(Submitter):
def __init__(self, dataset_converters, *, output_dir: str = None, file_name: str = 'predict_result.pkl'):
super().__init__(
dataset_converters=dataset_converters,
output_dir=output_dir,
file_name=file_name,
post_process_type={'post_process_type': str(self)})
@get_predictions_and_labels
def process(self, *args, **kwargs):
predictions = kwargs['predictions']
assert len(predictions) > 0, ValueError('predictions are empty!')
with open(self._save_file_name, 'wb') as f:
pkl.dump(predictions, f)
self.logger.info('The prediction file has been saved to path {}:'.format(self._save_file_name))
return None
def __str__(self):
return 'predictor'
def build_post_processor(cfg):
return build(cfg, POST_PROCESSOR, default_args=None)
def inference_on_dataset(cfg, model, data_loader):
post_processor = build_post_processor(cfg.post_processor)
post_processor.reset()
logger = logging.getLogger(__name__)
logger.info('Starting inference on {} batch images'.format(len(data_loader)))
with to_inference(model), torch.no_grad():
for idx, batch_data in enumerate(data_loader, start=1):
logger.info('{} running idx: {}/{}'.format(str(post_processor), idx, len(data_loader)))
outputs = model(batch_data)
if torch.cuda.is_available():
torch.cuda.synchronize()
post_processor.convert(batch_data=batch_data, model_outputs=outputs)
results = post_processor.process()
return results if results is not None else {}
@contextmanager
def to_inference(model):
old_mode = model.training
model.eval()
yield
model.train(old_mode)
| [
"hsslab.inspur@gmail.com"
] | hsslab.inspur@gmail.com |
9ee34a36c6cbeac87b5646c5dbbb11eab6be70e7 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/xia2/Wrappers/XDS/XDSIdxrefHelpers.py | 1ece3814e6d03fb1f8d8c1c98da05f3ab386cc0f | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 4,598 | py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from xia2.Experts.LatticeExpert import ApplyLattice
def _parse_idxref_lp_distance_etc(lp_file_lines):
"""Parse the LP file for refined distance, beam centre and so on..."""
beam = None
diatance = None
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if "DETECTOR COORDINATES" in line and "DIRECT BEAM" in line:
beam = tuple(map(float, line.split()[-2:]))
if "CRYSTAL TO DETECTOR" in line:
distance = float(line.split()[-1])
if distance < 0:
distance *= -1
return beam, distance
def _parse_idxref_index_origin(lp_file_lines):
"""Parse the LP file for the possible index origin etc."""
origins = {}
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if "INDEX_" in line and "QUALITY" in line and "DELTA" in line:
while not "SELECTED" in line:
line = lp_file_lines[i]
i += 1
try:
hkl = tuple(map(int, line.split()[:3]))
quality, delta, xd, yd = tuple(map(float, line.split()[3:7]))
origins[hkl] = quality, delta, xd, yd
except Exception:
pass
return origins
raise RuntimeError("should never reach this point")
def _parse_idxref_lp(lp_file_lines):
"""Parse the list of lines from idxref.lp."""
lattice_character_info = {}
i = 0
mosaic = 0.0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
# get the mosaic information
if "CRYSTAL MOSAICITY" in line:
mosaic = float(line.split()[-1])
# get the lattice character information - coding around the
# non-standard possibility of mI, by simply ignoring it!
# bug # 2355
if "CHARACTER LATTICE OF FIT a b c" in line:
# example line (note potential lack of white space between b and c cell parameters):
# 9 hR 999.0 3966.3 5324.610528.6 85.6 64.6 132.0
j = i + 1
while lp_file_lines[j].strip() != "":
l = lp_file_lines[j].replace("*", " ")
character = int(l[:12].strip())
lattice = l[12:23].strip()
fit = float(l[23:32].strip())
cell = tuple(
float(c)
for c in (
l[32:39],
l[39:46],
l[46:53],
l[53:59],
l[59:65],
l[65:71],
)
)
# FIXME need to do something properly about this...
# bug # 2355
if lattice == "mI":
j += 1
continue
# reindex_card = tuple(map(int, record[9:]))
reindex_card = () # XXX need example where this is present in the IDXREF.LP
constrained_cell = ApplyLattice(lattice, cell)[0]
lattice_character_info[character] = {
"lattice": lattice,
"fit": fit,
"cell": constrained_cell,
"mosaic": mosaic,
"reidx": reindex_card,
}
j += 1
return lattice_character_info
def _parse_idxref_lp_subtree(lp_file_lines):
subtrees = {}
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if line.split() == ["SUBTREE", "POPULATION"]:
j = i + 1
line = lp_file_lines[j]
while line.strip():
subtree, population = tuple(map(int, line.split()))
subtrees[subtree] = population
j += 1
line = lp_file_lines[j]
return subtrees
def _parse_idxref_lp_quality(lp_file_lines):
fraction = None
rmsd = None
rmsphi = None
for record in lp_file_lines:
if "OUT OF" in record and "SPOTS INDEXED" in record:
fraction = float(record.split()[0]) / float(record.split()[3])
if "STANDARD DEVIATION OF SPOT POSITION" in record:
rmsd = float(record.split()[-1])
if "STANDARD DEVIATION OF SPINDLE POSITION" in record:
rmsphi = float(record.split()[-1])
return fraction, rmsd, rmsphi
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
154c06a1de6e9daa5b49b9d632fe6d9e1f3aca12 | f549367629d0a7cb04a7b39e5e1231a0cb9facd1 | /meter_mount/cnc/drill.py | a8ccf00b40ca4744656a641e5fdee5ffc29b43f0 | [] | no_license | iorodeo/lasercutter | 9d0a64e549a688eb7efa93d765dab5ed1b753110 | f99dddd183bdd200b2367ef11b4b72fefe82bbbe | refs/heads/master | 2022-11-05T06:59:56.153523 | 2016-01-17T02:25:38 | 2016-01-17T02:25:38 | 273,791,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | from __future__ import print_function
import os
import sys
from py2gcode import gcode_cmd
from py2gcode import cnc_dxf
feedrate = 50.0
fileName = 'meter_mount.dxf'
stockThickness = 0.25
drillMargin = 0.125
startZ = 0.0
stopZ = -(stockThickness + drillMargin)
safeZ = 0.3
stepZ = 0.05
startDwell = 0.5
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))
param = {
'fileName' : fileName,
'layers' : ['4-40_THROUGH_HOLE'],
'dxfTypes' : ['CIRCLE'],
'startZ' : startZ,
'stopZ' : stopZ,
'safeZ' : safeZ,
'stepZ' : stepZ,
'startDwell' : startDwell,
}
drill = cnc_dxf.DxfDrill(param)
prog.add(drill)
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.End(),comment=True)
baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
print('generating: {0}'.format(fileName))
prog.write(fileName)
| [
"will@iorodeo.com"
] | will@iorodeo.com |
d6e362aeef9e06deff41345d07bc7e077179895f | c6ed09339ff21fa70f154f34328e869f0dd8e394 | /python/PIL/img_resize.py | df58e1a603cfe03dbd1eaf110ceb6417453cdd19 | [] | no_license | fits/try_samples | f9b15b309a67f7274b505669db4486b17bd1678b | 0986e22d78f35d57fe1dd94673b68a4723cb3177 | refs/heads/master | 2023-08-22T14:35:40.838419 | 2023-08-07T12:25:07 | 2023-08-07T12:25:07 | 642,078 | 30 | 19 | null | 2022-12-28T06:31:24 | 2010-05-02T02:23:55 | Java | UTF-8 | Python | false | false | 177 | py |
import sys
from PIL import Image
img_file = sys.argv[1]
w = int(sys.argv[2])
h = int(sys.argv[3])
dest_file = sys.argv[4]
Image.open(img_file).resize((w, h)).save(dest_file)
| [
"wadays_wozx@nifty.com"
] | wadays_wozx@nifty.com |
bb624387b9809c5be48f30160e3823420ebc7d8c | 11763b1150a3a05db89c13dcd6152f8fcca87eaa | /designs/nonlinear/permutation/multipermutation.py | 4eadfbacef2804804aa094c272ce232c834a6bb7 | [] | no_license | acad2/crypto | 343c32fa25aaec73e169290579fc3d02c4b226f6 | cb283df4101fcd618a0478a0018273f00d0734ae | refs/heads/master | 2021-08-19T06:36:26.068033 | 2017-11-25T00:41:03 | 2017-11-25T00:41:03 | 113,048,326 | 2 | 0 | null | 2017-12-04T13:49:02 | 2017-12-04T13:49:01 | null | UTF-8 | Python | false | false | 1,866 | py | from crypto.utilities import random_bytes
WORD_SIZE_BITS = 32
MASK64 = (2 ** WORD_SIZE_BITS) - 1
STATE_LENGTH = 8
def generate_state(length=STATE_LENGTH):
return range(length)
def generate_key(length=STATE_LENGTH, mask=MASK64):
key_m = [(item & mask) | 1 for item in bytearray(random_bytes(length))]
key_e = [item & mask for item in bytearray(random_bytes(length))]
return key_m + key_e
def permute_columns(state, key, mask=MASK64):
for index, word in enumerate(state):
state[index] = ((word * key[index]) + key[STATE_LENGTH + index]) & mask
def permute_row(state, key):
size = len(state)
for index in range(size - 1):
for index2 in range(index + 1, size):
word1 = state[index]; word2 = state[index2]
word1, word2 = choice_swap(key, word1, word2)
state[index] = word1; state[index2] = word2
def choice(a, b, c):
return c ^ (a & (b ^ c))
def choice_swap(key, word1, word2):
# if key:
# key = 0x63
t = word1
word1 = choice(key, word1, word2)
word2 = choice(key, word2, t)
return word1, word2
# a1x + b1
#a2(a1x + b1) + b2
#a1a2x + a2b1 + b2
#a3(a1a2x + a2b1 + b2) + b3
#a1a2a3x + a2a3b1 + a3b2 + b3
def permutation(state, key=generate_key()):
# permutation /\ 4 1 2 3
# | 3 4 1 2
# \/ 2 3 4 1
# -------
#permutation <--> 1 2 3 4
state = list(state)
# permute_columns(state, key)
permute_row(state, key)
return state
def visualize_permutation():
from crypto.analysis.visualization import test_8x32_function
test_8x32_function(lambda *args: permutation(args), generate_state())
if __name__ == "__main__":
visualize_permutation()
| [
"python_pride@protonmail.com"
] | python_pride@protonmail.com |
07f4280c356519b82898efa367c3a2e25905248c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03033/s453850284.py | f13a451bed63db4a6bf67d8ade18d71786d676fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | def main():
n,q = map(int, input().split())
tl = []
que = []
d = dict()
for _ in range(n):
s,t,x = map(int, input().split())
tl.append((s-x,1,x))
tl.append((t-x,0,x))
for _ in range(q):
t = int(input())
tl.append((t,2))
tl.sort()
wor = set()
wcur = 0
cur = -1
flg = 0
for x in tl:
if x[1] == 1:
wcur += 1
wor.add(x[2])
if cur < 0 or x[2] < cur:
cur = x[2]
flg = 0
elif x[1] == 0:
wcur -= 1
wor.remove(x[2])
if x[2] == cur:
flg = 1
if not wcur:
cur = -1
flg = 0
else:
if flg:
cur = min(wor)
flg = 0
print(cur)
if __name__ == "__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
78ece2afb6dc8d9e7775dd7cae8618c12045b454 | 49e1b436eaeb7064b674d611aa33d70ed8138cb5 | /examples/composing_pdf.py | e06a4c6f02e07f766bbd9ac4cf061e11f0852073 | [
"BSD-3-Clause"
] | permissive | aburke1605/zfit | ee810cf786b5121eee3cc2770d0d1b3c02ff86ac | d49fb5513b61b653cf0ca5b5720d4210862b2a70 | refs/heads/master | 2023-09-05T05:08:59.214839 | 2021-09-20T11:16:42 | 2021-09-20T11:16:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Copyright (c) 2020 zfit
import zfit
# create space
obs = zfit.Space("x", limits=(-10, 10))
# parameters
mu = zfit.Parameter("mu", 1., -4, 6)
sigma = zfit.Parameter("sigma", 1., 0.1, 10)
lambd = zfit.Parameter("lambda", -1., -5., 0)
frac = zfit.Parameter("fraction", 0.5, 0., 1.)
# pdf creation
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
exponential = zfit.pdf.Exponential(lambd, obs=obs)
sum_pdf = zfit.pdf.SumPDF([gauss, exponential], fracs=frac)
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
ffb5aae78efbd94d9112b4913b0759fb644ffd30 | d50bf972c9e4321eb77aad8a0126b27d70432779 | /apps/person/api/attribute/views.py | e350811058631c777e41b4a0d4984b9b8ffe0072 | [] | no_license | PUYUP/kawalmedia | 1778b3473220ff64e2f5c998649fc0f637787976 | ffff74b94f111bb17d7a290ba57a13c63e32e5fa | refs/heads/master | 2022-10-20T19:36:45.413061 | 2019-11-07T08:02:31 | 2019-11-07T08:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,487 | py | from uuid import UUID
from itertools import chain
from django.db.models import F, Subquery, OuterRef
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
# THIRD PARTY
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.parsers import (
FormParser, FileUploadParser, MultiPartParser)
from rest_framework import status as response_status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, NotAcceptable
# SERIALIZERS
from .serializers import AttributeSerializer, AttributeValueSerializer
# PERMISSIONS
from ..permissions import IsOwnerOrReject, IsEntityOwnerOrReject
# LOCAL UTILS
from ...utils.attributes import update_attribute_values
# GET MODELS FROM GLOBAL UTILS
from utils.validators import get_model
Attribute = get_model('person', 'Attribute')
AttributeValue = get_model('person', 'AttributeValue')
class AttributeApiView(viewsets.ViewSet):
""" Get attribute options for persons
Read only... """
lookup_field = 'uuid'
permission_classes = (IsAuthenticated,)
parser_class = (FormParser, FileUploadParser, MultiPartParser,)
permission_action = {
# Disable update if not owner
'update': [IsOwnerOrReject],
'partial_update': [IsOwnerOrReject],
'destroy': [IsEntityOwnerOrReject],
}
def get_permissions(self):
"""
Instantiates and returns
the list of permissions that this view requires.
"""
try:
# return permission_classes depending on `action`
return [permission() for permission in self.permission_action
[self.action]]
except KeyError:
# action is not set return default permission_classes
return [permission() for permission in self.permission_classes]
def list(self, request, format=None):
context = {'request': self.request}
identifiers = request.GET.get('identifiers', None)
# Attributes
if hasattr(request.user, 'person') and identifiers:
person = getattr(request.user, 'person', None)
identifiers = identifiers.split(',')
# ContentType berdasarkan entity (model)
entity_type = ContentType.objects.get_for_model(person)
# Get roles from person
roles = person.roles.filter(is_active=True) \
.values_list('id', flat=True)
# Get attributes by roles
queryset = Attribute.objects \
.prefetch_related('option_group', 'content_type', 'roles') \
.select_related('option_group') \
.filter(
content_type=entity_type,
roles__in=roles,
identifier__in=identifiers,
attributevalue__object_id=person.pk) \
.distinct()
if queryset.exists():
for qs in queryset:
identifiers.remove(qs.identifier)
annotate = dict()
for q in queryset:
field = 'value_' + q.field_type
if q.field_type == 'multi_option':
annotate[field] = F('attributevalue')
else:
annotate[field] = F('attributevalue__%s' % field)
annotate['value_uuid'] = F('attributevalue__uuid')
# Call value each field
queryset = queryset.annotate(**annotate)
# Here we get all attributes
# But filter by empty attributevalue
queryset_all = Attribute.objects \
.prefetch_related('option_group', 'content_type', 'roles') \
.select_related('option_group') \
.filter(
content_type=entity_type,
roles__in=roles,
identifier__in=identifiers,
secured=False) \
.distinct()
# Combine two or more queryset
queryset = list(chain(queryset, queryset_all))
# JSON Api
serializer = AttributeSerializer(
queryset, many=True, context=context)
return Response(serializer.data, status=response_status.HTTP_200_OK)
raise NotAcceptable(detail=_("Data tidak valid."))
    # Update person attributes
    @method_decorator(csrf_protect)
    @transaction.atomic
    def update(self, request, uuid=None):
        """Update the person's attribute values from ``request.data``.

        ``uuid`` is the *Person* identifier (coerced to a UUID; 404 when
        malformed). Returns the serialized last-inserted AttributeValue, or
        raises NotAcceptable when the user has no person / no payload.
        """
        context = {'request': self.request}
        if type(uuid) is not UUID:
            try:
                uuid = UUID(uuid)
            except ValueError:
                raise NotFound()
        person = getattr(request.user, 'person', None)
        if person and request.data:
            # Attach uploaded files onto the payload so the helper can see them.
            if request.FILES:
                setattr(request.data, 'files', request.FILES)
            # Delegate the actual write to the shared helper.
            update_attribute_values(
                person, identifiers=None, values=request.data)
            # Fetch the most recently created value for this person to echo back.
            entity_type = ContentType.objects.get_for_model(person)
            attribute_value = AttributeValue.objects \
                .filter(object_id=person.pk, content_type=entity_type) \
                .order_by('date_created') \
                .last()
            serializer = AttributeValueSerializer(
                attribute_value, many=False, context=context)
            return Response(serializer.data, status=response_status.HTTP_200_OK)
        raise NotAcceptable()
# Delete...
@method_decorator(csrf_protect)
@method_decorator(never_cache)
@transaction.atomic
def destroy(self, request, uuid=None):
"""uuid used uuid from attribute value"""
queryset = AttributeValue.objects.filter(uuid=uuid)
if queryset.exists():
queryset.delete()
return Response(
{'detail': _("Berhasil dihapus.")},
status=response_status.HTTP_204_NO_CONTENT)
| [
"hellopuyup@gmail.com"
] | hellopuyup@gmail.com |
ff7c7cba1e29a444a9f3768a79707a6dc595f9e4 | d8b08995348ca42e1e748d8ca94cbf5a04912049 | /DataFetch-by-Python.py | 93bf77ab06b7c1b172ab67ae9c122b2c66be1ff9 | [] | no_license | sonkrsh/excelDataFetch | 8b9d375d3d660995fff0ad32065e07254baf2ad7 | 787db09349c483747d14933e3b8ebdd518117e69 | refs/heads/main | 2023-03-19T00:57:54.579518 | 2021-03-12T19:33:54 | 2021-03-12T19:33:54 | 347,176,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,203 | py | from calendar import monthrange
import pandas as pds
import openpyxl
from openpyxl.styles import PatternFill
# check = monthrange(2021, 3)
# Output workbook name used by all the post-processing steps below.
excelName = "reporddst.xlsx"
# Template workbook whose Sheet1 receives the per-city daily counts.
wb = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\test.xlsx")
sh1 = wb["Sheet1"]
# Source data: one row per pickup, with "Pickup Completion Date" and "City" columns.
newData = pds.read_excel(r'C:\Users\S.S\PycharmProjects\locationData\Insurance.xlsx', sheet_name="Sheet1")
def fetchData(start, end):
    """Count completed pickups per city per day and write them into ``sh1``.

    ``start`` / ``end`` are ``[day, month, year]`` lists; the range is
    inclusive. Column 1 of the sheet receives the city labels (rows 2-25,
    with highlighted subtotal rows for "Delhi NCR", "West" and "South");
    each subsequent column holds one day's counts with the date in row 1.

    Fixes over the original:
    * the date match no longer hard-codes the year "2021" — it uses
      ``start[2]``/``end[2]`` via real ``datetime.date`` arithmetic;
    * the row-1 date header is always written, even for days with no
      matching pickups (previously ``dateActual`` could be undefined).

    Reads module-level ``newData`` (pandas DataFrame) and writes
    module-level ``sh1`` (openpyxl worksheet).
    """
    from datetime import date, timedelta

    # Sheet row for each tracked city; "Navi Mumbai" is folded into the
    # Mumbai row, matching the original report layout.
    city_rows = {
        'Delhi': 2, 'Noida': 3, 'Ghaziabad': 4, 'Gurgaon': 5,
        'Faridabad': 6, 'Chandigarh': 7, 'Jaipur': 8,
        'Mumbai': 10, 'Navi Mumbai': 10, 'Pune': 11, 'Ahmedabad': 12,
        'Nagpur': 13, 'Surat': 14,
        'Bengaluru': 16, 'Hyderabad': 17, 'Chennai': 18,
        'Kochi': 20, 'Coimbatore': 21, 'Kanpur': 22, 'Lucknow': 23,
        'Indore': 24, 'Kolkata': 25,
    }
    # Column-1 labels in sheet order (rows 2..25); the three region
    # subtotal rows get a yellow fill, as before.
    labels = ['Delhi', 'Noida', 'Ghaziabad', 'Gurgaon', 'Faridabad',
              'Chandigarh', 'Jaipur', 'Delhi NCR', 'Mumbai', 'Pune',
              'Ahmedabad', 'Nagpur', 'Surat', 'West', 'Bengaluru',
              'Hyderabad', 'Chennai', 'South', 'Kochi', 'Coimbatore',
              'Kanpur', 'Lucknow', 'Indore', 'Kolkata']
    for row, label in enumerate(labels, start=2):
        if label in ('Delhi NCR', 'West', 'South'):
            sh1.cell(row, column=1).fill = PatternFill("solid", fgColor="FFFF00")
        sh1.cell(row, column=1, value=label)

    first = date(start[2], start[1], start[0])
    last = date(end[2], end[1], end[0])
    col = 2  # data starts in column 2, one column per day
    current = first
    while current <= last:
        counts = {r: 0 for r in range(2, 26)}
        for idx in range(len(newData)):
            completed = newData["Pickup Completion Date"][idx].date()
            # Skip rows with no completion date (pandas NaT).
            if str(completed) == "NaT":
                continue
            if completed == current:
                row = city_rows.get(newData["City"][idx])
                if row is not None:
                    counts[row] += 1
        # Region subtotals: Delhi NCR = rows 2-8, West = 10-14, South = 16-18.
        counts[9] = sum(counts[r] for r in range(2, 9))
        counts[15] = sum(counts[r] for r in range(10, 15))
        counts[19] = sum(counts[r] for r in range(16, 19))
        sh1.cell(row=1, column=col, value=current.strftime("%d-%b-%Y"))
        for row, value in counts.items():
            sh1.cell(row=row, column=col, value=value)
        current += timedelta(days=1)
        col += 1
def calulateTotal():
    """Append a row-total column to the report, then merge in pickup targets.

    (Name typo "calulate" is kept — it is the public name callers use.)
    Pass 1: for every row, accumulate numeric cells left-to-right and write
    the running total into the first empty cell found (row 1 gets the
    "Pick Up Done" header instead). Pass 2: copy target values from
    pickUpTarget.xlsx into the next free column, merging cells for the
    grouped region rows.
    """
    wb = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\" + excelName)
    sh1 = wb["Sheet1"]
    totalRow = sh1.max_row
    newValue = 0
    totalColumn = sh1.max_column
    for x in range(1, totalRow + 1):
        for col in range(1, totalColumn + 1):
            rowValue = sh1.cell(row=x, column=col + 1).value
            if rowValue != None:
                try:
                    # Non-numeric cells (dates, labels) are silently skipped.
                    newValue += int(rowValue)
                except:
                    ""
            if rowValue is None:
                if x == 1:
                    print('weee')
                    sh1.cell(row=x, column=col + 1, value="Pick Up Done")
                else:
                    # First empty cell in the row receives the row total.
                    sh1.cell(row=x, column=col + 1, value=newValue)
                newValue = 0
    wb.save(excelName)
    # Reopen so max_column reflects the totals column just written.
    wb = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\" + excelName)
    sh1 = wb["Sheet1"]
    totalColumn = sh1.max_column
    print(totalColumn)
    totalRow = sh1.max_row
    # Targets workbook: one target value per row in column 1.
    wb2 = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\pickUpTarget.xlsx")
    sh2 = wb2["Sheet1"]
    totalRow2 = sh2.max_row
    for de in range(1, totalRow2 + 1):
        rowValue2 = sh2.cell(row=de, column=1).value
        if de == 1:
            # Rows 2-4 share one merged target cell (Delhi/Noida/Ghaziabad).
            sh1.merge_cells(start_row=2, start_column=totalColumn + 1, end_row=4, end_column=totalColumn + 1)
            sh1.cell(row=2, column=totalColumn + 1, value=rowValue2)
            # NOTE(review): this saves the *targets* workbook under the report
            # name, overwriting it — looks unintended; confirm.
            wb2.save(excelName)
        if de == 2:
            # Rows 5-6 share one merged target cell (Gurgaon/Faridabad).
            sh1.merge_cells(start_row=5, start_column=totalColumn + 1, end_row=6, end_column=totalColumn + 1)
            sh1.cell(row=5, column=totalColumn + 1, value=rowValue2)
        if de == 3:
            sh1.cell(row=7, column=totalColumn + 1, value=rowValue2)
        if de == 4:
            sh1.cell(row=8, column=totalColumn + 1, value=rowValue2)
        if de == 5:
            sh1.cell(row=10, column=totalColumn + 1, value=rowValue2)
        if de == 6:
            sh1.cell(row=11, column=totalColumn + 1, value=rowValue2)
        if de == 7:
            sh1.cell(row=12, column=totalColumn + 1, value=rowValue2)
        if de == 8:
            sh1.cell(row=13, column=totalColumn + 1, value=rowValue2)
        if de == 9:
            sh1.cell(row=14, column=totalColumn + 1, value=rowValue2)
        if de == 10:
            sh1.cell(row=16, column=totalColumn + 1, value=rowValue2)
        if de == 11:
            sh1.cell(row=17, column=totalColumn + 1, value=rowValue2)
        if de == 12:
            sh1.cell(row=18, column=totalColumn + 1, value=rowValue2)
        if de == 13:
            sh1.cell(row=20, column=totalColumn + 1, value=rowValue2)
        if de == 14:
            sh1.cell(row=21, column=totalColumn + 1, value=rowValue2)
        if de == 15:
            sh1.cell(row=22, column=totalColumn + 1, value=rowValue2)
        if de == 16:
            sh1.cell(row=23, column=totalColumn + 1, value=rowValue2)
        if de == 17:
            sh1.cell(row=24, column=totalColumn + 1, value=rowValue2)
        if de == 18:
            sh1.cell(row=25, column=totalColumn + 1, value=rowValue2)
    wb.save(excelName)
def overAchived():
    """Write a "% over-achieved" column: done / target * 100 per row.

    Rows 2-4 and 5-6 are aggregated (their targets are merged cells, so
    only the first row of each group has a target value); every other row
    is computed individually. Assumes calulateTotal() already wrote the
    done column (totalColumn - 1) and the target column (totalColumn).
    """
    counto = 0
    counto2 = 0
    count = 0
    count2 = 0
    wb = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\" + excelName)
    sh1 = wb["Sheet1"]
    totalRow = sh1.max_row
    totalColumn = sh1.max_column
    for value in range(1, totalRow + 1):
        if value != 1 and value < 5:
            # Accumulate rows 2-4 (merged-target group); write the ratio on row 4.
            count += sh1.cell(row=value, column=totalColumn - 1).value
            if sh1.cell(row=value, column=totalColumn).value is not None:
                count2 += sh1.cell(row=value, column=totalColumn).value
            if (value == 4):
                total = (count / count2) * 100
                sh1.cell(row=value, column=totalColumn + 1, value=total)
        elif value >= 5 and value < 7:
            # Accumulate rows 5-6 (second merged-target group); ratio on row 6.
            counto += sh1.cell(row=value, column=totalColumn - 1).value
            if sh1.cell(row=value, column=totalColumn).value is not None:
                counto2 += sh1.cell(row=value, column=totalColumn).value
            print('------>', counto)
            if (value == 6):
                total = (counto / counto2) * 100
                sh1.cell(row=value, column=totalColumn + 1, value=total)
        else:
            # Individual rows: ratio only when both done and target are present.
            newcount = 0
            newcountd = 0
            newcount = sh1.cell(row=value, column=totalColumn - 1).value
            newcountd = sh1.cell(row=value, column=totalColumn).value
            if newcountd and newcount != None:
                total = (int(newcount) / int(newcountd)) * 100
                sh1.cell(row=value, column=totalColumn + 1, value=total)
    wb.save(excelName)
def color():
    """Write headers/region totals for the target and %-achieved columns,
    then color-code the sheet.

    %-achieved cells get green (>= 100) or red (< 100); the "Delhi NCR",
    "West" and "South" subtotal rows get a yellow row highlight.
    Assumes calulateTotal()/overAchived() already populated the last
    columns of the sheet.
    """
    wb = openpyxl.load_workbook("C:\\Users\\S.S\\PycharmProjects\\locationData\\" + excelName)
    sh1 = wb["Sheet1"]
    totalRow = sh1.max_row
    totalColumn = sh1.max_column
    total = 0
    total2 = 0
    new = ""
    for give in range(1, totalRow + 1):
        # Target column (totalColumn - 1): header on row 1, running region total on row 9.
        if (give == 1):
            sh1.cell(row=give, column=totalColumn - 1, value="Pick Up Target")
        elif (give == 9):
            sh1.cell(row=9, column=totalColumn - 1, value=total)
            total = 0
        if give != 1:
            values = sh1.cell(row=give, column=totalColumn - 1).value
            if give < 9:
                if values != None:
                    total += int(values)
            elif (give > 9):
                if values != None:
                    total += int(values)
            else:
                # give == 9: flush the accumulated total into the subtotal row.
                values = sh1.cell(row=give, column=totalColumn - 1, value=total)
                total = 0
        # %-achieved column (totalColumn): same header/total pattern.
        if (give == 1):
            sh1.cell(row=give, column=totalColumn, value="% OverAchieved")
        elif (give == 9):
            sh1.cell(row=9, column=totalColumn, value=total2)
            total2 = 0
        if give != 1:
            values = sh1.cell(row=give, column=totalColumn).value
            if give < 9:
                if values != None:
                    total2 += int(values)
            elif (give > 9):
                if values != None:
                    total2 += int(values)
            else:
                values = sh1.cell(row=give, column=totalColumn, value=total2)
                total2 = 0
        # Color the %-achieved cell: green when target met, red otherwise.
        if give > 1:
            valz = sh1.cell(row=give, column=totalColumn).value
            if valz != None:
                permanent = int(valz)
                if permanent >= 100:
                    sh1.cell(give, column=totalColumn).fill = PatternFill("solid", fgColor="98FB98")
                else:
                    sh1.cell(give, column=totalColumn).fill = PatternFill("solid", fgColor="F08080")
        # Highlight the whole region-subtotal rows in yellow.
        new = sh1.cell(row=give, column=1).value
        if new == "Delhi NCR" or new == "West" or new == "South":
            for x in range(1, totalColumn):
                sh1.cell(give, column=x).fill = PatternFill("solid", fgColor="FFFF00")
    # sh1.cell(22, column=8).fill = PatternFill("solid", fgColor="FFFF00")
    wb.save(excelName)
# Build the report for 1-5 March 2021 (arguments are [day, month, year]).
fetchData([1, 3, 2021], [5, 3, 2021])
wb.save(excelName)  # persist the counts before the post-processing steps reopen the file
calulateTotal()
overAchived()
color()
| [
"you@example.com"
] | you@example.com |
b7270e1e061dbd90bf3c7a6898118fd3d3223cfd | 222dbb2f43dccbd4538ef76798a26457edffe07c | /utils/plot_utils.py | 87ac03b4a10ee2e0494afbd36ccfe9a6ea1c7221 | [] | no_license | MJHutchinson/PytorchBayes | 9699351822416deeb61e95a34653580fdfbbb5ae | e95a9bd308c595b9603bdfb799288a0ed50cc7c6 | refs/heads/master | 2020-04-09T18:39:57.643468 | 2019-01-15T16:06:05 | 2019-01-15T16:06:05 | 160,519,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,693 | py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# plt.style.use('ggplot')
# matplotlib.rcParams['text.usetex'] = True
def plot_training_curves(input, val='accuracies', legend=None):
    """Plot one training curve per result entry.

    Each element of ``input`` is a dict with ``results[val]`` (the series to
    plot) plus ``hidden_size`` / ``lr`` / ``prior_var`` used to build the
    legend label.

    Bug fix: the legend labels were appended directly onto the caller's
    ``legend`` list, mutating the argument in place; a copy is used now.
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel('Epoch')
    ax.set_ylabel(val)
    ax.set_title(val)
    labels = [] if legend is None else list(legend)
    for results in input:
        ax.plot(results['results'][val])
        labels.append(f'{results["hidden_size"]} lr: {results["lr"]} prior width: {results["prior_var"]}')
    ax.legend(labels)
def plot_training_curves_rv(input, legend=None, rolling_av_len=5):
    """Plot rolling-average accuracy curves.

    ``input`` is a list of dicts of result dicts; for every inner entry the
    ``results['accuracies']`` series is smoothed with a window of
    ``rolling_av_len`` and plotted.
    """
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel('Epoch')
    axis.set_ylabel('Accuracy')
    if legend is None:
        legend = []
    for group in input:
        for name in group.keys():
            acc = group[name]['results']['accuracies']
            window_count = len(acc) - rolling_av_len
            smoothed = [
                sum(a / rolling_av_len for a in acc[j:j + rolling_av_len])
                for j in range(window_count)
            ]
            axis.plot(smoothed)
    axis.legend(legend)
def plot_cost_curves(*input, legend=None, key='rmse'):
    """Plot the training-cost curve of each result set.

    Note: the ``legend`` argument is ignored (matching the original
    behavior); every curve is labelled with ``key``.
    """
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel('Epoch')
    axis.set_ylabel('Cost')
    labels = []
    for entry in input:
        axis.plot(entry['results']['costs'])
        labels.append(key)
    axis.legend(labels)
def plot_min_vs_first(input, val='costs', legend=None):
    """Scatter first-epoch value against the minimum value per result,
    with a least-squares fit line."""
    series = [entry['results'][val] for entry in input]
    firsts = [s[0] for s in series]
    minima = [min(s) for s in series]
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Minimum {val}')
    axis.scatter(firsts, minima)
    xs = np.unique(firsts)
    fit = np.poly1d(np.polyfit(firsts, minima, 1))
    axis.plot(xs, fit(xs))
    if legend is not None:
        axis.legend(legend)
def plot_min_vs_i(input, i, val='costs', legend=None):
    """Scatter the epoch-``i`` value against the minimum value per result."""
    at_i = []
    minima = []
    for entry in input:
        series = entry['results'][val]
        at_i.append(series[i])
        minima.append(min(series))
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'Epoch {i+1} {val}')
    axis.set_ylabel(f'Minimum {val}')
    axis.set_title(f'Plot of epoch {i+1} {val} vs minimum {val}')
    axis.scatter(at_i, minima)
    if legend is not None:
        axis.legend(legend)
def plot_max_vs_first(input, val='costs', legend=None):
    """Scatter first-epoch value against the maximum value per result,
    with a least-squares fit line."""
    series = [entry['results'][val] for entry in input]
    firsts = [s[0] for s in series]
    maxima = [max(s) for s in series]
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Maximum {val}')
    axis.scatter(firsts, maxima)
    xs = np.unique(firsts)
    fit = np.poly1d(np.polyfit(firsts, maxima, 1))
    axis.plot(xs, fit(xs))
    if legend is not None:
        axis.legend(legend)
def plot_max_vs_i(input, i, val='costs', legend=None):
    """Scatter the epoch-``i`` value against the maximum value, one point per
    result, labelled by hidden size / learning rate / prior width.

    Note: the ``legend`` argument is ignored (matching the original
    behavior); labels are always rebuilt from the result dicts.
    """
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'Epoch {i+1} {val}')
    axis.set_ylabel(f'Maximum {val}')
    axis.set_title(f'Plot of epoch {i+1} {val} vs maximum {val}')
    labels = []
    for entry in input:
        series = entry['results'][val]
        axis.scatter(series[i], max(series))
        labels.append(f'{entry["hidden_size"]} lr: {entry["lr"]} prior width: {entry["prior_var"]}')
    axis.legend(labels)
def plot_last_vs_first(input, val='costs', legend=None):
    """Scatter first-epoch value against final-epoch value per result,
    with a least-squares fit line."""
    series = [entry['results'][val] for entry in input]
    firsts = [s[0] for s in series]
    finals = [s[-1] for s in series]
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Final epoch {val}')
    axis.scatter(firsts, finals)
    xs = np.unique(firsts)
    fit = np.poly1d(np.polyfit(firsts, finals, 1))
    axis.plot(xs, fit(xs))
    if legend is not None:
        axis.legend(legend)
def plot_last_vs_i(input, i, val='costs', legend=None):
    """Scatter the epoch-``i`` value against the final-epoch value.

    Bug fix: the original plotted ``r[0]`` (first epoch) on the x-axis even
    though the axis label promises epoch ``i``; it now uses ``r[i]``,
    consistent with plot_min_vs_i().
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel(f'{i} epoch {val}')
    ax.set_ylabel(f'Final epoch {val}')
    at_i = []
    finals = []
    for result in input:
        r = result['results'][val]
        at_i.append(r[i])
        finals.append(r[-1])
    ax.scatter(at_i, finals)
    ax.plot(np.unique(at_i), np.poly1d(np.polyfit(at_i, finals, 1))(np.unique(at_i)))
    if legend is not None:
        ax.legend(legend)
def plot_xy(x, y, x_lablel='', y_label='', legend=None):
    """Simple scatter of ``y`` against ``x`` with optional legend.

    (The misspelled ``x_lablel`` parameter name is kept — callers may pass
    it by keyword.)
    """
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(x_lablel)
    axis.set_ylabel(y_label)
    axis.scatter(x, y)
    if legend is not None:
        axis.legend(legend)
def plot_dict(x_dict, y_dict, x_lablel='', y_label='', log_scale=False, legend=None):
    """Scatter one series per key of ``x_dict`` / ``y_dict``.

    Note: the ``legend`` argument is ignored (matching the original
    behavior); the legend labels are always the dict keys.
    """
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(x_lablel)
    axis.set_ylabel(y_label)
    if log_scale:
        axis.set_xscale('log')
    keys = list(x_dict.keys())
    for name in keys:
        axis.scatter(x_dict[name], y_dict[name])
    axis.legend(keys)
def rank_best_value(input, n=10, value='accuracies', minimum=False):
    """Print the top-``n`` results ranked by their best ``value``.

    Ranks by the maximum of ``results[value]`` per entry (or the minimum
    when ``minimum`` is True) and prints ``hidden_size: value: best``.

    Bug fix: the print cutoff was hard-coded to 10 and ignored ``n``.
    """
    print(f'{"Minimum" if minimum else "Maximum"} {value} (limited to {n})')
    pairs = [
        (results['hidden_size'],
         min(results['results'][value]) if minimum else max(results['results'][value]))
        for results in input
    ]
    pairs.sort(key=lambda t: t[1], reverse=not minimum)
    for size, best in pairs[:n]:
        print(f'{size}: {value}: {best}')
    print('\n')
def rank_final_value(*input, n=10, value='accuracies', minimum=False):
    """Print, per result set, the top-``n`` entries ranked by the mean of the
    last 20 values of ``results[value]``.

    Labels are ``"<hidden_size> lr: <lr> prior width: <prior_var>"``.

    Bug fix: the print cutoff was hard-coded to 10 and ignored ``n``.
    """
    print(f'{"Minimum" if minimum else "Maximum"} final {value} (limited to {n})')
    for results in input:
        pairs = [
            (f'{result["hidden_size"]} lr: {result["lr"]} prior width: {result["prior_var"]}',
             np.mean(result['results'][value][-20:]))
            for result in results
        ]
        pairs.sort(key=lambda t: t[1], reverse=not minimum)
        for label, final in pairs[:n]:
            print(f'{label}: {value}: {final}')
"hutchinson.michael.john@gmail.com"
] | hutchinson.michael.john@gmail.com |
6da560cb7e9b94ac74c6ccaf666967b5c0da89c0 | 3d7dece5254e42059e8a2cb1e72b295460284983 | /components/py_engine/adapter/haas/fs/lib/qmi8610/qmi8610.py | 862159bccd9490abc5a0760baebe0603c505929c | [
"Apache-2.0"
] | permissive | windowxia/AliOS-Things | 172639d6e0d2b2e2e816bce757cf95e89187c132 | a99f20706f9c666903a12a205edce13263b1fadb | refs/heads/master | 2023-09-01T06:03:57.853390 | 2023-07-04T05:51:52 | 2023-07-04T06:49:36 | 149,751,180 | 0 | 0 | Apache-2.0 | 2018-09-21T10:56:09 | 2018-09-21T10:56:08 | null | UTF-8 | Python | false | false | 25,897 | py | """
Copyright (C) 2015-2021 Alibaba Group Holding Limited
MicroPython's driver for QMI8610
Author: HaaS
Date: 2021/09/14
"""
from micropython import const
from driver import I2C
from utime import sleep_ms
import math
M_PI = (3.14159265358979323846)
ONE_G = (9.80665)
FISIMU_STATUS1_CMD_DONE = const(0x01)
FISIMU_STATUS1_WAKEUP_EVENT = const(0x04)
FISIMU_CTRL7_DISABLE_ALL = const(0x0)
FISIMU_CTRL7_ACC_ENABLE = const(0x1)
FISIMU_CTRL7_GYR_ENABLE = const(0x2)
FISIMU_CTRL7_MAG_ENABLE = const(0x4)
FISIMU_CTRL7_AE_ENABLE = const(0x8)
FISIMU_CTRL7_ENABLE_MASK = const(0xF)
FISIMU_CONFIG_ACC_ENABLE = FISIMU_CTRL7_ACC_ENABLE
FISIMU_CONFIG_GYR_ENABLE = FISIMU_CTRL7_GYR_ENABLE
FISIMU_CONFIG_MAG_ENABLE = FISIMU_CTRL7_MAG_ENABLE
FISIMU_CONFIG_AE_ENABLE = FISIMU_CTRL7_AE_ENABLE
FISIMU_CONFIG_ACCGYR_ENABLE = (FISIMU_CONFIG_ACC_ENABLE | FISIMU_CONFIG_GYR_ENABLE)
FISIMU_CONFIG_ACCGYRMAG_ENABLE = (FISIMU_CONFIG_ACC_ENABLE | FISIMU_CONFIG_GYR_ENABLE | FISIMU_CONFIG_MAG_ENABLE)
FISIMU_CONFIG_AEMAG_ENABLE = (FISIMU_CONFIG_AE_ENABLE | FISIMU_CONFIG_MAG_ENABLE)
FisRegister_WhoAmI = const(0) # FIS device identifier register.
FisRegister_Revision = const(1) # FIS hardware revision register.
FisRegister_Ctrl1 = const(2) # General and power management modes.
FisRegister_Ctrl2 = const(3) # Accelerometer control. *
FisRegister_Ctrl3 = const(4) # Gyroscope control.
FisRegister_Ctrl4 = const(5) # Magnetometer control.
FisRegister_Ctrl5 = const(6) # Data processing settings.
FisRegister_Ctrl6 = const(7) # AttitudeEngine control.
FisRegister_Ctrl7 = const(8) # Sensor enabled status.
FisRegister_Ctrl8 = const(9) # Reserved - do not write.
FisRegister_Ctrl9 = const(10) # Host command register.
FisRegister_Cal1_L = const(11) # Calibration register 1 least significant byte.
FisRegister_Cal1_H = const(12) # Calibration register 1 most significant byte.
FisRegister_Cal2_L = const(13) # Calibration register 2 least significant byte.
FisRegister_Cal2_H = const(14) # Calibration register 2 most significant byte.
FisRegister_Cal3_L = const(15) # Calibration register 3 least significant byte.
FisRegister_Cal3_H = const(16) # Calibration register 3 most significant byte.
FisRegister_Cal4_L = const(17) # Calibration register 4 least significant byte.
FisRegister_Cal4_H = const(18) # Calibration register 4 most significant byte.
FisRegister_FifoCtrl = const(19) # FIFO control register.
FisRegister_FifoData = const(20) # FIFO data register.
FisRegister_FifoStatus = const(21) # FIFO status register.
FisRegister_Status0 = const(22) # Output data overrun and availability.
FisRegister_Status1 = const(23) # Miscellaneous status register.
FisRegister_CountOut = const(24) # Sample counter.
FisRegister_Ax_L = const(25) # Accelerometer X axis least significant byte.
FisRegister_Ax_H = const(26) # Accelerometer X axis most significant byte.
FisRegister_Ay_L = const(27) # Accelerometer Y axis least significant byte.
FisRegister_Ay_H = const(28) # Accelerometer Y axis most significant byte.
FisRegister_Az_L = const(29) # Accelerometer Z axis least significant byte.
FisRegister_Az_H = const(30) # Accelerometer Z axis most significant byte.
FisRegister_Gx_L = const(31) # Gyroscope X axis least significant byte.
FisRegister_Gx_H = const(32) # Gyroscope X axis most significant byte.
FisRegister_Gy_L = const(33) # Gyroscope Y axis least significant byte.
FisRegister_Gy_H = const(34) # Gyroscope Y axis most significant byte.
FisRegister_Gz_L = const(35) # Gyroscope Z axis least significant byte.
FisRegister_Gz_H = const(36) # Gyroscope Z axis most significant byte.
FisRegister_Mx_L = const(37) # Magnetometer X axis least significant byte.
FisRegister_Mx_H = const(38) # Magnetometer X axis most significant byte.
FisRegister_My_L = const(39) # Magnetometer Y axis least significant byte.
FisRegister_My_H = const(40) # Magnetometer Y axis most significant byte.
FisRegister_Mz_L = const(41) # Magnetometer Z axis least significant byte.
FisRegister_Mz_H = const(42) # Magnetometer Z axis most significant byte.
FisRegister_Q1_L = const(43) # Quaternion increment W least significant byte.
FisRegister_Q1_H = const(44) # Quaternion increment W most significant byte.
FisRegister_Q2_L = const(45) # Quaternion increment X least significant byte.
FisRegister_Q2_H = const(46) # Quaternion increment X most significant byte.
FisRegister_Q3_L = const(47) # Quaternion increment Y least significant byte.
FisRegister_Q3_H = const(48) # Quaternion increment Y most significant byte.
FisRegister_Q4_L = const(49) # Quaternion increment Z least significant byte.
FisRegister_Q4_H = const(50) # Quaternion increment Z most significant byte.
FisRegister_Dvx_L = const(51) # Velocity increment X least significant byte.
FisRegister_Dvx_H = const(52) # Velocity increment X most significant byte.
FisRegister_Dvy_L = const(53) # Velocity increment Y least significant byte.
FisRegister_Dvy_H = const(54) # Velocity increment Y most significant byte.
FisRegister_Dvz_L = const(55) # Velocity increment Z least significant byte.
FisRegister_Dvz_H = const(56) # Velocity increment Z most significant byte.
FisRegister_Temperature = const(57) # Temperature output.
FisRegister_AeClipping = const(58) # AttitudeEngine clipping flags.
FisRegister_AeOverflow = const(59) # AttitudeEngine overflow flags.
Ctrl9_Nop = const(0) # No operation.
Ctrl9_ResetFifo = const(0x2) # Reset FIFO.
Ctrl9_SetMagXCalibration = const(0x6) # Set magnetometer X calibration values.
Ctrl9_SetMagYCalibration = const(0x7) # Set magnetometer Y calibration values.
Ctrl9_SetMagZCalibration = const(0x8) # Set magnetometer Z calibration values.
Ctrl9_SetAccelOffset = const(0x12) # Set accelerometer offset correction value.
Ctrl9_SetGyroOffset = const(0x13) # Set gyroscope offset correction value.
Ctrl9_SetAccelSensitivity = const(0x14) # Set accelerometer sensitivity.
Ctrl9_SetGyroSensitivity = const(0x15) # Set gyroscope sensitivity.
Ctrl9_UpdateMagBias = const(0xB) # Update magnemoter bias compensation.
Ctrl9_TriggerMotionOnDemand = const(0x0c) # Trigger motion on demand sample.
Ctrl9_UpdateAttitudeEngineGyroBias = const(0xE) # Update gyroscope bias compensation.
Ctrl9_ReadTrimmedFrequencyValue = const(0x18) # Read frequency correction value.
Ctrl9_ReadFifo = const(0x0D) # Prepare for FIFO read sequence.
Ctrl9_ConfigureWakeOnMotion = const(0x19) # Set wake on motion parameters.
Lpf_Disable = const(0) # Disable low pass filter.
Lpf_Enable = const(1) # Enable low pass filter.
Hpf_Disable = const(0) # Disable high pass filter.
Hpf_Enable = const(1) # Enable high pass filter.
AccRange_2g = const(0 << 3) # +/- 2g range
AccRange_4g = const(1 << 3) # +/- 4g range
AccRange_8g = const(2 << 3) # +/- 8g range
AccRange_16g = const(3 << 3) # +/- 16g range
AccOdr_1024Hz = const(0) # High resolution 1024Hz output rate.
AccOdr_256Hz = const(1) # High resolution 256Hz output rate.
AccOdr_128Hz = const(2) # High resolution 128Hz output rate.
AccOdr_32Hz = const(3) # High resolution 32Hz output rate.
AccOdr_LowPower_128Hz = const(4) # Low power 128Hz output rate.
AccOdr_LowPower_64Hz = const(5) # Low power 64Hz output rate.
AccOdr_LowPower_25Hz = const(6) # Low power 25Hz output rate.
AccOdr_LowPower_3Hz = const(7) # Low power 3Hz output rate.
GyrRange_32dps = const(0 << 3) # +-32 degrees per second.
GyrRange_64dps = const(1 << 3) # +-64 degrees per second.
GyrRange_128dps = const(2 << 3) # +-128 degrees per second.
GyrRange_256dps = const(3 << 3) # +-256 degrees per second.
GyrRange_512dps = const(4 << 3) # +-512 degrees per second.
GyrRange_1024dps = const(5 << 3) # +-1024 degrees per second.
GyrRange_2048dps = const(6 << 3) # +-2048 degrees per second.
GyrRange_2560dps = const(7 << 3) # +-2560 degrees per second.
"""
Gyroscope output rate configuration.
"""
GyrOdr_1024Hz = const(0) # High resolution 1024Hz output rate.
GyrOdr_256Hz = const(1) # High resolution 256Hz output rate.
GyrOdr_128Hz = const(2) # High resolution 128Hz output rate.
GyrOdr_32Hz = const(3) # High resolution 32Hz output rate.
GyrOdr_OIS_8192Hz = const(6) # OIS Mode 8192Hz output rate.
GyrOdr_OIS_LL_8192Hz = const(7) # OIS LL Mode 8192Hz output rate.
AeOdr_1Hz = const(0) # 1Hz output rate.
AeOdr_2Hz = const(1) # 2Hz output rate.
AeOdr_4Hz = const(2) # 4Hz output rate.
AeOdr_8Hz = const(3) # 8Hz output rate.
AeOdr_16Hz = const(4) # 16Hz output rate.
AeOdr_32Hz = const(5) # 32Hz output rate.
AeOdr_64Hz = const(6) # 64Hz output rate.
"""
* Motion on demand mode.
*
* In motion on demand mode the application can trigger AttitudeEngine
* output samples as necessary. This allows the AttitudeEngine to be
* synchronized with external data sources.
*
* When in Motion on Demand mode the application should request new data
* by calling the FisImu_requestAttitudeEngineData() function. The
* AttitudeEngine will respond with a data ready event (INT2) when the
* data is available to be read.
"""
AeOdr_motionOnDemand = const(128) # 128Hz output rate.
MagOdr_32Hz = const(2) # 32Hz output rate.
MagDev_AK8975 = const(0 << 4) # AKM AK8975.
MagDev_AK8963 = const(1 << 4) # AKM AK8963.
AccUnit_g = const(0) # Accelerometer output in terms of g (9.81m/s^2).
AccUnit_ms2 = const(1) # Accelerometer output in terms of m/s^2.
GyrUnit_dps = const(0) # Gyroscope output in degrees/s.
GyrUnit_rads = const(1) # Gyroscope output in rad/s.
AXIS_X = const(0)
AXIS_Y = const(1)
AXIS_Z = const(2)
AXIS_TOTAL = const(4)
# FIS INT1 line.
Fis_Int1 = const(0 << 6)
# FIS INT2 line.
Fis_Int2 = const(1 << 6)
InterruptInitialState_high = const(1 << 7) # Interrupt high.
InterruptInitialState_low = const(0 << 7) # Interrupt low.
WomThreshold_high = const(128) # High threshold - large motion needed to wake.
WomThreshold_low = const(32) # Low threshold - small motion needed to wake.
acc_lsb_div = 0
gyro_lsb_div = 0
qmi8610_dict = {'temp': 0.0, 'gyroX': 0.0, 'gyroY': 0.0, 'gyroZ': 0.0, 'accX': 0.0, 'accY': 0.0, 'accZ': 0.0}
class QMI8610(object):
"""
This class implements QMI8610 chip's defs.
"""
    def __init__(self, i2cDev):
        """Bind the driver to an open ``driver.I2C`` bus object and run the
        sensor init sequence.

        Raises ValueError when ``i2cDev`` is not an I2C instance or when the
        init sequence reports failure.
        """
        self._i2cDev = None
        if not isinstance(i2cDev, I2C):
            raise ValueError("parameter is not an I2C object")
        # make QMI8610's internal object points to i2cDev
        self._i2cDev = i2cDev
        # Run the QMI8610 init sequence (self.init() is defined further down
        # in this class; presumably returns 0 on failure — TODO confirm).
        r = self.init()
        if r == 0:
            raise ValueError("QMI8610 init error")
def int16(self, dat):
if dat > (1 << 15):
return dat - (1 << 16)
else:
return dat
def int32(self, dat):
if dat > (1 << 31):
return dat - (1 << 32)
else:
return dat
#写寄存器
def writeReg(self, addr, value):
Reg = bytearray([addr, value])
self._i2cDev.write(Reg)
#print("--> write addr " + str(addr) + ", value = " + str(value))
#写多个寄存器
def writeRegs(self, addr, value, len):
Regs = bytearray(value)
if (len != Regs.count):
return "Error code:%d, Error message: %s" % (self, str(self.msg))
self.writeReg(addr,Regs)
#print("--> write addr " + str(addr) + ", value = " + str(Regs))
#读寄存器
def readReg(self, addr, len):
reg = bytearray([addr])
data = bytearray(len)
self._i2cDev.memRead(data, addr, 8)
#print("--> read " + str(len) + " bytes from addr " + str(addr) + ", " + str(len) + " bytes value = " + str(data))
return data
# 设置layout
def setLayout(self, layout):
sign = [1,2,3]
map = [1,2,3]
if (layout == 0):
sign[AXIS_X] = 1
sign[AXIS_Y] = 1
sign[AXIS_Z] = 1
map[AXIS_X] = AXIS_X
map[AXIS_Y] = AXIS_Y
map[AXIS_Z] = AXIS_Z
elif (layout == 1):
sign[AXIS_X] = -1
sign[AXIS_Y] = 1
sign[AXIS_Z] = 1
map[AXIS_X] = AXIS_Y
map[AXIS_Y] = AXIS_X
map[AXIS_Z] = AXIS_Z
elif (layout == 2):
sign[AXIS_X] = -1
sign[AXIS_Y] = -1
sign[AXIS_Z] = 1
map[AXIS_X] = AXIS_X
map[AXIS_Y] = AXIS_Y
map[AXIS_Z] = AXIS_Z
elif (layout == 3):
sign[AXIS_X] = 1
sign[AXIS_Y] = -1
sign[AXIS_Z] = 1
map[AXIS_X] = AXIS_Y
map[AXIS_Y] = AXIS_X
map[AXIS_Z] = AXIS_Z
elif (layout == 4):
sign[AXIS_X] = -1
sign[AXIS_Y] = 1
sign[AXIS_Z] = -1
map[AXIS_X] = AXIS_X
map[AXIS_Y] = AXIS_Y
map[AXIS_Z] = AXIS_Z
elif (layout == 5):
sign[AXIS_X] = 1
sign[AXIS_Y] = 1
sign[AXIS_Z] = -1
map[AXIS_X] = AXIS_Y
map[AXIS_Y] = AXIS_X
map[AXIS_Z] = AXIS_Z
elif (layout == 6):
sign[AXIS_X] = 1
sign[AXIS_Y] = -1
sign[AXIS_Z] = -1
map[AXIS_X] = AXIS_X
map[AXIS_Y] = AXIS_Y
map[AXIS_Z] = AXIS_Z
elif (layout == 7):
sign[AXIS_X] = 1
sign[AXIS_Y] = 1
sign[AXIS_Z] = 1
map[AXIS_X] = AXIS_X
map[AXIS_Y] = AXIS_Y
map[AXIS_Z] = AXIS_Z
def configACC(self, range, odr, lpfEnable, hpfEnable):
ctl_dada = 0
range_set = 0
global acc_lsb_div
if (range == AccRange_2g):
range_set = 0 << 3
acc_lsb_div = (1 << 14)
elif (range == AccRange_4g):
range_set = 1 << 3
acc_lsb_div = (1 << 13)
elif (range == AccRange_8g):
range_set = 2 << 3
acc_lsb_div = (1 << 12)
else:
range_set = 2 << 3
acc_lsb_div = (1 << 12)
ctl_dada = range_set | odr
self.writeReg(FisRegister_Ctrl2, ctl_dada)
# set LPF & HPF
ctl_dada = self.readReg(FisRegister_Ctrl5, 1)[0]
ctl_dada &= 0xfc
if (lpfEnable == Lpf_Enable):
ctl_dada |= 0x02
if (hpfEnable == Hpf_Enable):
ctl_dada |= 0x01
self.writeReg(FisRegister_Ctrl5, ctl_dada)
def configGyro(self, range, odr, lpfEnable, hpfEnable):
# Set the CTRL3 register to configure dynamic range and ODR
global gyro_lsb_div
ctl_dada = range | odr
self.writeReg(FisRegister_Ctrl3, ctl_dada)
# Store the scale factor for use when processing raw data
if (range == GyrRange_32dps):
gyro_lsb_div = 1024
elif (range == GyrRange_64dps):
gyro_lsb_div = 512
elif (range == GyrRange_128dps):
gyro_lsb_div = 256
elif (range == GyrRange_256dps):
gyro_lsb_div = 128
elif (range == GyrRange_512dps):
gyro_lsb_div = 64
elif (range == GyrRange_1024dps):
gyro_lsb_div = 32
elif (range == GyrRange_2048dps):
gyro_lsb_div = 16
elif (range == GyrRange_2560dps):
#gyro_lsb_div = 8
pass
else:
gyro_lsb_div = 32
# Conversion from degrees/s to rad/s if necessary
# set LPF & HPF
ctl_dada = self.readReg(FisRegister_Ctrl5, 1)[0]
ctl_dada &= 0xf3
if (lpfEnable == Lpf_Enable):
ctl_dada |= 0x08
if (hpfEnable == Hpf_Enable):
ctl_dada |= 0x04
self.writeReg(FisRegister_Ctrl5, ctl_dada)
def configAe(self, odr):
# Configure Accelerometer and Gyroscope settings
self.configACC(AccRange_8g, AccOdr_1024Hz, Lpf_Enable, Hpf_Disable)
self.configGyro(GyrRange_2048dps, GyrOdr_1024Hz, Lpf_Enable, Hpf_Disable)
self.writeReg(FisRegister_Ctrl5, odr)
def readStatus0(self):
status = self.readReg(FisRegister_Status0, 1)[0]
return status
def readStatus1(self):
status = self.readReg(FisRegister_Status1, 1)[0]
return status
# 读取xyz的值
def readXyz(self):
buf_reg = bytearray(12)
raw_acc_xyz = [1, 2, 3]
raw_gyro_xyz = [1, 2, 3]
xyz_t = [1, 2, 3, 4, 5, 6]
buf_reg = self.readReg(FisRegister_Ax_L | 0x80, 12)
raw_acc_xyz[0] = self.int16((buf_reg[1] << 8) | (buf_reg[0]))
raw_acc_xyz[1] = self.int16((buf_reg[3] << 8) | (buf_reg[2]))
raw_acc_xyz[2] = self.int16((buf_reg[5] << 8) | (buf_reg[4]))
raw_gyro_xyz[0] = self.int16((buf_reg[7] << 8) | (buf_reg[6]))
raw_gyro_xyz[1] = self.int16((buf_reg[9] << 8) | (buf_reg[8]))
raw_gyro_xyz[2] = self.int16((buf_reg[11] << 8) | (buf_reg[10]))
# m/s2
xyz_t[0] = (raw_acc_xyz[0] * ONE_G) / acc_lsb_div
xyz_t[1] = (raw_acc_xyz[1] * ONE_G) / acc_lsb_div
xyz_t[2] = (raw_acc_xyz[2] * ONE_G) / acc_lsb_div
xyz_t[0] = -xyz_t[0]
xyz_t[1] = -xyz_t[1]
xyz_t[2] = -xyz_t[2]
# rad/s
xyz_t[3] = (raw_gyro_xyz[0] * M_PI / 180) / gyro_lsb_div
xyz_t[4] = (raw_gyro_xyz[1] * M_PI / 180) / gyro_lsb_div
xyz_t[5] = (raw_gyro_xyz[2] * M_PI / 180) / gyro_lsb_div
xyz_t[3] = xyz_t[3]
xyz_t[4] = xyz_t[4]
xyz_t[5] = -xyz_t[5]
return xyz_t
def applyScaleFactor(self, scaleFactor, nElements, rawData, calibratedData):
for i in range(nElements):
calibratedData[i] = (scaleFactor * rawData[2 * i]) | (rawData[2 * i + 1] << 8)
def processAccelerometerData(self, rawData, calibratedData):
self.applyScaleFactor(ONE_G/acc_lsb_div, 3, rawData, calibratedData)
def processGyroscopeData(self, rawData, calibratedData):
self.applyScaleFactor(M_PI / (gyro_lsb_div * 180), 3, rawData, calibratedData)
def readRawsample(self, rawData, calibratedData):
self.applyScaleFactor(M_PI / (gyro_lsb_div * 180), 3, rawData, calibratedData)
def writeCalibrationVectorBuffer(self, calVector, conversionFactor, fractionalBits):
o = 0
calCmd = bytearray(6)
for i in range(3):
o = round(calVector[i] * conversionFactor * (1 << fractionalBits))
calCmd[(2 * i)] = o & 0xFF
calCmd[(2 * i) + 1] = o >> 8
self.writeRegs(FisRegister_Cal1_L, calCmd, 6)
def doCtrl9Command(self, cmd):
gyroConfig = 0
oisModeBits = const(0x06)
oisEnabled = 0
status = 0
count = 0
gyroConfig = self.readReg(FisRegister_Ctrl3, 1)[0]
oisEnabled = ((gyroConfig & oisModeBits) == oisModeBits)
if (oisEnabled):
self.writeReg(FisRegister_Ctrl3, (gyroConfig & ~oisModeBits))
self.writeReg(FisRegister_Ctrl9, cmd)
# Check that command has been executed
while (((status & FISIMU_STATUS1_CMD_DONE) == 0) and (count < 10000)):
status = self.readReg(FisRegister_Status1, 1)[0]
count += 1
if (oisEnabled):
# Re-enable OIS mode configuration if necessary
self.writeReg(FisRegister_Ctrl3, gyroConfig)
def applyAccelerometerOffset(self, offset, unit):
if (unit == AccUnit_ms2):
conversionFactor = 1 / ONE_G
else:
conversionFactor = 1
self.writeCalibrationVectorBuffer(offset, conversionFactor, 11)
self.doCtrl9Command(Ctrl9_SetAccelOffset)
def applyGyroscopeOffset(self, offset, unit):
if (unit == GyrUnit_rads):
conversionFactor = 180 / M_PI
else:
conversionFactor = 1
self.writeCalibrationVectorBuffer(offset, conversionFactor, 6)
self.doCtrl9Command(Ctrl9_SetGyroOffset)
def applyOffsetCalibration(self, accUnit, accOffset, gyrUnit, gyrOffset):
self.applyAccelerometerOffset(accOffset, accUnit)
self.applyGyroscopeOffset(gyrOffset, gyrUnit)
# for XKF3
def enableWakeOnMotion(self):
womCmd = bytearray[3]
interrupt = Fis_Int1
initialState = InterruptInitialState_low
threshold = WomThreshold_low
blankingTime = 0x00
blankingTimeMask = 0x3F
self.enableSensors(FISIMU_CTRL7_DISABLE_ALL)
self.configACC(AccRange_2g, AccOdr_LowPower_25Hz, Lpf_Disable, Hpf_Disable)
womCmd[0] = FisRegister_Cal1_L # WoM Threshold: absolute value in mg (with 1mg/LSB resolution)
womCmd[1] = threshold
womCmd[2] = interrupt | initialState | (blankingTime & blankingTimeMask)
self.writeReg(FisRegister_Cal1_L, womCmd[1])
self.writeReg(FisRegister_Cal1_H, womCmd[2])
self.doCtrl9Command(Ctrl9_ConfigureWakeOnMotion)
self.enableSensors(FISIMU_CTRL7_ACC_ENABLE)
def disableWakeOnMotion(self):
self.enableSensors(FISIMU_CTRL7_DISABLE_ALL)
self.writeReg(FisRegister_Cal1_L, 0)
self.doCtrl9Command(Ctrl9_ConfigureWakeOnMotion)
def enableSensors(self, enableFlags):
if (enableFlags & FISIMU_CONFIG_AE_ENABLE):
enableFlags |= FISIMU_CTRL7_ACC_ENABLE | FISIMU_CTRL7_GYR_ENABLE
self.writeReg(FisRegister_Ctrl7, enableFlags & FISIMU_CTRL7_ENABLE_MASK)
def configMAG(self, device, odr):
pass
def configApply(self, inputSelection, accRange, accOdr, gyrRange, gyrOdr, aeOdr, magOdr, magDev):
fisSensors = inputSelection
if (fisSensors & FISIMU_CONFIG_AE_ENABLE):
self.configAe(aeOdr)
else:
if (inputSelection & FISIMU_CONFIG_ACC_ENABLE):
self.configACC(accRange, accOdr, Lpf_Enable, Hpf_Disable)
if (inputSelection & FISIMU_CONFIG_GYR_ENABLE):
self.configGyro(gyrRange, gyrOdr, Lpf_Enable, Hpf_Disable)
if (inputSelection & FISIMU_CONFIG_MAG_ENABLE):
self.configMAG(magDev, magOdr)
self.enableSensors(fisSensors)
# 得到温度值
# 返回值:温度值
def getTemperature(self):
temp = self.readReg(FisRegister_Temperature, 1)[0]
return round(temp, 2)
# 得到加速度值(原始值)
# (gx,gy,gz):陀螺仪x,y,z轴的重力加速度,单位:m/s²
# 返回值:0,成功
# 其他,错误代码
def getAcceleration(self):
global acc_lsb_div
buf_reg = bytearray(6)
raw_acc_xyz = [1, 2, 3]
acc_xyz = [1, 2, 3]
buf_reg[0] = self.readReg(FisRegister_Ax_L, 1)[0]
buf_reg[1] = self.readReg(FisRegister_Ax_H, 1)[0]
buf_reg[2] = self.readReg(FisRegister_Ay_L, 1)[0]
buf_reg[3] = self.readReg(FisRegister_Ay_H, 1)[0]
buf_reg[4] = self.readReg(FisRegister_Az_L, 1)[0]
buf_reg[5] = self.readReg(FisRegister_Az_H, 1)[0]
raw_acc_xyz[0] = self.int16((buf_reg[1] << 8) | (buf_reg[0]))
raw_acc_xyz[1] = self.int16((buf_reg[3] << 8) | (buf_reg[2]))
raw_acc_xyz[2] = self.int16((buf_reg[5] << 8) | (buf_reg[4]))
print("raw acc is acc0[%d] acc1[%d] acc2[%d]" %(raw_acc_xyz[0], raw_acc_xyz[1], raw_acc_xyz[2]))
acc_xyz[0] = (raw_acc_xyz[0] * ONE_G) / acc_lsb_div
acc_xyz[1] = (raw_acc_xyz[1] * ONE_G) / acc_lsb_div
acc_xyz[2] = (raw_acc_xyz[2] * ONE_G) / acc_lsb_div
print("fis210x acc is", acc_xyz[0], acc_xyz[1], acc_xyz[2])
return acc_xyz
# 得到陀螺仪值(原始值)
# gx,gy,gz:陀螺仪x,y,z轴的原始读数(带符号)
# 返回值:0,成功
# 其他,错误代码
def getGyro(self):
global gyro_lsb_div
buf_reg = bytearray(6)
raw_gyro_xyz = [1, 2, 3]
gyro_xyz = [1, 2, 3]
#buf_reg = self.readReg(FisRegister_Gx_L, 6)
buf_reg[0] = self.readReg(FisRegister_Gx_L, 1)[0]
buf_reg[1] = self.readReg(FisRegister_Gx_H, 1)[0]
buf_reg[2] = self.readReg(FisRegister_Gy_L, 1)[0]
buf_reg[3] = self.readReg(FisRegister_Gy_H, 1)[0]
buf_reg[4] = self.readReg(FisRegister_Gz_L, 1)[0]
buf_reg[5] = self.readReg(FisRegister_Gz_H, 1)[0]
raw_gyro_xyz[0] = self.int16((buf_reg[1] << 8) | (buf_reg[0]))
raw_gyro_xyz[1] = self.int16((buf_reg[3] << 8) | (buf_reg[2]))
raw_gyro_xyz[2] = self.int16((buf_reg[5] << 8) | (buf_reg[4]))
#print("raw gyro is g0[%d] g1[%d g2[%d]" %(raw_gyro_xyz[0], raw_gyro_xyz[1], raw_gyro_xyz[2]))
gyro_xyz[0] = (raw_gyro_xyz[0] * 1.0) / gyro_lsb_div
gyro_xyz[1] = (raw_gyro_xyz[1] * 1.0) / gyro_lsb_div
gyro_xyz[2] = (raw_gyro_xyz[2] * 1.0) / gyro_lsb_div
#print("fis210x gyro is", gyro_xyz[0], gyro_xyz[1], gyro_xyz[2])
return gyro_xyz
def getData(self):
global qmi8610_dict
qmi8610_dict['temp'] = self.getTemperature()
arr = self.getGyro()
qmi8610_dict['gyroX'] = arr[0]
qmi8610_dict['gyroY'] = arr[1]
qmi8610_dict['gyroZ'] = arr[2]
brr = self.getAcceleration()
qmi8610_dict['accX'] = brr[0]
qmi8610_dict['accY'] = brr[1]
qmi8610_dict['accZ'] = brr[2]
return qmi8610_dict
def init(self):
chip_id = 0x00
chip_id = self.readReg(FisRegister_WhoAmI, 1)[0]
sleep_ms(100)
if (chip_id == 0xfc):
inputSelection = FISIMU_CONFIG_ACCGYR_ENABLE
accRange = AccRange_4g
accOdr = AccOdr_128Hz
gyrRange = GyrRange_1024dps # GyrRange_1024dps;
gyrOdr = GyrOdr_256Hz # GyrOdr_1024Hz
magOdr = MagOdr_32Hz
magDev = MagDev_AK8963
aeOdr = AeOdr_32Hz
sleep_ms(100)
self.configApply(inputSelection, accRange, accOdr, gyrRange, gyrOdr, aeOdr, magOdr, magDev)
sleep_ms(100)
self.setLayout(2)
else:
chip_id = 0
return chip_id
| [
"yilu.myl@alibaba-inc.com"
] | yilu.myl@alibaba-inc.com |
f0541ee8e9970bfd430b5485a39c0009a9631e76 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /pushbase/user_component.py | 4dbab9d5ed556ff989cde117595a4b3af7d19a17 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 2,340 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/user_component.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens, task
from ableton.v2.control_surface import Component
from . import sysex
class UserComponentBase(Component):
    """
    Tracks and propagates the controller's Live/User mode.

    The selected mode is pushed to the hardware through *value_control*
    (a sysex value control) and mode changes reported by the hardware are
    mirrored back (see __on_value).
    NOTE(review): decompiled source; stray trailing ``return`` statements
    are decompiler artifacts.
    """
    # Events fired around mode changes; observers subscribe by name
    # (notify_mode / notify_before_mode_sent / notify_after_mode_sent).
    __events__ = (u'mode', u'before_mode_sent', u'after_mode_sent')
    # When True, the mode-change sysex is sent one task tick later instead of
    # immediately (see _do_apply_mode).
    defer_sysex_sending = False

    def __init__(self, value_control=None, *a, **k):
        assert value_control is not None
        super(UserComponentBase, self).__init__(*a, **k)
        self._value_control = value_control
        # Wire the @listens('value') slot below to observe the control.
        self.__on_value.subject = self._value_control
        self._selected_mode = sysex.LIVE_MODE
        # Mode requested while the component was disabled; applied in update().
        self._pending_mode_to_select = None
        return

    def toggle_mode(self):
        # Flip between LIVE and USER mode.
        self.mode = sysex.LIVE_MODE if self.mode == sysex.USER_MODE else sysex.USER_MODE

    def _get_mode(self):
        return self._selected_mode

    def _set_mode(self, mode):
        self._do_set_mode(mode)

    # Public accessor for the currently selected mode.
    mode = property(_get_mode, _set_mode)

    def _do_set_mode(self, mode):
        # Apply immediately when enabled, otherwise defer until update().
        if self.is_enabled():
            self._apply_mode(mode)
        else:
            self._pending_mode_to_select = mode

    def update(self):
        super(UserComponentBase, self).update()
        # Flush a mode selection that was requested while disabled.
        if self.is_enabled() and self._pending_mode_to_select:
            self._apply_mode(self._pending_mode_to_select)
            self._pending_mode_to_select = None
        return

    def force_send_mode(self):
        # Resend the current mode even if it has not changed.
        self._do_apply_mode(self._selected_mode)

    def _apply_mode(self, mode):
        # Only send when the mode actually changes.
        if mode != self._selected_mode:
            self._do_apply_mode(mode)

    def _do_apply_mode(self, mode):
        self.notify_before_mode_sent(mode)
        if self.defer_sysex_sending:
            # Send on the next task tick, after a one-tick delay.
            self._tasks.add(task.sequence(task.delay(1), task.run(lambda : self._send_mode_change(mode))))
        else:
            self._send_mode_change(mode)

    def _send_mode_change(self, mode):
        self._selected_mode = mode
        self._value_control.send_value((mode,))
        self.notify_after_mode_sent(mode)

    @listens('value')
    def __on_value(self, value):
        # Hardware reported a mode change: record it and notify observers.
        mode = value[0]
        self._selected_mode = mode
        self.notify_mode(mode)
| [
"julien@julienbayle.net"
] | julien@julienbayle.net |
70b37329ad3c3cce6622f4f307c71cffaad6359f | ab19b1e637109f6a6f32e99714ea1c7cbe1d5ec0 | /month/migrations/0003_theme_slug.py | 1fbf6c4bf241ba87a30afe5e9f364aafa2a4cbf7 | [] | no_license | devonwarren/totemag | daf05876cfe636c4dcfe83b764900a0bc4c9c29d | 304ab0e2f72b926e63de706a6e3dc0b043db36fd | refs/heads/master | 2021-01-17T20:48:48.671352 | 2016-06-02T00:57:11 | 2016-06-02T00:57:11 | 58,146,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-31 19:21
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django ``makemigrations``: adds a unique,
    # auto-populated (from ``name``), always-updated slug field to the
    # ``Theme`` model. Migration files are replayed verbatim on deploy --
    # do not edit by hand.

    dependencies = [
        ('month', '0002_auto_20160218_1741'),
    ]

    operations = [
        migrations.AddField(
            model_name='theme',
            name='slug',
            field=autoslug.fields.AutoSlugField(always_update=True, editable=False, null=True, populate_from='name', unique=True, verbose_name='URL'),
        ),
    ]
| [
"devon.warren@gmail.com"
] | devon.warren@gmail.com |
7dce2e412e685f782d213ae6b57619108af2a154 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ksZrMdraPqHjvbaE6_21.py | 62dd6e59f1ddb1b33b29e51991bece4db558cf38 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py |
def largest_even(lst):
    """Return the largest even number in *lst*, or -1 if there is none.

    The previous implementation took ``list(set(...))[-1]``, relying on the
    iteration order of a set -- which is implementation-defined and not
    sorted -- so it could return a smaller even number; a bare ``except``
    also hid genuine errors.
    """
    evens = [n for n in lst if n % 2 == 0]
    return max(evens) if evens else -1
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fcac3eef07e02c904d9de6b2316190c32ac0beb8 | 6550cc368f029b3955261085eebbddcfee0547e1 | /第6部分-Django(哪吒,肖锋)/django-2-进阶-肖锋/day69/day69/about_middleware/app01/views.py | 5de771df74370c060b8d9679eadaa846206f9934 | [] | no_license | vividyellow/oldboyeduPython14qi | d00c8f45326e16464c3d4e8df200d93779f68bd3 | de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11 | refs/heads/master | 2022-09-17T21:03:17.898472 | 2020-01-31T10:55:01 | 2020-01-31T10:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from django.shortcuts import render, HttpResponse
def index(request,num):
    """Demo view: attaches a ``render`` callable to a plain HttpResponse.

    Presumably used to demonstrate Django's template-response middleware
    phase, which calls ``response.render()`` when the attribute exists --
    TODO confirm against the project's middleware classes.
    ``num`` is the value captured by the URLconf pattern.
    """
    # print(id(request))
    print('这是index函数')
    print(num)
    # int('xxxx')
    ret = HttpResponse('ok')
    def xxxx():
        # Replacement render hook; logs and returns a fresh response.
        print('这是index中的xxxx')
        return HttpResponse('这是index中的xxxx')
    ret.render = xxxx
    print(id(ret))
    return ret
| [
"524991368@qq.com"
] | 524991368@qq.com |
ba7f3cbbdd32ee244a8efb114ad65a1eb459ec44 | 49812e663d0033700af72c4f451581d1f05791ef | /scons/scons-local-3.0.1/SCons/Tool/GettextCommon.py | 65bcab474f5520609b22b109e130e8f6ada59217 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | faustus123/JANA | 1d9f6af0976b45eb9ceade584f3c0faeacf19b2c | 38ca14e79deeb4c13042c60d948356ab8e98cf0c | refs/heads/master | 2023-01-09T04:18:06.795419 | 2020-11-14T16:00:10 | 2020-11-14T16:00:10 | 103,759,870 | 0 | 2 | Apache-2.0 | 2020-11-14T15:57:50 | 2017-09-16T14:50:58 | Python | UTF-8 | Python | false | false | 18,282 | py | """SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
import SCons.Warnings
import re
#############################################################################
# Per-program warning hierarchy: one "ToolWarning" base plus a "NotFound"
# subclass for each gettext utility wrapped by this toolset.
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
# Register the classes above so SCons actually emits these warnings
# (warning classes are suppressed unless explicitly enabled).
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
    """A factory producing `PO` target nodes with gettext-friendly defaults.

    Defaults differ from those of plain `SCons.Node.FS.FS` factories: nodes
    created here are marked `Precious` and excluded from `Clean` (both are
    required by the gettext builders and actions), may be ignored from the
    default target `'.'`, and may be attached to an always-built alias.
    """

    def __init__(self, env, nodefault=True, alias=None, precious=True
                 , noclean=True):
        """Remember the factory settings.

        - *env* (`SCons.Environment.Environment`)
        - *nodefault* (`boolean`) - ignore produced nodes from default target `'.'`
        - *alias* (`string`) - attach produced nodes to this always-built alias
        - *precious* (`boolean`) - mark produced nodes as `Precious`
        - *noclean* (`boolean`) - exclude produced nodes from `Clean`
        """
        self.env = env
        self.alias = alias
        self.precious = precious
        self.noclean = noclean
        self.nodefault = nodefault

    def _create_node(self, name, factory, directory=None, create=1):
        """Create a node via *factory* and apply the stored settings."""
        import SCons.Util
        new_node = factory(name, directory, create)
        new_node.set_noclean(self.noclean)
        new_node.set_precious(self.precious)
        if self.nodefault:
            self.env.Ignore('.', new_node)
        if self.alias:
            alias_node = self.env.Alias(self.alias, new_node)
            self.env.AlwaysBuild(alias_node)
        return new_node

    def Entry(self, name, directory=None, create=1):
        """Create a `SCons.Node.FS.Entry` with the factory defaults applied."""
        return self._create_node(name, self.env.fs.Entry, directory, create)

    def File(self, name, directory=None, create=1):
        """Create a `SCons.Node.FS.File` with the factory defaults applied."""
        return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)  # '#'-comment, to end of line
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)  # a single language identifier
#############################################################################
def _read_linguas_from_files(env, linguas_files=None):
    """Return the list of language codes read from LINGUAS-style file(s).

    *linguas_files* may be a file name/node or a list of them; any other
    truthy value means "use the default ``LINGUAS`` file", and ``None``
    yields ``[]``.  '#'-comments inside the files are ignored.
    """
    import SCons.Util
    import SCons.Environment
    global _re_comment
    global _re_lang
    explicit = SCons.Util.is_List(linguas_files) \
               or SCons.Util.is_String(linguas_files) \
               or isinstance(linguas_files, SCons.Node.FS.Base)
    if linguas_files and not explicit:
        # Truthy non-file argument (e.g. True): fall back to 'LINGUAS'.
        linguas_files = ['LINGUAS']
    if linguas_files is None:
        return []
    languages = []
    for node in env.arg2nodes(linguas_files):
        text = _re_comment.sub("", node.get_text_contents())
        languages.extend([lang for lang in _re_lang.findall(text) if lang])
    return languages
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
    """ `PO` file builder.

    This is multi-target single-source builder. In typical situation the source
    is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
    targets to be updated from this `POT`. We must run
    `SCons.Builder.BuilderBase._execute()` separatelly for each target to track
    dependencies separatelly for each target file.

    **NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
    with target being list of all targets, all targets would be rebuilt each time
    one of the targets from this list is missing. This would happen, for example,
    when new language `ll` enters `LINGUAS_FILE` (at this moment there is no
    `ll.po` file yet). To avoid this, we override
    `SCons.Builder.BuilerBase._execute()` and call it separatelly for each
    target. Here we also append to the target list the languages read from
    `LINGUAS_FILE`.
    """
    #
    # * The argument for overriding _execute(): We must use environment with
    #   builder overrides applied (see BuilderBase.__init__(). Here it comes for
    #   free.
    # * The argument against using 'emitter': The emitter is called too late
    #   by BuilderBase._execute(). If user calls, for example:
    #
    #     env.POUpdate(LINGUAS_FILE = 'LINGUAS')
    #
    #   the builder throws error, because it is called with target=None,
    #   source=None and is trying to "generate" sources or target list first.
    #   If user calls
    #
    #     env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
    #
    #   the env.BuilderWrapper() calls our builder with target=None,
    #   source=['foo', 'baz']. The BuilderBase._execute() then splits execution
    #   and execute iterativelly (recursion) self._execute(None, source[i]).
    #   After that it calls emitter (which is quite too late). The emitter is
    #   also called in each iteration, what makes things yet worse.
    def __init__(self, env, **kw):
        # Fill in gettext-specific defaults only where the caller did not
        # provide explicit values.
        if not 'suffix' in kw:
            kw['suffix'] = '$POSUFFIX'
        if not 'src_suffix' in kw:
            kw['src_suffix'] = '$POTSUFFIX'
        if not 'src_builder' in kw:
            kw['src_builder'] = '_POTUpdateBuilder'
        if not 'single_source' in kw:
            kw['single_source'] = True
        alias = None
        if 'target_alias' in kw:
            # Consumed here; BuilderBase does not understand 'target_alias'.
            alias = kw['target_alias']
            del kw['target_alias']
        if not 'target_factory' in kw:
            kw['target_factory'] = _POTargetFactory(env, alias=alias).File
        BuilderBase.__init__(self, **kw)

    def _execute(self, env, target, source, *args, **kw):
        """ Execute builder's actions.

        Here we append to `target` the languages read from `$LINGUAS_FILE` and
        apply `SCons.Builder.BuilderBase._execute()` separatelly to each target.
        The arguments and return value are same as for
        `SCons.Builder.BuilderBase._execute()`.
        """
        import SCons.Util
        import SCons.Node
        linguas_files = None
        if 'LINGUAS_FILE' in env and env['LINGUAS_FILE']:
            linguas_files = env['LINGUAS_FILE']
            # This prevents endless recursion loop (we'll be invoked once for
            # each target appended here, we must not extend the list again).
            env['LINGUAS_FILE'] = None
            linguas = _read_linguas_from_files(env, linguas_files)
            if SCons.Util.is_List(target):
                target.extend(linguas)
            elif target is not None:
                target = [target] + linguas
            else:
                target = linguas
        if not target:
            # Let the SCons.BuilderBase to handle this patologic situation
            return BuilderBase._execute(self, env, target, source, *args, **kw)
        # The rest is ours
        if not SCons.Util.is_List(target):
            target = [target]
        result = []
        for tgt in target:
            # One BuilderBase._execute() call per PO target, so dependencies
            # are tracked per target file (see class docstring).
            r = BuilderBase._execute(self, env, [tgt], source, *args, **kw)
            result.extend(r)
        if linguas_files is not None:
            # Restore the setting temporarily cleared above.
            env['LINGUAS_FILE'] = linguas_files
        return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
    """ Function for `Translate()` pseudo-builder """
    # Refresh the POT template from the sources first, then update the PO
    # file for each requested language from that template.
    if target is None:
        target = []
    pot = env.POTUpdate(None, source, *args, **kw)
    return env.POUpdate(target, pot, *args, **kw)
#############################################################################
#############################################################################
class RPaths(object):
    """Callable mapping SCons nodes to paths relative to the current dir.

    `SCons.Node.FS.Base.get_path()` yields absolute paths for nodes located
    outside of the environment's current working directory.  `POT`/`PO`
    tooling wants source references that stay valid wherever the source tree
    is checked out (a translator's `PO` editor resolves them relative to the
    `PO` file), so this helper always produces pathnames relative to
    `env.fs.getcwd()` -- the directory *xgettext(1)* is assumed to run from,
    i.e. where the target template is created.

    Note: this assumes a single local filesystem holding both the source
    files and the generated templates (no cross-host or, on windows,
    cross-drive paths).

    Intended use, in `xgettext.py`::

        sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
        env.Append(
            XGETTEXTCOM = 'XGETTEXT ... ' + sources,
            XgettextRPaths = RPaths(env)
        )
    """
    # Paths are computed relative to the environment's cwd only; the
    # TARGET-aware machinery (SCons.Defaults.Variable_Caller) is deliberately
    # not used -- it seems to be enough for our purposes.
    def __init__(self, env):
        """Remember *env*, whose filesystem defines the reference directory."""
        self.env = env

    # FIXME: unsure about the general meaning of *args/**kw here; they are
    # accepted for interface compatibility and ignored.
    def __call__(self, nodes, *args, **kw):
        """Map *nodes* to a tuple of path strings relative to the cwd.

        Entries that are not `SCons.Node.FS.Base` instances are silently
        skipped.
        """
        import os
        import SCons.Node.FS
        base_dir = self.env.fs.getcwd().get_abspath()
        relative = []
        for node in nodes:
            if isinstance(node, SCons.Node.FS.Base):
                relative.append(os.path.relpath(node.get_abspath(), base_dir))
            # FIXME: other node types possible here?
        return tuple(relative)
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
    """ Action function for `POInit` builder.

    For every PO target that does not exist yet, either run msginit (when
    ``$POAUTOINIT`` is set) or print a hint telling the translator how to
    create the file manually. Returns the first non-zero action status, or 0.
    """
    nop = lambda target, source, env: 0
    if 'POAUTOINIT' in env:
        autoinit = env['POAUTOINIT']
    else:
        autoinit = False
    # Well, if everything outside works well, this loop should do single
    # iteration. Otherwise we are rebuilding all the targets even, if just
    # one has changed (but is this our fault?).
    for tgt in target:
        if not tgt.exists():
            if autoinit:
                # NOTE(review): SCons.Action is not imported by this module;
                # this works only because the SCons package has it loaded by
                # the time the action runs -- TODO confirm.
                action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
            else:
                msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
                      + 'If you are a translator, you can create it through: \n' \
                      + '$MSGINITCOM'
                # A no-op action whose "strfunction" message instructs the user.
                action = SCons.Action.Action(nop, msg)
            status = action([tgt], source, env)
            if status: return status
    return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
    """Return the *xgettext(1)* program for *env*.

    ``env['XGETTEXT']`` wins when set; otherwise fall back to
    ``env.Detect()``. Raises `SCons.Errors.StopError` when the program
    cannot be located. (The original had an unreachable ``return None``
    after the raise; removed.)
    """
    if 'XGETTEXT' in env:
        return env['XGETTEXT']
    xgettext = env.Detect('xgettext')
    if xgettext:
        return xgettext
    raise SCons.Errors.StopError(XgettextNotFound, "Could not detect xgettext")
#############################################################################
def _xgettext_exists(env):
    """Tool existence predicate used by the `xgettext` tool module."""
    return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
    """Return the *msginit(1)* program for *env*.

    ``env['MSGINIT']`` wins when set; otherwise fall back to
    ``env.Detect()``. Raises `SCons.Errors.StopError` when the program
    cannot be located. (The original had an unreachable ``return None``
    after the raise; removed.)
    """
    if 'MSGINIT' in env:
        return env['MSGINIT']
    msginit = env.Detect('msginit')
    if msginit:
        return msginit
    raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
#############################################################################
def _msginit_exists(env):
    """Tool existence predicate used by the `msginit` tool module."""
    return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
    """Return the *msgmerge(1)* program for *env*.

    ``env['MSGMERGE']`` wins when set; otherwise fall back to
    ``env.Detect()``. Raises `SCons.Errors.StopError` when the program
    cannot be located. (The original had an unreachable ``return None``
    after the raise; removed.)
    """
    if 'MSGMERGE' in env:
        return env['MSGMERGE']
    msgmerge = env.Detect('msgmerge')
    if msgmerge:
        return msgmerge
    raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
#############################################################################
def _msgmerge_exists(env):
    """Tool existence predicate used by the `msgmerge` tool module."""
    return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if 'MSGFMT' in env:
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
""" List tools that shall be generated by top-level `gettext` tool """
return ['xgettext', 'msginit', 'msgmerge', 'msgfmt']
#############################################################################
| [
"davidl@jlab.org"
] | davidl@jlab.org |
e7a09ab9ec31c89c57ed38ea021b3bf04496abf5 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/recoveryservices/v20210801/protected_item.py | a2f146601d0a8d02bd34288bd91194fa26b3ab31 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,641 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ProtectedItemArgs', 'ProtectedItem']
@pulumi.input_type
class ProtectedItemArgs:
def __init__(__self__, *,
container_name: pulumi.Input[str],
fabric_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
vault_name: pulumi.Input[str],
e_tag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union['AzureFileshareProtectedItemArgs', 'AzureIaaSClassicComputeVMProtectedItemArgs', 'AzureIaaSComputeVMProtectedItemArgs', 'AzureIaaSVMProtectedItemArgs', 'AzureSqlProtectedItemArgs', 'AzureVmWorkloadProtectedItemArgs', 'AzureVmWorkloadSAPAseDatabaseProtectedItemArgs', 'AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs', 'AzureVmWorkloadSQLDatabaseProtectedItemArgs', 'DPMProtectedItemArgs', 'GenericProtectedItemArgs', 'MabFileFolderProtectedItemArgs']]] = None,
protected_item_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ProtectedItem resource.
:param pulumi.Input[str] container_name: Container name associated with the backup item.
:param pulumi.Input[str] fabric_name: Fabric name associated with the backup item.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[str] vault_name: The name of the recovery services vault.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Union['AzureFileshareProtectedItemArgs', 'AzureIaaSClassicComputeVMProtectedItemArgs', 'AzureIaaSComputeVMProtectedItemArgs', 'AzureIaaSVMProtectedItemArgs', 'AzureSqlProtectedItemArgs', 'AzureVmWorkloadProtectedItemArgs', 'AzureVmWorkloadSAPAseDatabaseProtectedItemArgs', 'AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs', 'AzureVmWorkloadSQLDatabaseProtectedItemArgs', 'DPMProtectedItemArgs', 'GenericProtectedItemArgs', 'MabFileFolderProtectedItemArgs']] properties: ProtectedItemResource properties
:param pulumi.Input[str] protected_item_name: Item name to be backed up.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "container_name", container_name)
pulumi.set(__self__, "fabric_name", fabric_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vault_name", vault_name)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if protected_item_name is not None:
pulumi.set(__self__, "protected_item_name", protected_item_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
"""
Container name associated with the backup item.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="fabricName")
def fabric_name(self) -> pulumi.Input[str]:
"""
Fabric name associated with the backup item.
"""
return pulumi.get(self, "fabric_name")
@fabric_name.setter
def fabric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "fabric_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group where the recovery services vault is present.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> pulumi.Input[str]:
"""
The name of the recovery services vault.
"""
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vault_name", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Union['AzureFileshareProtectedItemArgs', 'AzureIaaSClassicComputeVMProtectedItemArgs', 'AzureIaaSComputeVMProtectedItemArgs', 'AzureIaaSVMProtectedItemArgs', 'AzureSqlProtectedItemArgs', 'AzureVmWorkloadProtectedItemArgs', 'AzureVmWorkloadSAPAseDatabaseProtectedItemArgs', 'AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs', 'AzureVmWorkloadSQLDatabaseProtectedItemArgs', 'DPMProtectedItemArgs', 'GenericProtectedItemArgs', 'MabFileFolderProtectedItemArgs']]]:
"""
ProtectedItemResource properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Union['AzureFileshareProtectedItemArgs', 'AzureIaaSClassicComputeVMProtectedItemArgs', 'AzureIaaSComputeVMProtectedItemArgs', 'AzureIaaSVMProtectedItemArgs', 'AzureSqlProtectedItemArgs', 'AzureVmWorkloadProtectedItemArgs', 'AzureVmWorkloadSAPAseDatabaseProtectedItemArgs', 'AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs', 'AzureVmWorkloadSQLDatabaseProtectedItemArgs', 'DPMProtectedItemArgs', 'GenericProtectedItemArgs', 'MabFileFolderProtectedItemArgs']]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="protectedItemName")
def protected_item_name(self) -> Optional[pulumi.Input[str]]:
"""
Item name to be backed up.
"""
return pulumi.get(self, "protected_item_name")
@protected_item_name.setter
def protected_item_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protected_item_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ProtectedItem(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureFileshareProtectedItemArgs'], pulumi.InputType['AzureIaaSClassicComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSVMProtectedItemArgs'], pulumi.InputType['AzureSqlProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPAseDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSQLDatabaseProtectedItemArgs'], pulumi.InputType['DPMProtectedItemArgs'], pulumi.InputType['GenericProtectedItemArgs'], pulumi.InputType['MabFileFolderProtectedItemArgs']]]] = None,
protected_item_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Base class for backup items.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_name: Container name associated with the backup item.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input[str] fabric_name: Fabric name associated with the backup item.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Union[pulumi.InputType['AzureFileshareProtectedItemArgs'], pulumi.InputType['AzureIaaSClassicComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSVMProtectedItemArgs'], pulumi.InputType['AzureSqlProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPAseDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSQLDatabaseProtectedItemArgs'], pulumi.InputType['DPMProtectedItemArgs'], pulumi.InputType['GenericProtectedItemArgs'], pulumi.InputType['MabFileFolderProtectedItemArgs']]] properties: ProtectedItemResource properties
:param pulumi.Input[str] protected_item_name: Item name to be backed up.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] vault_name: The name of the recovery services vault.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProtectedItemArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Base class for backup items.
:param str resource_name: The name of the resource.
:param ProtectedItemArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProtectedItemArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureFileshareProtectedItemArgs'], pulumi.InputType['AzureIaaSClassicComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSComputeVMProtectedItemArgs'], pulumi.InputType['AzureIaaSVMProtectedItemArgs'], pulumi.InputType['AzureSqlProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPAseDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs'], pulumi.InputType['AzureVmWorkloadSQLDatabaseProtectedItemArgs'], pulumi.InputType['DPMProtectedItemArgs'], pulumi.InputType['GenericProtectedItemArgs'], pulumi.InputType['MabFileFolderProtectedItemArgs']]]] = None,
protected_item_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProtectedItemArgs.__new__(ProtectedItemArgs)
if container_name is None and not opts.urn:
raise TypeError("Missing required property 'container_name'")
__props__.__dict__["container_name"] = container_name
__props__.__dict__["e_tag"] = e_tag
if fabric_name is None and not opts.urn:
raise TypeError("Missing required property 'fabric_name'")
__props__.__dict__["fabric_name"] = fabric_name
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
__props__.__dict__["protected_item_name"] = protected_item_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
if vault_name is None and not opts.urn:
raise TypeError("Missing required property 'vault_name'")
__props__.__dict__["vault_name"] = vault_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210801:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20160601:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20160601:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20190513:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20190513:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20190615:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20190615:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20201001:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201001:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20201201:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201201:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210101:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210101:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201preview:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201preview:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210210:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210210:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210301:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210301:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210401:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210401:ProtectedItem"), 
pulumi.Alias(type_="azure-native:recoveryservices/v20210601:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210601:ProtectedItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210701:ProtectedItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210701:ProtectedItem")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ProtectedItem, __self__).__init__(
'azure-native:recoveryservices/v20210801:ProtectedItem',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ProtectedItem':
"""
Get an existing ProtectedItem resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ProtectedItemArgs.__new__(ProtectedItemArgs)
__props__.__dict__["e_tag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ProtectedItem(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
ProtectedItemResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
e7f5999714364ca89bfd6d481fb3df9478301b51 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_fondue.py | 40fa3e508406b5f07e49c5433e9cbb0992d0ff0d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py |
#calss header
class _FONDUE():
def __init__(self,):
self.name = "FONDUE"
self.definitions = [u'a hot dish prepared by keeping a container of either hot oil or melted cheese over a flame at the table and putting pieces of meat in the oil to be cooked or pieces of bread into the cheese: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cbf38e40373cc0ba6702a4cd35e5d42ac32433e4 | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/allauth/socialaccount/app_settings.py | feffe48c9996e4b546c0e9e2cfdecc3b368227bf | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 2,466 | py | class AppSettings(object):
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
@property
def QUERY_EMAIL(self):
"""
Request e-mail address from 3rd party account provider?
E.g. using OpenID AX
"""
from allauth.account import app_settings as account_settings
return self._setting("QUERY_EMAIL",
account_settings.EMAIL_REQUIRED)
@property
def AUTO_SIGNUP(self):
"""
Attempt to bypass the signup form by using fields (e.g. username,
email) retrieved from the social account provider. If a conflict
arises due to a duplicate e-mail signup form will still kick in.
"""
return self._setting("AUTO_SIGNUP", True)
@property
def PROVIDERS(self):
"""
Provider specific settings
"""
return self._setting("PROVIDERS", {})
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_VERIFICATION",
account_settings.EMAIL_VERIFICATION)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.socialaccount.adapter'
'.DefaultSocialAccountAdapter')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def STORE_TOKENS(self):
return self._setting('STORE_TOKENS', True)
@property
def UID_MAX_LENGTH(self):
return 191
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys # noqa
app_settings = AppSettings('SOCIALACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| [
"levabd@gmail.com"
] | levabd@gmail.com |
d04dd1eb86c2d5be15f8e8890339a17b61609cdb | 60b48df762a515a734cfbedd7ca101df43f04824 | /python/ray/tune/callback.py | 1782127612853015a8431a862aee2d96d2901aad | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | LuBingtan/ray | a02b13c4dceab2b0d54870fd3abae5c11bae916e | 298742d7241681ee1f307ec0dd3cd7e9713a3c7d | refs/heads/master | 2023-03-05T16:32:35.596725 | 2022-06-05T23:21:53 | 2022-06-05T23:21:53 | 223,334,544 | 0 | 1 | Apache-2.0 | 2023-03-04T08:56:53 | 2019-11-22T06:01:51 | Python | UTF-8 | Python | false | false | 10,687 | py | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from abc import ABCMeta
import warnings
from ray.tune.checkpoint_manager import _TuneCheckpoint
from ray.util.annotations import PublicAPI, DeveloperAPI
if TYPE_CHECKING:
from ray.tune.trial import Trial
from ray.tune.stopper import Stopper
class _CallbackMeta(ABCMeta):
"""A helper metaclass to ensure container classes (e.g. CallbackList) have
implemented all the callback methods (e.g. `on_*`).
"""
def __new__(mcs, name: str, bases: Tuple[type], attrs: Dict[str, Any]) -> type:
cls = super().__new__(mcs, name, bases, attrs)
if mcs.need_check(cls, name, bases, attrs):
mcs.check(cls, name, bases, attrs)
return cls
@classmethod
def need_check(
mcs, cls: type, name: str, bases: Tuple[type], attrs: Dict[str, Any]
) -> bool:
return attrs.get("IS_CALLBACK_CONTAINER", False)
@classmethod
def check(
mcs, cls: type, name: str, bases: Tuple[type], attrs: Dict[str, Any]
) -> None:
methods = set()
for base in bases:
methods.update(
attr_name
for attr_name, attr in vars(base).items()
if mcs.need_override_by_subclass(attr_name, attr)
)
overridden = {
attr_name
for attr_name, attr in attrs.items()
if mcs.need_override_by_subclass(attr_name, attr)
}
missing = methods.difference(overridden)
if missing:
raise TypeError(
f"Found missing callback method: {missing} "
f"in class {cls.__module__}.{cls.__qualname__}."
)
@classmethod
def need_override_by_subclass(mcs, attr_name: str, attr: Any) -> bool:
return (attr_name.startswith("on_") or attr_name == "setup") and callable(attr)
@PublicAPI(stability="beta")
class Callback(metaclass=_CallbackMeta):
"""Tune base callback that can be extended and passed to a ``TrialRunner``
Tune callbacks are called from within the ``TrialRunner`` class. There are
several hooks that can be used, all of which are found in the submethod
definitions of this base class.
The parameters passed to the ``**info`` dict vary between hooks. The
parameters passed are described in the docstrings of the methods.
This example will print a metric each time a result is received:
.. code-block:: python
from ray import tune
from ray.tune import Callback
class MyCallback(Callback):
def on_trial_result(self, iteration, trials, trial, result,
**info):
print(f"Got result: {result['metric']}")
def train(config):
for i in range(10):
tune.report(metric=i)
tune.run(
train,
callbacks=[MyCallback()])
"""
# arguments here match Experiment.public_spec
def setup(
self,
stop: Optional["Stopper"] = None,
num_samples: Optional[int] = None,
total_num_samples: Optional[int] = None,
**info,
):
"""Called once at the very beginning of training.
Any Callback setup should be added here (setting environment
variables, etc.)
Arguments:
stop: Stopping criteria.
If ``time_budget_s`` was passed to ``tune.run``, a
``TimeoutStopper`` will be passed here, either by itself
or as a part of a ``CombinedStopper``.
num_samples: Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
`num_samples` of times. If this is -1, (virtually) infinite
samples are generated until a stopping condition is met.
total_num_samples: Total number of samples factoring
in grid search samplers.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
"""Called at the start of each tuning loop step.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_step_end(self, iteration: int, trials: List["Trial"], **info):
"""Called at the end of each tuning loop step.
The iteration counter is increased before this hook is called.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_start(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
"""Called after starting a trial instance.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just has been started.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_restore(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
"""Called after restoring a trial instance.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just has been restored.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_save(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
"""Called after receiving a checkpoint from a trial.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just saved a checkpoint.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_result(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
result: Dict,
**info,
):
"""Called after receiving a result from a trial.
The search algorithm and scheduler are notified before this
hook is called.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just sent a result.
result: Result that the trial sent.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
"""Called after a trial instance completed.
The search algorithm and scheduler are notified before this
hook is called.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just has been completed.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_trial_error(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
"""Called after a trial instance failed (errored).
The search algorithm and scheduler are notified before this
hook is called.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just has errored.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_checkpoint(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
checkpoint: _TuneCheckpoint,
**info,
):
"""Called after a trial saved a checkpoint with Tune.
Arguments:
iteration: Number of iterations of the tuning loop.
trials: List of trials.
trial: Trial that just has errored.
checkpoint: Checkpoint object that has been saved
by the trial.
**info: Kwargs dict for forward compatibility.
"""
pass
def on_experiment_end(self, trials: List["Trial"], **info):
"""Called after experiment is over and all trials have concluded.
Arguments:
trials: List of trials.
**info: Kwargs dict for forward compatibility.
"""
pass
@DeveloperAPI
class CallbackList(Callback):
"""Call multiple callbacks at once."""
IS_CALLBACK_CONTAINER = True
def __init__(self, callbacks: List[Callback]):
self._callbacks = callbacks
def setup(self, **info):
for callback in self._callbacks:
try:
callback.setup(**info)
except TypeError as e:
if "argument" in str(e):
warnings.warn(
"Please update `setup` method in callback "
f"`{callback.__class__}` to match the method signature"
" in `ray.tune.callback.Callback`.",
FutureWarning,
)
callback.setup()
else:
raise e
def on_step_begin(self, **info):
for callback in self._callbacks:
callback.on_step_begin(**info)
def on_step_end(self, **info):
for callback in self._callbacks:
callback.on_step_end(**info)
def on_trial_start(self, **info):
for callback in self._callbacks:
callback.on_trial_start(**info)
def on_trial_restore(self, **info):
for callback in self._callbacks:
callback.on_trial_restore(**info)
def on_trial_save(self, **info):
for callback in self._callbacks:
callback.on_trial_save(**info)
def on_trial_result(self, **info):
for callback in self._callbacks:
callback.on_trial_result(**info)
def on_trial_complete(self, **info):
for callback in self._callbacks:
callback.on_trial_complete(**info)
def on_trial_error(self, **info):
for callback in self._callbacks:
callback.on_trial_error(**info)
def on_checkpoint(self, **info):
for callback in self._callbacks:
callback.on_checkpoint(**info)
def on_experiment_end(self, **info):
for callback in self._callbacks:
callback.on_experiment_end(**info)
| [
"noreply@github.com"
] | LuBingtan.noreply@github.com |
43e93fad15954e6b80db34598f7a446137cb7af2 | f1614f3531701a29a33d90c31ab9dd6211c60c6b | /alembic/versions/a27b6e57783e_add_price_field_into_product.py | abe46cef631b593c42bf69355694e6b56fccead4 | [] | no_license | pfpacheco/menu-sun-api | 8a1e11543b65db91d606b2f3098847e3cc5f2092 | 9bf2885f219b8f75d39e26fd61bebcaddcd2528b | refs/heads/master | 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | """add_price_field_into_product
Revision ID: a27b6e57783e
Revises: 7bbdc8a9d923
Create Date: 2020-05-15 16:10:48.441492
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'a27b6e57783e'
down_revision = '7bbdc8a9d923'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('customer', 'active',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=False)
op.add_column('product', sa.Column('list_price', sa.Float(), nullable=True))
op.add_column('product', sa.Column('sale_price', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``product`` price columns and restore
    the MySQL TINYINT(1) type of ``customer.active``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse order of creation, mirroring upgrade().
    for column_name in ('sale_price', 'list_price'):
        op.drop_column('product', column_name)
    op.alter_column('customer', 'active',
               existing_type=sa.Boolean(),
               type_=mysql.TINYINT(display_width=1),
               existing_nullable=False)
    # ### end Alembic commands ###
| [
"pfpacheco@gmail.com"
] | pfpacheco@gmail.com |
0e9112638e0087b9dbc8196f6bbc82002dfc1c6f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_104/257.py | 07a54af8721054894995ab319662d0fe7ec4ce8d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | #!/usr/bin/env python
import sys
import multiprocessing
import itertools
# Diagnostics go to stderr so stdout stays clean for the answer output.
log = sys.stderr
def doit(line):
    """Solve one "Equal Sums" case.

    ``line`` holds the case: the first integer is N, followed by the N set
    values. Searches pairs of subset sizes (smallest combined size first)
    for two subsets with equal sums and returns them as ``(e1, e2)``;
    returns ``()`` when no such pair exists.
    """
    n = [int(i) for i in line.split()]
    n = n[1:n[0]+1]
    log.write("Case: {0}\n".format(n))
    # Idiom fix: feed the combinations generator straight to sorted() and
    # use the built-in `sum` as the key (was: a redundant list
    # comprehension plus `key=lambda x: sum(x)`).
    for c in sorted(itertools.combinations(range(1, len(n)), 2), key=sum):
        log.write("Sizes: {0}\n".format(c))
        for e1 in itertools.combinations(n, c[0]):
            sum_e1 = sum(e1)
            for e2 in itertools.combinations(n, c[1]):
                sum_e2 = sum(e2)
                # e1 != e2 is always true here (c[0] < c[1], so the tuples
                # differ in length) but is kept as a cheap safeguard.
                if sum_e1 == sum_e2 and e1 != e2:
                    return (e1, e2)
    return ()
def main():
    """Read the case count from stdin, solve each case in a process pool,
    and print the two equal-sum subsets (or "Impossible") per case."""
    # NOTE(review): `input` and `output` shadow the builtins; kept as-is.
    input = sys.stdin
    output = sys.stdout
    worker = multiprocessing.Pool(multiprocessing.cpu_count())
    count = int(input.readline().strip())
    # Pool.map preserves input order, so results line up with case numbers.
    for caseno, result in enumerate(worker.map(doit,[line.strip() for line in input][:count])):
        output.write("Case #{0}:\n".format(caseno + 1))
        if len(result) > 0:
            # A found pair: print each subset on its own line.
            for rl in result:
                output.write("{0}\n".format(" ".join([str(i) for i in rl])))
        else:
            output.write("Impossible\n")

if __name__ == '__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a6028b97150017982a67e819999bbc0d43e86378 | 61d248b587c6a89f30caa8bc92daeda4d30cbcd2 | /Crawler/tools/convert.py | cd292c49afd90d4afe1a0b67b660424dedb0e5b6 | [] | no_license | luckyyd/hackathon-ocw | 0f6aab5899c628246a6391fd94116245bc4505e4 | b03fbbe7cad9456c84093b00cb946f2d14de50c5 | refs/heads/master | 2021-01-17T10:26:05.793986 | 2016-04-27T05:47:42 | 2016-04-27T05:47:42 | 52,526,118 | 0 | 1 | null | 2016-04-27T05:47:43 | 2016-02-25T13:26:36 | Python | UTF-8 | Python | false | false | 1,458 | py | # 把爬下内容融合到items.json中
import json
import codecs
import io
from pprint import pprint
def get_duration(olist, url):
    """Return the 'duration' of the first item whose 'courselink' equals
    ``url``; return '' when no item matches."""
    hits = (entry['duration'] for entry in olist if entry['courselink'] == url)
    return next(hits, '')
#input_file = open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\items.json', "r")
#output_file = codecs.open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\output.json', "w", encoding="utf-8")
def downloaded(items, link):
    """Return True when any entry in ``items`` already carries ``link``.

    Args:
        items: iterable of dicts, each with a 'link' key.
        link: URL string to look for.
    """
    # Idiom fix: the manual loop returning True/False re-implemented any().
    return any(item['link'] == link for item in items)
# Merge newly crawled records (out.json, one JSON object per line) into the
# accumulated items.json, skipping links already present, and write the
# combined list to output.json.
#
# Fixes: files are now closed via `with` (they were never closed), and the
# `encoding=` argument to json.load was dropped -- it was removed from the
# json module in Python 3.9 and the file is already opened as UTF-8 text.
with open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\Crawler\infoqsub\out.json', "r", encoding="utf-8") as input_file_1, \
        open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\items.json', "r", encoding="utf-8") as input_file_2, \
        codecs.open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\output.json', "w", encoding="utf-8") as output_file:
    items = json.load(input_file_2)
    lines = input_file_1.readlines()
    # NOTE(review): presumably the next free item_id, continuing after the
    # highest id already assigned in items.json -- confirm.
    i = 32964
    for line in lines:
        # The crawler wrote unescaped backslashes; double them so the line
        # parses as valid JSON.
        line = line.replace('\\', '\\\\')
        item = json.loads(line)
        if not downloaded(items, item['link']):
            item['item_id'] = i
            item['duration'] = ''
            item['enabled'] = True
            items.append(item)
            i += 1
    json.dump(items, output_file, indent=4, ensure_ascii=False, sort_keys=True)
| [
"foamliu@yeah.net"
] | foamliu@yeah.net |
112ef319e46a7d047de72eaa19d85ade85d1b4c9 | f807e5aecbe175e493ea1c47304ceca2817e6083 | /logging_exam/bar_logging.py | 53b4272d799d6cd9d906feec5f680ea020fd6675 | [] | no_license | jbking/logging-custom-for-json | d2a22745488d44fd667cb59a011c0232f531550c | 28613ef67fb0d1a4f7a440dcd838638ef8f9ee78 | refs/heads/master | 2023-03-24T03:47:12.295311 | 2021-03-18T04:44:10 | 2021-03-18T04:53:33 | 348,305,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | import logging
from loguru import logger
_logger = logging.getLogger(__name__)
def log_out(msg, additional_context=None):
    """Emit a demo log line through both loguru and stdlib logging.

    NOTE(review): the ``msg`` parameter is currently unused -- the emitted
    text is hard-coded. Confirm whether callers expect ``msg`` to appear
    in the output.

    :param msg: message text (presently ignored).
    :param additional_context: optional mapping bound to loguru's context
        for the duration of the call.
    """
    if additional_context is None:
        logger.info("logging out")
        _logger.info("logging.logger out")
    else:
        # Bind the extra fields so loguru sinks/formatters can render them.
        with logger.contextualize(**additional_context):
            logger.info("logging out with-in context")
            _logger.info("logging.logger out with-in context")
| [
"yusuke@jbking.org"
] | yusuke@jbking.org |
8e10ece00228060d469cc533d663c551c8f60b8d | 5942e3e75ef7dc22a67b04fb1f12e14658a2093d | /documentation_files/findertools.py | f6431928c62051fbea22ed0766b2c89ba6574eb3 | [] | no_license | the-factory/kdevelop-python | 9e94d2a4d4906a31a4d2a8a08300766e02d41a59 | 1e91f2cb4c94d9455a2ee22fef13df680aeed1ab | refs/heads/master | 2021-01-18T08:57:16.707711 | 2012-04-09T22:37:47 | 2012-04-09T22:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""":platform: Mac
:synopsis: Wrappers around the finder's Apple Events interface.
"""
def launch(file):
    """
    Tell the finder to launch *file*. What launching means depends on the file:
    applications are started, folders are opened and documents are opened in the
    correct application.
    """
    # Documentation stub only -- the Apple Events call is not implemented here.
    pass
def Print(file):
    """
    Tell the finder to print a file. The behaviour is identical to selecting the
    file and using the print command in the finder's file menu.
    """
    # Documentation stub only; capitalised to avoid shadowing builtin print.
    pass
def copy(file,destdir):
    """
    Tell the finder to copy a file or folder *file* to folder *destdir*. The
    function returns an :class:`Alias` object pointing to the new file.
    """
    # Documentation stub only -- no copy is performed here.
    pass
def move(file,destdir):
    """
    Tell the finder to move a file or folder *file* to folder *destdir*. The
    function returns an :class:`Alias` object pointing to the new file.
    """
    # Documentation stub only -- no move is performed here.
    pass
def sleep():
    """
    Tell the finder to put the Macintosh to sleep, if your machine supports it.
    """
    # Documentation stub only.
    pass
def restart():
    """
    Tell the finder to perform an orderly restart of the machine.
    """
    # Documentation stub only.
    pass
| [
"svenbrauch@googlemail.com"
] | svenbrauch@googlemail.com |
48dd64e2d631e341b37a0b429a13766f9a0eb08a | 02fd239748a57ddd163ab411ce28a2b34e0182a9 | /homeassistant/components/bluetooth/__init__.py | f175b01b7980a21a213332fe499cdc9880fb8249 | [
"Apache-2.0"
] | permissive | fredrike/home-assistant | 77d05be0d2fd35dd862c56c7fb1ddde46d61ed05 | e852c9b012f2f949cc08e9498b8a051f362669e9 | refs/heads/dev | 2023-03-05T12:38:26.034307 | 2022-10-13T15:34:45 | 2022-10-13T15:34:45 | 107,095,841 | 2 | 0 | Apache-2.0 | 2023-02-22T06:14:52 | 2017-10-16T07:55:03 | Python | UTF-8 | Python | false | false | 14,372 | py | """The bluetooth integration."""
from __future__ import annotations
from asyncio import Future
from collections.abc import Callable, Iterable
import logging
import platform
from typing import TYPE_CHECKING, cast
import async_timeout
from awesomeversion import AwesomeVersion
from homeassistant.components import usb
from homeassistant.config_entries import (
SOURCE_IGNORE,
SOURCE_INTEGRATION_DISCOVERY,
ConfigEntry,
)
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback as hass_callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr, discovery_flow
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.issue_registry import (
IssueSeverity,
async_create_issue,
async_delete_issue,
)
from homeassistant.loader import async_get_bluetooth
from . import models
from .const import (
ADAPTER_ADDRESS,
ADAPTER_HW_VERSION,
ADAPTER_SW_VERSION,
CONF_ADAPTER,
CONF_DETAILS,
CONF_PASSIVE,
DATA_MANAGER,
DEFAULT_ADDRESS,
DOMAIN,
SOURCE_LOCAL,
AdapterDetails,
)
from .manager import BluetoothManager
from .match import BluetoothCallbackMatcher, IntegrationMatcher
from .models import (
BaseHaScanner,
BluetoothCallback,
BluetoothChange,
BluetoothScanningMode,
BluetoothServiceInfo,
BluetoothServiceInfoBleak,
HaBleakScannerWrapper,
HaBluetoothConnector,
ProcessAdvertisementCallback,
)
from .scanner import HaScanner, ScannerStartError
from .util import adapter_human_name, adapter_unique_name, async_default_adapter
if TYPE_CHECKING:
from bleak.backends.device import BLEDevice
from homeassistant.helpers.typing import ConfigType
__all__ = [
"async_ble_device_from_address",
"async_discovered_service_info",
"async_get_scanner",
"async_last_service_info",
"async_process_advertisements",
"async_rediscover_address",
"async_register_callback",
"async_register_scanner",
"async_track_unavailable",
"async_scanner_count",
"BaseHaScanner",
"BluetoothServiceInfo",
"BluetoothServiceInfoBleak",
"BluetoothScanningMode",
"BluetoothCallback",
"HaBluetoothConnector",
"SOURCE_LOCAL",
]
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)

# Below this Home Assistant OS version the "haos_outdated" repair issue is
# raised (see _async_check_haos / _async_haos_is_new_enough).
RECOMMENDED_MIN_HAOS_VERSION = AwesomeVersion("9.0.dev0")
def _get_manager(hass: HomeAssistant) -> BluetoothManager:
    """Return the integration's BluetoothManager stored in hass.data."""
    manager = hass.data[DATA_MANAGER]
    return cast(BluetoothManager, manager)
@hass_callback
def async_get_scanner(hass: HomeAssistant) -> HaBleakScannerWrapper:
    """Return a HaBleakScannerWrapper.

    This is a wrapper around our BleakScanner singleton that allows
    multiple integrations to share the same BleakScanner.
    """
    wrapper = HaBleakScannerWrapper()
    return wrapper
@hass_callback
def async_scanner_count(hass: HomeAssistant, connectable: bool = True) -> int:
    """Return the number of scanners currently in use."""
    manager = _get_manager(hass)
    return manager.async_scanner_count(connectable)
@hass_callback
def async_discovered_service_info(
    hass: HomeAssistant, connectable: bool = True
) -> Iterable[BluetoothServiceInfoBleak]:
    """Return the discovered devices list."""
    if DATA_MANAGER in hass.data:
        return _get_manager(hass).async_discovered_service_info(connectable)
    # The manager is not set up yet: nothing has been discovered.
    return []
@hass_callback
def async_last_service_info(
    hass: HomeAssistant, address: str, connectable: bool = True
) -> BluetoothServiceInfoBleak | None:
    """Return the last service info for an address."""
    if DATA_MANAGER in hass.data:
        return _get_manager(hass).async_last_service_info(address, connectable)
    # The manager is not set up yet: no service info available.
    return None
@hass_callback
def async_ble_device_from_address(
    hass: HomeAssistant, address: str, connectable: bool = True
) -> BLEDevice | None:
    """Return BLEDevice for an address if its present."""
    if DATA_MANAGER in hass.data:
        return _get_manager(hass).async_ble_device_from_address(address, connectable)
    # The manager is not set up yet: no devices known.
    return None
@hass_callback
def async_address_present(
    hass: HomeAssistant, address: str, connectable: bool = True
) -> bool:
    """Check if an address is present in the bluetooth device list."""
    # Short-circuits to False when the manager has not been set up yet.
    return DATA_MANAGER in hass.data and _get_manager(hass).async_address_present(
        address, connectable
    )
@hass_callback
def async_register_callback(
    hass: HomeAssistant,
    callback: BluetoothCallback,
    match_dict: BluetoothCallbackMatcher | None,
    mode: BluetoothScanningMode,
) -> Callable[[], None]:
    """Register to receive a callback on bluetooth change.

    mode is currently not used as we only support active scanning.
    Passive scanning will be available in the future. The flag
    is required to be present to avoid a future breaking change
    when we support passive scanning.

    Returns a callback that can be used to cancel the registration.
    """
    manager = _get_manager(hass)
    return manager.async_register_callback(callback, match_dict)
async def async_process_advertisements(
    hass: HomeAssistant,
    callback: ProcessAdvertisementCallback,
    match_dict: BluetoothCallbackMatcher,
    mode: BluetoothScanningMode,
    timeout: int,
) -> BluetoothServiceInfoBleak:
    """Process advertisements until callback returns true or timeout expires."""
    # Future resolved with the first advertisement the caller accepts.
    done: Future[BluetoothServiceInfoBleak] = Future()

    @hass_callback
    def _async_discovered_device(
        service_info: BluetoothServiceInfoBleak, change: BluetoothChange
    ) -> None:
        # Guard against a second set_result after the future is resolved.
        if not done.done() and callback(service_info):
            done.set_result(service_info)

    unload = _get_manager(hass).async_register_callback(
        _async_discovered_device, match_dict
    )

    try:
        async with async_timeout.timeout(timeout):
            return await done
    finally:
        # Always unregister, also on timeout or cancellation.
        unload()
@hass_callback
def async_track_unavailable(
    hass: HomeAssistant,
    callback: Callable[[BluetoothServiceInfoBleak], None],
    address: str,
    connectable: bool = True,
) -> Callable[[], None]:
    """Register to receive a callback when an address is unavailable.

    Returns a callback that can be used to cancel the registration.
    """
    manager = _get_manager(hass)
    return manager.async_track_unavailable(callback, address, connectable)
@hass_callback
def async_rediscover_address(hass: HomeAssistant, address: str) -> None:
    """Trigger discovery of devices which have already been seen."""
    manager = _get_manager(hass)
    manager.async_rediscover_address(address)
@hass_callback
def async_register_scanner(
    hass: HomeAssistant, scanner: BaseHaScanner, connectable: bool
) -> CALLBACK_TYPE:
    """Register a BleakScanner."""
    manager = _get_manager(hass)
    return manager.async_register_scanner(scanner, connectable)
@hass_callback
def async_get_advertisement_callback(
    hass: HomeAssistant,
) -> Callable[[BluetoothServiceInfoBleak], None]:
    """Get the advertisement callback."""
    manager = _get_manager(hass)
    return manager.scanner_adv_received
async def async_get_adapter_from_address(
    hass: HomeAssistant, address: str
) -> str | None:
    """Get an adapter by the address."""
    manager = _get_manager(hass)
    return await manager.async_get_adapter_from_address(address)
@hass_callback
def _async_haos_is_new_enough(hass: HomeAssistant) -> bool:
    """Check if the version of Home Assistant Operating System is new enough."""
    # Only warn if a USB adapter is plugged in
    # (i.e. at least one non-ignored config entry for this domain exists).
    if not any(
        entry
        for entry in hass.config_entries.async_entries(DOMAIN)
        if entry.source != SOURCE_IGNORE
    ):
        return True
    # Treat non-HAOS installs, missing version info, or a version at/above
    # the recommended minimum as "new enough".
    if (
        not hass.components.hassio.is_hassio()
        or not (os_info := hass.components.hassio.get_os_info())
        or not (haos_version := os_info.get("version"))
        or AwesomeVersion(haos_version) >= RECOMMENDED_MIN_HAOS_VERSION
    ):
        return True
    return False
@hass_callback
def _async_check_haos(hass: HomeAssistant) -> None:
    """Create or delete the haos_outdated repair issue as appropriate."""
    if _async_haos_is_new_enough(hass):
        # Version is fine (or check does not apply): clear any stale issue.
        async_delete_issue(hass, DOMAIN, "haos_outdated")
        return
    async_create_issue(
        hass,
        DOMAIN,
        "haos_outdated",
        is_fixable=False,
        severity=IssueSeverity.WARNING,
        learn_more_url="/config/updates",
        translation_key="haos_outdated",
    )
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the bluetooth integration."""
    integration_matcher = IntegrationMatcher(await async_get_bluetooth(hass))
    integration_matcher.async_setup()
    manager = BluetoothManager(hass, integration_matcher)
    await manager.async_setup()
    # Stop the manager cleanly on shutdown and expose it via hass.data and
    # the models module singleton.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, manager.async_stop)
    hass.data[DATA_MANAGER] = models.MANAGER = manager
    adapters = await manager.async_get_bluetooth_adapters()

    async_migrate_entries(hass, adapters)
    await async_discover_adapters(hass, adapters)

    async def _async_rediscover_adapters() -> None:
        """Rediscover adapters when a new one may be available."""
        # Bypass the cache: a new adapter may just have been plugged in.
        discovered_adapters = await manager.async_get_bluetooth_adapters(cached=False)
        _LOGGER.debug("Rediscovered adapters: %s", discovered_adapters)
        await async_discover_adapters(hass, discovered_adapters)

    # Debounce rediscovery so a burst of USB events triggers one scan.
    discovery_debouncer = Debouncer(
        hass, _LOGGER, cooldown=5, immediate=False, function=_async_rediscover_adapters
    )

    def _async_trigger_discovery() -> None:
        # There are so many bluetooth adapter models that
        # we check the bus whenever a usb device is plugged in
        # to see if it is a bluetooth adapter since we can't
        # tell if the device is a bluetooth adapter or if its
        # actually supported unless we ask DBus if its now
        # present.
        _LOGGER.debug("Triggering bluetooth usb discovery")
        hass.async_create_task(discovery_debouncer.async_call())

    cancel = usb.async_register_scan_request_callback(hass, _async_trigger_discovery)
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, hass_callback(lambda event: cancel())
    )
    # Wait to check until after start to make sure
    # that the system info is available.
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STARTED,
        hass_callback(lambda event: _async_check_haos(hass)),
    )
    return True
@hass_callback
def async_migrate_entries(
    hass: HomeAssistant, adapters: dict[str, AdapterDetails]
) -> None:
    """Migrate config entries to support multiple adapters.

    Entries without a unique_id predate multi-adapter support; assign them
    the address of their configured (or default) adapter.
    """
    current_entries = hass.config_entries.async_entries(DOMAIN)
    default_adapter = async_default_adapter()

    for entry in current_entries:
        if entry.unique_id:
            # Already migrated.
            continue
        address = DEFAULT_ADDRESS
        adapter = entry.options.get(CONF_ADAPTER, default_adapter)
        if adapter in adapters:
            address = adapters[adapter][ADAPTER_ADDRESS]
        hass.config_entries.async_update_entry(
            entry, title=adapter_unique_name(adapter, address), unique_id=address
        )
async def async_discover_adapters(
    hass: HomeAssistant,
    adapters: dict[str, AdapterDetails],
) -> None:
    """Discover adapters and start config flows for each of them."""
    if platform.system() == "Windows":
        # We currently do not have a good way to detect if a bluetooth device is
        # available on Windows. We will just assume that it is not unless they
        # actively add it.
        return

    for adapter, details in adapters.items():
        discovery_flow.async_create_flow(
            hass,
            DOMAIN,
            context={"source": SOURCE_INTEGRATION_DISCOVERY},
            data={CONF_ADAPTER: adapter, CONF_DETAILS: details},
        )
async def async_update_device(
    hass: HomeAssistant, entry: ConfigEntry, adapter: str
) -> None:
    """Update device registry entry.

    The physical adapter can change from hci0/hci1 on reboot
    or if the user moves around the usb sticks so we need to
    update the device with the new location so they can
    figure out where the adapter is.
    """
    manager: BluetoothManager = hass.data[DATA_MANAGER]
    adapters = await manager.async_get_bluetooth_adapters()
    details = adapters[adapter]
    registry = dr.async_get(manager.hass)
    # Create or refresh the registry entry keyed by the adapter's address.
    registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        name=adapter_human_name(adapter, details[ADAPTER_ADDRESS]),
        connections={(dr.CONNECTION_BLUETOOTH, details[ADAPTER_ADDRESS])},
        sw_version=details.get(ADAPTER_SW_VERSION),
        hw_version=details.get(ADAPTER_HW_VERSION),
    )
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a config entry for a bluetooth scanner."""
    # The entry's unique_id is the adapter address (set by migration/flow).
    address = entry.unique_id
    assert address is not None
    adapter = await async_get_adapter_from_address(hass, address)
    if adapter is None:
        raise ConfigEntryNotReady(
            f"Bluetooth adapter {adapter} with address {address} not found"
        )
    passive = entry.options.get(CONF_PASSIVE)
    mode = BluetoothScanningMode.PASSIVE if passive else BluetoothScanningMode.ACTIVE
    scanner = HaScanner(hass, mode, adapter, address)
    try:
        scanner.async_setup()
    except RuntimeError as err:
        # Raising ConfigEntryNotReady lets Home Assistant retry later.
        raise ConfigEntryNotReady(
            f"{adapter_human_name(adapter, address)}: {err}"
        ) from err
    # Feed every advertisement this scanner sees into the manager.
    info_callback = async_get_advertisement_callback(hass)
    entry.async_on_unload(scanner.async_register_callback(info_callback))
    try:
        await scanner.async_start()
    except ScannerStartError as err:
        raise ConfigEntryNotReady from err
    entry.async_on_unload(async_register_scanner(hass, scanner, True))
    await async_update_device(hass, entry, adapter)
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = scanner
    # Reload the entry when its options change.
    entry.async_on_unload(entry.add_update_listener(async_update_listener))
    return True
async def async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle options update by reloading the entry."""
    entry_id = entry.entry_id
    await hass.config_entries.async_reload(entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, stopping its scanner."""
    domain_data = hass.data[DOMAIN]
    scanner: HaScanner = domain_data.pop(entry.entry_id)
    await scanner.async_stop()
    return True
| [
"noreply@github.com"
] | fredrike.noreply@github.com |
ad3e0221462b158b16454706a9dd1f8ccf500736 | 7a649b4969eecc48a13924c610409f32502e945f | /workspace_tools/data/support.py | d46c99ee9bc24d6d2c330faaf52eccb1eb69432c | [
"Apache-2.0"
] | permissive | giapdangle/mbed | 3434cfa485220a3997653742e85a020ab6eb488a | 4a6e8aa5f6f6ee6749dbf7ff4dade7501f73c996 | refs/heads/master | 2020-12-29T02:07:13.522820 | 2013-04-11T16:45:02 | 2013-04-11T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | DEFAULT_SUPPORT = {
"LPC1768" : ["ARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"],
"LPC11U24": ["ARM", "uARM"],
"LPC2368" : ["ARM"],
"KL25Z" : ["ARM", "GCC_CW"],
"LPC812" : ["uARM"],
}
CORTEX_ARM_SUPPORT = {
"LPC1768" : ["ARM"],
"LPC11U24": ["ARM", "uARM"],
"KL25Z" : ["ARM"],
"LPC812" : ["uARM"],
} | [
"emilmont@gmail.com"
] | emilmont@gmail.com |
5c1c5c7f5bf537882d403059f0342a0d9cb50424 | b8b26feac86b66b0b534996cf9c3fbf7ec660240 | /aoc/2017/p3-2.py | b3003e4e3d06cb26545a197036c34205d9cd62a1 | [
"MIT"
] | permissive | neizod/problems | 775fffe32166c5b124d0e4c973b8d0aba7f3900b | 180aaf7d0ecfc3d0dd5f1d4345a7a4d83b1b884a | refs/heads/master | 2021-07-08T12:30:31.100320 | 2021-05-26T09:34:19 | 2021-05-26T09:34:19 | 6,245,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | #!/usr/bin/env python3
# Global spiral grid, grown one ring at a time by expand(); starts as the
# single centre cell holding the value 1.
grid = [[1]]
def iter_ring(size):
    """Yield every border cell of a size x size grid exactly once.

    Order matches the spiral fill: (size-1, size-2) down to (size-1, 1),
    then (size-1, 0) across to (1, 0), then (0, 0) through (0, size-2),
    then (0, size-1) through (size-2, size-1), finishing on the corner
    (size-1, size-1).
    """
    last = size - 1
    for y in range(size - 2, 0, -1):
        yield last, y
    for x in range(last, 0, -1):
        yield x, 0
    for y in range(0, last):
        yield 0, y
    for x in range(0, last):
        yield x, last
    yield last, last
def iter_surround(x, y, size):
    """Yield the global grid's values for the up-to-8 in-bounds neighbours
    of cell (x, y)."""
    offsets = [-1, 0, 1]
    for dx in offsets:
        for dy in offsets:
            if dx == 0 and dy == 0:
                continue
            nx, ny = x + dx, y + dy
            if 0 <= nx < size and 0 <= ny < size:
                yield grid[ny][nx]
def expand(grid):
    """Grow the spiral grid by one ring and fill it, yielding each new value.

    Pads every existing row with '?' on both ends, adds a '?' row above and
    below, then walks the new outer ring (via iter_ring) and stores in each
    cell the sum of its already-filled neighbours.
    """
    for line in grid:
        line[:0] = ['?']
        line[len(line):] = ['?']
    # All rows have the same width, so the last `line` gives the new width.
    size = len(line)
    grid[:0] = [['?' for _ in range(size)]]
    grid[len(grid):] = [['?' for _ in range(size)]]
    size = len(grid)
    for x, y in iter_ring(size):
        # Bug fix: the original compared with `v is not '?'`, relying on
        # string-literal interning (identity); use equality instead.
        grid[y][x] = sum(v for v in iter_surround(x, y, size) if v != '?')
        yield grid[y][x]
def find_larger(n):
    """Return the first value written into the spiral grid that exceeds n.

    Keeps expanding the global grid ring by ring until expand() yields a
    value strictly greater than n.
    """
    while True:
        for m in expand(grid):
            if m > n:
                return m
if __name__ == '__main__':
    # Read the puzzle input from stdin and print the answer.
    print(find_larger(int(input())))
    # Debug dump of the final grid state, right-aligned in 6-char columns.
    for line in grid:
        print(' '.join('{:>6}'.format(x) for x in line))
| [
"neizod@gmail.com"
] | neizod@gmail.com |
93bf28364394d57d3c7f4a0af0218e1624b63385 | 429d5ec5f3d4941391807f2a46582938698f82dc | /doc/Homeworks/Solutions/CodesPart2firstmidterm.py | 536c95a48aeffc6437934bd27ab13e3e29d4e9e0 | [
"CC0-1.0"
] | permissive | mhjensen/Physics321 | b24548bbe69633b4618f39ed0b0cf2eb94f10266 | 91970ed5502de694e4812dc77d886c02701f300e | refs/heads/master | 2023-08-04T06:29:22.148776 | 2023-07-24T20:38:14 | 2023-07-24T20:38:14 | 190,315,277 | 30 | 42 | CC0-1.0 | 2021-07-13T17:34:44 | 2019-06-05T02:52:53 | HTML | UTF-8 | Python | false | false | 1,248 | py | import numpy as np
from math import *
import matplotlib.pyplot as plt
# The acceleration a = F/m with F = -dV/dx
def acceleration(x):
    """Acceleration a = F/m with F = -dV/dx for V(x) = -10/x + 3/x**2 + x.

    Uses the module-level mass ``m`` (defined below before first call).
    """
    return (-1/m)*((10/(x**2)) + (-2*3/(x**3)) + 1)
def potential_energy(x):
    """Potential energy V(x) = -10/x + 3/x**2 + x."""
    return -10.0/x +3.0/(x**2) + 1.0*x
# initial time
t0 = 0
#final time
tf = 10.0
# time step for the integration below
dt = 0.00001
# set up array for time steps
t = np.arange(t0,tf+dt,dt)
# mass and potential parameters part 2
# NOTE(review): V0 and d are defined but never used below -- confirm
# whether they belong to a different part of the assignment.
m = 1.0
V0 = 0.1
d = 0.1
# initial values
v0 = 0.0
x0 = 2.0
x = np.zeros(len(t))
v = np.zeros(len(t))
v[0] = v0
x[0] = x0
# integrate v and x with the velocity-Verlet scheme: the position update
# uses the current acceleration, the velocity update the average of the
# accelerations at t_i and t_{i+1}.
for i in range(len(t)-1):
    a_i = acceleration(x[i])
    x[i+1] = x[i] + dt*v[i] + 0.5*a_i*(dt**2)
    a_ip1 = acceleration(x[i+1])
    v[i+1] = v[i] + 0.5*dt*(a_i + a_ip1)
plt.plot(t,x)
plt.show()
# now use the arrays of x and v to test energy conservation
# define potential, kinetic and total energies
Ekin = np.zeros(len(t))
Epot = np.zeros(len(t))
Etot = np.zeros(len(t))
# NOTE(review): kinetic energy is written as v**2/(2m) instead of the
# usual m*v**2/2; the two agree only because m = 1.0 here -- confirm
# before changing m.
Ekin[0] = 0.5*v0*v0/m
Epot[0] = potential_energy(x0)
Etot[0] = Ekin[0]+Epot[0]
ekin = epot =0.0
# set up total energy as function of time
for i in range(1,len(t)):
    ekin = 0.5*v[i]*v[i]/m
    Ekin[i] += ekin
    epot = potential_energy(x[i])
    Epot[i] += epot
    Etot[i] += ekin+epot
plt.plot(t,Etot)
plt.show()
| [
"morten.hjorth-jensen@fys.uio.no"
] | morten.hjorth-jensen@fys.uio.no |
2bf0f812aba10ee36b3e812498abf89e69bdafa1 | 3b4e8cc46c1373d36150ad839d2f6539ea8f92b3 | /qmcpy/accumulate_data/_accumulate_data.py | 9b04e7fbd1ad2b0356f426d5bc5ce79ae29a46f9 | [
"Apache-2.0"
] | permissive | kachiann/QMCSoftware | a244efb085c95924ee80a5aa8b8480ea4a9f8e72 | 0ed9da2f10b9ac0004c993c01392b4c86002954c | refs/heads/master | 2023-01-11T12:01:19.527177 | 2020-10-29T03:49:47 | 2020-10-29T03:49:47 | 313,940,226 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from ..true_measure._true_measure import TrueMeasure
from ..util import ParameterError, MethodImplementationError, _univ_repr, DimensionError
class AccumulateData(object):
    """ Accumulated Data abstract class. DO NOT INSTANTIATE. """

    # Attributes every concrete subclass must set before calling
    # super().__init__(), paired with the text used in the error message.
    _REQUIRED_ATTRIBUTES = [
        ('stopping_criterion', 'self.stopping_criterion (a StoppingCriterion)'),
        ('integrand', 'self.integrand (an Integrand)'),
        ('measure', 'self.measure (a TrueMeasure)'),
        ('distribution', 'self.distribution (a DiscreteDistribution)'),
        ('solution', 'self.solution'),
        ('n_total', 'self.n_total (total number of samples)'),
    ]

    def __init__(self):
        """ Initialize data instance """
        # Validate the subclass contract. Checks run in declaration order so
        # the first missing attribute is the one reported -- identical
        # behavior and messages to the previous chain of hasattr checks.
        prefix = 'A concrete implementation of AccumulateData must have '
        for attribute, description in self._REQUIRED_ATTRIBUTES:
            if not hasattr(self, attribute):
                raise ParameterError(prefix + description)
        if not hasattr(self, 'parameters'):
            # Optional: names rendered by __repr__; default to none.
            self.parameters = []

    def update_data(self):
        """ ABSTRACT METHOD to update the accumulated data."""
        raise MethodImplementationError(self, 'update_data')

    def __repr__(self):
        """Summary string: solution, component QMC objects, then parameters."""
        string = "Solution: %-15.4f\n" % (self.solution)
        for qmc_obj in [self.integrand, self.distribution, self.measure, self.stopping_criterion]:
            if qmc_obj:
                string += str(qmc_obj)+'\n'
        string += _univ_repr(self, 'AccumulateData', self.parameters + ['time_integrate'])
        return string
| [
"agsorokin3@gmail.com"
] | agsorokin3@gmail.com |
9dc8c6f47259ee29a2b53f1fabf5e6d90e53eb36 | ea8a9889534df7323b3d159ff4ba9563191b8eba | /phantomcli/scripts/util.py | 1dfa842bb230f1dcc79c38592d4b01c65ae2acaa | [
"MIT"
] | permissive | the16thpythonist/phantom-cli | 946b7fb69fd4917ba10f46fe2ecbd3884ee49513 | 921588dda66bf84bf79569493f4e4312b59cd56d | refs/heads/master | 2023-01-11T21:22:28.962704 | 2020-01-15T10:07:16 | 2020-01-15T10:07:16 | 171,679,615 | 2 | 2 | null | 2022-12-26T20:47:38 | 2019-02-20T13:37:21 | Python | UTF-8 | Python | false | false | 3,142 | py | # standard library imports
import logging
from collections import defaultdict
# local imports
from phantomcli.network import PhantomSocket
# ##############
# LOGGING CONFIG
# ##############

# Translation table mapping the command-line log-level string to the logging
# module's constant. A defaultdict avoids an if/elif chain: any string that
# is not one of the keys silently falls back to DEBUG.
kwargs = {
    'DEBUG': logging.DEBUG,
    'INFO': logging.INFO,
    'WARNING': logging.WARNING,
    'ERROR': logging.ERROR
}
logging_config = defaultdict(lambda: logging.DEBUG, **kwargs)
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

###########################
# HANDLING TRANSFER FORMATS
###########################
# Default dict so a passed format string never has to be validated: a valid
# string maps to its canonical form, anything else falls back to 'P16'.
# ('8' and '8R' are accepted shorthands for 'P8' and 'P8R'.)
_formats = {
    'P16': 'P16',
    'P16R': 'P16R',
    'P10': 'P10',
    'P8': 'P8',
    '8': 'P8',
    'P8R': 'P8R',
    '8R': 'P8R',
}
formats = defaultdict(lambda: 'P16', **_formats)

##############################
# HANDLING ACQUISITION MODES #
##############################
# Mapping from the user-facing mode identifiers (short and long forms) to
# the PhantomSocket MODE_* constants expected by the phantom socket API.
_modes = {
    'S': PhantomSocket.MODE_STANDARD,
    'standard': PhantomSocket.MODE_STANDARD,
    'SB': PhantomSocket.MODE_STANDARD_BINNED,
    'standard-binned': PhantomSocket.MODE_STANDARD_BINNED,
    'HS': PhantomSocket.MODE_HIGH_SPEED,
    'high-speed': PhantomSocket.MODE_HIGH_SPEED,
    'HSB': PhantomSocket.MODE_HIGH_SPEED_BINNED,
    'high-speed-binned': PhantomSocket.MODE_HIGH_SPEED_BINNED
}

# ##################
# COMMAND HELP TEXTS
# ##################
# Many commands share the same options, so their help texts are defined once
# here instead of being copy-pasted into each command.
format_help = "The transfer format to be used, when transmitting image data. " \
              "The possible options are 'P10', 'P16' and 'P8'. Default is 'P16' with 16 bit per pixel"

log_help = "The level of logging to be displayed in the console output. The options are 'ERROR' for only displaying " \
           "error messages, 'INFO' for log messages marking important steps in the program execution or 'DEBUG' " \
           "for displaying all log messages. Default is 'ERROR'"

xnetwork_help = "Setting this flag will enable the transmission using the 10G interface. Make sure, that you are " \
                "indeed connected using the 10G ethernet interface before setting this flag."
| [
"jonseb1998@gmail.com"
] | jonseb1998@gmail.com |
578c8787ceceb57fd6f1b00ccf5a18c45bce3112 | d62863d049c0206bfa744ca4c9e886030bfce1ab | /core/sw_content/api/urls.py | bb16923771941697e7d045724f76f110488d8588 | [] | no_license | jurgeon018/box | 51738b99e640202936ed72357d3c67d2517e589b | 50b84a0afa73fab85a00eef54194f3c126d15397 | refs/heads/master | 2021-07-17T13:37:08.665292 | 2020-10-15T09:50:33 | 2020-10-15T09:50:33 | 232,013,297 | 0 | 1 | null | 2020-03-27T02:16:44 | 2020-01-06T03:01:34 | Python | UTF-8 | Python | false | false | 159 | py | from django.urls import path, include
from .views import *
# URL routes for the sw_content API.
# NOTE(review): contents_list and content come from the wildcard
# `from .views import *` above -- confirm they are exported by that module.
urlpatterns = [
    path('contents/', contents_list),
    path('contents/<code>/', content),
]
| [
"jurgeon018@gmail.com"
] | jurgeon018@gmail.com |
fe51faa3136ef571cd703ca4d4e1d4e76927e009 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /pytools/map_partitioning/bin/logfile_utils/show_trajectory.py | d9f9db720d302afbb88b0a3504799fb9d6148ab7 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | from pyTklib import tklib_log_gridmap
import carmen_maptools
from sys import argv
from pylab import *
def load_logfile(filename):
    """Parse a whitespace-delimited log of ``class x y`` rows.

    Each line must contain three numbers: a class label and an (x, y)
    position. Returns three parallel lists of floats (C, X, Y).

    Fix: the file is now opened with a ``with`` block, so it is closed
    even when a malformed line raises (the old code leaked the handle
    on error).
    """
    C = []
    X = []
    Y = []
    with open(filename, 'r') as filein:
        for line in filein:
            c, x, y = line.split()
            C.append(float(c))
            X.append(float(x))
            Y.append(float(y))
    return C, X, Y
def show_trajectory(map_file, log_file):
    """Plot a carmen map with the labelled trajectory from log_file on top.

    Points are grouped by their integer class label and drawn with a
    per-class colour/marker combination; the start point is marked green
    and the end point red.
    """
    # Load the map and draw it as the background.
    gridmap = tklib_log_gridmap()
    gridmap.load_carmen_map(map_file)
    themap = gridmap.to_probability_map_carmen()
    carmen_maptools.plot_map(themap, gridmap.x_size, gridmap.y_size)

    L, X, Y = load_logfile(log_file)
    # One marker/colour per integer label.
    # NOTE(review): labels >= 10 would raise IndexError below -- confirm
    # the label range before relying on this.
    pltypes = ['o','^','<','>','s','d','p','h','x', 'o']
    plcolors = ['r','g','b','m','k','y','c','r','g','b']

    # Group the points by label. Bug fix: the original try/except-KeyError
    # version created an empty list on first sight of a label but never
    # appended that first point, silently dropping it.
    XHash = {}
    YHash = {}
    for i in range(len(L)):
        XHash.setdefault(L[i], []).append(X[i])
        YHash.setdefault(L[i], []).append(Y[i])

    for key in XHash:
        plot(XHash[key], YHash[key], plcolors[int(key)] + pltypes[int(key)])

    # Mark the trajectory's start (green) and end (red).
    plot([X[0]], [Y[0]], 'go')
    plot([X[len(X)-1]], [Y[len(Y)-1]], 'ro')
    show()
# Python 2 script entry point (note the print statement): expects the map
# file and the logfile as the two command-line arguments.
if __name__=="__main__":
    if(len(argv)==3):
        show_trajectory(argv[1], argv[2])
    else:
        print "usage:\n\t>>python show_trajectory.py map_file emma_logfile"
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
0725144ab0336ebe177e178f18b16ccb28c97f24 | ea5b878376318675931f21ffda41c5914ad0e382 | /keras/optimizers/optimizer_experimental/rmsprop.py | f0ae4683563940c788925ed6817093352ec74525 | [
"Apache-2.0"
] | permissive | Wajih-O/keras | 44089847c6f284b2c2150da8530c5fe05c2a8bb5 | 9628af85a0a2cb04cf433b1ad991017b70ae2005 | refs/heads/master | 2022-03-03T15:20:18.045765 | 2022-02-19T00:16:17 | 2022-02-19T00:16:55 | 125,854,516 | 0 | 0 | null | 2018-03-19T12:32:07 | 2018-03-19T12:32:07 | null | UTF-8 | Python | false | false | 7,968 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RMSprop optimizer implementation."""
from keras.optimizers.optimizer_experimental import optimizer
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
@generic_utils.register_keras_serializable()
class RMSprop(optimizer.Optimizer):
  r"""Optimizer that implements the RMSprop algorithm.

  The gist of RMSprop is to:

  - Maintain a moving (discounted) average of the square of gradients
  - Divide the gradient by the root of this average

  This implementation of RMSprop uses plain momentum, not Nesterov momentum.
  The centered version additionally maintains a moving average of the
  gradients, and uses that average to estimate the variance.

  Args:
    learning_rate: Initial value for the learning rate:
      either a floating point value,
      or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
      Defaults to 0.001.
    rho: float, defaults to 0.9. Discounting factor for the old gradients.
    momentum: float, defaults to 0.0. If not 0.0., the optimizer tracks the
      momentum value, with a decay rate equals to `1 - momentum`.
    epsilon: A small constant for numerical stability, added to the
      denominator before dividing by the root-mean-square of the gradients.
      Defaults to 1e-7.
    centered: Boolean. If `True`, gradients are normalized by the estimated
      variance of the gradient; if False, by the uncentered second moment.
      Setting this to `True` may help with training, but is slightly more
      expensive in terms of computation and memory. Defaults to `False`.
    clipnorm: see the `clipnorm` argument of `optimizer_experimental.Optimizer`.
    clipvalue: see the `clipvalue` argument of
      `optimizer_experimental.Optimizer`.
    global_clipnorm: see the `global_clipnorm` argument of
      `optimizer_experimental.Optimizer`.
    use_ema: see the `use_ema` argument of `optimizer_experimental.Optimizer`.
    ema_momentum: see the `ema_momentum` argument of
      `optimizer_experimental.Optimizer`.
    ema_overwrite_frequency: see the `ema_overwrite_frequency` argument of
      `optimizer_experimental.Optimizer`.
    jit_compile: see the `jit_compile` argument of
      `optimizer_experimental.Optimizer`.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to `"RMSprop"`.
    **kwargs: see the `**kwargs` argument of `optimizer_experimental.Optimizer`.

  Usage:

  >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)
  >>> var1 = tf.Variable(10.0)
  >>> loss = lambda: (var1 ** 2) / 2.0  # d(loss) / d(var1) = var1
  >>> step_count = opt.minimize(loss, [var1]).numpy()
  >>> var1.numpy()
  9.683772

  Reference:
    - [Hinton, 2012](
      http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
  """

  def __init__(self,
               learning_rate=0.001,
               rho=0.9,
               momentum=0.0,
               epsilon=1e-7,
               centered=False,
               clipnorm=None,
               clipvalue=None,
               global_clipnorm=None,
               use_ema=False,
               ema_momentum=0.99,
               ema_overwrite_frequency=100,
               jit_compile=False,
               name='RMSprop',
               **kwargs):
    super(RMSprop, self).__init__(
        clipnorm=clipnorm,
        clipvalue=clipvalue,
        global_clipnorm=global_clipnorm,
        use_ema=use_ema,
        ema_momentum=ema_momentum,
        ema_overwrite_frequency=ema_overwrite_frequency,
        jit_compile=jit_compile,
        name=name,
        **kwargs)
    self._learning_rate = self._build_learning_rate(learning_rate)
    self.rho = rho
    self.momentum = momentum
    self.epsilon = epsilon
    self.centered = centered

  def build(self, var_list):
    """Create one velocity slot per variable, plus momentum / average-gradient
    slots when the corresponding options are enabled."""
    super().build(var_list)
    if hasattr(self, '_built') and self._built:
      return
    self._built = True
    self._velocities = []
    for var in var_list:
      self._velocities.append(
          self.add_variable_from_reference(var, 'velocity'))

    self._momentums = []
    if self.momentum > 0:
      for var in var_list:
        self._momentums.append(
            self.add_variable_from_reference(var, 'momentum'))

    self._average_gradients = []
    if self.centered:
      for var in var_list:
        self._average_gradients.append(
            self.add_variable_from_reference(var, 'average_gradient'))

  def update_step(self, gradient, variable):
    """Update step given gradient and the associated model variable."""
    if self._var_key(variable) not in self._index_dict:
      raise KeyError(f'Optimizer cannot recognize variable {variable.name}, '
                     f'this usually means you are calling an optimizer '
                     f'previously used on a different model. Please try '
                     f'creating a new optimizer instance.')
    lr = tf.cast(self.learning_rate, variable.dtype)

    var_key = self._var_key(variable)
    velocity = self._velocities[self._index_dict[var_key]]
    momentum = None
    if self.momentum > 0:
      momentum = self._momentums[self._index_dict[var_key]]
    average_grad = None
    if self.centered:
      average_grad = self._average_gradients[self._index_dict[var_key]]

    rho = self.rho

    if isinstance(gradient, tf.IndexedSlices):
      # Sparse gradients.
      velocity.assign(rho * velocity)
      velocity.scatter_add(tf.IndexedSlices(
          tf.square(gradient.values) * (1 - rho), gradient.indices))
      if self.centered:
        # The centered variant tracks the *first* moment of the gradient
        # (its moving mean), not the squared gradient; variance is then
        # estimated as velocity - mean(grad)^2.
        average_grad.assign(rho * average_grad)
        average_grad.scatter_add(
            tf.IndexedSlices(
                gradient.values * (1 - rho), gradient.indices))
        velocity.assign_add(-tf.square(average_grad))
      velocity_value = tf.gather(velocity, gradient.indices)
      transformed_grad = tf.IndexedSlices(
          gradient.values / (tf.sqrt(velocity_value) + self.epsilon),
          gradient.indices)

      if self.momentum > 0:
        momentum.assign(self.momentum * momentum)
        momentum.scatter_add(transformed_grad)
        variable.assign_add(-lr * momentum)
      else:
        variable.scatter_add(
            tf.IndexedSlices(-lr * transformed_grad.values,
                             transformed_grad.indices))
    else:
      # Dense gradients.
      velocity.assign(rho * velocity + (1 - rho) * tf.square(gradient))
      if self.centered:
        # Moving mean of the gradient (first moment), not of its square.
        average_grad.assign(rho * average_grad +
                            (1 - rho) * gradient)
        velocity.assign_add(-tf.square(average_grad))
      transformed_grad = gradient / (tf.sqrt(velocity) + self.epsilon)
      if self.momentum > 0:
        momentum.assign(self.momentum * momentum + transformed_grad)
        variable.assign_add(-lr * momentum)
      else:
        variable.assign_add(-lr * transformed_grad)

  def get_config(self):
    config = super(RMSprop, self).get_config()

    config.update({
        'learning_rate': self._serialize_hyperparameter(self._learning_rate),
        'rho': self.rho,
        'momentum': self.momentum,
        'epsilon': self.epsilon,
        'centered': self.centered,
    })
    return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
862419c52e382090fdc0f8bc88d9d9b50545d941 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /PwGFjiSG3kXzp8rjw_0.py | 2c3f3f4190281dfb4dc976c0f2a808f87fb5bbdb | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | """
Write a function that returns the number of users in a chatroom based on the
following rules:
1. If there is no one, return `"no one online"`.
2. If there is 1 person, return `"user1 online"`.
3. If there are 2 people, return `user1 and user2 online"`.
4. If there are `n>2` people, return the first two names and add `"and n-2 more online"`.
For example, if there are 5 users, return:
"user1, user2 and 3 more online"
### Examples
chatroom_status([]) ➞ "no one online"
chatroom_status(["paRIE_to"]) ➞ "paRIE_to online"
chatroom_status(["s234f", "mailbox2"]) ➞ "s234f and mailbox2 online"
chatroom_status(["pap_ier44", "townieBOY", "panda321", "motor_bike5", "sandwichmaker833", "violinist91"])
➞ "pap_ier44, townieBOY and 4 more online"
### Notes
N/A
"""
def chatroom_status(users):
if len(users)>2:
return ', '.join(u for u in users[:2])+' and '+str(len(users)-2)+' more online'
return ' and '.join([u for u in users])+' online' if users else 'no one online'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7826493709d5a6a51af0f7209ab65ba1793cbebc | dde0d75db42c19390f2625a7888586e4d2a14fd7 | /devel/lib/python2.7/dist-packages/cob_object_detection_msgs/msg/_AcquireObjectImageActionFeedback.py | d03d53c76fce7f6174de8350f9d81778cbd1ce33 | [] | no_license | dhemp09/uml-robotics | 16460efe8195a3f9a6a8296047f4fd4d9df0de80 | 862132e00e221b0a86bc283e7568efa984be673f | refs/heads/master | 2020-03-26T09:44:04.033762 | 2018-08-15T18:11:18 | 2018-08-15T18:11:18 | 144,762,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | /home/dan/dan_ws/devel/.private/cob_object_detection_msgs/lib/python2.7/dist-packages/cob_object_detection_msgs/msg/_AcquireObjectImageActionFeedback.py | [
"dhemp09@gmail.com"
] | dhemp09@gmail.com |
56701fd811a38660046b136f603979a0cef5719a | 964f2882117ff656d7a2757c233c6dd88226d975 | /services/catalog/src/simcore_service_catalog/core/application.py | 9738689b9d5b75e0d9567c716f0419f4c9c74665 | [
"MIT"
] | permissive | ignapas/osparc-simcore | a002dd47d7689af9c1c650eea33e31add2b182c1 | cb62e56b194265a907f260f3071c55a65f569823 | refs/heads/master | 2023-01-22T08:55:32.580775 | 2022-12-09T15:57:36 | 2022-12-09T15:57:36 | 170,852,656 | 0 | 0 | MIT | 2023-01-09T05:03:04 | 2019-02-15T11:12:34 | Python | UTF-8 | Python | false | false | 3,437 | py | import logging
import time
from typing import Callable, Optional
from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.gzip import GZipMiddleware
from servicelib.fastapi.openapi import override_fastapi_openapi_method
from starlette import status
from starlette.exceptions import HTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from ..api.errors.http_error import (
http_error_handler,
make_http_error_handler_for_exception,
)
from ..api.errors.validation_error import http422_error_handler
from ..api.root import router as api_router
from ..api.routes.health import router as health_router
from ..meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY
from ..services.function_services import setup_function_services
from .events import (
create_start_app_handler,
create_stop_app_handler,
on_shutdown,
on_startup,
)
from .settings import AppSettings, BootModeEnum
logger = logging.getLogger(__name__)
def init_app(settings: Optional[AppSettings] = None) -> FastAPI:
if settings is None:
settings = AppSettings.create_from_envs()
assert settings # nosec
logging.basicConfig(level=settings.CATALOG_LOG_LEVEL.value)
logging.root.setLevel(settings.CATALOG_LOG_LEVEL.value)
logger.debug(settings.json(indent=2))
app = FastAPI(
debug=settings.SC_BOOT_MODE
in [BootModeEnum.DEBUG, BootModeEnum.DEVELOPMENT, BootModeEnum.LOCAL],
title=PROJECT_NAME,
description=SUMMARY,
version=API_VERSION,
openapi_url=f"/api/{API_VTAG}/openapi.json",
docs_url="/dev/doc",
redoc_url=None, # default disabled
)
override_fastapi_openapi_method(app)
app.state.settings = settings
setup_function_services(app)
# events
app.add_event_handler("startup", on_startup)
app.add_event_handler("startup", create_start_app_handler(app))
app.add_event_handler("shutdown", on_shutdown)
app.add_event_handler("shutdown", create_stop_app_handler(app))
# exception handlers
app.add_exception_handler(HTTPException, http_error_handler)
app.add_exception_handler(RequestValidationError, http422_error_handler)
# SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy
app.add_exception_handler(
NotImplementedError,
make_http_error_handler_for_exception(
status.HTTP_501_NOT_IMPLEMENTED, NotImplementedError
),
)
app.add_exception_handler(
Exception,
make_http_error_handler_for_exception(
status.HTTP_500_INTERNAL_SERVER_ERROR, Exception
),
)
# Routing
# healthcheck at / and at /v0/
app.include_router(health_router)
# api under /v*
app.include_router(api_router, prefix=f"/{API_VTAG}")
# middleware to time requests (ONLY for development)
if settings.SC_BOOT_MODE != BootModeEnum.PRODUCTION:
async def _add_process_time_header(request: Request, call_next: Callable):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
app.add_middleware(BaseHTTPMiddleware, dispatch=_add_process_time_header)
# gzip middleware
app.add_middleware(GZipMiddleware)
return app
| [
"noreply@github.com"
] | ignapas.noreply@github.com |
b413251bf658c5caebaf53a11df9e546d7b74c8d | fb6037de54380ef9776fa18b099df03129cef27b | /config.py | 0f52e7589bdb34f27ca16a8992c3e2cb5ee0941f | [] | no_license | webclinic017/newmainbucketssurver | 1385dffe0ea573bb9cb81a4eeb5ddd341aabe88c | 71f86ec7d52b7d68960ecd2fed6b11713b11622e | refs/heads/main | 2023-07-08T10:03:02.915075 | 2021-08-05T13:25:13 | 2021-08-05T13:25:13 | 395,801,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from flask_restful import Api
from flask import Flask
from flask_cors import CORS
from flask_mail import Mail
from dotenv import load_dotenv
from cryptography.fernet import Fernet
from flask_jwt_extended import JWTManager
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from pymongo import MongoClient
import os
# Load environment variables from a local .env file before reading them.
load_dotenv()

app = Flask(__name__)

# Core Flask / mail settings come from the environment.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['MAIL_SERVER'] = os.environ.get('MAIL_SERVER')
# NOTE(review): os.environ values are strings; confirm Flask-Mail accepts a
# string MAIL_PORT here rather than requiring an int.
app.config['MAIL_PORT'] = os.environ.get('MAIL_PORT')
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# JWTs are signed with the same app secret; tokens never expire.
app.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = False
# Alpaca trading API credentials.
app.config['APCA_API_KEY_ID'] = os.environ.get('APCA_API_KEY_ID')
app.config['APCA_API_SECRET_KEY'] = os.environ.get('APCA_API_SECRET_KEY')
app.config['APCA_API_BASE_URL'] = os.environ.get('APCA_API_BASE_URL')

# APScheduler: persist jobs in MongoDB so they survive process restarts.
jobstores = {
    'default': MongoDBJobStore(database='buckets', collection='scheduled_jobs', client=MongoClient(os.environ.get("DATABASE_URL")))
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}

# Symmetric encryption helper (Fernet) keyed from the environment.
fernet = Fernet(os.environ.get('FERNET_ENCRYPTION_KEY'))
CORS(app)
scheduler = BackgroundScheduler(jobstores=jobstores, job_defaults=job_defaults, executors=executors)
scheduler.start()
api = Api(app)
jwt = JWTManager(app)
mail = Mail(app)
"noreply@github.com"
] | webclinic017.noreply@github.com |
7f54b92aa08f3953e791158a06be7ed846bbe676 | 2029785d79244b601c978deb2617e88cc658dc9e | /config.py | f2cfc36ddde563db0c6ade78c57a0b1148b0b1fc | [] | no_license | JalexDooo/Pytorch_Learning | 025bcf422c5fb39b03a2a6521fc69502d899c37e | 622cddff30359763270fffa0b52dca79b02164bb | refs/heads/master | 2020-04-03T02:06:36.248827 | 2018-11-01T03:33:45 | 2018-11-01T03:33:45 | 154,947,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import torch as t
import warnings
class DefaultConfig(object):
"""docstring for DefaultConfig"""
env = 'default' # visdom
vis_port = 8097
model = 'AlexNet'
train_data_root = './data/train/'
test_data_root = './data/test'
load_model_path = None
batch_size = 32
use_gpu = True
num_workers = 4 # how many workers for loading data
print_freq = 20 # print every N batch
debug_file = './tmp/debug'
result_file = './result/result.csv'
max_epoch = 10
lr = 0.1
lr_decay = 0.95
weight_decay = 1e-4
device = t.device('cuda') if use_gpu else t.device('cpu')
def _parse(self, kwargs):
"""
update config
"""
for k, v in kwargs.items():
if not hasattr(self, k):
warnings.warn("Warning: opt has not attribute %s" %k)
setattr(self, k, v)
# opt.device = t.device('cuda') if opt.use_gpu else t.device('cpu')
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('_'):
print(k, getattr(self, k))
opt = DefaultConfig()
"""
opt = DefaultConfig()
new_config = {
'batch_size':20,
'use_gpu':False,
}
opt._parse(new_config)
print(opt.batch_size)
print(opt.use_gpu)
""" | [
"393351322@qq.com"
] | 393351322@qq.com |
9da4f23ce929496941a6e018e60fc0d53ce7f602 | d4f9a423353fe79cf8824a8407690655fc1379fe | /django/virtualenv/django/lib/python2.7/site-packages/ansible-2.2.0-py2.7.egg/ansible/modules/extras/system/modprobe.py | 94c1a70437b97af6e6cb0e52a67fbc5c237d1106 | [] | no_license | 007root/python | 9ab62d433d17c8bb57622fd1d24a3b17cb3d13ad | 16bf729e5824555eab0c9de61ce6b8b055551bd1 | refs/heads/master | 2020-06-23T09:43:05.308328 | 2020-06-09T08:31:20 | 2020-06-09T08:31:20 | 74,656,519 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: modprobe
short_description: Add or remove kernel modules
requirements: []
version_added: 1.4
author:
- "David Stygstra (@stygstra)"
- "Julien Dauphant"
- "Matt Jeffery"
description:
- Add or remove kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present or absent.
params:
required: false
default: ""
version_added: "1.6"
description:
- Modules parameters.
'''
EXAMPLES = '''
# Add the 802.1q module
- modprobe: name=8021q state=present
# Add the dummy module
- modprobe: name=dummy state=present params="numdummies=2"
'''
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
import shlex
def main():
    """Ansible entry point: ensure a kernel module is loaded or unloaded."""
    module = AnsibleModule(
        argument_spec={
            'name': {'required': True},
            'state': {'default': 'present', 'choices': ['present', 'absent']},
            'params': {'default': ''},
        },
        supports_check_mode=True,
    )

    # Result skeleton reused by every exit_json/fail_json call below.
    args = {
        'changed': False,
        'failed': False,
        'name': module.params['name'],
        'state': module.params['state'],
        'params': module.params['params'],
    }

    # Check if module is present.
    # /proc/modules lists loaded modules; the kernel normalises '-' to '_',
    # and the trailing space avoids prefix collisions (e.g. "snd" vs "snd_hda").
    try:
        modules = open('/proc/modules')
        present = False
        module_name = args['name'].replace('-', '_') + ' '
        for line in modules:
            if line.startswith(module_name):
                present = True
                break
        modules.close()
    except IOError:
        e = get_exception()
        module.fail_json(msg=str(e), **args)

    # Check only; don't modify
    if module.check_mode:
        if args['state'] == 'present' and not present:
            changed = True
        elif args['state'] == 'absent' and present:
            changed = True
        else:
            changed = False
        module.exit_json(changed=changed)

    # Add/remove module as needed; both branches are no-ops when the
    # desired state already holds (idempotent behaviour).
    if args['state'] == 'present':
        if not present:
            command = [module.get_bin_path('modprobe', True), args['name']]
            command.extend(shlex.split(args['params']))
            rc, _, err = module.run_command(command)
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    elif args['state'] == 'absent':
        if present:
            rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True

    module.exit_json(**args)

main()
| [
"wangzhishuai@gstianfu.com"
] | wangzhishuai@gstianfu.com |
680104933550f68f797b59cca2f9483dcc428cf3 | 7d8e040cb703e6f6e2d55b5dc64fc9124d85dde8 | /skl2onnx/algebra/onnx_ops.py | b7e0a2b62582c6dce562bb0d6f48ac7c95de2feb | [
"MIT"
] | permissive | Global-localhost/sklearn-onnx | fc44aa481a91482f187cfd2307df6061b77742af | a8267e7ba946d8b0596951060e5dca39fec47439 | refs/heads/master | 2023-03-23T00:19:31.474251 | 2021-03-03T19:17:12 | 2021-03-03T19:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,316 | py | """
Place holder for all ONNX operators.
"""
import sys
import numpy as np
import onnx
from .automation import get_rst_doc
def ClassFactory(class_name, op_name, inputs, outputs,
                 input_range, output_range,
                 domain, attr_names, doc,
                 deprecated, since_version,
                 past_version):
    """Build a new ``OnnxOperator`` subclass for one ONNX operator schema.

    The generated class captures the schema metadata (expected inputs and
    outputs, attribute names, opset version history) and validates its
    constructor arguments against that metadata.
    """
    from .onnx_operator import OnnxOperator

    def __init__(self, *args, **kwargs):

        # 'op_version' may be a plain int or a {domain: version} mapping.
        op_version = kwargs.pop('op_version', None)
        if isinstance(op_version, dict):
            op_version = op_version.get(domain, None)

        if op_version is None:
            # With a fixed arity, default the inputs to the schema's names.
            if len(args) == 0 and input_range[0] == input_range[1]:
                args = [_[0] for _ in self.__class__.expected_inputs]
            if not (input_range[0] <= len(args) <= input_range[1]):
                raise RuntimeError("Unexpected number of inputs, "
                                   "got {}, expecting {} for operator "
                                   "'{}'.".format(
                                       len(args), len(inputs), op_name))

        attr_names = self.attr_names
        # Versioned class names look like 'OnnxFoo_11'; the suffix pins the
        # opset and caps any explicitly requested op_version.
        if '_' in self.__class__.__name__:
            op_version_class = int(self.__class__.__name__.split('_')[-1])
            if op_version is None:
                op_version = op_version_class
            try:
                op_version = min(op_version, op_version_class)
            except TypeError:
                raise TypeError(
                    "Could not compare versions {} ? {} for "
                    "class '{}' since_version {}. Parameter 'op_version' "
                    "is probably missing when the class "
                    "is instantiated.".format(
                        op_version, op_version_class, class_name,
                        since_version))
        else:
            op_version_class = None

        # By default, the op_version is None.
        # None means the latest available.
        if op_version is None:
            op_version = since_version

        found = None
        if op_version is not None:
            # attr_names refers to the most recent version of
            # this operator. We may need an older one.
            for op in range(op_version, 0, -1):
                name = '{}_{}'.format(self.__class__.__name__, op)
                if name in self.past_version:
                    found = (name, op)
                    attr_names = self.past_version[name].attr_names
                    break
        if (op_version_class is not None and found is not None and
                found[-1] != op_version_class):
            raise RuntimeError(
                "op_version={} does not refer to the same opset as the class "
                "name ('{}').".format(op_version, self.__class__.__name__))
        # Reject keyword arguments that are neither framework options nor
        # attributes of the resolved operator version.
        for key in kwargs:
            if key in {'output_names', 'op_version', 'domain', 'ir_version'}:
                continue
            if key not in attr_names:
                raise TypeError("Argument '%s' not valid for '%s'"
                                % (key, op_name))
        if op_version is not None:
            kwargs['op_version'] = op_version
        OnnxOperator.__init__(self, *args, **kwargs)

    newclass = type(class_name, (OnnxOperator,),
                    {"__init__": __init__, '__doc__': doc,
                     'expected_inputs': inputs,
                     'expected_outputs': outputs,
                     'operator_name': op_name,
                     'input_range': input_range,
                     'output_range': output_range,
                     'domain': domain,
                     'is_deprecated': deprecated,
                     'since_version': since_version,
                     'past_version': past_version,
                     'attr_names': attr_names})
    return newclass
def dynamic_class_creation():
    """
    Automatically generates classes for each of the operators
    module *onnx* defines and described at
    `Operators
    <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
    and `Operators
    <https://github.com/onnx/onnx/blob/master/docs/
    Operators-ml.md>`_.

    Returns a mapping from class name (``OnnxFoo`` / ``OnnxFoo_11``) to the
    generated class; versioned classes are also linked into the unversioned
    class's ``past_version`` dictionary.
    """
    res = {}
    for schema in onnx.defs.get_all_schemas_with_history():
        if schema.support_level == schema.SupportType.EXPERIMENTAL:
            # Skips experimental operators.
            continue
        # Multiple version can coexist. The last one is kept.
        if schema.name in res:
            if schema.since_version > res[schema.name].since_version:
                # We keep the most recent one.
                res[schema.name] = schema
        else:
            res[schema.name] = schema
        # Every version is also registered under a suffixed key.
        res[schema.name + '_' + str(schema.since_version)] = schema
    cls = {}

    def _c(obj, label, i):
        # Build a (name, type-string) pair for input/output slot *i*.
        name = '%s%d' % (obj.name or label, i)
        tys = obj.typeStr or ''
        return (name, tys)

    for name in sorted(res):
        schema = res[name]
        doc = get_rst_doc(schema)
        inputs = [_c(o, 'I', i) for i, o in enumerate(schema.inputs)]
        outputs = [_c(o, 'O', i) for i, o in enumerate(schema.outputs)]
        args = [p for p in schema.attributes]
        if '_' in name:
            class_name = "Onnx" + name
        else:
            class_name = "Onnx" + schema.name
        cl = ClassFactory(class_name, schema.name, inputs, outputs,
                          [schema.min_input, schema.max_input],
                          [schema.min_output, schema.max_output],
                          schema.domain, args,
                          "**Version**" + doc.split('**Version**')[-1],
                          getattr(schema, 'deprecated', False),
                          schema.since_version, {})
        cls[class_name] = cl

    # Retrieves past classes.
    for name in cls:
        if '_' not in name:
            continue
        main, version = name.split('_')
        last = cls[main]
        last.past_version[name] = cls[name]
    return cls
def _update_module():
    """Inject the dynamically generated ONNX operator classes into this module."""
    module = sys.modules[__name__]
    for name, cls in dynamic_class_creation().items():
        setattr(module, name, cls)
def OnnxReduceSumApi11(*x, axes=None, keepdims=1, op_version=None,
                       output_names=None):
    """
    Adds operator ReduceSum with opset>=13 following API from opset 12.
    """
    if op_version is None:
        raise RuntimeError("op_version must be specified.")
    common = dict(keepdims=keepdims, op_version=op_version,
                  output_names=output_names)
    if op_version >= 13:
        # From opset 13 on, 'axes' is an input tensor, not an attribute.
        if axes is None:
            return OnnxReduceSum(*x, **common)  # noqa
        return OnnxReduceSum(  # noqa
            *x, np.array(axes, dtype=np.int64), **common)
    cls = OnnxReduceSum_11 if op_version >= 11 else OnnxReduceSum_1  # noqa
    if axes is None:
        return cls(*x, **common)
    return cls(*x, axes=axes, **common)
def OnnxSplitApi11(*x, axis=0, split=None, op_version=None,
                   output_names=None):
    """
    Adds operator Split with opset>=13 following API from opset 11.
    """
    if op_version is None:
        raise RuntimeError("op_version must be specified.")
    common = dict(axis=axis, op_version=op_version,
                  output_names=output_names)
    if op_version >= 13:
        # From opset 13 on, 'split' is an input tensor, not an attribute.
        if split is None:
            return OnnxSplit(*x, **common)  # noqa
        return OnnxSplit(  # noqa
            *x, np.array(split, dtype=np.int64), **common)
    cls = OnnxSplit_11 if op_version >= 11 else OnnxSplit_2  # noqa
    if split is None:
        return cls(*x, **common)
    return cls(*x, split=split, **common)
def OnnxSqueezeApi11(*x, axes=None, op_version=None,
                     output_names=None):
    """
    Adds operator Squeeze with opset>=13 following API from opset 11.
    """
    if op_version is None:
        raise RuntimeError("op_version must be specified.")
    if op_version >= 13:
        # From opset 13 on, 'axes' is an input tensor, not an attribute.
        return OnnxSqueeze(  # noqa
            *x, np.array(axes, dtype=np.int64),
            op_version=op_version, output_names=output_names)
    cls = OnnxSqueeze_11 if op_version >= 11 else OnnxSqueeze_1  # noqa
    return cls(*x, axes=axes, op_version=op_version,
               output_names=output_names)
def OnnxUnsqueezeApi11(*x, axes=None, op_version=None,
                       output_names=None):
    """
    Adds operator Unsqueeze with opset>=13 following API from opset 11.
    """
    if op_version is None:
        raise RuntimeError("op_version must be specified.")
    if op_version >= 13:
        # From opset 13 on, 'axes' is an input tensor, not an attribute.
        return OnnxUnsqueeze(  # noqa
            *x, np.array(axes, dtype=np.int64),
            op_version=op_version, output_names=output_names)
    cls = OnnxUnsqueeze_11 if op_version >= 11 else OnnxUnsqueeze_1  # noqa
    return cls(*x, axes=axes, op_version=op_version,
               output_names=output_names)
| [
"noreply@github.com"
] | Global-localhost.noreply@github.com |
c6e6f419fb3b519d47880163dc872c60998281b1 | 845058c3434ff43c5f9bd48df13818bef74f04e3 | /tyler/cs301/spring19/materials/code/lec-08/sec3/battleship.py | d2e064aef38d1dee1842b8a4e1b5a7e87db4a51a | [] | no_license | tylerharter/caraza-harter-com | ad7d7f76a382dfd1d4ff4c05922ea57425d1be2b | 70b983a28d94d744b92c9f00dfb8ec6ca20e080d | refs/heads/master | 2023-08-18T23:16:21.588376 | 2023-08-09T17:03:18 | 2023-08-09T17:03:18 | 142,339,324 | 19 | 114 | null | 2023-02-21T18:28:00 | 2018-07-25T18:35:07 | HTML | UTF-8 | Python | false | false | 685 | py | def draw_map(x, y, character):
width = 10
height = 10
print(('.' * width + "\n") * y, end="")
print("." * x + character + "." * (width - (x + 1)))
print(('.' * width + "\n") * (height - (y + 1)))
def ship1_hit(x, y):
ship1_x = 5
ship1_y = 4
return (x == ship1_x and y == ship1_y)
def ship2_hit(x, y):
ship2_x = 8
ship2_y = 8
return (x == ship2_x and y == ship2_y)
def is_hit(x, y):
return ship1_hit(x, y) or ship2_hit(x, y)
def guess():
x = int(input("x: "))
y = int(input("y: "))
hit = is_hit(x, y)
print("Hit? " + str(hit))
symbol = str(int(hit))
# draw the map
draw_map(x, y, symbol)
guess()
| [
"tylerharter@gmail.com"
] | tylerharter@gmail.com |
2b00016707f4dff6df93aa5b50870a5ba7828138 | a47f76fafa48d5bc8888659f855f518b5369e47a | /Week2/Mathematics/Learn/nthTerm(GP).py | ac1ecc9079d6441dd278a38f76df3b825d515421 | [] | no_license | psycho-pomp/DSA-Workshop | 49fce46c3abd7f6a08f065cf61c217a429d4193c | d71bc6d796bb099b85bfc735ca0f1af2a3ac69fc | refs/heads/main | 2023-04-28T22:09:21.466000 | 2021-05-06T18:55:06 | 2021-05-06T18:55:06 | 316,889,663 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | # Abhishek Anand
# Geometric Progression
a,r,n=map(int,input().split())
nthTerm=a*r**(n-1)
print(nthTerm)
| [
"noreply@github.com"
] | psycho-pomp.noreply@github.com |
170b36e7ee43850c846de900bb40777b8a51c861 | c1eec99e798d71878b341cb016c4b1be193d5a68 | /tests/test_inplayserviceresources.py | 0ae060ae2704306a437f141d4dc57bad9881653c | [
"MIT"
] | permissive | KelvinVail/betfairlightweight | 7f7dc14ae80dc1778f8819d3990a5fe2f4d0703b | 3bcad61b2319e40c02fd41cd5179838e53e995ad | refs/heads/master | 2021-01-15T20:03:19.918425 | 2017-08-09T17:55:32 | 2017-08-09T17:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from __future__ import print_function
import unittest
import datetime
from betfairlightweight import resources
from tests.tools import create_mock_json
class InPlayServiceTest(unittest.TestCase):
    """Checks that in-play service resources hydrate from recorded JSON payloads."""

    def test_scores(self):
        # Scores should accept the sample API payload as keyword arguments.
        mock_response = create_mock_json('tests/resources/scores.json')
        resource = resources.Scores(**mock_response.json())
        assert isinstance(resource, resources.Scores)
        assert resource.event_type_id == 1

    def test_event_timeline(self):
        # EventTimeline should accept the sample API payload as keyword arguments.
        mock_response = create_mock_json('tests/resources/eventtimeline.json')
        resource = resources.EventTimeline(**mock_response.json())
        assert isinstance(resource, resources.EventTimeline)
        assert resource.event_type_id == 1
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
720f9ff24ca6f6b8bfcb316886af1ead65756ab9 | a2d13658503b9b921e27994152ab6adb554725bc | /store/migrations/0065_auto_20210205_1244.py | 4faf2433a9c2ce5bd22224da671c9cd04cea01fd | [] | no_license | avishkakavindu/sushi-chef-django | 40a1d7916d7f8c37ba1290cb717af517d2bce265 | 4c112d806720d903877822baaa26159c32704901 | refs/heads/master | 2023-03-18T11:12:41.721554 | 2021-03-11T08:22:52 | 2021-03-11T08:22:52 | 303,053,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | # Generated by Django 3.1.5 on 2021-02-05 07:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; edit with care.

    dependencies = [
        ('store', '0064_auto_20210205_1227'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='valid_to',
            # NOTE(review): the default is a datetime frozen at generation
            # time, so it drifts with every regenerated migration — confirm
            # this is intended rather than a callable default.
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 15, 12, 44, 48, 516859)),
        ),
        migrations.AlterField(
            model_name='order',
            name='payment_method',
            field=models.CharField(choices=[('payhere', 'payhere'), ('cashondelivery', 'cashondelivery')], max_length=50),
        ),
    ]
| [
"avishkakavindud@gmail.com"
] | avishkakavindud@gmail.com |
7254d4913dabf70b27f3b77a544a4b9cbf1d9990 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/programming/libs/indilib/actions.py | 6bc591a0b0b40e7bbf3c3b128c1d8577fa1bfce1 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import cmaketools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
# Directory the source tarball unpacks into, derived from the package version.
WorkDir = "libindi0-%s" % get.srcVERSION()
def setup():
    """Configure the CMake build via the pisi cmaketools wrapper."""
    cmaketools.configure()
def build():
    """Compile the sources (wraps ``make``)."""
    cmaketools.make()
def install():
    """Install into the pisi staging dir and ship the standard docs."""
    cmaketools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING.LIB", "NEWS", "README*", "TODO")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
a20349f17c7be0c3eb620b4d2bc9924a7c4380e4 | ac9c04d564d781eab3a5a0e2d7fce8377047a6d5 | /obsoper/test/test_coordinates.py | 510fa864ce079dfd46ddaaecde821ccc965c12dc | [
"BSD-3-Clause"
] | permissive | met-office-ocean/obsoper | ee57fb4bc0f5f06e9126bbb161223aca4d535e45 | 15030dedc3cbdeb67407b940b4f923b054520fc3 | refs/heads/master | 2021-01-11T01:36:07.516059 | 2019-02-01T12:15:54 | 2019-02-01T12:15:54 | 70,684,510 | 0 | 0 | null | 2017-02-06T16:40:37 | 2016-10-12T09:28:36 | Python | UTF-8 | Python | false | false | 1,393 | py | # pylint: disable=missing-docstring, invalid-name
import unittest
import numpy as np
import obsoper
class TestCartesian(unittest.TestCase):
    """Tests for obsoper.cartesian: (lon, lat) degrees -> unit-sphere x, y, z."""

    def test_cartesian_given_lists_returns_arrays(self):
        # Plain Python lists should come back as numpy arrays.
        for axis in obsoper.cartesian([], []):
            self.assertIsInstance(axis, np.ndarray)

    def test_cartesian_given_empty_arrays_returns_empty_arrays(self):
        self.assertCoordinatesEqual(([], [], []), obsoper.cartesian([], []))

    def test_cartesian_given_greenwich_equator_returns_unit_x(self):
        # (0E, 0N) lies on the positive x-axis.
        self.check_cartesian(longitudes=[0], latitudes=[0],
                             x=[1], y=[0], z=[0])

    def test_cartesian_given_north_pole_returns_unit_z(self):
        # The north pole lies on the positive z-axis.
        self.check_cartesian(longitudes=[0], latitudes=[90],
                             x=[0], y=[0], z=[1])

    def check_cartesian(self, longitudes, latitudes, x, y, z):
        # Helper: convert and compare against the expected component arrays.
        self.assertCoordinatesEqual((x, y, z),
                                    obsoper.cartesian(longitudes, latitudes))

    @staticmethod
    def assertCoordinatesEqual(expect, result):
        for want, got in zip(expect, result):
            np.testing.assert_array_almost_equal(want, got)
| [
"andrew.ryan@metoffice.gov.uk"
] | andrew.ryan@metoffice.gov.uk |
cf7d79495cf0d5ed570d0a3b691e1abfc9c4f004 | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/quopri.py | f0fe0bb0b515f03975ed67085bd7b4e0fae36cb1 | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,890 | py | <<<<<<< HEAD
<<<<<<< HEAD
#! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='  # the quoted-printable escape byte
MAXLINESIZE = 76  # maximum encoded line length allowed by RFC 1521
HEX = b'0123456789ABCDEF'  # upper-case hex digits used when quoting a byte
EMPTYSTRING = b''
# Prefer the C implementations from binascii; the pure-Python code below is
# only exercised when they are unavailable.
try:
    from binascii import a2b_qp, b2a_qp
except ImportError:
    a2b_qp = None
    b2a_qp = None
def needsquoting(c, quotetabs, header):
    """Report whether the single byte *c* needs quoted-printable escaping.

    *quotetabs* controls embedded tabs and spaces (line-ending whitespace
    is always quoted elsewhere, per RFC 1521).  *header* makes '_' special,
    because headers encode spaces as '_' (RFC 1522).
    """
    assert isinstance(c, bytes)
    if c == b' ' or c == b'\t':
        return quotetabs
    if c == b'_':
        # '_' represents an encoded space in headers, so real ones must be escaped.
        return header
    # The escape byte itself, and anything outside printable ASCII.
    return not (b' ' <= c <= b'~') or c == ESCAPE
def quote(c):
    """Return the =XX quoted-printable escape sequence for one byte."""
    assert isinstance(c, bytes) and len(c) == 1
    # b'%02X' yields the same upper-case hex digits as the HEX table.
    return ESCAPE + b'%02X' % ord(c)
def encode(input, output, quotetabs, header=False):
    """Read 'input', apply quoted-printable encoding, and write to 'output'.
    'input' and 'output' are binary file objects. The 'quotetabs' flag
    indicates whether embedded tabs and spaces should be quoted. Note that
    line-ending tabs and spaces are always encoded, as per RFC 1521.
    The 'header' flag indicates whether we are encoding spaces as _ as per RFC
    1522."""
    if b2a_qp is not None:
        # Fast path: delegate to the C implementation from binascii.
        data = input.read()
        odata = b2a_qp(data, quotetabs=quotetabs, header=header)
        output.write(odata)
        return
    # Pure-Python fallback below; 'write' binds 'output' once as a default arg.
    def write(s, output=output, lineEnd=b'\n'):
        # RFC 1521 requires that the line ending in a space or tab must have
        # that trailing character encoded.
        if s and s[-1:] in b' \t':
            output.write(s[:-1] + quote(s[-1:]) + lineEnd)
        elif s == b'.':
            # A lone '.' line could be taken as end-of-message (SMTP); quote it.
            output.write(quote(s) + lineEnd)
        else:
            output.write(s + lineEnd)
    # 'prevline' lags one line behind so the final line can be written without
    # a trailing newline when the input lacked one.
    prevline = None
    while 1:
        line = input.readline()
        if not line:
            break
        outline = []
        # Strip off any readline induced trailing newline
        stripped = b''
        if line[-1:] == b'\n':
            line = line[:-1]
            stripped = b'\n'
        # Calculate the un-length-limited encoded line
        for c in line:
            c = bytes((c,))
            if needsquoting(c, quotetabs, header):
                c = quote(c)
            if header and c == b' ':
                # In header mode a space is encoded as '_' (RFC 1522).
                outline.append(b'_')
            else:
                outline.append(c)
        # First, write out the previous line
        if prevline is not None:
            write(prevline)
        # Now see if we need any soft line breaks because of RFC-imposed
        # length limitations. Then do the thisline->prevline dance.
        thisline = EMPTYSTRING.join(outline)
        while len(thisline) > MAXLINESIZE:
            # Don't forget to include the soft line break `=' sign in the
            # length calculation!
            write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
            thisline = thisline[MAXLINESIZE-1:]
        # Write out the current line
        prevline = thisline
    # Write out the last line, without a trailing newline
    if prevline is not None:
        write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
    """Encode the bytes *s* as quoted-printable and return the result."""
    if b2a_qp is not None:
        # Fast path: C implementation from binascii.
        return b2a_qp(s, quotetabs=quotetabs, header=header)
    from io import BytesIO
    src = BytesIO(s)
    dst = BytesIO()
    encode(src, dst, quotetabs, header)
    return dst.getvalue()
def decode(input, output, header=False):
    """Read 'input', apply quoted-printable decoding, and write to 'output'.
    'input' and 'output' are binary file objects.
    If 'header' is true, decode underscore as space (per RFC 1522)."""
    if a2b_qp is not None:
        # Fast path: delegate to the C implementation from binascii.
        data = input.read()
        odata = a2b_qp(data, header=header)
        output.write(odata)
        return
    # Pure-Python fallback; 'new' accumulates decoded bytes across soft breaks.
    new = b''
    while 1:
        line = input.readline()
        if not line: break
        i, n = 0, len(line)
        if n > 0 and line[n-1:n] == b'\n':
            partial = 0; n = n-1
            # Strip trailing whitespace
            while n > 0 and line[n-1:n] in b" \t\r":
                n = n-1
        else:
            # Last line of input had no newline; don't emit one either.
            partial = 1
        while i < n:
            c = line[i:i+1]
            if c == b'_' and header:
                new = new + b' '; i = i+1
            elif c != ESCAPE:
                new = new + c; i = i+1
            elif i+1 == n and not partial:
                # Soft line break: trailing '=' joins this line with the next.
                partial = 1; break
            elif i+1 < n and line[i+1:i+2] == ESCAPE:
                # BUG FIX: was ``line[i+1] == ESCAPE`` -- indexing bytes in
                # Python 3 yields an int, so the comparison was always false
                # and '==' never collapsed to a literal '='.  Slicing keeps
                # the bytes type, matching the stdlib implementation.
                new = new + ESCAPE; i = i+2
            elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
                new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
            else: # Bad escape sequence -- leave it in
                new = new + c; i = i+1
        if not partial:
            output.write(new + b'\n')
            new = b''
    if new:
        output.write(new)
def decodestring(s, header=False):
    """Decode the quoted-printable bytes *s* and return the result."""
    if a2b_qp is not None:
        # Fast path: C implementation from binascii.
        return a2b_qp(s, header=header)
    from io import BytesIO
    src = BytesIO(s)
    dst = BytesIO()
    decode(src, dst, header=header)
    return dst.getvalue()
# Other helper functions
def ishex(c):
    """Return true if the single byte *c* is an ASCII hexadecimal digit."""
    assert isinstance(c, bytes)
    return (b'A' <= c <= b'F') or (b'a' <= c <= b'f') or (b'0' <= c <= b'9')
def unhex(s):
    """Return the integer value of the hexadecimal byte string *s*."""
    value = 0
    for byte in s:
        digit = bytes((byte,))
        if b'0' <= digit <= b'9':
            origin = ord('0')
        elif b'a' <= digit <= b'f':
            origin = ord('a') - 10
        elif b'A' <= digit <= b'F':
            origin = ord(b'A') - 10
        else:
            assert False, "non-hex digit " + repr(digit)
        # Shift the accumulator one hex place and add this digit's value.
        value = value * 16 + (ord(digit) - origin)
    return value
def main():
    """Command-line entry point: encode (default) or decode files to stdout.
    -t quotes embedded tabs when encoding; -d decodes instead; '-' or no
    arguments means read stdin."""
    import sys
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'td')
    except getopt.error as msg:
        sys.stdout = sys.stderr
        print(msg)
        print("usage: quopri [-t | -d] [file] ...")
        print("-t: quote tabs")
        print("-d: decode; default encode")
        sys.exit(2)
    # deco selects decoding; tabs forces quoting of embedded tabs/spaces.
    deco = 0
    tabs = 0
    for o, a in opts:
        if o == '-t': tabs = 1
        if o == '-d': deco = 1
    if tabs and deco:
        sys.stdout = sys.stderr
        print("-t and -d are mutually exclusive")
        sys.exit(2)
    if not args: args = ['-']
    # sts becomes nonzero if any input file could not be opened.
    sts = 0
    for file in args:
        if file == '-':
            fp = sys.stdin.buffer
        else:
            try:
                fp = open(file, "rb")
            except OSError as msg:
                sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
                sts = 1
                continue
        try:
            if deco:
                decode(fp, sys.stdout.buffer)
            else:
                encode(fp, sys.stdout.buffer, tabs)
        finally:
            # Close real files; never close the shared stdin buffer.
            if file != '-':
                fp.close()
    if sts:
        sys.exit(sts)
if __name__ == '__main__':
main()
=======
#! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')
def quote(c):
"""Quote a single character."""
assert isinstance(c, bytes) and len(c)==1
c = ord(c)
return ESCAPE + bytes((HEX[c//16], HEX[c%16]))
def encode(input, output, quotetabs, header=False):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are binary file objects. The 'quotetabs' flag
indicates whether embedded tabs and spaces should be quoted. Note that
line-ending tabs and spaces are always encoded, as per RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per RFC
1522."""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs=quotetabs, header=header)
output.write(odata)
return
def write(s, output=output, lineEnd=b'\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in b' \t':
output.write(s[:-1] + quote(s[-1:]) + lineEnd)
elif s == b'.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = b''
if line[-1:] == b'\n':
line = line[:-1]
stripped = b'\n'
# Calculate the un-length-limited encoded line
for c in line:
c = bytes((c,))
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == b' ':
outline.append(b'_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
if b2a_qp is not None:
return b2a_qp(s, quotetabs=quotetabs, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header=False):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are binary file objects.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header=header)
output.write(odata)
return
new = b''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1:n] in b" \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i:i+1]
if c == b'_' and header:
new = new + b' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + b'\n')
new = b''
if new:
output.write(new)
def decodestring(s, header=False):
if a2b_qp is not None:
return a2b_qp(s, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
decode(infp, outfp, header=header)
return outfp.getvalue()
# Other helper functions
def ishex(c):
"""Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
assert isinstance(c, bytes)
return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
c = bytes((c,))
if b'0' <= c <= b'9':
i = ord('0')
elif b'a' <= c <= b'f':
i = ord('a')-10
elif b'A' <= c <= b'F':
i = ord(b'A')-10
else:
assert False, "non-hex digit "+repr(c)
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("usage: quopri [-t | -d] [file] ...")
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin.buffer
else:
try:
fp = open(file, "rb")
except OSError as msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
try:
if deco:
decode(fp, sys.stdout.buffer)
else:
encode(fp, sys.stdout.buffer, tabs)
finally:
if file != '-':
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')
def quote(c):
"""Quote a single character."""
assert isinstance(c, bytes) and len(c)==1
c = ord(c)
return ESCAPE + bytes((HEX[c//16], HEX[c%16]))
def encode(input, output, quotetabs, header=False):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are binary file objects. The 'quotetabs' flag
indicates whether embedded tabs and spaces should be quoted. Note that
line-ending tabs and spaces are always encoded, as per RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per RFC
1522."""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs=quotetabs, header=header)
output.write(odata)
return
def write(s, output=output, lineEnd=b'\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in b' \t':
output.write(s[:-1] + quote(s[-1:]) + lineEnd)
elif s == b'.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = b''
if line[-1:] == b'\n':
line = line[:-1]
stripped = b'\n'
# Calculate the un-length-limited encoded line
for c in line:
c = bytes((c,))
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == b' ':
outline.append(b'_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
if b2a_qp is not None:
return b2a_qp(s, quotetabs=quotetabs, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header=False):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are binary file objects.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header=header)
output.write(odata)
return
new = b''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1:n] in b" \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i:i+1]
if c == b'_' and header:
new = new + b' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + b'\n')
new = b''
if new:
output.write(new)
def decodestring(s, header=False):
if a2b_qp is not None:
return a2b_qp(s, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
decode(infp, outfp, header=header)
return outfp.getvalue()
# Other helper functions
def ishex(c):
"""Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
assert isinstance(c, bytes)
return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
c = bytes((c,))
if b'0' <= c <= b'9':
i = ord('0')
elif b'a' <= c <= b'f':
i = ord('a')-10
elif b'A' <= c <= b'F':
i = ord(b'A')-10
else:
assert False, "non-hex digit "+repr(c)
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("usage: quopri [-t | -d] [file] ...")
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin.buffer
else:
try:
fp = open(file, "rb")
except OSError as msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
try:
if deco:
decode(fp, sys.stdout.buffer)
else:
encode(fp, sys.stdout.buffer, tabs)
finally:
if file != '-':
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| [
"Weldon@athletech.org"
] | Weldon@athletech.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.