hexsha
stringlengths 40
40
| size
int64 3
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
972
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
972
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
972
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.03M
| avg_line_length
float64 1.13
941k
| max_line_length
int64 2
941k
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ab6b7c8d71f0cda06a229e8ccc03d12604ac015
| 11,871
|
py
|
Python
|
pybind/slxos/v16r_1_00b/rbridge_id/threshold_monitor/security/policy/area/alert/below/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/rbridge_id/threshold_monitor/security/policy/area/alert/below/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/rbridge_id/threshold_monitor/security/policy/area/alert/below/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class below(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/threshold-monitor/security/policy/area/alert/below. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__sec_below_highthresh_action','__sec_below_lowthresh_action',)
_yang_name = 'below'
_rest_name = 'below'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__sec_below_highthresh_action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-highthresh-action", rest_name="highthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'highthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
self.__sec_below_lowthresh_action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-lowthresh-action", rest_name="lowthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'lowthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'threshold-monitor', u'security', u'policy', u'area', u'alert', u'below']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'threshold-monitor', u'security', u'policy', u'area', u'alert', u'below']
def _get_sec_below_highthresh_action(self):
"""
Getter method for sec_below_highthresh_action, mapped from YANG variable /rbridge_id/threshold_monitor/security/policy/area/alert/below/sec_below_highthresh_action (supported-actions)
"""
return self.__sec_below_highthresh_action
def _set_sec_below_highthresh_action(self, v, load=False):
"""
Setter method for sec_below_highthresh_action, mapped from YANG variable /rbridge_id/threshold_monitor/security/policy/area/alert/below/sec_below_highthresh_action (supported-actions)
If this variable is read-only (config: false) in the
source YANG file, then _set_sec_below_highthresh_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sec_below_highthresh_action() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-highthresh-action", rest_name="highthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'highthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sec_below_highthresh_action must be of a type compatible with supported-actions""",
'defined-type': "brocade-threshold-monitor:supported-actions",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-highthresh-action", rest_name="highthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'highthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)""",
})
self.__sec_below_highthresh_action = t
if hasattr(self, '_set'):
self._set()
def _unset_sec_below_highthresh_action(self):
self.__sec_below_highthresh_action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-highthresh-action", rest_name="highthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'highthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
def _get_sec_below_lowthresh_action(self):
"""
Getter method for sec_below_lowthresh_action, mapped from YANG variable /rbridge_id/threshold_monitor/security/policy/area/alert/below/sec_below_lowthresh_action (supported-actions)
"""
return self.__sec_below_lowthresh_action
def _set_sec_below_lowthresh_action(self, v, load=False):
"""
Setter method for sec_below_lowthresh_action, mapped from YANG variable /rbridge_id/threshold_monitor/security/policy/area/alert/below/sec_below_lowthresh_action (supported-actions)
If this variable is read-only (config: false) in the
source YANG file, then _set_sec_below_lowthresh_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sec_below_lowthresh_action() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-lowthresh-action", rest_name="lowthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'lowthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sec_below_lowthresh_action must be of a type compatible with supported-actions""",
'defined-type': "brocade-threshold-monitor:supported-actions",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-lowthresh-action", rest_name="lowthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'lowthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)""",
})
self.__sec_below_lowthresh_action = t
if hasattr(self, '_set'):
self._set()
def _unset_sec_below_lowthresh_action(self):
self.__sec_below_lowthresh_action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loginfo': {'value': 4}, u'none': {'value': 0}, u'all': {'value': 1}, u'raslog': {'value': 3}, u'email': {'value': 2}},), is_leaf=True, yang_name="sec-below-lowthresh-action", rest_name="lowthresh-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'lowthresh-action'}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='supported-actions', is_config=True)
sec_below_highthresh_action = __builtin__.property(_get_sec_below_highthresh_action, _set_sec_below_highthresh_action)
sec_below_lowthresh_action = __builtin__.property(_get_sec_below_lowthresh_action, _set_sec_below_lowthresh_action)
_pyangbind_elements = {'sec_below_highthresh_action': sec_below_highthresh_action, 'sec_below_lowthresh_action': sec_below_lowthresh_action, }
| 75.132911
| 737
| 0.699351
|
b6850b7a6dc846495c5f3d3cfa18563765e5fe7f
| 835
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/indicator/_title.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/validators/indicator/_title.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/validators/indicator/_title.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="indicator", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the title. It
defaults to `center` except for bullet charts
for which it defaults to right.
font
Set the font used to display the title
text
Sets the title of this indicator.
""",
),
**kwargs
)
| 33.4
| 79
| 0.564072
|
ebdb1b16311b68661316bd07d89b2bc3e6fb97ab
| 17,048
|
py
|
Python
|
scripts/parallel.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
scripts/parallel.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
scripts/parallel.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright (C) 2014 Ivan Gregor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Provides convenient functions to run functions and command line commands in parallel.
"""
import os
import sys
import time
import multiprocessing as mp
import subprocess
import tempfile
class TaskThread:
def __init__(self, fun, args):
"""
Defines one function and its arguments to be executed in one thread.
@param fun: a function to be executed
@type fun: function
@param args: arguments of the function
@type args: tuple
"""
self.fun = fun
self.args = args
class TaskCmd:
def __init__(self, cmd, cwd='.', stdin=None, stdout=None, stderr=None):
"""
Defines one task to be executed as a command line command.
@param cmd: command to be executed on a command line
@param cwd: current working directory in which the task will be executed
@param stdin: process standard input
@param stdout: process standard output
@param stderr: process standard err
"""
self.cmd = cmd
self.cwd = cwd
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
class AsyncParallel:
pool = None
max_processes = 1
task_handler_list = {}
def __init__(self, max_processes=mp.cpu_count()):
"""
Execute several functions (threads, processes) in parallel until return values called.
@param max_processes: maximum number of tasks that will be run in parallel at the same time
"""
assert isinstance(max_processes, int)
# prevent overwrite of previous settings
if AsyncParallel.pool is not None:
return
AsyncParallel.pool = mp.Pool(processes=max_processes)
AsyncParallel.max_processes = max_processes
@staticmethod
def add_tasks(thread_task_list, identifier=None):
"""
Execute several functions (threads, processes) in parallel.
@type thread_task_list: list of TaskThread
@return: a list of respective return values
"""
assert isinstance(thread_task_list, list)
if identifier is None:
identifier = len(AsyncParallel.task_handler_list)
# creates a pool of workers, add all tasks to the pool
if AsyncParallel.pool is None:
AsyncParallel.pool = mp.Pool(processes=AsyncParallel.max_processes)
if identifier not in AsyncParallel.task_handler_list:
AsyncParallel.task_handler_list[identifier] = []
for task in thread_task_list:
assert isinstance(task, TaskThread)
AsyncParallel.task_handler_list[identifier].append(AsyncParallel.pool.apply_async(task.fun, task.args))
return identifier
@staticmethod
def add_cmd_tasks(cmd_task_list, identifier=None, stdin_error_lock=mp.Manager().Lock()):
"""
Run several command line commands in parallel.
@attention: use the Manager to get the lock as in this function definition !!!
@type cmd_task_list: list of TaskCmd
@param stdin_error_lock: acquiring the lock enables writing to the stdout and stderr
@return: list of failed commands, dictionary (cmd, task process)
"""
assert isinstance(cmd_task_list, list)
thread_task_list = []
for cmdTask in cmd_task_list:
assert isinstance(cmdTask, TaskCmd)
thread_task_list.append(TaskThread(_runCmd, (cmdTask, stdin_error_lock)))
return AsyncParallel.add_tasks(thread_task_list, identifier)
@staticmethod
def wait(identifier):
for taskHandler in AsyncParallel.task_handler_list[identifier]:
taskHandler.wait()
@staticmethod
def get_results():
if AsyncParallel.pool is None:
return None
# finish all tasks
AsyncParallel.pool.close()
AsyncParallel.pool.join()
# retrieve the return values
return_value_list = []
for identifier in AsyncParallel.task_handler_list:
for taskHandler in AsyncParallel.task_handler_list[identifier]:
taskHandler.wait()
# assert taskHandler.successful()
return_value_list.append(taskHandler.get())
# reset pool
AsyncParallel.pool = None
AsyncParallel.task_handler_list = {}
# return return_value_list
fail_list = []
for return_value in return_value_list:
if not isinstance(return_value, list):
continue
process, task = return_value
if process.returncode is None:
continue
if process.returncode != 0:
fail_list.append(dict(process=process, task=task))
if len(fail_list) > 0:
return fail_list
else:
return None
def runThreadParallel(threadTaskList, maxThreads=mp.cpu_count()):
"""
Execute several functions (threads, processes) in parallel.
@type threadTaskList: list of TaskThread
@param maxThreads: maximum number of tasks that will be run in parallel at the same time
@return: a list of respective return values
"""
assert isinstance(threadTaskList, list)
assert isinstance(maxThreads, int)
# creates a pool of workers, add all tasks to the pool
pool = mp.Pool(processes=maxThreads)
taskHandlerList = []
for task in threadTaskList:
assert isinstance(task, TaskThread)
taskHandlerList.append(pool.apply_async(task.fun, task.args))
# finish all tasks
pool.close()
pool.join()
# retrieve the return values
retValList = []
for taskHandler in taskHandlerList:
taskHandler.wait()
# assert taskHandler.successful()
try:
retValList.append(taskHandler.get())
except Exception:
pass # add to list?
return retValList
def _runCmd(taskCmd, stdInErrLock=None):
"""
Executes a command line task.
@type taskCmd: TaskCmd
@param stdInErrLock: acquiring the lock enables writing to the stdout and stderr (if not None)
@type stdInErrLock: multiprocessing.Lock
@return: a tuple (process, TaskCmd)
"""
# setting up stdin and stdout (to buffer the output)
if taskCmd.stdout is None and stdInErrLock is not None:
stdout = tempfile.TemporaryFile(mode='a+')
stdoutP = stdout
else:
stdout = None
stdoutP = taskCmd.stdout
if taskCmd.stderr is None and stdInErrLock is not None:
stderr = tempfile.TemporaryFile(mode='a+')
stderrP = stderr
else:
stderr = None
stderrP = taskCmd.stderr
# running the command line task
try:
process = subprocess.Popen(
taskCmd.cmd, shell=True, bufsize=-1, cwd=taskCmd.cwd,
stdin=taskCmd.stdin, stdout=stdoutP, stderr=stderrP)
process.wait()
finally:
# exclusive writing to the stdin or stderr (empty the buffers containing stdin or stdout of the run)
if stdout is not None or stderr is not None:
stdInErrLock.acquire()
if stdout is not None:
stdout.flush()
stdout.seek(0)
sys.stdout.write(stdout.read())
sys.stdout.flush()
stdout.close()
if stderr is not None:
stderr.flush()
stderr.seek(0)
sys.stderr.write(stderr.read())
sys.stderr.flush()
stderr.close()
stdInErrLock.release()
return (process, taskCmd)
def runCmdParallel(cmdTaskList, maxProc=mp.cpu_count(), stdInErrLock=mp.Manager().Lock()):
"""
Run several command line commands in parallel.
@attention: use the Manager to get the lock as in this function definition !!!
@param cmdTaskList: list of command line tasks
@type cmdTaskList: list of TaskCmd
@param maxProc: maximum number of tasks that will be run in parallel at the same time
@param stdInErrLock: acquiring the lock enables writing to the stdout and stderr
@return: list of failed commands, dictionary (cmd, task process)
"""
assert isinstance(cmdTaskList, list)
assert isinstance(maxProc, int)
threadTaskList = []
for cmdTask in cmdTaskList:
assert isinstance(cmdTask, TaskCmd)
threadTaskList.append(TaskThread(_runCmd, (cmdTask, stdInErrLock)))
returnValueList = runThreadParallel(threadTaskList, maxProc)
failList = []
for process, task in returnValueList:
if process.returncode != 0:
failList.append(dict(process=process, task=task))
if len(failList) > 0:
return failList
else:
return None
def runCmdSerial(cmdTaskList, verbose=False, stopWhenError=True, stdInErrLock=None):
"""
Run several command line commands one by one.
@attention: Use the Manager to get the lock (mp.Manager().Lock()) if the lock shared among multiple processes!
@param cmdTaskList: list of command line tasks
@type cmdTaskList: list of TaskCmd
@param stdInErrLock: acquiring the lock enables writing to the stdout and stderr
@type stdInErrLock: multiprocessing.Lock()
"""
assert isinstance(cmdTaskList, list)
counter = 0
failList = []
for task in cmdTaskList:
counter += 1
if verbose:
msg = 'Starting "#%s" cmd: %s\n' % (counter, task.cmd)
if stdInErrLock is not None:
stdInErrLock.acquire()
sys.stdout.write(msg)
sys.stdout.flush()
if stdInErrLock is not None:
stdInErrLock.release()
# run command
process, taskCmd = _runCmd(task, stdInErrLock)
if process.returncode != 0:
failList.append(dict(process=process, task=task))
if stopWhenError:
break
if len(failList) > 0:
return failList
else:
return None
def reportFailedCmd(failList):
"""
Report on failed commands.
"""
if failList is not None:
assert isinstance(failList, list)
msgList = []
for task in failList:
assert isinstance(task, dict)
msg = 'Task failed with return code: %s, task: %s\n' % (task['process'].returncode, task['task'].cmd)
msgList.append(msg)
sys.stderr.write(msg)
sys.stderr.flush()
return msgList
else:
return None
# Deprecated implementation!
class _ProcHandlerCmd:
"""
Stores context of one process.
@deprecated: there is a newer implementation !!!
"""
def __init__(self, task, timeout=None):
"""
@type task: TaskCmd
"""
self.process = subprocess.Popen('exec ' + task.cmd, shell=True, bufsize=-1, cwd=task.cwd)
self.cmd = task.cmd
self.runtime = 0.
self.timeout = timeout
def incRuntime(self, timeStep):
self.runtime += timeStep
def isTimeOut(self):
if self.timeout is not None and self.runtime > self.timeout:
return True
return False
def getPid(self):
return self.process.pid
def runCmdParallel0(cmdTaskList, maxProc=mp.cpu_count(), timeout=None, timeStep=0.3):
"""
Run several command line commands in parallel.
@deprecated: there is a newer implementation !!!
@see: runCmdParallel
@warning: use just for testing purposes when making use of the timeout option
@param cmdTaskList: list of command line tasks
@type cmdTaskList: list of TaskCmd
@param maxProc: maximum number of tasks that will be run in parallel at the same time
@param timeout: after this number of seconds, the process will be killed, (None if no timeout set)
@param timeStep: time interval in which processes will be actively checked whether they are running
@return: list of failed commands, tuple (command, return code, runtime, pid)
"""
counter = 0
failList = []
cmdInGen = True
cmdGen = iter(cmdTaskList) # generator of commands
procArray = {}
for i in range(maxProc):
procArray[i] = None
# loop until all processes finish (or are killed)
while True:
# run commands
if cmdInGen:
for i in range(maxProc):
if procArray[i] is None:
try:
task = cmdGen.next()
procArray[i] = _ProcHandlerCmd(task, timeout) # run process
counter += 1
print('Running "%s" cmd: %s' % (counter, task.cmd))
except StopIteration:
cmdInGen = False # there are no processes to be run
# sleep for a while
time.sleep(timeStep)
# check for finished processes and processes passed timeout
for i in range(maxProc):
ph = procArray[i]
if ph is not None: # there is a process in the slot
if ph.process.poll() is None:
# process running
ph.incRuntime(timeStep)
if ph.isTimeOut():
# process over time, kill it!
ph.process.kill()
print("Process (%s): %s killed! (after %ss)" % (ph.getPid(), ph.cmd, ph.runtime))
failList.append((ph.cmd, 9, ph.runtime, ph.getPid()))
procArray[i] = None # free slot
else:
# process finished
ph.process.wait()
if ph.process.returncode != 0:
print('Process(%s): "%s" ended with return code: "%s' % (
ph.getPid(), ph.cmd, ph.process.returncode))
failList.append((ph.cmd, ph.process.returncode, ph.runtime, ph.getPid()))
procArray[i] = None # free slot
# finish if no process is running and there is not process to be run
if len(set(procArray.values())) == 1 and not cmdInGen:
break
return failList
# TESTING !!!
def _testCmd(parallel=True):
print('Start: Test: runCmdParallel')
inDir = '/Users/ivan/Documents/nobackup/hsim01/562/a'
outDir = '/Users/ivan/Documents/nobackup/hsim01/562/b'
MUSCLE_BINARY = '/Users/ivan/Documents/work/tools/muscle/muscle3.8.31_i86darwin64'
assert os.path.isfile(MUSCLE_BINARY), 'Binnary file does not exist: %s' % MUSCLE_BINARY
cmdListA = []
for fileName in os.listdir(inDir):
cmd = '%s -in %s -out %s' % (MUSCLE_BINARY, os.path.join(inDir, fileName), os.path.join(outDir, fileName))
# print cmd
cmdListA.append(TaskCmd(cmd, outDir))
# break
if parallel:
failList = runCmdParallel(cmdListA)
else:
lock = mp.Lock()
failList = runCmdSerial(cmdListA, stdInErrLock=lock)
reportFailedCmd(failList)
print('Stop: Test: runCmdParallel')
def _f(a, t, n, c):
for i in range(n):
print(a)
cc = 333
for j in range(int(10000000*t)):
c /= cc
return t, c
def _testThread():
print('Start: Test: RunThreadParallel')
r = runThreadParallel([
TaskThread(_f, ('thread 1', 0.5, 5, 48749394857384234987)),
TaskThread(_f, ('thread 2', 0.7, 6, 57395769304867332349)),
TaskThread(_f, ('thread 3', 0.8, 7, 87263485768798234987)),
TaskThread(_f, ('thread 4', 0.9, 8, 38573947573957684485)),
TaskThread(_f, ('thread 5', 0.9, 8, 38573947573957684485)),
TaskThread(_f, ('thread 6', 1.0, 8, 38573947573957684485)),
TaskThread(_f, ('thread 7', 1.1, 8, 38573947573957684485))])
print(r)
print('Stop: Test: RunThreadParallel')
def _testMisc():
cmd = 'echo "a; echo "b" >&2'
lock = mp.Manager().Lock()
# print runCmdSerial([TaskCmd(cmd)], stdInErrLock=lock, verbose=True)
t = TaskThread(_runCmd, (TaskCmd(cmd), lock))
print(runThreadParallel([t]))
# if __name__ == "__main__":
# pass
# _testThread()
# _testCmd()
# _testMisc()
| 33.559055
| 118
| 0.612916
|
7fc8fd0d61e0c8cd7eaf0226f6444297d301c2fd
| 1,125
|
py
|
Python
|
KLSelection.py
|
KTloong/MyCookbook_Python
|
80c428fe9fb42c5e8328c666bc027adda61e100a
|
[
"MIT"
] | null | null | null |
KLSelection.py
|
KTloong/MyCookbook_Python
|
80c428fe9fb42c5e8328c666bc027adda61e100a
|
[
"MIT"
] | 1
|
2018-07-15T05:57:43.000Z
|
2018-07-15T05:57:43.000Z
|
KLSelection.py
|
KTloong/MyCookbook_Python
|
80c428fe9fb42c5e8328c666bc027adda61e100a
|
[
"MIT"
] | 3
|
2018-07-15T05:51:31.000Z
|
2018-07-15T08:21:03.000Z
|
import os
from tkinter import filedialog
class KLSelection:
#pop up a window to select a folder
def Sfolder(self):
dirname = filedialog.askdirectory(initialdir=os.getcwd(),title='Please select a folder')
return dirname #type of dirname is str
#pop up a window to select a file
#return a list, [fullname, filename]
def Sfile(self):
rawname = filedialog.askopenfilename(title="Please choose a file").split()#divided by space
fullname = rawname[0]+"/"+rawname[1]
filename = rawname[1]
del rawname
return fullname, filename
#get the filename list in a folder
def Gflist(self,fpath):
flist = os.listdir(fpath)
print(flist)
return flist #flist is a list of all the filename in the folder
#list concatenate fullname
def Catfullname(self,folder,flist):
flistfull = []
for name in flist:
flistfull.append(folder+"/"+name)
return flistfull
KLS = KLSelection()
dirfolder = KLS.Sfolder()
flist = KLS.Gflist(dirfolder)
flist_full = KLS.Catfullname(dirfolder,flist)
print(flist_full)
| 32.142857
| 99
| 0.667556
|
387b57dfe1901f1a52ddd5fa2bae31509c738539
| 917
|
bzl
|
Python
|
third_party/nasm/workspace.bzl
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
third_party/nasm/workspace.bzl
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
third_party/nasm/workspace.bzl
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
"""loads the nasm library, used by TF."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "nasm",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
"http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2",
"http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
],
sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
strip_prefix = "nasm-2.13.03",
build_file = "//third_party/nasm:BUILD.bazel",
system_build_file = "//third_party/nasm:BUILD.system",
)
| 50.944444
| 238
| 0.688113
|
33ed4b4eb6e60554e4b9307e1507bb21a37d62f8
| 4,786
|
py
|
Python
|
nova/objects/instance_fault.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 1
|
2015-11-30T19:44:00.000Z
|
2015-11-30T19:44:00.000Z
|
nova/objects/instance_fault.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 11
|
2017-06-19T01:28:55.000Z
|
2017-06-23T02:01:47.000Z
|
nova/objects/instance_fault.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 7
|
2015-01-20T10:30:08.000Z
|
2020-02-05T10:29:05.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_log import log as logging
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceFault(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added create()
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'code': fields.IntegerField(),
'message': fields.StringField(nullable=True),
'details': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, fault, db_fault):
# NOTE(danms): These are identical right now
for key in fault.fields:
fault[key] = db_fault[key]
fault._context = context
fault.obj_reset_changes()
return fault
@base.remotable_classmethod
def get_latest_for_instance(cls, context, instance_uuid):
db_faults = db.instance_fault_get_by_instance_uuids(context,
[instance_uuid])
if instance_uuid in db_faults and db_faults[instance_uuid]:
return cls._from_db_object(context, cls(),
db_faults[instance_uuid][0])
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
values = {
'instance_uuid': self.instance_uuid,
'code': self.code,
'message': self.message,
'details': self.details,
'host': self.host,
}
db_fault = db.instance_fault_create(self._context, values)
self._from_db_object(self._context, self, db_fault)
self.obj_reset_changes()
# Cells should only try sending a message over to nova-cells
# if cells is enabled and we're not the API cell. Otherwise,
# if the API cell is calling this, we could end up with
# infinite recursion.
if cells_opts.get_cell_type() == 'compute':
try:
cells_rpcapi.CellsAPI().instance_fault_create_at_top(
self._context, db_fault)
except Exception:
LOG.exception(_LE("Failed to notify cells of instance fault"))
@base.NovaObjectRegistry.register
class InstanceFaultList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceFault <= version 1.1
# Version 1.1: InstanceFault version 1.2
# Version 1.2: Added get_latest_by_instance_uuids() method
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('InstanceFault'),
}
@base.remotable_classmethod
def get_latest_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = db.instance_fault_get_by_instance_uuids(context,
instance_uuids,
latest=True)
db_faultlist = itertools.chain(*db_faultdict.values())
return base.obj_make_list(context, cls(context), objects.InstanceFault,
db_faultlist)
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = db.instance_fault_get_by_instance_uuids(context,
instance_uuids)
db_faultlist = itertools.chain(*db_faultdict.values())
return base.obj_make_list(context, cls(context), objects.InstanceFault,
db_faultlist)
| 39.229508
| 79
| 0.629127
|
78181c6e41647c09649d9191662e862ccc9bc77e
| 514
|
py
|
Python
|
docs/core/benchmarks/timer.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 3
|
2020-04-02T06:23:44.000Z
|
2020-08-13T20:32:31.000Z
|
docs/core/benchmarks/timer.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 1
|
2022-03-04T17:40:22.000Z
|
2022-03-04T17:40:22.000Z
|
docs/core/benchmarks/timer.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 1
|
2020-04-02T06:26:10.000Z
|
2020-04-02T06:26:10.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Helper stuff for benchmarks.
"""
import gc
gc.disable()
print 'Disabled GC'
def timeit(func, iter = 1000, *args, **kwargs):
"""
timeit(func, iter = 1000 *args, **kwargs) -> elapsed time
calls func iter times with args and kwargs, returns time elapsed
"""
from time import time as currentTime
r = range(iter)
t = currentTime()
for i in r:
func(*args, **kwargs)
return currentTime() - t
| 20.56
| 68
| 0.634241
|
e30314ce5c4ff22a445a6cdbad4515348391a05d
| 3,596
|
py
|
Python
|
pidtree_bcc/utils.py
|
Yelp/pidtree-bcc
|
ea54752b873687a159091d509df3a4f9dcc4a994
|
[
"BSD-3-Clause"
] | 20
|
2019-11-11T20:03:31.000Z
|
2022-01-26T05:53:34.000Z
|
pidtree_bcc/utils.py
|
Yelp/pidtree-bcc
|
ea54752b873687a159091d509df3a4f9dcc4a994
|
[
"BSD-3-Clause"
] | 29
|
2019-10-21T21:38:33.000Z
|
2022-03-09T14:44:41.000Z
|
pidtree_bcc/utils.py
|
Yelp/pidtree-bcc
|
ea54752b873687a159091d509df3a4f9dcc4a994
|
[
"BSD-3-Clause"
] | 4
|
2019-10-18T22:30:28.000Z
|
2021-09-10T02:36:04.000Z
|
import functools
import importlib
import inspect
import logging
import os
import socket
import struct
import sys
from typing import Callable
from typing import List
from typing import TextIO
from typing import Type
from typing import Union
import psutil
def crawl_process_tree(pid: int) -> List[dict]:
""" Takes a process and returns all process ancestry until the ppid is 0
:param int pid: child process ID
:return: yields dicts with pid, cmdline and username navigating up the tree
"""
result = []
while True:
if pid == 0:
break
proc = psutil.Process(pid)
result.append(
{
'pid': proc.pid,
'cmdline': ' '.join(proc.cmdline()),
'username': proc.username(),
},
)
pid = proc.ppid()
return result
def smart_open(filename: str = None, mode: str = 'r') -> TextIO:
""" File OR stdout open
:param str filename: filename
:param str mode: file opening mode
:return: file handle object
"""
if filename and filename != '-':
return open(filename, mode)
else:
return sys.stdout
def find_subclass(module_path: Union[str, List[str]], base_class: Type) -> Type:
""" Get child class from module
:param Union[str, List[str]] module_path: module path or list of paths in dot-notation
:param Type base_class: class the child class inherits from
:return: imported child class
:raise ImportError: module path not valid
:raise StopIteration: no class found
"""
if isinstance(module_path, str):
module_path = [module_path]
errors = ''
module = None
for path in module_path:
try:
module = importlib.import_module(path)
break
except ImportError as e:
errors += '\n' + str(e)
if module is None:
raise ImportError(
'Unable to load any module from {}: {}'
.format(module_path, errors),
)
return next(
obj for _, obj in inspect.getmembers(module)
if inspect.isclass(obj)
and issubclass(obj, base_class)
and obj != base_class
)
def ip_to_int(network: str) -> int:
""" Takes an IP and returns the unsigned integer encoding of the address
:param str network: ip address
:return: unsigned integer encoding
"""
return struct.unpack('=L', socket.inet_aton(network))[0]
def int_to_ip(encoded_ip: int) -> str:
""" Takes IP in interger representation and makes it human readable
:param int encoded_ip: integer encoded IP
:return: dot-notation IP
"""
return socket.inet_ntoa(struct.pack('<L', encoded_ip))
def never_crash(func: Callable) -> Callable:
""" Decorator for Thread targets which ensures the thread keeps
running by chatching any exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except Exception as e:
logging.error('Error executing {}: {}'.format(func.__name__, e))
return wrapper
def get_network_namespace(pid: int = None) -> int:
""" Get network namespace identifier
:param int pid: process ID (if not provided selects calling process)
:return: network namespace inum
"""
if not pid:
pid = 'self'
try:
ns_link = str(os.readlink('/proc/{}/ns/net'.format(pid)))
# format will be "net:[<inum>]"
return int(ns_link.strip()[5:-1])
except Exception:
return None
| 27.450382
| 90
| 0.619855
|
410755537c75587be76d71185a1b07e41b283385
| 1,023
|
py
|
Python
|
.venv/Scripts/imgmaker-script.py
|
jefferdo/gpt-3-client
|
7acbc5f518fe3fcb55d0bdcbf93fc87b103b1148
|
[
"MIT"
] | null | null | null |
.venv/Scripts/imgmaker-script.py
|
jefferdo/gpt-3-client
|
7acbc5f518fe3fcb55d0bdcbf93fc87b103b1148
|
[
"MIT"
] | null | null | null |
.venv/Scripts/imgmaker-script.py
|
jefferdo/gpt-3-client
|
7acbc5f518fe3fcb55d0bdcbf93fc87b103b1148
|
[
"MIT"
] | null | null | null |
#!"d:\users\jeewaka fernando\documents\github\gpt-3-client\.venv\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'imgmaker==0.2','console_scripts','imgmaker'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'imgmaker==0.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('imgmaker==0.2', 'console_scripts', 'imgmaker')())
| 30.088235
| 84
| 0.706745
|
08f98231fdadff83e46eef63d804abda684028cb
| 4,354
|
py
|
Python
|
experiment/build/gcb_build.py
|
chenju2k6/fuzzbench-1
|
be6d1f2590337e81ede7057ba2028d8b658f9a14
|
[
"Apache-2.0"
] | null | null | null |
experiment/build/gcb_build.py
|
chenju2k6/fuzzbench-1
|
be6d1f2590337e81ede7057ba2028d8b658f9a14
|
[
"Apache-2.0"
] | null | null | null |
experiment/build/gcb_build.py
|
chenju2k6/fuzzbench-1
|
be6d1f2590337e81ede7057ba2028d8b658f9a14
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for building things on Google Cloud Build for use in trials."""
import tempfile
from typing import Dict
from common import logs
from common import new_process
from common import utils
from common import yaml_utils
from experiment.build import build_utils
from experiment.build import docker_images
from experiment.build import generate_cloudbuild
BUILDER_STEP_IDS = [
'build-fuzzer-builder',
'build-fuzzer-benchmark-builder',
'build-fuzzer-benchmark-builder-intermediate',
]
CONFIG_DIR = 'config'
# Maximum time to wait for a GCB config to finish build.
GCB_BUILD_TIMEOUT = 4 * 60 * 60 # 4 hours.
# High cpu and memory configuration, matches OSS-Fuzz.
GCB_MACHINE_TYPE = 'n1-highcpu-32'
logger = logs.Logger('builder') # pylint: disable=invalid-name
def _get_buildable_images(fuzzer=None, benchmark=None):
return docker_images.get_images_to_build([fuzzer], [benchmark])
def build_base_images():
"""Build base images on GCB."""
buildable_images = _get_buildable_images()
image_templates = {
image: buildable_images[image] for image in ['base-image', 'worker']
}
config = generate_cloudbuild.create_cloudbuild_spec(image_templates,
build_base_images=True)
_build(config, 'base-images')
def build_coverage(benchmark):
"""Build coverage image for benchmark on GCB."""
buildable_images = _get_buildable_images(benchmark=benchmark)
image_templates = {
image_name: image_specs
for image_name, image_specs in buildable_images.items()
if (image_name == (benchmark + '-project-builder') or
image_specs['type'] == 'coverage')
}
config = generate_cloudbuild.create_cloudbuild_spec(image_templates,
benchmark=benchmark)
config_name = 'benchmark-{benchmark}-coverage'.format(benchmark=benchmark)
_build(config, config_name)
def _build(
config: Dict,
config_name: str,
timeout_seconds: int = GCB_BUILD_TIMEOUT) -> new_process.ProcessResult:
"""Submit build to GCB."""
with tempfile.NamedTemporaryFile() as config_file:
yaml_utils.write(config_file.name, config)
logger.debug('Using build configuration: %s' % config)
config_arg = '--config=%s' % config_file.name
machine_type_arg = '--machine-type=%s' % GCB_MACHINE_TYPE
# Use "s" suffix to denote seconds.
timeout_arg = '--timeout=%ds' % timeout_seconds
command = [
'gcloud',
'builds',
'submit',
str(utils.ROOT_DIR),
config_arg,
timeout_arg,
machine_type_arg,
]
# Don't write to stdout to make concurrent building faster. Otherwise
# writing becomes the bottleneck.
result = new_process.execute(command,
write_to_stdout=False,
kill_children=True,
timeout=timeout_seconds)
build_utils.store_build_logs(config_name, result)
return result
def build_fuzzer_benchmark(fuzzer: str, benchmark: str):
"""Builds |benchmark| for |fuzzer|."""
image_templates = {}
buildable_images = _get_buildable_images(fuzzer=fuzzer, benchmark=benchmark)
for image_name, image_specs in buildable_images.items():
if image_specs['type'] in ('base', 'coverage', 'dispatcher'):
continue
image_templates[image_name] = image_specs
config = generate_cloudbuild.create_cloudbuild_spec(image_templates)
config_name = 'benchmark-{benchmark}-fuzzer-{fuzzer}'.format(
benchmark=benchmark, fuzzer=fuzzer)
_build(config, config_name)
| 35.983471
| 80
| 0.673174
|
90de1d9fce7117ac822405a1a70d50672b31f0c8
| 25,668
|
py
|
Python
|
src/_pytest/capture.py
|
lewisbelcher/pytest
|
c928550e96ade8993170ad33b52bcae8768d154a
|
[
"MIT"
] | null | null | null |
src/_pytest/capture.py
|
lewisbelcher/pytest
|
c928550e96ade8993170ad33b52bcae8768d154a
|
[
"MIT"
] | null | null | null |
src/_pytest/capture.py
|
lewisbelcher/pytest
|
c928550e96ade8993170ad33b52bcae8768d154a
|
[
"MIT"
] | null | null | null |
"""
per-test stdout/stderr capturing mechanism.
"""
import collections
import contextlib
import io
import os
import sys
from io import UnsupportedOperation
from tempfile import TemporaryFile
from typing import Generator
from typing import Optional
import pytest
from _pytest.compat import CaptureAndPassthroughIO
from _pytest.compat import CaptureIO
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
if TYPE_CHECKING:
from typing_extensions import Literal
_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"]
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--capture",
action="store",
default="fd",
metavar="method",
choices=["fd", "sys", "no", "tee-sys"],
help="per-test capturing method: one of fd|sys|no|tee-sys.",
)
group._addoption(
"-s",
action="store_const",
const="no",
dest="capture",
help="shortcut for --capture=no.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_load_initial_conftests(early_config: Config):
ns = early_config.known_args_namespace
if ns.capture == "fd":
_py36_windowsconsoleio_workaround(sys.stdout)
_colorama_workaround()
_readline_workaround()
pluginmanager = early_config.pluginmanager
capman = CaptureManager(ns.capture)
pluginmanager.register(capman, "capturemanager")
# make sure that capturemanager is properly reset at final shutdown
early_config.add_cleanup(capman.stop_global_capturing)
# finally trigger conftest loading but while capturing (issue93)
capman.start_global_capturing()
outcome = yield
capman.suspend_global_capture()
if outcome.excinfo is not None:
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def _get_multicapture(method: "_CaptureMethod") -> "MultiCapture":
if method == "fd":
return MultiCapture(out=True, err=True, Capture=FDCapture)
elif method == "sys":
return MultiCapture(out=True, err=True, Capture=SysCapture)
elif method == "no":
return MultiCapture(out=False, err=False, in_=False)
elif method == "tee-sys":
return MultiCapture(out=True, err=True, in_=False, Capture=TeeSysCapture)
raise ValueError("unknown capturing method: {!r}".format(method))
class CaptureManager:
"""
Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each
test phase (setup, call, teardown). After each of those points, the captured output is obtained and
attached to the collection/runtest report.
There are two levels of capture:
* global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
during collection and each test phase.
* fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this
case special handling is needed to ensure the fixtures take precedence over the global capture.
"""
def __init__(self, method: "_CaptureMethod") -> None:
self._method = method
self._global_capturing = None
self._capture_fixture = None # type: Optional[CaptureFixture]
def __repr__(self):
return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
self._method, self._global_capturing, self._capture_fixture
)
def is_capturing(self):
if self.is_globally_capturing():
return "global"
if self._capture_fixture:
return "fixture %s" % self._capture_fixture.request.fixturename
return False
# Global capturing control
def is_globally_capturing(self):
return self._method != "no"
def start_global_capturing(self):
assert self._global_capturing is None
self._global_capturing = _get_multicapture(self._method)
self._global_capturing.start_capturing()
def stop_global_capturing(self):
if self._global_capturing is not None:
self._global_capturing.pop_outerr_to_orig()
self._global_capturing.stop_capturing()
self._global_capturing = None
def resume_global_capture(self):
# During teardown of the python process, and on rare occasions, capture
# attributes can be `None` while trying to resume global capture.
if self._global_capturing is not None:
self._global_capturing.resume_capturing()
def suspend_global_capture(self, in_=False):
cap = getattr(self, "_global_capturing", None)
if cap is not None:
cap.suspend_capturing(in_=in_)
def suspend(self, in_=False):
# Need to undo local capsys-et-al if it exists before disabling global capture.
self.suspend_fixture()
self.suspend_global_capture(in_)
def resume(self):
self.resume_global_capture()
self.resume_fixture()
def read_global_capture(self):
return self._global_capturing.readouterr()
# Fixture Control (it's just forwarding, think about removing this later)
@contextlib.contextmanager
def _capturing_for_request(
self, request: FixtureRequest
) -> Generator["CaptureFixture", None, None]:
"""
Context manager that creates a ``CaptureFixture`` instance for the
given ``request``, ensuring there is only a single one being requested
at the same time.
This is used as a helper with ``capsys``, ``capfd`` etc.
"""
if self._capture_fixture:
other_name = next(
k
for k, v in map_fixname_class.items()
if v is self._capture_fixture.captureclass
)
raise request.raiseerror(
"cannot use {} and {} at the same time".format(
request.fixturename, other_name
)
)
capture_class = map_fixname_class[request.fixturename]
self._capture_fixture = CaptureFixture(capture_class, request)
self.activate_fixture()
yield self._capture_fixture
self._capture_fixture.close()
self._capture_fixture = None
def activate_fixture(self):
"""If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
the global capture.
"""
if self._capture_fixture:
self._capture_fixture._start()
def deactivate_fixture(self):
"""Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
if self._capture_fixture:
self._capture_fixture.close()
def suspend_fixture(self):
if self._capture_fixture:
self._capture_fixture._suspend()
def resume_fixture(self):
if self._capture_fixture:
self._capture_fixture._resume()
# Helper context managers
@contextlib.contextmanager
def global_and_fixture_disabled(self):
"""Context manager to temporarily disable global and current fixture capturing."""
self.suspend()
try:
yield
finally:
self.resume()
@contextlib.contextmanager
def item_capture(self, when, item):
self.resume_global_capture()
self.activate_fixture()
try:
yield
finally:
self.deactivate_fixture()
self.suspend_global_capture(in_=False)
out, err = self.read_global_capture()
item.add_report_section(when, "stdout", out)
item.add_report_section(when, "stderr", err)
# Hooks
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector):
if isinstance(collector, pytest.File):
self.resume_global_capture()
outcome = yield
self.suspend_global_capture()
out, err = self.read_global_capture()
rep = outcome.get_result()
if out:
rep.sections.append(("Captured stdout", out))
if err:
rep.sections.append(("Captured stderr", err))
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self.item_capture("setup", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self.item_capture("call", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self.item_capture("teardown", item):
yield
@pytest.hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(self, excinfo):
self.stop_global_capturing()
@pytest.hookimpl(tryfirst=True)
def pytest_internalerror(self, excinfo):
self.stop_global_capturing()
@pytest.fixture
def capsys(request):
"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsys.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
with capman._capturing_for_request(request) as fixture:
yield fixture
@pytest.fixture
def capsysbinary(request):
"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsysbinary.readouterr()``
method calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
with capman._capturing_for_request(request) as fixture:
yield fixture
@pytest.fixture
def capfd(request):
"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
with capman._capturing_for_request(request) as fixture:
yield fixture
@pytest.fixture
def capfdbinary(request):
"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``byte`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
with capman._capturing_for_request(request) as fixture:
yield fixture
class CaptureFixture:
"""
Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
fixtures.
"""
def __init__(self, captureclass, request):
self.captureclass = captureclass
self.request = request
self._capture = None
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
def _start(self):
if self._capture is None:
self._capture = MultiCapture(
out=True, err=True, in_=False, Capture=self.captureclass
)
self._capture.start_capturing()
def close(self):
if self._capture is not None:
out, err = self._capture.pop_outerr_to_orig()
self._captured_out += out
self._captured_err += err
self._capture.stop_capturing()
self._capture = None
def readouterr(self):
"""Read and return the captured output so far, resetting the internal buffer.
:return: captured content as a namedtuple with ``out`` and ``err`` string attributes
"""
captured_out, captured_err = self._captured_out, self._captured_err
if self._capture is not None:
out, err = self._capture.readouterr()
captured_out += out
captured_err += err
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
return CaptureResult(captured_out, captured_err)
def _suspend(self):
"""Suspends this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.suspend_capturing()
def _resume(self):
"""Resumes this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.resume_capturing()
@contextlib.contextmanager
def disabled(self):
"""Temporarily disables capture while inside the 'with' block."""
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
yield
class EncodedFile(io.TextIOWrapper):
__slots__ = ()
@property
def name(self) -> str:
# Ensure that file.name is a string. Workaround for a Python bug
# fixed in >=3.7.4: https://bugs.python.org/issue36015
return repr(self.buffer)
@property
def mode(self) -> str:
# TextIOWrapper doesn't expose a mode, but at least some of our
# tests check it.
return self.buffer.mode.replace("b", "")
CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
class MultiCapture:
out = err = in_ = None
_state = None
_in_suspended = False
def __init__(self, out=True, err=True, in_=True, Capture=None):
if in_:
self.in_ = Capture(0)
if out:
self.out = Capture(1)
if err:
self.err = Capture(2)
def __repr__(self):
return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
self.out, self.err, self.in_, self._state, self._in_suspended,
)
def start_capturing(self):
self._state = "started"
if self.in_:
self.in_.start()
if self.out:
self.out.start()
if self.err:
self.err.start()
def pop_outerr_to_orig(self):
""" pop current snapshot out/err capture and flush to orig streams. """
out, err = self.readouterr()
if out:
self.out.writeorg(out)
if err:
self.err.writeorg(err)
return out, err
def suspend_capturing(self, in_=False):
self._state = "suspended"
if self.out:
self.out.suspend()
if self.err:
self.err.suspend()
if in_ and self.in_:
self.in_.suspend()
self._in_suspended = True
def resume_capturing(self):
self._state = "resumed"
if self.out:
self.out.resume()
if self.err:
self.err.resume()
if self._in_suspended:
self.in_.resume()
self._in_suspended = False
def stop_capturing(self):
""" stop capturing and reset capturing streams """
if self._state == "stopped":
raise ValueError("was already stopped")
self._state = "stopped"
if self.out:
self.out.done()
if self.err:
self.err.done()
if self.in_:
self.in_.done()
def readouterr(self) -> CaptureResult:
if self.out:
out = self.out.snap()
else:
out = ""
if self.err:
err = self.err.snap()
else:
err = ""
return CaptureResult(out, err)
class NoCapture:
EMPTY_BUFFER = None
__init__ = start = done = suspend = resume = lambda *args: None
class FDCaptureBinary:
"""Capture IO to/from a given os-level filedescriptor.
snap() produces `bytes`
"""
EMPTY_BUFFER = b""
_state = None
def __init__(self, targetfd, tmpfile=None):
self.targetfd = targetfd
try:
self.targetfd_save = os.dup(self.targetfd)
except OSError:
self.start = lambda: None
self.done = lambda: None
else:
self.start = self._start
self.done = self._done
if targetfd == 0:
assert not tmpfile, "cannot set tmpfile with stdin"
tmpfile = open(os.devnull, "r")
self.syscapture = SysCapture(targetfd)
else:
if tmpfile is None:
tmpfile = EncodedFile(
TemporaryFile(buffering=0),
encoding="utf-8",
errors="replace",
write_through=True,
)
if targetfd in patchsysdict:
self.syscapture = SysCapture(targetfd, tmpfile)
else:
self.syscapture = NoCapture()
self.tmpfile = tmpfile
self.tmpfile_fd = tmpfile.fileno()
def __repr__(self):
return "<{} {} oldfd={} _state={!r} tmpfile={}>".format(
self.__class__.__name__,
self.targetfd,
getattr(self, "targetfd_save", "<UNSET>"),
self._state,
hasattr(self, "tmpfile") and repr(self.tmpfile) or "<UNSET>",
)
def _start(self):
""" Start capturing on targetfd using memorized tmpfile. """
try:
os.fstat(self.targetfd_save)
except (AttributeError, OSError):
raise ValueError("saved filedescriptor not valid anymore")
os.dup2(self.tmpfile_fd, self.targetfd)
self.syscapture.start()
self._state = "started"
def snap(self):
self.tmpfile.seek(0)
res = self.tmpfile.buffer.read()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def _done(self):
""" stop capturing, restore streams, return original capture file,
seeked to position zero. """
targetfd_save = self.__dict__.pop("targetfd_save")
os.dup2(targetfd_save, self.targetfd)
os.close(targetfd_save)
self.syscapture.done()
self.tmpfile.close()
self._state = "done"
def suspend(self):
self.syscapture.suspend()
os.dup2(self.targetfd_save, self.targetfd)
self._state = "suspended"
def resume(self):
self.syscapture.resume()
os.dup2(self.tmpfile_fd, self.targetfd)
self._state = "resumed"
def writeorg(self, data):
""" write to original file descriptor. """
os.write(self.targetfd_save, data)
class FDCapture(FDCaptureBinary):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces text
"""
# Ignore type because it doesn't match the type in the superclass (bytes).
EMPTY_BUFFER = str() # type: ignore
def snap(self):
self.tmpfile.seek(0)
res = self.tmpfile.read()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def writeorg(self, data):
""" write to original file descriptor. """
data = data.encode("utf-8") # XXX use encoding of original stream
os.write(self.targetfd_save, data)
class SysCaptureBinary:
EMPTY_BUFFER = b""
_state = None
def __init__(self, fd, tmpfile=None):
name = patchsysdict[fd]
self._old = getattr(sys, name)
self.name = name
if tmpfile is None:
if name == "stdin":
tmpfile = DontReadFromInput()
else:
tmpfile = CaptureIO()
self.tmpfile = tmpfile
def __repr__(self):
return "<{} {} _old={} _state={!r} tmpfile={!r}>".format(
self.__class__.__name__,
self.name,
hasattr(self, "_old") and repr(self._old) or "<UNSET>",
self._state,
self.tmpfile,
)
def start(self):
setattr(sys, self.name, self.tmpfile)
self._state = "started"
def snap(self):
res = self.tmpfile.buffer.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
setattr(sys, self.name, self._old)
del self._old
self.tmpfile.close()
self._state = "done"
def suspend(self):
setattr(sys, self.name, self._old)
self._state = "suspended"
def resume(self):
setattr(sys, self.name, self.tmpfile)
self._state = "resumed"
def writeorg(self, data):
self._old.flush()
self._old.buffer.write(data)
self._old.buffer.flush()
class SysCapture(SysCaptureBinary):
EMPTY_BUFFER = str() # type: ignore[assignment] # noqa: F821
def snap(self):
res = self.tmpfile.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def writeorg(self, data):
self._old.write(data)
self._old.flush()
class TeeSysCapture(SysCapture):
def __init__(self, fd, tmpfile=None):
name = patchsysdict[fd]
self._old = getattr(sys, name)
self.name = name
if tmpfile is None:
if name == "stdin":
tmpfile = DontReadFromInput()
else:
tmpfile = CaptureAndPassthroughIO(self._old)
self.tmpfile = tmpfile
map_fixname_class = {
"capfd": FDCapture,
"capfdbinary": FDCaptureBinary,
"capsys": SysCapture,
"capsysbinary": SysCaptureBinary,
}
class DontReadFromInput:
encoding = None
def read(self, *args):
raise IOError(
"pytest: reading from stdin while output is captured! Consider using `-s`."
)
readline = read
readlines = read
__next__ = read
def __iter__(self):
return self
def fileno(self):
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
@property
def buffer(self):
return self
def _colorama_workaround():
"""
Ensure colorama is imported so that it attaches to the correct stdio
handles on Windows.
colorama uses the terminal on import time. So if something does the
first import of colorama while I/O capture is active, colorama will
fail in various ways.
"""
if sys.platform.startswith("win32"):
try:
import colorama # noqa: F401
except ImportError:
pass
def _readline_workaround():
"""
Ensure readline is imported so that it attaches to the correct stdio
handles on Windows.
Pdb uses readline support where available--when not running from the Python
prompt, the readline module is not imported until running the pdb REPL. If
running pytest with the --pdb option this means the readline module is not
imported until after I/O capture has been started.
This is a problem for pyreadline, which is often used to implement readline
support on Windows, as it does not attach to the correct handles for stdout
and/or stdin if they have been redirected by the FDCapture mechanism. This
workaround ensures that readline is imported before I/O capture is setup so
that it can attach to the actual stdin/out for the console.
See https://github.com/pytest-dev/pytest/pull/1281
"""
if sys.platform.startswith("win32"):
try:
import readline # noqa: F401
except ImportError:
pass
def _py36_windowsconsoleio_workaround(stream):
"""
Python 3.6 implemented unicode console handling for Windows. This works
by reading/writing to the raw console handle using
``{Read,Write}ConsoleW``.
The problem is that we are going to ``dup2`` over the stdio file
descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
handles used by Python to write to the console. Though there is still some
weirdness and the console handle seems to only be closed randomly and not
on the first call to ``CloseHandle``, or maybe it gets reopened with the
same handle value when we suspend capturing.
The workaround in this case will reopen stdio with a different fd which
also means a different handle by replicating the logic in
"Py_lifecycle.c:initstdio/create_stdio".
:param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
here as parameter for unittesting purposes.
See https://github.com/pytest-dev/py/issues/103
"""
if (
not sys.platform.startswith("win32")
or sys.version_info[:2] < (3, 6)
or hasattr(sys, "pypy_version_info")
):
return
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
if not hasattr(stream, "buffer"):
return
buffered = hasattr(stream.buffer, "raw")
raw_stdout = stream.buffer.raw if buffered else stream.buffer
if not isinstance(raw_stdout, io._WindowsConsoleIO):
return
def _reopen_stdio(f, mode):
if not buffered and mode[0] == "w":
buffering = 0
else:
buffering = -1
return io.TextIOWrapper(
open(os.dup(f.fileno()), mode, buffering),
f.encoding,
f.errors,
f.newlines,
f.line_buffering,
)
sys.stdin = _reopen_stdio(sys.stdin, "rb")
sys.stdout = _reopen_stdio(sys.stdout, "wb")
sys.stderr = _reopen_stdio(sys.stderr, "wb")
| 31.53317
| 117
| 0.624669
|
1a01f00893527289d306e954ceeccdd1de96e786
| 7,040
|
py
|
Python
|
kubernetes/client/models/extensions_v1beta1_pod_security_policy.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2019-10-07T13:54:36.000Z
|
2019-10-07T13:54:36.000Z
|
kubernetes/client/models/extensions_v1beta1_pod_security_policy.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 8
|
2020-12-21T03:18:50.000Z
|
2022-03-02T03:06:30.000Z
|
kubernetes/client/models/extensions_v1beta1_pod_security_policy.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2021-03-16T16:05:33.000Z
|
2021-03-16T16:05:33.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ExtensionsV1beta1PodSecurityPolicy(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'ExtensionsV1beta1PodSecurityPolicySpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""ExtensionsV1beta1PodSecurityPolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this ExtensionsV1beta1PodSecurityPolicy.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this ExtensionsV1beta1PodSecurityPolicy.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:return: The metadata of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ExtensionsV1beta1PodSecurityPolicy.
:param metadata: The metadata of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:return: The spec of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:rtype: ExtensionsV1beta1PodSecurityPolicySpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this ExtensionsV1beta1PodSecurityPolicy.
:param spec: The spec of this ExtensionsV1beta1PodSecurityPolicy. # noqa: E501
:type: ExtensionsV1beta1PodSecurityPolicySpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1PodSecurityPolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ExtensionsV1beta1PodSecurityPolicy):
return True
return self.to_dict() != other.to_dict()
| 34.679803
| 312
| 0.644176
|
a22aa61b544b3ee77aba3680dd8549372cc4ded3
| 96
|
py
|
Python
|
fpi/main.py
|
mentix02/3do
|
fc505a012e273af82f1503d62b2c293f6559f1a2
|
[
"MIT"
] | null | null | null |
fpi/main.py
|
mentix02/3do
|
fc505a012e273af82f1503d62b2c293f6559f1a2
|
[
"MIT"
] | null | null | null |
fpi/main.py
|
mentix02/3do
|
fc505a012e273af82f1503d62b2c293f6559f1a2
|
[
"MIT"
] | null | null | null |
import uvicorn
if __name__ == '__main__':
uvicorn.run('app:app', reload=True, debug=True)
| 16
| 51
| 0.6875
|
20601783ce40e0354b43bf9a8c49cc0b83bc7892
| 8,491
|
py
|
Python
|
test/package/test_save_load.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 5
|
2021-08-17T17:44:20.000Z
|
2021-08-21T05:03:42.000Z
|
test/package/test_save_load.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-06-25T22:00:31.000Z
|
2021-06-25T22:00:31.000Z
|
test/package/test_save_load.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-10-05T07:05:26.000Z
|
2021-10-05T07:05:26.000Z
|
import pickle
from io import BytesIO
from textwrap import dedent
from unittest import skipIf
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import run_tests, IS_FBCODE, IS_SANDCASTLE
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
from pathlib import Path
packaging_directory = Path(__file__).parent
class TestSaveLoad(PackageTestCase):
"""Core save_* and loading API tests."""
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_saving_source(self):
filename = self.temp()
with PackageExporter(filename) as he:
he.save_source_file("foo", str(packaging_directory / "module_a.py"))
he.save_source_file("foodir", str(packaging_directory / "package_a"))
hi = PackageImporter(filename)
foo = hi.import_module("foo")
s = hi.import_module("foodir.subpackage")
self.assertEqual(foo.result, "module_a")
self.assertEqual(s.result, "package_a.subpackage")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_saving_string(self):
filename = self.temp()
with PackageExporter(filename) as he:
src = dedent(
"""\
import math
the_math = math
"""
)
he.save_source_string("my_mod", src)
hi = PackageImporter(filename)
m = hi.import_module("math")
import math
self.assertIs(m, math)
my_mod = hi.import_module("my_mod")
self.assertIs(my_mod.math, math)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_save_module(self):
filename = self.temp()
with PackageExporter(filename) as he:
import module_a
import package_a
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
hi = PackageImporter(filename)
module_a_i = hi.import_module("module_a")
self.assertEqual(module_a_i.result, "module_a")
self.assertIsNot(module_a, module_a_i)
package_a_i = hi.import_module("package_a")
self.assertEqual(package_a_i.result, "package_a")
self.assertIsNot(package_a_i, package_a)
def test_dunder_imports(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
import package_b
obj = package_b.PackageBObject
he.intern("**")
he.save_pickle("res", "obj.pkl", obj)
buffer.seek(0)
hi = PackageImporter(buffer)
loaded_obj = hi.load_pickle("res", "obj.pkl")
package_b = hi.import_module("package_b")
self.assertEqual(package_b.result, "package_b")
math = hi.import_module("math")
self.assertEqual(math.__name__, "math")
xml_sub_sub_package = hi.import_module("xml.sax.xmlreader")
self.assertEqual(xml_sub_sub_package.__name__, "xml.sax.xmlreader")
subpackage_1 = hi.import_module("package_b.subpackage_1")
self.assertEqual(subpackage_1.result, "subpackage_1")
subpackage_2 = hi.import_module("package_b.subpackage_2")
self.assertEqual(subpackage_2.result, "subpackage_2")
subsubpackage_0 = hi.import_module("package_b.subpackage_0.subsubpackage_0")
self.assertEqual(subsubpackage_0.result, "subsubpackage_0")
def test_bad_dunder_imports(self):
"""Test to ensure bad __imports__ don't cause PackageExporter to fail."""
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_source_string(
"m", '__import__(these, unresolvable, "things", wont, crash, me)'
)
def test_save_module_binary(self):
f = BytesIO()
with PackageExporter(f) as he:
import module_a
import package_a
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
f.seek(0)
hi = PackageImporter(f)
module_a_i = hi.import_module("module_a")
self.assertEqual(module_a_i.result, "module_a")
self.assertIsNot(module_a, module_a_i)
package_a_i = hi.import_module("package_a")
self.assertEqual(package_a_i.result, "package_a")
self.assertIsNot(package_a_i, package_a)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_pickle(self):
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
filename = self.temp()
with PackageExporter(filename) as he:
he.intern("**")
he.save_pickle("obj", "obj.pkl", obj2)
hi = PackageImporter(filename)
# check we got dependencies
sp = hi.import_module("package_a.subpackage")
# check we didn't get other stuff
with self.assertRaises(ImportError):
hi.import_module("module_a")
obj_loaded = hi.load_pickle("obj", "obj.pkl")
self.assertIsNot(obj2, obj_loaded)
self.assertIsInstance(obj_loaded.obj, sp.PackageASubpackageObject)
self.assertIsNot(
package_a.subpackage.PackageASubpackageObject, sp.PackageASubpackageObject
)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_save_imported_module_fails(self):
"""
Directly saving/requiring an PackageImported module should raise a specific error message.
"""
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
f1 = self.temp()
with PackageExporter(f1) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
importer1 = PackageImporter(f1)
loaded1 = importer1.load_pickle("obj", "obj.pkl")
f2 = self.temp()
pe = PackageExporter(f2, importer=(importer1, sys_importer))
with self.assertRaisesRegex(ModuleNotFoundError, "torch.package"):
pe.save_module(loaded1.__module__)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_exporting_mismatched_code(self):
"""
If an object with the same qualified name is loaded from different
packages, the user should get an error if they try to re-save the
object with the wrong package's source code.
"""
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
f1 = self.temp()
with PackageExporter(f1) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj2)
importer1 = PackageImporter(f1)
loaded1 = importer1.load_pickle("obj", "obj.pkl")
importer2 = PackageImporter(f1)
loaded2 = importer2.load_pickle("obj", "obj.pkl")
f2 = self.temp()
def make_exporter():
pe = PackageExporter(f2, importer=[importer1, sys_importer])
# Ensure that the importer finds the 'PackageAObject' defined in 'importer1' first.
return pe
# This should fail. The 'PackageAObject' type defined from 'importer1'
# is not necessarily the same 'obj2's version of 'PackageAObject'.
pe = make_exporter()
with self.assertRaises(pickle.PicklingError):
pe.save_pickle("obj", "obj.pkl", obj2)
# This should also fail. The 'PackageAObject' type defined from 'importer1'
# is not necessarily the same as the one defined from 'importer2'
pe = make_exporter()
with self.assertRaises(pickle.PicklingError):
pe.save_pickle("obj", "obj.pkl", loaded2)
# This should succeed. The 'PackageAObject' type defined from
# 'importer1' is a match for the one used by loaded1.
pe = make_exporter()
pe.save_pickle("obj", "obj.pkl", loaded1)
if __name__ == "__main__":
run_tests()
| 35.232365
| 98
| 0.639854
|
a8d84983e03933fce2874ff92f2e00e097811130
| 2,666
|
py
|
Python
|
tests/functional/fkey/primary/test_insert_pk_05.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/functional/fkey/primary/test_insert_pk_05.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/functional/fkey/primary/test_insert_pk_05.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: functional.fkey.primary.insert_pk_05
# title: Check correct work fix with foreign key
# decription: Check foreign key work.
# Master transaction modifies primary key and committed
# Detail transaction inserts record in detail_table.
# Expected: no errors
# tracker_id:
# min_versions: []
# versions: 2.1
# qmid: functional.fkey.primary.ins_05
import pytest
from firebird.qa import db_factory, python_act, Action
from firebird.driver import tpb, Isolation
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE MASTER_TABLE (
ID INTEGER PRIMARY KEY,
INT_F INTEGER
);
CREATE TABLE DETAIL_TABLE (
ID INTEGER PRIMARY KEY,
FKEY INTEGER
);
ALTER TABLE DETAIL_TABLE ADD CONSTRAINT FK_DETAIL_TABLE FOREIGN KEY (FKEY) REFERENCES MASTER_TABLE (ID);
COMMIT;
INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10);
commit;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# TPB_master = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
# TPB_detail = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
#
# db_conn.begin(tpb=TPB_master)
# c = db_conn.cursor()
# c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1")
# db_conn.commit()
#
# #Create second connection for change detail table
# con_detail = kdb.connect(
# dsn=dsn.encode(),
# user=user_name.encode(),
# password=user_password.encode()
# )
#
# try:
# con_detail.begin(tpb=TPB_detail)
# c = con_detail.cursor()
# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)")
# con_detail.commit()
# except Exception, e:
# print (e[0])
#---
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.1')
def test_1(act_1: Action):
with act_1.db.connect() as con:
cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0)
con.begin(cust_tpb)
with con.cursor() as c:
c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1")
con.commit()
#Create second connection for change detail table
with act_1.db.connect() as con_detail:
con_detail.begin(cust_tpb)
with con_detail.cursor() as cd:
cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)")
con_detail.commit()
# Passed.
| 29.296703
| 104
| 0.645536
|
bf9951cd928035c785301e49ea0aa2baf342df5d
| 1,897
|
py
|
Python
|
models/proto.py
|
TalSchuster/FewRel
|
af68f52b13977ca29808c38a54995363f76cdcad
|
[
"MIT"
] | null | null | null |
models/proto.py
|
TalSchuster/FewRel
|
af68f52b13977ca29808c38a54995363f76cdcad
|
[
"MIT"
] | null | null | null |
models/proto.py
|
TalSchuster/FewRel
|
af68f52b13977ca29808c38a54995363f76cdcad
|
[
"MIT"
] | null | null | null |
import sys
import FewRel.fewshot_re_kit as fewshot_re_kit
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
class Proto(fewshot_re_kit.framework.FewShotREModel):
def __init__(self, sentence_encoder, dot=False):
fewshot_re_kit.framework.FewShotREModel.__init__(self, sentence_encoder)
# self.fc = nn.Linear(hidden_size, hidden_size)
self.drop = nn.Dropout()
self.dot = dot
def __dist__(self, x, y, dim):
if self.dot:
return (x * y).sum(dim)
else:
return -(torch.pow(x - y, 2)).sum(dim)
def __batch_dist__(self, S, Q):
return self.__dist__(S.unsqueeze(1), Q.unsqueeze(2), 3)
def forward(self, support, query, N, K, total_Q):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances in the query set
'''
support_emb = self.sentence_encoder(support) # (B * N * K, D), where D is the hidden size
query_emb = self.sentence_encoder(query) # (B * total_Q, D)
hidden_size = support_emb.size(-1)
support = self.drop(support_emb)
query = self.drop(query_emb)
support = support.view(-1, N, K, hidden_size) # (B, N, K, D)
query = query.view(-1, total_Q, hidden_size) # (B, total_Q, D)
# Prototypical Networks
# Ignore NA policy
support = torch.mean(support, 2) # Calculate prototype for each class
logits = self.__batch_dist__(support, query) # (B, total_Q, N)
minn, _ = logits.min(-1)
logits = torch.cat([logits, minn.unsqueeze(2) - 1], 2) # (B, total_Q, N + 1)
_, pred = torch.max(logits.view(-1, N + 1), 1)
return logits, pred
| 35.792453
| 97
| 0.615182
|
1e530612b4ac118fc6f484c3c14cf4848afccba9
| 852
|
py
|
Python
|
repos/system_upgrade/el7toel8/actors/addupgradebootentry/actor.py
|
fellipeh/leapp-repository
|
874e480fa84476fee37da4f184b47f2472748929
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el7toel8/actors/addupgradebootentry/actor.py
|
fellipeh/leapp-repository
|
874e480fa84476fee37da4f184b47f2472748929
|
[
"Apache-2.0"
] | 1
|
2020-04-03T07:41:43.000Z
|
2020-04-03T07:41:43.000Z
|
repos/system_upgrade/el7toel8/actors/addupgradebootentry/actor.py
|
pirat89/leapp-repository
|
aac51ab67ee22413a7ab1da6cec33e54b9357afd
|
[
"Apache-2.0"
] | null | null | null |
from leapp.actors import Actor
from leapp.libraries.actor.addupgradebootentry import add_boot_entry, fix_grub_config_error
from leapp.models import BootContent, GrubConfigError
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
class AddUpgradeBootEntry(Actor):
"""
Add new boot entry for Leapp provided initramfs.
Using new boot entry, Leapp can continue the upgrade process in the initramfs after reboot
"""
name = 'add_upgrade_boot_entry'
consumes = (BootContent, GrubConfigError)
produces = ()
tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
def process(self):
grub_config_error_detected = next(self.consume(GrubConfigError), GrubConfigError()).error_detected
if grub_config_error_detected:
fix_grub_config_error('/etc/default/grub')
add_boot_entry()
| 34.08
| 106
| 0.75939
|
bfc65b84b46b4048c09c35f9e00b226acc9854db
| 332
|
py
|
Python
|
users/urls.py
|
hyeseong-dev/wecode_2nd_project
|
a0bb90818837fcab4607823140251cee35a4d6e5
|
[
"Unlicense"
] | null | null | null |
users/urls.py
|
hyeseong-dev/wecode_2nd_project
|
a0bb90818837fcab4607823140251cee35a4d6e5
|
[
"Unlicense"
] | null | null | null |
users/urls.py
|
hyeseong-dev/wecode_2nd_project
|
a0bb90818837fcab4607823140251cee35a4d6e5
|
[
"Unlicense"
] | null | null | null |
from django.urls import path
from users.views import SendSmSView, VerificationView, MobileSignUp, MobileSignIn
urlpatterns = [
path('/sendsms', SendSmSView.as_view()),
path('/verification', VerificationView.as_view()),
path('/mobile_signup', MobileSignUp.as_view()),
path('/mobile_signin', MobileSignIn.as_view())
]
| 36.888889
| 81
| 0.740964
|
6851647060b612d04854c68a15434ccc60f8df50
| 670
|
py
|
Python
|
iiif_validator/tests/size_error_random.py
|
IIIF/image-api
|
890b120d19a7db1cde8a64da0ae6a986ae13e342
|
[
"Apache-2.0"
] | 20
|
2015-03-25T21:30:15.000Z
|
2016-06-17T07:45:07.000Z
|
iiif_validator/tests/size_error_random.py
|
IIIF/image-validator
|
890b120d19a7db1cde8a64da0ae6a986ae13e342
|
[
"Apache-2.0"
] | 50
|
2016-12-09T16:54:30.000Z
|
2022-03-11T23:16:57.000Z
|
iiif_validator/tests/size_error_random.py
|
IIIF/image-validator
|
890b120d19a7db1cde8a64da0ae6a986ae13e342
|
[
"Apache-2.0"
] | 13
|
2016-10-27T23:38:14.000Z
|
2021-03-15T04:38:26.000Z
|
from .test import BaseTest, ValidatorError
class Test_Size_Error_Random(BaseTest):
label = 'Random size gives 400'
level = 1
category = 3
versions = [u'1.0', u'1.1', u'2.0', u'3.0']
validationInfo = None
def run(self, result):
try:
url = result.make_url({'size': self.validationInfo.make_randomstring(6)})
error = result.fetch(url)
self.validationInfo.check('status', result.last_status, 400, result)
return result
except Exception as error:
raise ValidatorError('url-check', str(error), 400, result, 'Failed to get random size with url {}.'.format(url))
| 37.222222
| 124
| 0.614925
|
97a32bfb8ad4998ccebd4fa5eafb969727fba67d
| 2,980
|
py
|
Python
|
pytest_clearread.py
|
adrianer/pytest-clearread
|
9e0ffcf064bc752ced0056da9e5162590a4888ea
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_clearread.py
|
adrianer/pytest-clearread
|
9e0ffcf064bc752ced0056da9e5162590a4888ea
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_clearread.py
|
adrianer/pytest-clearread
|
9e0ffcf064bc752ced0056da9e5162590a4888ea
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from _pytest.terminal import TerminalReporter
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
'--clear', action="store_true", dest="clear", default=False,
help=(
"make pytest reporting output more readable"
)
)
@pytest.mark.trylast
def pytest_configure(config):
if hasattr(config, 'slaveinput'):
return # xdist slave, we are already active on the master
if config.option.clear:
# Get the standard terminal reporter plugin...
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
clear_reporter = ClearTerminalReporter(standard_reporter)
# ...and replace it with our own clearing reporter.
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(clear_reporter, 'terminalreporter')
def pytest_collection_modifyitems(config, items):
for item in items:
node = item.obj
parent = item.parent.obj
function_comment = node.__doc__ or ''
class_comment = parent.__doc__ or ''
item._nodeid = f"{item.nodeid};;;;;{class_comment};;;;;{function_comment}"
# @pytest.mark.tryfirst
# def pytest_runtest_teardown(item, nextitem):
# # This fixes py.test writing stuff after the progress indication
# print('\n')
class ClearTerminalReporter(TerminalReporter):
def __init__(self, reporter):
TerminalReporter.__init__(self, reporter.config)
self._tw = reporter._tw
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
real_node_id = nodeid.split(";;;;;")[0]
if len(nodeid.split(";;;;;")) > 1:
function_comment = nodeid.split(";;;;;")[-1]
else:
function_comment = None
if len(nodeid.split(";;;;;")) > 2:
class_comment = nodeid.split(";;;;;")[-2]
else:
class_comment = None
if self.showlongtestinfo:
line = self._locationline(real_node_id, *location)
self.write_sep("-", line, bold=True)
if class_comment:
self.write(class_comment)
self._tw.line()
self.write_sep("-", bold=True)
if function_comment:
self.write(function_comment)
self._tw.line()
self.write_sep("-", bold=True)
elif self.showfspath:
fsid = real_node_id.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
report.nodeid = report.nodeid.split(";;;;;")[0]
super().pytest_runtest_logreport(report=report)
self._tw.line()
def pytest_runtest_logfinish(self, nodeid):
nodeid = nodeid.split(";;;;;")[0]
super().pytest_runtest_logfinish(nodeid=nodeid)
| 35.47619
| 82
| 0.62651
|
84edca900cb44546b0f3fd3f7889f0760c6f4938
| 46
|
py
|
Python
|
strainer/widgets/tree/__init__.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 2
|
2020-04-10T22:20:14.000Z
|
2020-05-14T21:35:12.000Z
|
strainer/widgets/tree/__init__.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 31
|
2020-05-21T14:03:53.000Z
|
2022-03-11T12:04:50.000Z
|
strainer/widgets/tree/__init__.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 1
|
2022-03-09T18:19:55.000Z
|
2022-03-09T18:19:55.000Z
|
from .widget import Tree
__all__ = ('Tree',)
| 11.5
| 24
| 0.673913
|
6ad30d3c324870a0b88a37e68ea8317d3efd38a7
| 1,019
|
py
|
Python
|
apistar/server/handlers.py
|
TheCulliganMan/apistarserver
|
faaf9f6fddc97e9bd97a2ea785d041bbaac91ad5
|
[
"BSD-3-Clause"
] | 1
|
2018-09-27T14:50:47.000Z
|
2018-09-27T14:50:47.000Z
|
apistar/server/handlers.py
|
TheCulliganMan/apistar
|
faaf9f6fddc97e9bd97a2ea785d041bbaac91ad5
|
[
"BSD-3-Clause"
] | null | null | null |
apistar/server/handlers.py
|
TheCulliganMan/apistar
|
faaf9f6fddc97e9bd97a2ea785d041bbaac91ad5
|
[
"BSD-3-Clause"
] | null | null | null |
from apistar import App, http
from apistar.codecs import OpenAPICodec
from apistar.server.asgi import ASGIReceive, ASGIScope, ASGISend
from apistar.server.wsgi import WSGIEnviron, WSGIStartResponse
def serve_schema(app: App):
codec = OpenAPICodec()
content = codec.encode(app.document)
headers = {"Content-Type": "application/vnd.oai.openapi"}
return http.Response(content, headers=headers)
def serve_documentation(app: App):
template_name = "apistar/docs/index.html"
code_style = None # pygments_css('emacs')
return app.render_template(
template_name,
document=app.document,
langs=["javascript", "python"],
code_style=code_style,
)
def serve_static_wsgi(
app: App, environ: WSGIEnviron, start_response: WSGIStartResponse
):
return app.statics(environ, start_response)
async def serve_static_asgi(
app: App, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
):
instance = app.statics(scope)
await instance(receive, send)
| 28.305556
| 69
| 0.726202
|
7d1aab75f767995dd91f0f988c0c6806074e42cd
| 1,930
|
py
|
Python
|
var/spack/repos/builtin/packages/py-radical-pilot/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-radical-pilot/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-radical-pilot/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyRadicalPilot(PythonPackage):
"""RADICAL-Pilot is a Pilot system specialized in executing applications
composed of many computational tasks on high performance computing (HPC)
platforms."""
homepage = 'https://radical-cybertools.github.io'
git = 'https://github.com/radical-cybertools/radical.pilot.git'
pypi = 'radical.pilot/radical.pilot-1.11.2.tar.gz'
maintainers = ['andre-merzky']
version('develop', branch='devel')
version('1.11.2', sha256='9d239f747589b8ae5d6faaea90ea5304b6f230a1edfd8d4efb440bc3799c8a9d')
version('1.10.2', sha256='56e9d8b1ce7ed05eff471d7df660e4940f485027e5f353aa36fd17425846a499')
version('1.10.1', sha256='003f4c519b991bded31693026b69dd51547a5a69a5f94355dc8beff766524b3c')
version('1.9.2', sha256='7c872ac9103a2aed0c5cd46057048a182f672191e194e0fd42794b0012e6e947')
version('1.8.0', sha256='a4c3bca163db61206e15a2d820d9a64e888da5c72672448ae975c26768130b9d')
version('1.6.8', sha256='fa8fd3f348a68b54ee8338d5c5cf1a3d99c10c0b6da804424a839239ee0d313d')
version('1.6.7', sha256='6ca0a3bd3cda65034fa756f37fa05681d5a43441c1605408a58364f89c627970')
depends_on('py-radical-utils', type=('build', 'run'))
depends_on('py-radical-saga', type=('build', 'run'))
depends_on('py-radical-utils@1.8.4:', type=('build', 'run'), when='@1.11:')
depends_on('py-radical-saga@1.8.0:', type=('build', 'run'), when='@1.11:')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-pymongo@:3', type=('build', 'run'))
depends_on('py-setproctitle', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| 49.487179
| 97
| 0.706218
|
94927eb998e43a5f1ad4fb129a4bdb3f44a377ca
| 660
|
py
|
Python
|
fabfile.py
|
gsvaldes/toronjil
|
b5e7f063f377449dc375165367aed3025b6bb21f
|
[
"MIT"
] | null | null | null |
fabfile.py
|
gsvaldes/toronjil
|
b5e7f063f377449dc375165367aed3025b6bb21f
|
[
"MIT"
] | null | null | null |
fabfile.py
|
gsvaldes/toronjil
|
b5e7f063f377449dc375165367aed3025b6bb21f
|
[
"MIT"
] | null | null | null |
from __future__ import with_statement
from fabric.api import local, settings, abort, run, cd, put
from fabric.contrib.console import confirm
code_dir = '/home/django/toronjil'
def update_remote():
with cd(code_dir):
run('git pull origin master')
def create_remote_test_file():
with cd(code_dir):
run('touch hello.txt')
def move_local_settings():
with cd(code_dir):
put('toronjil/local_settings.py', 'toronjil')
def move_nginx_config():
with cd('/etc/nginx/'):
put('nginx', 'sites-available/toronjil')
def move_upstart_config():
with cd('/etc/'):
put('upstart', 'init/toronjil.conf')
| 22.758621
| 59
| 0.669697
|
404ce2416f3de51c2fb347fdc5aee2b80d8414aa
| 2,275
|
py
|
Python
|
airflow/providers/amazon/aws/operators/step_function_get_execution_output.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2019-05-07T06:46:55.000Z
|
2019-05-07T06:46:55.000Z
|
airflow/providers/amazon/aws/operators/step_function_get_execution_output.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 7
|
2021-06-28T20:24:56.000Z
|
2022-02-26T02:01:36.000Z
|
airflow/providers/amazon/aws/operators/step_function_get_execution_output.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2019-06-15T08:38:53.000Z
|
2019-06-15T08:38:53.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
from airflow.utils.decorators import apply_defaults
class StepFunctionGetExecutionOutputOperator(BaseOperator):
"""
An Operator that begins execution of an Step Function State Machine
Additional arguments may be specified and are passed down to the underlying BaseOperator.
.. seealso::
:class:`~airflow.models.BaseOperator`
:param execution_arn: ARN of the Step Function State Machine Execution
:type execution_arn: str
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:type aws_conn_id: str
"""
template_fields = ['execution_arn']
template_ext = ()
ui_color = '#f9c915'
@apply_defaults
def __init__(self, *, execution_arn: str, aws_conn_id='aws_default', region_name=None, **kwargs):
super().__init__(**kwargs)
self.execution_arn = execution_arn
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def execute(self, context):
hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
execution_status = hook.describe_execution(self.execution_arn)
execution_output = json.loads(execution_status['output']) if 'output' in execution_status else None
self.log.info('Got State Machine Execution output for %s', self.execution_arn)
return execution_output
| 38.559322
| 107
| 0.747253
|
588f08356337c10eebaf999675d45872c6176455
| 1,878
|
py
|
Python
|
tests/test_matchers.py
|
duailibe/asymmetric-matchers
|
854040e2d062360e432ec6fa6ee6bf9b9b4c897c
|
[
"Apache-2.0"
] | 2
|
2021-12-09T23:51:04.000Z
|
2021-12-10T14:27:10.000Z
|
tests/test_matchers.py
|
duailibe/asymmetric-matchers
|
854040e2d062360e432ec6fa6ee6bf9b9b4c897c
|
[
"Apache-2.0"
] | 2
|
2021-12-10T00:33:28.000Z
|
2022-01-23T16:22:18.000Z
|
tests/test_matchers.py
|
duailibe/asymmetric-matchers
|
854040e2d062360e432ec6fa6ee6bf9b9b4c897c
|
[
"Apache-2.0"
] | null | null | null |
import collections
import re
from asymmetric_matchers import (
anything,
any as any_,
string_matching,
list_containing,
dict_containing,
)
def test_anything():
assert 0 == anything()
assert "" == anything()
assert [] == anything()
assert {} == anything()
assert None != anything() # noqa: E711
def test_any():
assert 0 == any_(int)
assert "" == any_(str)
def test_string_matching():
assert "foobarbaz" == string_matching("bar")
assert "string" != string_matching("bar")
assert 1 != string_matching("1")
assert string_matching("bar") == "foobarbaz"
assert "foobar" == string_matching(r"ba[rz]")
assert "foobar" == string_matching(re.compile(r"ba[rz]"))
assert repr(string_matching("foo")) == "'foo'"
assert repr(string_matching(re.compile(r"foo"))) == "'foo'"
def test_dict_containing():
assert {"foo": "bar"} == dict_containing({})
assert {"foo": "bar"} == dict_containing({"foo": anything()})
assert {"notfoo": "bar"} != dict_containing({"foo": anything()})
assert dict_containing({"foo": "bar"}) == dict_containing({"foo": "bar"})
assert repr(dict_containing({"foo": "bar"})) == "dict_containing({'foo': 'bar'})"
assert dict_containing({"foo": "bar"}) != {"foo"}
assert collections.Counter(foo=4) == dict_containing({"foo": anything()})
def test_list_containing():
assert ["foo", "bar"] == list_containing([])
assert ["foo", "bar"] == list_containing(["foo"])
assert ["notfoo", "bar"] != list_containing(["foo"])
assert list_containing(["foo", "bar"]) == list_containing(["bar", "foo"])
assert repr(list_containing(["foo", "bar"])) == "list_containing(['foo', 'bar'])"
assert {"foo"} != list_containing(["foo"])
assert ["foo", [], "bar"] == list_containing([[]])
assert ["foo", [], "bar"] == list_containing(["foo"])
| 29.809524
| 85
| 0.612354
|
6a3c7e80026cadc74be9632abc965b29eb1a8056
| 466
|
py
|
Python
|
2015/day_17/containers_part2.py
|
ceronman/AdventOfCode2015
|
87b6d93df960045b5eff1ded107ac4e2719ee6e6
|
[
"MIT"
] | 4
|
2019-12-03T02:03:23.000Z
|
2019-12-20T11:36:00.000Z
|
2015/day_17/containers_part2.py
|
ceronman/AdventOfCode2015
|
87b6d93df960045b5eff1ded107ac4e2719ee6e6
|
[
"MIT"
] | null | null | null |
2015/day_17/containers_part2.py
|
ceronman/AdventOfCode2015
|
87b6d93df960045b5eff1ded107ac4e2719ee6e6
|
[
"MIT"
] | null | null | null |
def combinations(target, sizes, used):
accumulated = sum(used)
if accumulated < target:
for i, s in enumerate(sizes):
yield from combinations(target, sizes[i+1:], used + [s])
elif accumulated == target:
yield used
sizes = [int(x) for x in open('input.txt')]
all_combinations = list(combinations(150, sizes, []))
min_size = min(len(c) for c in all_combinations)
print(len([c for c in all_combinations if len(c) == min_size]))
| 35.846154
| 68
| 0.656652
|
817ef80a97eb42f2eb9b9bab171575d092b4c49b
| 7,241
|
py
|
Python
|
pubsub/google/cloud/pubsub_v1/subscriber/client.py
|
deryrahman/google-cloud-python
|
b55058c4b2328fde32f29bfd8ea04708fcc578e0
|
[
"Apache-2.0"
] | null | null | null |
pubsub/google/cloud/pubsub_v1/subscriber/client.py
|
deryrahman/google-cloud-python
|
b55058c4b2328fde32f29bfd8ea04708fcc578e0
|
[
"Apache-2.0"
] | null | null | null |
pubsub/google/cloud/pubsub_v1/subscriber/client.py
|
deryrahman/google-cloud-python
|
b55058c4b2328fde32f29bfd8ea04708fcc578e0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pkg_resources
import os
import grpc
from google.api_core import grpc_helpers
from google.cloud.pubsub_v1 import _gapic
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.gapic import subscriber_client
from google.cloud.pubsub_v1.subscriber import futures
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
__version__ = pkg_resources.get_distribution('google-cloud-pubsub').version
@_gapic.add_methods(subscriber_client.SubscriberClient,
blacklist=('streaming_pull',))
class Client(object):
"""A subscriber client for Google Cloud Pub/Sub.
This creates an object that is capable of subscribing to messages.
Generally, you can instantiate this client with no arguments, and you
get sensible defaults.
Args:
kwargs (dict): Any additional arguments provided are sent as keyword
keyword arguments to the underlying
:class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`.
Generally, you should not need to set additional keyword
arguments.
"""
def __init__(self, **kwargs):
# Sanity check: Is our goal to use the emulator?
# If so, create a grpc insecure channel with the emulator host
# as the target.
if os.environ.get('PUBSUB_EMULATOR_HOST'):
kwargs['channel'] = grpc.insecure_channel(
target=os.environ.get('PUBSUB_EMULATOR_HOST'),
)
# Use a custom channel.
# We need this in order to set appropriate default message size and
# keepalive options.
if 'channel' not in kwargs:
kwargs['channel'] = grpc_helpers.create_channel(
credentials=kwargs.pop('credentials', None),
target=self.target,
scopes=subscriber_client.SubscriberClient._DEFAULT_SCOPES,
options={
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
'grpc.keepalive_time_ms': 30000,
}.items(),
)
# Add the metrics headers, and instantiate the underlying GAPIC
# client.
self._api = subscriber_client.SubscriberClient(**kwargs)
@property
def target(self):
"""Return the target (where the API is).
Returns:
str: The location of the API.
"""
return subscriber_client.SubscriberClient.SERVICE_ADDRESS
@property
def api(self):
"""The underlying gapic API client."""
return self._api
def subscribe(
self, subscription, callback, flow_control=(),
scheduler=None):
"""Asynchronously start receiving messages on a given subscription.
This method starts a background thread to begin pulling messages from
a Pub/Sub subscription and scheduling them to be processed using the
provided ``callback``.
The ``callback`` will be called with an individual
:class:`google.cloud.pubsub_v1.subscriber.message.Message`. It is the
responsibility of the callback to either call ``ack()`` or ``nack()``
on the message when it finished processing. If an exception occurs in
the callback during processing, the exception is logged and the message
is ``nack()`` ed.
The ``flow_control`` argument can be used to control the rate of at
which messages are pulled. The settings are relatively conservative by
default to prevent "message hoarding" - a situation where the client
pulls a large number of messages but can not process them fast enough
leading it to "starve" other clients of messages. Increasing these
settings may lead to faster throughput for messages that do not take
a long time to process.
This method starts the receiver in the background and returns a
*Future* representing its execution. Waiting on the future (calling
``result()``) will block forever or until a non-recoverable error
is encountered (such as loss of network connectivity). Cancelling the
future will signal the process to shutdown gracefully and exit.
.. note:: This uses Pub/Sub's *streaming pull* feature. This feature
properties that may be surprising. Please take a look at
https://cloud.google.com/pubsub/docs/pull#streamingpull for
more details on how streaming pull behaves compared to the
synchronous pull method.
Example:
.. code-block:: python
from google.cloud import pubsub_v1
subscriber_client = pubsub_v1.SubscriberClient()
# existing subscription
subscription = subscriber_client.subscription_path(
'my-project-id', 'my-subscription')
def callback(message):
print(message)
message.ack()
future = subscriber.subscribe(
subscription, callback)
try:
future.result()
except KeyboardInterrupt:
future.cancel()
Args:
subscription (str): The name of the subscription. The
subscription should have already been created (for example,
by using :meth:`create_subscription`).
callback (Callable[~.pubsub_v1.subscriber.message.Message]):
The callback function. This function receives the message as
its only argument and will be called from a different thread/
process depending on the scheduling strategy.
flow_control (~.pubsub_v1.types.FlowControl): The flow control
settings. Use this to prevent situations where you are
inundated with too many messages at once.
scheduler (~.pubsub_v1.subscriber.scheduler.Scheduler): An optional
*scheduler* to use when executing the callback. This controls
how callbacks are executed concurrently.
Returns:
google.cloud.pubsub_v1.subscriber.futures.StreamingPullFuture: A
Future object that can be used to manage the background stream.
"""
flow_control = types.FlowControl(*flow_control)
manager = streaming_pull_manager.StreamingPullManager(
self, subscription, flow_control=flow_control, scheduler=scheduler)
future = futures.StreamingPullFuture(manager)
manager.open(callback)
return future
| 40.005525
| 79
| 0.660268
|
3abb2a229447e51ddfd43d2972e420e0676fd476
| 1,044
|
py
|
Python
|
best_api.py
|
adrianobrum/takecareofmyplant_bot
|
30828df1c769c840bcba9d272020995eebe4b1ec
|
[
"MIT"
] | null | null | null |
best_api.py
|
adrianobrum/takecareofmyplant_bot
|
30828df1c769c840bcba9d272020995eebe4b1ec
|
[
"MIT"
] | null | null | null |
best_api.py
|
adrianobrum/takecareofmyplant_bot
|
30828df1c769c840bcba9d272020995eebe4b1ec
|
[
"MIT"
] | null | null | null |
import urllib2
import time
import logfile
import os
import sys
from config import *
import xml.etree.ElementTree as ET
def api():
logfile.logger('debug', 'Geting the data from API...')
url = "http://www.stands4.com/services/v2/quotes.php?uid={0}&tokenid={1}&searchtype=RANDOM"
url = url.format(MYUID, MYTOKEN)
s = urllib2.urlopen(url)
logfile.logger('debug', 'URL content downloaded!')
contents = s.read()
file = open("quotes.xml", "w")
file.write(contents)
logfile.logger('debug', 'Quotes.xml updated')
file.close()
tree = ET.parse('quotes.xml')
logfile.logger('debug', 'XML Parsed')
root = tree.getroot()
control = root[0].text
time.sleep(2)
if (control == 'Daily Usage Exceeded'):
print ("**ERROR** The best API it's not availabe!")
logfile.logger('error', "The best API it's not available!")
return ('false')
else:
quote = root[0][0].text
return quote
logfile.logger('debug', 'Quote script closed...')
| 26.1
| 95
| 0.62069
|
b7f65b4510bbd3e900ebf04d6bee573c482db0db
| 224
|
py
|
Python
|
dns_check/datadog_checks/dns_check/__init__.py
|
seants/integrations-core
|
1e5548915fc24f1bbd095e845f0940c22992b09c
|
[
"BSD-3-Clause"
] | 4
|
2021-06-21T19:21:49.000Z
|
2021-06-23T21:21:55.000Z
|
dns_check/datadog_checks/dns_check/__init__.py
|
seants/integrations-core
|
1e5548915fc24f1bbd095e845f0940c22992b09c
|
[
"BSD-3-Clause"
] | 1
|
2018-08-15T05:50:17.000Z
|
2018-08-15T05:50:17.000Z
|
dns_check/datadog_checks/dns_check/__init__.py
|
seants/integrations-core
|
1e5548915fc24f1bbd095e845f0940c22992b09c
|
[
"BSD-3-Clause"
] | 1
|
2021-06-21T19:21:51.000Z
|
2021-06-21T19:21:51.000Z
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .dns_check import DNSCheck
__all__ = [
'__version__',
'DNSCheck'
]
| 18.666667
| 59
| 0.71875
|
7effeb3ba00ecad5201f2e19b714046b1533d33e
| 40,215
|
py
|
Python
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from io import BytesIO
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib2 import quote, unquote # type: ignore
import six
from ._quick_query_helper import DataLakeFileQueryReader
from ._shared.base_client import parse_connection_str
from ._shared.request_handlers import get_length, read_length
from ._shared.response_handlers import return_response_headers
from ._shared.uploads import IterStreamer
from ._upload_helper import upload_datalake_file
from ._generated.models import StorageErrorException
from ._download import StorageStreamDownloader
from ._path_client import PathClient
from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
convert_datetime_to_rfc1123
from ._deserialize import process_storage_error, deserialize_file_properties
from ._models import FileProperties, DataLakeFileQueryError
class DataLakeFileClient(PathClient):
"""A client to interact with the DataLake file, even if the file may not yet exist.
:ivar str url:
The full endpoint URL to the file system, including SAS token if used.
:ivar str primary_endpoint:
The full primary endpoint URL.
:ivar str primary_hostname:
The hostname of the primary endpoint.
:param str account_url:
The URI to the storage account.
:param file_system_name:
The file system for the directory or files.
:type file_system_name: str
:param file_path:
The whole file path, so that to interact with a specific file.
eg. "{directory}/{subdirectory}/{file}"
:type file_path: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string, and account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the URL already has a SAS token, specifying an explicit credential will take priority.
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_instantiate_client.py
:start-after: [START instantiate_file_client_from_conn_str]
:end-before: [END instantiate_file_client_from_conn_str]
:language: python
:dedent: 4
:caption: Creating the DataLakeServiceClient from connection string.
"""
def __init__(
self, account_url, # type: str
file_system_name, # type: str
file_path, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
credential=credential, **kwargs)
@classmethod
def from_connection_string(
cls, conn_str, # type: str
file_system_name, # type: str
file_path, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
): # type: (...) -> DataLakeFileClient
"""
Create DataLakeFileClient from a Connection String.
:param str conn_str:
A connection string to an Azure Storage account.
:param file_system_name: The name of file system to interact with.
:type file_system_name: str
:param directory_name: The name of directory to interact with. The directory is under file system.
:type directory_name: str
:param file_name: The name of file to interact with. The file is under directory.
:type file_name: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token, or the connection string already has shared
access key values. The value can be a SAS token string, and account shared access
key, or an instance of a TokenCredentials class from azure.identity.
Credentials provided here will take precedence over those in the connection string.
:return a DataLakeFileClient
:rtype ~azure.storage.filedatalake.DataLakeFileClient
"""
account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
return cls(
account_url, file_system_name=file_system_name, file_path=file_path,
credential=credential, **kwargs)
def create_file(self, content_settings=None, # type: Optional[ContentSettings]
metadata=None, # type: Optional[Dict[str, str]]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime]]
"""
Create a new file.
:param ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword str umask:
Optional and only valid if Hierarchical Namespace is enabled for the account.
When creating a file or directory and the parent folder does not have a default ACL,
the umask restricts the permissions of the file or directory to be created.
The resulting permission is given by p & ^u, where p is the permission and u is the umask.
For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
The umask must be specified in 4-digit octal notation (e.g. 0766).
:keyword str permissions:
Optional and only valid if Hierarchical Namespace
is enabled for the account. Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: response dict (Etag and last modified).
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START create_file]
:end-before: [END create_file]
:language: python
:dedent: 4
:caption: Create file.
"""
return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
def delete_file(self, **kwargs):
# type: (...) -> None
"""
Marks the specified file for deletion.
:keyword lease:
Required if the file has an active lease. Value can be a LeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: None
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START delete_file]
:end-before: [END delete_file]
:language: python
:dedent: 4
:caption: Delete file.
"""
return self._delete(**kwargs)
def get_file_properties(self, **kwargs):
# type: (**Any) -> FileProperties
"""Returns all user-defined metadata, standard HTTP properties, and
system properties for the file. It does not return the content of the file.
:keyword lease:
Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: FileProperties
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START get_file_properties]
:end-before: [END get_file_properties]
:language: python
:dedent: 4
:caption: Getting the properties for a file.
"""
return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access
def set_file_expiry(self, expiry_options, # type: str
expires_on=None, # type: Optional[Union[datetime, int]]
**kwargs):
# type: (str, Optional[Union[datetime, int]], **Any) -> None
"""Sets the time a file will expire and be deleted.
:param str expiry_options:
Required. Indicates mode of the expiry time.
Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
:param datetime or int expires_on:
The time to set the file to expiry.
When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
If the type of expires_on is datetime, it should be in UTC time.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
"""
try:
expires_on = convert_datetime_to_rfc1123(expires_on)
except AttributeError:
expires_on = str(expires_on)
self._datalake_client_for_blob_operation.path \
.set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access
def _upload_options( # pylint:disable=too-many-statements
self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Dict[str, Any]
encoding = kwargs.pop('encoding', 'UTF-8')
if isinstance(data, six.text_type):
data = data.encode(encoding) # type: ignore
if length is None:
length = get_length(data)
if isinstance(data, bytes):
data = data[:length]
if isinstance(data, bytes):
stream = BytesIO(data)
elif hasattr(data, 'read'):
stream = data
elif hasattr(data, '__iter__'):
stream = IterStreamer(data, encoding=encoding)
else:
raise TypeError("Unsupported data type: {}".format(type(data)))
validate_content = kwargs.pop('validate_content', False)
content_settings = kwargs.pop('content_settings', None)
metadata = kwargs.pop('metadata', None)
max_concurrency = kwargs.pop('max_concurrency', 1)
kwargs['properties'] = add_metadata_headers(metadata)
kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
if content_settings:
kwargs['path_http_headers'] = get_path_http_headers(content_settings)
kwargs['stream'] = stream
kwargs['length'] = length
kwargs['validate_content'] = validate_content
kwargs['max_concurrency'] = max_concurrency
kwargs['client'] = self._client.path
return kwargs
def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
overwrite=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Any]
"""
Upload data to a file.
:param data: Content to be uploaded to file
:param int length: Size of the data in bytes.
:param bool overwrite: to overwrite an existing file or not.
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword metadata:
Name-value pairs associated with the blob as metadata.
:paramtype metadata: dict(str, str)
:keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
When creating a file or directory and the parent folder does not have a default ACL,
the umask restricts the permissions of the file or directory to be created.
The resulting permission is given by p & ^u, where p is the permission and u is the umask.
For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
The umask must be specified in 4-digit octal notation (e.g. 0766).
:keyword str permissions: Optional and only valid if Hierarchical Namespace
is enabled for the account. Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword int chunk_size:
The maximum chunk size for uploading a file in chunks.
Defaults to 100*1024*1024, or 100MB.
:return: response dict (Etag and last modified).
"""
options = self._upload_options(
data,
length=length,
overwrite=overwrite,
**kwargs)
return upload_datalake_file(**options)
@staticmethod
def _append_data_options(data, offset, length=None, **kwargs):
# type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
if isinstance(data, six.text_type):
data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
if length is None:
length = get_length(data)
if length is None:
length, data = read_length(data)
if isinstance(data, bytes):
data = data[:length]
access_conditions = get_access_conditions(kwargs.pop('lease', None))
options = {
'body': data,
'position': offset,
'content_length': length,
'lease_access_conditions': access_conditions,
'validate_content': kwargs.pop('validate_content', False),
'timeout': kwargs.pop('timeout', None),
'cls': return_response_headers}
options.update(kwargs)
return options
def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
offset, # type: int
length=None, # type: Optional[int]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""Append data to the file.
:param data: Content to be appended to file
:param offset: start position of the data to be appended to.
:param length: Size of the data in bytes.
:keyword bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:return: dict of the response header
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START append_data]
:end-before: [END append_data]
:language: python
:dedent: 4
:caption: Append data to the file.
"""
options = self._append_data_options(
data,
offset,
length=length,
**kwargs)
try:
return self._client.path.append_data(**options)
except StorageErrorException as error:
process_storage_error(error)
@staticmethod
def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
# type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_mod_conditions(kwargs)
path_http_headers = None
if content_settings:
path_http_headers = get_path_http_headers(content_settings)
options = {
'position': offset,
'content_length': 0,
'path_http_headers': path_http_headers,
'retain_uncommitted_data': retain_uncommitted_data,
'close': kwargs.pop('close', False),
'lease_access_conditions': access_conditions,
'modified_access_conditions': mod_conditions,
'timeout': kwargs.pop('timeout', None),
'cls': return_response_headers}
options.update(kwargs)
return options
def flush_data(self, offset, # type: int
retain_uncommitted_data=False, # type: Optional[bool]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime]]
""" Commit the previous appended data.
:param offset: offset is equal to the length of the file after commit the
previous appended data.
:param bool retain_uncommitted_data: Valid only for flush operations. If
"true", uncommitted data is retained after the flush operation
completes; otherwise, the uncommitted data is deleted after the flush
operation. The default is false. Data at offsets less than the
specified position are written to the file when flush succeeds, but
this optional parameter allows data after the flush position to be
retained for a future flush operation.
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword bool close: Azure Storage Events allow applications to receive
notifications when files change. When Azure Storage Events are
enabled, a file changed event is raised. This event has a property
indicating whether this is the final change to distinguish the
difference between an intermediate flush to a file stream and the
final close of a file stream. The close query parameter is valid only
when the action is "flush" and change notifications are enabled. If
the value of close is "true" and the flush operation completes
successfully, the service raises a file change notification with a
property indicating that this is the final update (the file stream has
been closed). If "false" a change notification is raised indicating
the file has changed. The default is false. This query parameter is
set to true by the Hadoop ABFS driver to indicate that the file stream
has been closed."
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:return: response header in dict
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_file_system.py
:start-after: [START upload_file_to_file_system]
:end-before: [END upload_file_to_file_system]
:language: python
:dedent: 8
:caption: Commit the previous appended data.
"""
options = self._flush_data_options(
offset,
retain_uncommitted_data=retain_uncommitted_data, **kwargs)
try:
return self._client.path.flush_data(**options)
except StorageErrorException as error:
process_storage_error(error)
def download_file(self, offset=None, length=None, **kwargs):
# type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
"""Downloads a file to the StorageStreamDownloader. The readall() method must
be used to read all the content, or readinto() must be used to download the file into
a stream.
:param int offset:
Start of byte range to use for downloading a section of the file.
Must be set if length is provided.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:keyword lease:
If specified, download only succeeds if the file's lease is active
and matches this ID. Required if the file has an active lease.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:returns: A streaming object (StorageStreamDownloader)
:rtype: ~azure.storage.filedatalake.StorageStreamDownloader
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START read_file]
:end-before: [END read_file]
:language: python
:dedent: 4
:caption: Return the downloaded data.
"""
downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
return StorageStreamDownloader(downloader)
def rename_file(self, new_name, # type: str
**kwargs):
# type: (**Any) -> DataLakeFileClient
"""
Rename the source file.
:param str new_name: the new file name the user want to rename to.
The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
:keyword ~azure.storage.filedatalake.ContentSettings content_settings:
ContentSettings object used to set path properties.
:keyword source_lease: A lease ID for the source path. If specified,
the source path must have an active lease and the leaase ID must
match.
:paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword lease:
Required if the file/directory has an active lease. Value can be a LeaseClient object
or the lease ID as a string.
:type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: the renamed file client
:rtype: DataLakeFileClient
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_upload_download.py
:start-after: [START rename_file]
:end-before: [END rename_file]
:language: python
:dedent: 4
:caption: Rename the source file.
"""
new_name = new_name.strip('/')
new_file_system = new_name.split('/')[0]
new_path_and_token = new_name[len(new_file_system):].strip('/').split('?')
new_path = new_path_and_token[0]
try:
new_file_sas = new_path_and_token[1] or self._query_str.strip('?')
except IndexError:
if not self._raw_credential and new_file_system != self.file_system_name:
raise ValueError("please provide the sas token for the new file")
if not self._raw_credential and new_file_system == self.file_system_name:
new_file_sas = self._query_str.strip('?')
new_file_client = DataLakeFileClient(
"{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path,
credential=self._raw_credential or new_file_sas,
_hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
_location_mode=self._location_mode, require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function
)
new_file_client._rename_path( # pylint: disable=protected-access
'/{}/{}{}'.format(quote(unquote(self.file_system_name)),
quote(unquote(self.path_name)),
self._query_str),
**kwargs)
return new_file_client
def query_file(self, query_expression, **kwargs):
# type: (str, **Any) -> DataLakeFileQueryReader
"""
Enables users to select/project on datalake file data by providing simple query expressions.
This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data.
:param str query_expression:
Required. a query statement.
eg. Select * from DataLakeStorage
:keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error:
A function to be called on any processing errors returned by the service.
:keyword file_format:
Optional. Defines the serialization of the data currently stored in the file. The default is to
treat the file data as CSV data formatted in the default dialect. This can be overridden with
a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect.
:paramtype file_format:
~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
:keyword output_format:
Optional. Defines the output serialization for the data stream. By default the data will be returned
as it is represented in the file. By providing an output format, the file data will be reformatted
according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect.
:paramtype output_format:
~azure.storage.filedatalake.DelimitedTextDialect, ~azure.storage.filedatalake.DelimitedJsonDialect
or list[~azure.storage.filedatalake.ArrowDialect]
:keyword lease:
Required if the file has an active lease. Value can be a DataLakeLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A streaming object (DataLakeFileQueryReader)
:rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader
.. admonition:: Example:
.. literalinclude:: ../samples/datalake_samples_query.py
:start-after: [START query]
:end-before: [END query]
:language: python
:dedent: 4
:caption: select/project on datalake file data by providing simple query expressions.
"""
query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage")
blob_quick_query_reader = self._blob_client.query_blob(query_expression,
blob_format=kwargs.pop('file_format', None),
error_cls=DataLakeFileQueryError,
**kwargs)
return DataLakeFileQueryReader(blob_quick_query_reader)
| 53.335544
| 119
| 0.647793
|
946459d6576c44f5f5d7703eb602d68af146a0f2
| 2,861
|
py
|
Python
|
postprocess/postprocess_text.py
|
alberto-poncelas/tesseract_postprocess
|
5b44e360a3575c640d68d38d8f7acaf48b5aee2e
|
[
"MIT"
] | 6
|
2020-04-28T07:30:55.000Z
|
2021-07-07T02:47:19.000Z
|
postprocess/postprocess_text.py
|
alberto-poncelas/tesseract_postprocess
|
5b44e360a3575c640d68d38d8f7acaf48b5aee2e
|
[
"MIT"
] | null | null | null |
postprocess/postprocess_text.py
|
alberto-poncelas/tesseract_postprocess
|
5b44e360a3575c640d68d38d8f7acaf48b5aee2e
|
[
"MIT"
] | null | null | null |
import sys
import string
import nltk
import ConfigParser
import io
config = ConfigParser.ConfigParser()
config.readfp(open(r'../config'))
language_model_path = config.get('DEFAULT', 'LM')
input_data = sys.argv[1]
replacement_dict_path = sys.argv[2]
with io.open(input_data,"r", encoding="utf-8") as finp:
data = finp.readlines()
from nltk.tokenize.treebank import TreebankWordDetokenizer
detok=TreebankWordDetokenizer()
#Get Language Model
import kenlm
LMmodel = kenlm.Model(language_model_path)
##Get replacement dict
with io.open(replacement_dict_path,"r", encoding="utf-8") as frep:
alternative_dict_text = frep.readlines()
alternative_elems=[x.strip().split("->") for x in alternative_dict_text]
alternative_dict=dict()
for elem in alternative_elems:
#remove "[" and "]", then split
values=elem[1][2:-2].split("', '")
alternative_dict[elem[0]]=values
unrecognized_words_list=alternative_dict.keys()
#replace the i-th word in the sentence (as list) with the word w
def replace_word_list(listS,i,w):
new_sentence=list(listS)
new_sentence[i]=w
return new_sentence
#Return w2, with the same case as w1
def same_case(w,w2):
if w.isupper():
return w2.upper()
elif w[0].isupper():
return w2.capitalize()
else:
return w2
#Get the alternative sentences for the word i
def candidate_sentences_i(listS,i):
alternative_sentences=[listS]
replacements=[ [] ]
w_orig=listS[i]
w=w_orig.lower()
if w in unrecognized_words_list:
altern=alternative_dict[w]
for alt_lower in altern:
alt=same_case(w_orig,alt_lower) #Same case as the original word
alternative_sentences.append(replace_word_list(listS,i,alt))
replacements.append([w_orig,alt])
return alternative_sentences , replacements
#score a sentence (list of words) according to the LM
def sentence_score(listS):
return LMmodel.score(" ".join(listS).lower())
#get the best candidate according to the LM
def postproc_sentence(s):
listS=list(s.strip().split(" "))
replacements_list=[]
for i in range(0,len(listS)):
candidates ,replacements = candidate_sentences_i(listS,i)
scores=[sentence_score(x) for x in candidates]
best_sentence_idx=scores.index(max(scores))
#Replace the sentence with the best candidate
listS = candidates[best_sentence_idx]
replacements_list.append(replacements[best_sentence_idx] )
#return " ".join(listS),replacements_list
return detok.detokenize(listS),replacements_list
#Print postprocessed sentence (and the dictionaries of replacements done)
def format_replacements_list(replac_list):
replac_list_out=[]
for x in replac_list:
if len(x)>0:
replac_list_out.append(x[0]+"->"+x[1])
return "; ".join(replac_list_out)
for s in data:
post_sent , replac = postproc_sentence(s)
replac_str=format_replacements_list(replac)
print( (post_sent+"\t"+replac_str).encode("UTF8") )
| 20.435714
| 73
| 0.749738
|
f1d6d33396ac0fd163b23cf07e1e987c4cade913
| 1,071
|
py
|
Python
|
stubs.min/Autodesk/Revit/DB/Electrical_parts/PanelConfiguration.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/Autodesk/Revit/DB/Electrical_parts/PanelConfiguration.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/Autodesk/Revit/DB/Electrical_parts/PanelConfiguration.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class PanelConfiguration(Enum,IComparable,IFormattable,IConvertible):
"""
This enum declares the configuration for given panel schedule type.
enum PanelConfiguration,values: OneColumn (0),TwoColumnsCircuitsAcross (1),TwoColumnsCircuitsDown (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
OneColumn=None
TwoColumnsCircuitsAcross=None
TwoColumnsCircuitsDown=None
value__=None
| 30.6
| 215
| 0.691877
|
1eed11a533674cf0fcb436122d44286595322e7c
| 1,139
|
py
|
Python
|
chapter3/question1.py
|
AmatsukiUrato/study_python
|
e06afbc92a6f112ff9d3159bea4abcca6b17d280
|
[
"MIT"
] | null | null | null |
chapter3/question1.py
|
AmatsukiUrato/study_python
|
e06afbc92a6f112ff9d3159bea4abcca6b17d280
|
[
"MIT"
] | null | null | null |
chapter3/question1.py
|
AmatsukiUrato/study_python
|
e06afbc92a6f112ff9d3159bea4abcca6b17d280
|
[
"MIT"
] | 1
|
2019-04-11T04:08:03.000Z
|
2019-04-11T04:08:03.000Z
|
def go_sukiya(wallet):
menu = {
"牛丼ミニ": 290,
"牛丼並盛": 350,
"牛丼中盛": 480
}
print("いらっしゃいませ!")
print("何をご注文なさいますか?")
product = choice(menu)
print("承知いたしました")
if can_pay(wallet, menu[product]):
wallet = wallet - menu[product]
print("お釣りは" + str(wallet) + "です")
print("ありがとうございました!またのご来店をお待ちしております")
else:
print("支払えるだけのお金を持っていないようですね")
wallet = 0
return wallet
def can_pay(wallet, price):
if wallet == 0:
return False
if wallet - price < 0:
return False
return True
def choice(menu):
print(menu)
product = input()
if not product in menu:
print()
print("お店にある商品を選んでください")
product = choice(menu)
return product
if __name__ == "__main__":
wallet = 1000
while (True):
print("現在の所持金:" + str(wallet))
wallet = go_sukiya(wallet)
if wallet == 0:
print("もうすき家にはいけません...")
break
else:
print("もう一度すき家にいきますか?(y/n)")
is_visit = input()
if is_visit == "n":
break
| 19.982456
| 45
| 0.524144
|
a31de5417037e1c6b1be098094a6c94c36dd6007
| 1,180
|
py
|
Python
|
sfdc.py
|
mkesicki/sfdc-batch-dataloader
|
aa34d108c613cfa9bc11b758a767139c20a1308d
|
[
"MIT"
] | null | null | null |
sfdc.py
|
mkesicki/sfdc-batch-dataloader
|
aa34d108c613cfa9bc11b758a767139c20a1308d
|
[
"MIT"
] | null | null | null |
sfdc.py
|
mkesicki/sfdc-batch-dataloader
|
aa34d108c613cfa9bc11b758a767139c20a1308d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3 -u
import requests
import pprint
headers = {}
instance = ""
api_version = "v47.0"
def login(args):
url = "https://" + args.login_url.replace("https://", "")
print("Login to salesforce: " + url)
params = {
"client_id": args.client_id,
"client_secret": args.client_secret,
"username": args.username,
"password": args.password,
"grant_type": "password"
}
r = requests.post(url + "/services/oauth2/token", data=params)
access_token = r.json().get("access_token")
instance_url = r.json().get("instance_url")
global headers
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + access_token
}
print("Salesforce instance: " + instance_url)
global instance
instance = instance_url
return instance_url
def getFields(object):
url = instance + "/services/data/"+ api_version + "/sobjects/" + object + "/describe"
print("Get fields for: " + object)
r = requests.get(url, headers=headers)
fields = []
for field in r.json().get('fields'):
fields.append(field.get('name'))
return fields
| 20.701754
| 90
| 0.611017
|
f5f786ffa8468ca3a8fd0eb5d797b473c17335f4
| 467
|
py
|
Python
|
home/migrations/0013_auto_20200802_1712.py
|
Muia23/Share
|
32f3fa5d75ee147920eb5255de2fb07e960f0b0a
|
[
"Unlicense"
] | null | null | null |
home/migrations/0013_auto_20200802_1712.py
|
Muia23/Share
|
32f3fa5d75ee147920eb5255de2fb07e960f0b0a
|
[
"Unlicense"
] | 8
|
2021-04-08T19:40:41.000Z
|
2022-03-12T00:46:05.000Z
|
home/migrations/0013_auto_20200802_1712.py
|
Muia23/Share
|
32f3fa5d75ee147920eb5255de2fb07e960f0b0a
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-02 14:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0012_auto_20200802_1710'),
]
operations = [
migrations.AlterField(
model_name='post',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Location'),
),
]
| 23.35
| 101
| 0.635974
|
49ddabb28232e2824a0ee360b2cf0cf53e76ca94
| 4,305
|
py
|
Python
|
modules/pymol2/__init__.py
|
telamonian/pymol
|
8192e75bf3d4c1072d6bd399b7dacd065bf78a06
|
[
"CNRI-Python"
] | 1
|
2017-10-05T15:25:56.000Z
|
2017-10-05T15:25:56.000Z
|
modules/pymol2/__init__.py
|
telamonian/pymol
|
8192e75bf3d4c1072d6bd399b7dacd065bf78a06
|
[
"CNRI-Python"
] | null | null | null |
modules/pymol2/__init__.py
|
telamonian/pymol
|
8192e75bf3d4c1072d6bd399b7dacd065bf78a06
|
[
"CNRI-Python"
] | null | null | null |
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2007 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
from __future__ import absolute_import
import pymol
from pymol import _cmd
import threading
import sys
pymol2_lock = threading.RLock()
##
## FIXME: The PyMOL and SingletonPyMOL classes are partly redundant with the
## instance tracking of the "cmd" module (and the pymol2.cmd2.Cmd class),
## which also holds the _COb pointer.
##
class SingletonPyMOL:
'''
Start an exclusive PyMOL instance, only one instance allowed
'''
def idle(self):
return _cmd._idle(self._COb)
def getRedisplay(self, reset=True):
return _cmd._getRedisplay(self._COb, reset)
def reshape(self, width, height, force=0):
_cmd._reshape(self._COb, width, height, force)
def draw(self):
_cmd._draw(self._COb)
def button(self, button, state, x, y, modifiers):
_cmd._button(self._COb, button, state, x, y, modifiers)
def drag(self, x, y, modifiers):
_cmd._drag(self._COb, x, y, modifiers)
def start(self):
cmd = pymol.cmd
if cmd._COb is not None:
raise RuntimeError('can only start SingletonPyMOL once')
with pymol2_lock:
cmd._COb = _cmd._new(pymol, pymol.invocation.options)
_cmd._start(cmd._COb, cmd)
# this instance tracking is redundant with the "cmd" module itself
self._COb = cmd._COb
self.cmd = cmd
def stop(self):
with pymol2_lock:
_cmd._stop(self._COb)
_cmd._del(self._COb)
pymol.cmd._COb = None
class PyMOL(SingletonPyMOL):
'''
Start a non-exclusive PyMOL instance, multiple instances are possible
'''
def __getattr__(self, key):
# Make this a proxy to the "pymol" module.
return getattr(pymol, key)
def __init__(self,scheme=None): # initialize a PyMOL instance
from .cmd2 import Cmd
with pymol2_lock:
pymol._init_internals(self)
self.invocation = self._invocation
options = self.invocation.options
if scheme!=None: #
if scheme == 'presentation':
options.quiet = 0
options.show_splash = 0
options.external_gui = 0
options.internal_feedback = 0
options.no_quit = 1
options.internal_gui = 0
options.presentation = 1
elif scheme == 'widget': # An embedded widget of some type
options.quiet = 0
options.show_splash = 0
options.external_gui = 0
options.internal_feedback = 1
options.no_quit = 1
else:
options.show_splash = 0 # suppress this annoyance by default
self._COb = _cmd._new(self,self.invocation.options)
# initialize the cmd API
self.cmd = Cmd(self,self._COb)
# begin assembling the instance member by member
self.glutThread = None
def __del__(self):
_cmd._del(self._COb)
self.cmd.__dict__.clear()
def start(self):
with pymol2_lock:
_cmd._start(self._COb, self.cmd)
def startWithTclTk(self, gui = None, skin=None):
self.start()
if gui == None:
gui = self.invocation.options.gui
if skin == None:
skin = self.invocation.options.skin
poll = 0
__import__(gui)
sys.modules[gui].__init__(self,poll,skin)
def stop(self):
_cmd._stop(self._COb)
| 30.316901
| 76
| 0.54216
|
b47c7b63d900629404d67f045640c684b5286f80
| 39,422
|
py
|
Python
|
Server.py
|
mdkinney/edk2-email-archive-webhook
|
379ff29ac40f05f09dbf9fac35e8072e09a26a07
|
[
"BSD-2-Clause-Patent"
] | 2
|
2021-01-15T18:37:25.000Z
|
2022-01-12T03:22:57.000Z
|
Server.py
|
mdkinney/edk2-email-archive-webhook
|
379ff29ac40f05f09dbf9fac35e8072e09a26a07
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
Server.py
|
mdkinney/edk2-email-archive-webhook
|
379ff29ac40f05f09dbf9fac35e8072e09a26a07
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
## @file
# Assign reviewers to commits in a GitHub pull request based on assignments
# documented in Maintainers.txt and generate email archive of all review
# activities.
#
# Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
TianoCore GitHub Webhook
'''
from __future__ import print_function
import os
import sys
import argparse
import hmac
import datetime
from json import dumps
from flask import Flask, request, abort
from github import Github
from GetMaintainers import GetMaintainers
from GetMaintainers import ParseMaintainerAddresses
from SendEmails import SendEmails
from FetchPullRequest import FetchPullRequest
from FetchPullRequest import FormatPatch
from FetchPullRequest import FormatPatchSummary
#
# Globals for help information
#
__prog__ = 'TianoCoreGitHubWebHookServer'
__copyright__ = 'Copyright (c) 2020, Intel Corporation. All rights reserved.'
__description__ = 'Assign reviewers to commits in a GitHub pull request based on assignments documented in Maintainers.txt and generate email archive of all review activities.\n'
GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
GITHUB_WEBHOOK_SECRET = os.environ['GITHUB_WEBHOOK_SECRET']
GITHUB_WEBHOOK_ROUTE = os.environ['GITHUB_WEBHOOK_ROUTE']
GITHUB_WEBHOOK_PORT_NUMBER = int(os.environ['GITHUB_WEBHOOK_PORT_NUMBER'])
GITHUB_REPO_WHITE_LIST = os.environ['GITHUB_REPO_WHITE_LIST']
REVIEW_REQUEST = '[CodeReview] Review-request @'
REVIEWED_BY = '[CodeReview] Reviewed-by'
SERIES_REVIEWED_BY = '[CodeReview] Series-reviewed-by'
ACKED_BY = '[CodeReview] Acked-by'
TESTED_BY = '[CodeReview] Acked-by'
def UpdatePullRequestCommitReviewers (Commit, GitHubIdList):
#
# Retrieve all review comments for this commit
#
Body = []
for Comment in Commit.get_comments():
if Comment.body is not None:
Body = Body + [Line.strip() for Line in Comment.body.splitlines()]
#
# Determine if any reviewers need to be added to this commit
#
AddReviewers = []
for Reviewer in GitHubIdList:
if REVIEW_REQUEST + Reviewer not in Body:
AddReviewers.append(REVIEW_REQUEST + Reviewer + '\n')
if AddReviewers != []:
for Reviewer in AddReviewers:
print (' ' + ' '.join(AddReviewers))
#
# NOTE: This triggers a recursion into this webhook that needs to be
# ignored
#
Commit.create_comment (''.join(AddReviewers))
#
# Return True if reviewers were added to this commit
#
return AddReviewers != []
def UpdatePullRequestReviewers (Hub, HubRepo, HubPullRequest, PullRequestGitHubIdList):
#
# Get list of reviewers already requested for the pull request
#
RequestedReviewers = HubPullRequest.get_review_requests()[0]
#
# Determine if any reviewers need to be removed
#
RemoveReviewerList = []
for Reviewer in RequestedReviewers:
if Reviewer.login not in PullRequestGitHubIdList:
print ('pr[%d]' % (HubPullRequest.number), 'Remove Reviewer : @' + Reviewer.login)
RemoveReviewerList.append(Reviewer.login)
#
# Determine if any reviewers need to be added
#
AddReviewerList = []
Collaborators = HubRepo.get_collaborators()
for Login in PullRequestGitHubIdList:
Reviewer = Hub.get_user(Login)
if Reviewer == HubPullRequest.user:
print ('pr[%d]' % (HubPullRequest.number), 'Reviewer is Author : @' + Reviewer.login)
elif Reviewer not in RequestedReviewers:
if Reviewer in Collaborators:
print ('pr[%d]' % (HubPullRequest.number), 'Add Reviewer : @' + Reviewer.login)
AddReviewerList.append (Reviewer.login)
else:
print ('pr[%d]' % (HubPullRequest.number), 'Reviewer is not a collaborator : @' + Reviewer.login)
else:
print ('pr[%d]' % (HubPullRequest.number), 'Already Assigned : @' + Reviewer.login)
#
# Update review requests
#
if RemoveReviewerList != []:
#
# NOTE: This may trigger recursion into this webhook
#
HubPullRequest.delete_review_request (RemoveReviewerList)
if AddReviewerList != []:
#
# NOTE: This may trigger recursion into this webhook
#
HubPullRequest.create_review_request (AddReviewerList)
def GetReviewComments(Comment, ReviewComments, CommentIdDict):
if Comment in ReviewComments:
return
ReviewComments.append(Comment)
#
# Add peer comments
#
if Comment.pull_request_review_id:
for PeerComment in CommentIdDict.values():
if PeerComment.pull_request_review_id == Comment.pull_request_review_id:
GetReviewComments (PeerComment, ReviewComments, CommentIdDict)
#
# Add child comments
#
for ChildComment in CommentIdDict.values():
if ChildComment.in_reply_to_id == Comment.id:
GetReviewComments (ChildComment, ReviewComments, CommentIdDict)
#
# Add parent comment
#
if Comment.in_reply_to_id and Comment.in_reply_to_id in CommentIdDict:
ParentComment = CommentIdDict[Comment.in_reply_to_id]
GetReviewComments (ParentComment, ReviewComments, CommentIdDict)
def GetReviewCommentsFromReview(Review, CommentId, CommentInReplyToId, CommentIdDict):
ReviewComments = []
for Comment in CommentIdDict.values():
if Review:
if Comment.pull_request_review_id == Review.id:
GetReviewComments (Comment, ReviewComments, CommentIdDict)
if CommentId:
if Comment.in_reply_to_id == CommentId:
GetReviewComments (Comment, ReviewComments, CommentIdDict)
if CommentInReplyToId:
if Comment.id == CommentInReplyToId:
GetReviewComments (Comment, ReviewComments, CommentIdDict)
return ReviewComments
application = Flask(__name__)
@application.route(GITHUB_WEBHOOK_ROUTE, methods=['GET', 'POST'])
def index():
"""
Main WSGI application entry.
"""
if args.Verbose:
print (request.headers)
# Only POST is implemented
if request.method != 'POST':
print (501, "only POST is supported")
abort(501, "only POST is supported")
# Enforce secret, so not just anybody can trigger these hooks
secret = GITHUB_WEBHOOK_SECRET
if secret:
# Only SHA1 is supported
header_signature = request.headers.get('X-Hub-Signature')
if header_signature is None:
print (403, "No header signature found")
abort(403, "No header signature found")
sha_name, signature = header_signature.split('=')
if sha_name != 'sha1':
print(501, "Only SHA1 is supported")
abort(501, "Only SHA1 is supported")
# HMAC requires the key to be bytes, but data is string
mac = hmac.new(bytes(secret, 'utf-8'), msg=request.data, digestmod='sha1')
# Python does not have hmac.compare_digest prior to 2.7.7
if sys.hexversion >= 0x020707F0:
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
print(403, "hmac compare digest failed")
abort(403)
else:
# What compare_digest provides is protection against timing
# attacks; we can live without this protection for a web-based
# application
if not str(mac.hexdigest()) == str(signature):
print(403, "hmac compare digest failed")
abort(403)
# Implement ping
event = request.headers.get('X-GitHub-Event', 'ping')
if event == 'ping':
print ('ping request. respond with pong')
return dumps({'msg': 'pong'})
# Implement meta
if event == 'meta':
return dumps({'msg': 'meta'})
# Gather data
try:
payload = request.get_json()
except Exception:
print(400, "Request parsing failed")
abort(400, "Request parsing failed")
#
# Skip push and create events
#
if event in ['push', 'create']:
print ('skip event', event)
return dumps({'status': 'skipped'})
#
# Skip payload that does not provide an action
#
if 'action' not in payload:
print ('skip payload that does not provide an action. event =', event)
return dumps({'status': 'skipped'})
#
# Skip payload that does not provide a repository
#
if 'repository' not in payload:
print ('skip payload that does not provide a repository. event=', event)
return dumps({'status': 'skipped'})
#
# Skip payload that does not provide a repository full name
#
if 'full_name' not in payload['repository']:
print ('skip payload that does not provide a repository full name. event=', event)
return dumps({'status': 'skipped'})
#
# Skip requests that are not in GITHUB_REPO_WHITE_LIST
#
if payload['repository']['full_name'] not in GITHUB_REPO_WHITE_LIST:
print ('skip event for different repo')
return dumps({'status': 'skipped'})
print ('----> Process Event <----', event, payload['action'])
############################################################################
# Process issue comment events
# These are comments against the entire pull request
# Quote Patch #0 Body and add comment below below with commenters GitHubID
############################################################################
if event == 'issue_comment':
action = payload['action']
if action not in ['created', 'edited', 'deleted']:
print ('skip issue_comment event with action other than created or edited')
return dumps({'status': 'skipped'})
if 'pull_request' not in payload['issue']:
print ('skip issue_comment event without an associated pull request')
return dumps({'status': 'skipped'})
#
# Use GitHub API to get Pull Request
#
try:
HubRepo = Hub.get_repo (payload['repository']['full_name'])
HubPullRequest = HubRepo.get_pull(payload['issue']['number'])
except:
#
# Skip requests if the PyGitHub objects can not be retrieved
#
print ('skip issue_comment event for which the PyGitHub objects can not be retrieved')
return dumps({'status': 'skipped'})
#
# Skip pull request that is not open
#
if HubPullRequest.state != 'open':
print ('Skip issue_comment event against a pull request that is not open')
return dumps({'status': 'skipped'})
#
# Skip pull request with a base repo that is different than the expected repo
#
if HubPullRequest.base.repo.full_name != HubRepo.full_name:
print ('Skip issue_comment event against a different repo', HubPullRequest.base.repo.full_name)
return dumps({'status': 'skipped'})
#
# Skip pull requests with a base branch that is not the default branch
#
if HubPullRequest.base.ref != HubRepo.default_branch:
print ('Skip issue_comment event against non-default base branch', HubPullRequest.base.ref)
return dumps({'status': 'skipped'})
#
# Fetch the git commits for the pull request and return a git repo
# object and the contents of Maintainers.txt
#
GitRepo, Maintainers = FetchPullRequest (HubPullRequest)
if GitRepo is None or Maintainers is None:
print ('Skip issue_comment event that can not be fetched')
return dumps({'status': 'skipped'})
#
# Count head_ref_force_pushed and reopened events to determine the
# version of the patch series.
#
PatchSeriesVersion = 1;
Events = HubPullRequest.get_issue_events()
for Event in Events:
if Event.event in ['head_ref_force_pushed', 'reopened']:
PatchSeriesVersion = PatchSeriesVersion + 1;
PullRequestAddressList = []
for Commit in HubPullRequest.get_commits():
#
# Get list of files modifies by commit from GIT repository
#
CommitFiles = GitRepo.commit(Commit.sha).stats.files
#
# Get maintainers and reviewers for all files in this commit
#
Addresses = GetMaintainers (Maintainers, CommitFiles)
AddressList, GitHubIdList, EmailList = ParseMaintainerAddresses(Addresses)
PullRequestAddressList = list(set(PullRequestAddressList + AddressList))
#
# Generate the summary email patch #0 with body of email prefixed with >.
#
UpdateDeltaTime = 0
if action == 'edited':
#
# The delta time is the number of seconds from the time the comment
# was created to the time the comment was edited
#
UpdatedAt = datetime.datetime.strptime(payload['comment']['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
CreatedAt = datetime.datetime.strptime(payload['comment']['created_at'], "%Y-%m-%dT%H:%M:%SZ")
UpdateDeltaTime = (UpdatedAt - CreatedAt).seconds
if action == 'deleted':
UpdateDeltaTime = -1
Summary = FormatPatchSummary (
event,
GitRepo,
HubRepo,
HubPullRequest,
PullRequestAddressList,
PatchSeriesVersion,
CommitRange = HubPullRequest.base.sha + '..' + HubPullRequest.head.sha,
CommentUser = payload['comment']['user']['login'],
CommentId = payload['comment']['id'],
CommentPosition = None,
CommentPath = None,
Prefix = '> ',
UpdateDeltaTime = UpdateDeltaTime
)
#
# Send any generated emails
#
SendEmails (HubPullRequest, [Summary], args.EmailServer)
print ('----> Process Event Done <----', event, payload['action'])
return dumps({'msg': 'issue_comment created or edited'})
############################################################################
# Process commit comment events
# These are comments against a specific commit
# Quote Patch #n commit message and add comment below below with commenters GitHubID
############################################################################
if event == 'commit_comment':
action = payload['action']
if action not in ['created', 'edited']:
print ('skip commit_comment event with action other than created or edited')
return dumps({'status': 'skipped'})
#
# Skip REVIEW_REQUEST comments made by the webhook itself. This same
# information is always present in the patch emails, so filtering these
# comments prevent double emails when a pull request is opened or
# synchronized.
#
Body = payload['comment']['body'].splitlines()
for Line in payload['comment']['body'].splitlines():
if Line.startswith (REVIEW_REQUEST):
print ('skip commit_comment event with review request body from this webhook')
return dumps({'status': 'skipped'})
#
# Search for issues/pull requests that contain the comment's commit_id
#
CommitId = payload['comment']['commit_id']
CommentId = payload['comment']['id']
CommentPosition = payload['comment']['position']
CommentPath = payload['comment']['path']
EmailContents = []
for Issue in Hub.search_issues('SHA:' + CommitId):
#
# Skip Issue for a different repository
#
if Issue.repository.full_name != payload['repository']['full_name']:
print ('Skip commit_comment event against a different repo', HubPullRequest.base.repo.full_name)
continue
#
# Use GitHub API to get Pull Request
#
try:
HubRepo = Issue.repository
HubPullRequest = Issue.as_pull_request()
except:
print ('skip commit_comment event for which the PyGitHub objects can not be retrieved')
continue
#
# Skip pull request that is not open
#
if HubPullRequest.state != 'open':
print ('Skip commit_comment event against a pull request that is not open')
return dumps({'status': 'skipped'})
#
# Skip commit_comment with a base repo that is different than the expected repo
#
if HubPullRequest.base.repo.full_name != HubRepo.full_name:
print ('Skip commit_comment event against a different repo', HubPullRequest.base.repo.full_name)
continue
#
# Skip commit_comment with a base branch that is not the default branch
#
if HubPullRequest.base.ref != HubRepo.default_branch:
print ('Skip commit_comment event against non-default base branch', HubPullRequest.base.ref)
continue
#
# Fetch the git commits for the pull request and return a git repo
# object and the contents of Maintainers.txt
#
GitRepo, Maintainers = FetchPullRequest (HubPullRequest)
if GitRepo is None or Maintainers is None:
print ('Skip commit_comment event that can not be fetched')
continue
#
# Count head_ref_force_pushed and reopened events to determine the
# version of the patch series.
#
PatchSeriesVersion = 1;
Events = HubPullRequest.get_issue_events()
for Event in Events:
if Event.event in ['head_ref_force_pushed', 'reopened']:
PatchSeriesVersion = PatchSeriesVersion + 1;
#
# Determine the patch number of the commit with the comment
#
PatchNumber = 0
for Commit in HubPullRequest.get_commits():
PatchNumber = PatchNumber + 1
if Commit.sha == CommitId:
break
#
# Get commit from GIT repository
#
CommitFiles = GitRepo.commit(Commit.sha).stats.files
#
# Get maintainers and reviewers for all files in this commit
#
Addresses = GetMaintainers (Maintainers, CommitFiles)
AddressList, GitHubIdList, EmailList = ParseMaintainerAddresses(Addresses)
Email = FormatPatch (
event,
GitRepo,
HubRepo,
HubPullRequest,
Commit,
AddressList,
PatchSeriesVersion,
PatchNumber,
CommentUser = payload['comment']['user']['login'],
CommentId = CommentId,
CommentPosition = CommentPosition,
CommentPath = CommentPath,
Prefix = '> '
)
EmailContents.append (Email)
if EmailContents == []:
print ('skip commit_comment that is not for any supported repo')
return dumps({'status': 'skipped'})
#
# Send any generated emails
#
SendEmails (HubPullRequest, EmailContents, args.EmailServer)
print ('----> Process Event Done <----', event, payload['action'])
return dumps({'msg': 'commit_comment created or edited'})
############################################################################
# Process pull_request_review_comment and pull_request_review events
# Quote Patch #0 commit message and patch diff of file comment is against
############################################################################
if event in ['pull_request_review_comment', 'pull_request_review']:
action = payload['action']
Review = None
ReviewComments = []
DeleteId = None
ParentReviewId = None
UpdateDeltaTime = 0
if event in ['pull_request_review_comment']:
if action not in ['edited', 'deleted']:
print ('skip pull_request_review_comment event with action other than edited or deleted')
return dumps({'status': 'skipped'})
#
# Skip REVIEW_REQUEST comments made by the webhook itself. This same
# information is always present in the patch emails, so filtering these
# comments prevent double emails when a pull request is opened or
# synchronized.
#
Body = payload['comment']['body'].splitlines()
for Line in payload['comment']['body'].splitlines():
if Line.startswith (REVIEW_REQUEST):
print ('skip pull_request_review_comment event with review request body from this webhook')
return dumps({'status': 'skipped'})
if event in ['pull_request_review']:
if action not in ['submitted', 'edited']:
print ('skip pull_request_review event with action other than submitted or edited')
return dumps({'status': 'skipped'})
if action == 'edited' and payload['changes'] == {}:
print ('skip pull_request_review event edited action that has no changes')
return dumps({'status': 'skipped'})
EmailContents = []
#
# Use GitHub API to get Pull Request
#
try:
HubRepo = Hub.get_repo (payload['repository']['full_name'])
HubPullRequest = HubRepo.get_pull(payload['pull_request']['number'])
except:
print ('skip pull_request_review_comment event for which the PyGitHub objects can not be retrieved')
return dumps({'status': 'skipped'})
#
# Skip pull request that is not open
#
if HubPullRequest.state != 'open':
print ('Skip pull_request_review_comment event against a pull request that is not open')
return dumps({'status': 'skipped'})
#
# Skip pull_request_review_comment with a base repo that is different than the expected repo
#
if HubPullRequest.base.repo.full_name != HubRepo.full_name:
print ('Skip pull_request_review_comment event against a different repo', HubPullRequest.base.repo.full_name)
return dumps({'status': 'skipped'})
#
# Skip pull_request_review_comment with a base branch that is not the default branch
#
if HubPullRequest.base.ref != HubRepo.default_branch:
print ('Skip pull_request_review_comment event against non-default base branch', HubPullRequest.base.ref)
return dumps({'status': 'skipped'})
#
# Build dictionary of review comments
#
CommentIdDict = {}
for Comment in HubPullRequest.get_review_comments():
Comment.pull_request_review_id = None
if 'pull_request_review_id' in Comment.raw_data:
Comment.pull_request_review_id = Comment.raw_data['pull_request_review_id']
CommentIdDict[Comment.id] = Comment
#
# Determine if review has a parent review, is being deleted, or has
# an update time.
#
if event in ['pull_request_review']:
CommitId = payload['review']['commit_id']
CommentUser = payload['review']['user']['login'],
CommentId = None
CommentPosition = None
CommentPath = None
CommentInReplyToId = None
ReviewId = payload['review']['id']
try:
Review = HubPullRequest.get_review(ReviewId)
except:
Review = None
ReviewComments = GetReviewCommentsFromReview(Review, CommentId, CommentInReplyToId, CommentIdDict)
if payload['action'] == 'submitted':
UpdateDeltaTime = 0
for ReviewComment in ReviewComments:
if ReviewComment.pull_request_review_id == ReviewId:
if ReviewComment.in_reply_to_id and ReviewComment.in_reply_to_id in CommentIdDict:
ParentReviewId = CommentIdDict[ReviewComment.in_reply_to_id].pull_request_review_id
if ParentReviewId and ParentReviewId != ReviewId:
break
if payload['action'] == 'edited' and Review:
UpdatedAt = datetime.datetime.strptime(payload['pull_request']['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
UpdateDeltaTime = (UpdatedAt - Review.submitted_at).seconds
if event in ['pull_request_review_comment']:
CommitId = payload['comment']['commit_id']
CommentId = payload['comment']['id']
CommentUser = payload['comment']['user']['login'],
CommentPosition = payload['comment']['position']
CommentPath = payload['comment']['path']
CommentInReplyToId = None
ReviewId = None
if 'in_reply_to_id' in payload['comment']:
CommentInReplyToId = payload['comment']['in_reply_to_id']
if 'pull_request_review_id' in payload['comment']:
ReviewId = payload['comment']['pull_request_review_id']
try:
Review = HubPullRequest.get_review(ReviewId)
except:
Review = None
ReviewComments = GetReviewCommentsFromReview(Review, CommentId, CommentInReplyToId, CommentIdDict)
if payload['action'] == 'deleted':
UpdateDeltaTime = 0
DeleteId = payload['comment']['id']
if payload['action'] == 'edited' and Review:
UpdatedAt = datetime.datetime.strptime(payload['comment']['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
UpdateDeltaTime = (UpdatedAt - Review.submitted_at).seconds
#
# Fetch the git commits for the pull request and return a git repo
# object and the contents of Maintainers.txt
#
GitRepo, Maintainers = FetchPullRequest (HubPullRequest)
if GitRepo is None or Maintainers is None:
print ('Skip pull_request_review_comment event that can not be fetched')
return dumps({'status': 'skipped'})
#
# Count head_ref_force_pushed and reopened events to determine the
# version of the patch series.
#
PatchSeriesVersion = 1;
Events = HubPullRequest.get_issue_events()
for Event in Events:
if Event.event in ['head_ref_force_pushed', 'reopened']:
PatchSeriesVersion = PatchSeriesVersion + 1;
#
# All pull request review comments are against patch #0
#
PatchNumber = 0
#
# Build dictionary of files in range of commits from the pull request
# base sha up to the commit id of the pull request review comment.
#
CommitFiles = {}
for Commit in HubPullRequest.get_commits():
CommitFiles.update (GitRepo.commit(Commit.sha).stats.files)
#
# Get maintainers and reviewers for all files in this commit
#
Addresses = GetMaintainers (Maintainers, CommitFiles)
AddressList, GitHubIdList, EmailList = ParseMaintainerAddresses(Addresses)
#
# Generate the summary email patch #0 with body of email prefixed with >.
#
Email = FormatPatchSummary (
event,
GitRepo,
HubRepo,
HubPullRequest,
AddressList,
PatchSeriesVersion,
CommitRange = HubPullRequest.base.sha + '..' + HubPullRequest.head.sha,
CommentUser = CommentUser,
CommentId = CommentId,
CommentPosition = CommentPosition,
CommentPath = CommentPath,
Prefix = '> ',
CommentInReplyToId = CommentInReplyToId,
UpdateDeltaTime = UpdateDeltaTime,
Review = Review,
ReviewId = ReviewId,
ReviewComments = ReviewComments,
DeleteId = DeleteId,
ParentReviewId = ParentReviewId
)
EmailContents.append (Email)
#
# Send any generated emails
#
SendEmails (HubPullRequest, EmailContents, args.EmailServer)
print ('----> Process Event Done <----', event, payload['action'])
return dumps({'msg': event + ' created or edited or deleted'})
############################################################################
# Process pull request events
############################################################################
if event == 'pull_request':
action = payload['action']
if action not in ['opened', 'synchronize', 'edited', 'closed', 'reopened']:
print ('skip pull_request event with action other than opened or synchronized')
return dumps({'status': 'skipped'})
#
# Use GitHub API to get Pull Request
#
try:
HubRepo = Hub.get_repo (payload['repository']['full_name'])
HubPullRequest = HubRepo.get_pull(payload['pull_request']['number'])
except:
#
# Skip requests if the PyGitHub objects can not be retrieved
#
print ('skip pull_request event for which the PyGitHub objects can not be retrieved')
return dumps({'status': 'skipped'})
#
# Skip pull request that is not open unless this is the event that is
# closing the pull request
#
if action != 'closed':
if HubPullRequest.state != 'open':
print ('Skip pull_request event against a pull request that is not open')
return dumps({'status': 'skipped'})
#
# Skip pull request with a base repo that is different than the expected repo
#
if HubPullRequest.base.repo.full_name != HubRepo.full_name:
print ('Skip PR event against a different repo', HubPullRequest.base.repo.full_name)
return dumps({'status': 'skipped'})
#
# Skip pull requests with a base branch that is not the default branch
#
if HubPullRequest.base.ref != HubRepo.default_branch:
print ('Skip PR event against non-default base branch', HubPullRequest.base.ref)
return dumps({'status': 'skipped'})
#
# Fetch the git commits for the pull request and return a git repo
# object and the contents of Maintainers.txt
#
GitRepo, Maintainers = FetchPullRequest (HubPullRequest)
if GitRepo is None or Maintainers is None:
print ('Skip pull_request_review event that can not be fetched')
return dumps({'status': 'skipped'})
NewPatchSeries = False
PatchSeriesVersion = 1;
if action in ['opened', 'reopened']:
#
# New pull request was created
#
NewPatchSeries = True
if action in ['synchronize', 'edited', 'closed', 'reopened']:
#
# Existing pull request was updated.
# Commits were added to an existing pull request or an existing pull
# request was forced push. Get events to determine what happened
#
Events = HubPullRequest.get_issue_events()
for Event in Events:
#
# Count head_ref_force_pushed and reopened events to determine
# the version of the patch series.
#
if Event.event in ['head_ref_force_pushed', 'reopened']:
PatchSeriesVersion = PatchSeriesVersion + 1;
if Event.event in ['head_ref_force_pushed']:
#
# If the head_ref_force_pushed event occurred at the exact
# same date/time (or within 2 seconds) that the pull request
# was updated, then this was a forced push and the entire
# patch series should be emailed again.
#
if abs(Event.created_at - HubPullRequest.updated_at).seconds <= 2:
NewPatchSeries = True
PullRequestAddressList = []
PullRequestGitHubIdList = []
PullRequestEmailList = []
EmailContents = []
PatchNumber = 0
for Commit in HubPullRequest.get_commits():
PatchNumber = PatchNumber + 1
#
# Get list of files modified by commit from GIT repository
#
CommitFiles = GitRepo.commit(Commit.sha).stats.files
#
# Get maintainers and reviewers for all files in this commit
#
Addresses = GetMaintainers (Maintainers, CommitFiles)
AddressList, GitHubIdList, EmailList = ParseMaintainerAddresses(Addresses)
PullRequestAddressList = list(set(PullRequestAddressList + AddressList))
PullRequestGitHubIdList = list(set(PullRequestGitHubIdList + GitHubIdList))
PullRequestEmailList = list(set(PullRequestEmailList + EmailList))
if action in ['opened', 'synchronize', 'reopened']:
print ('pr[%d]' % (HubPullRequest.number), Commit.sha, ' @' + ' @'.join(PullRequestGitHubIdList))
#
# Update the list of required reviewers for this commit
#
ReviewersUpdated = UpdatePullRequestCommitReviewers (Commit, GitHubIdList)
#
# Generate email contents for all commits in a pull request if this is
# a new pull request or a forced push was done to an existing pull request.
# Generate email contents for patches that add new reviewers. This
# occurs when when new commits are added to an existing pull request.
#
if NewPatchSeries or ReviewersUpdated:
Email = FormatPatch (
event,
GitRepo,
HubRepo,
HubPullRequest,
Commit,
AddressList,
PatchSeriesVersion,
PatchNumber
)
EmailContents.append (Email)
if action in ['opened', 'synchronize', 'reopened']:
#
# Update the list of required reviewers for the pull request
#
UpdatePullRequestReviewers (Hub, HubRepo, HubPullRequest, PullRequestGitHubIdList)
#
# If this is a new pull request or a forced push on a pull request or an
# edit of the pulle request title or description, then generate the
# summary email patch #0 and add to be beginning of the list of emails
# to send.
#
if NewPatchSeries or action in ['edited', 'closed']:
UpdateDeltaTime = 0
if action in ['edited', 'closed']:
UpdateDeltaTime = (HubPullRequest.updated_at - HubPullRequest.created_at).seconds
Summary = FormatPatchSummary (
event,
GitRepo,
HubRepo,
HubPullRequest,
PullRequestAddressList,
PatchSeriesVersion,
UpdateDeltaTime = UpdateDeltaTime
)
EmailContents.insert (0, Summary)
#
# Send any generated emails
#
SendEmails (HubPullRequest, EmailContents, args.EmailServer)
print ('----> Process Event Done <----', event, payload['action'])
return dumps({'msg': 'pull_request opened or synchronize'})
print ('skip unsupported event')
return dumps({'status': 'skipped'})
if __name__ == '__main__':
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser (prog = __prog__,
description = __description__ + __copyright__,
conflict_handler = 'resolve')
parser.add_argument ("-e", "--email-server", dest = 'EmailServer', choices = ['Off', 'SMTP', 'SendGrid'], default = 'Off',
help = "Email server type used to send emails.")
parser.add_argument ("-v", "--verbose", dest = 'Verbose', action = "store_true",
help = "Increase output messages")
parser.add_argument ("-q", "--quiet", dest = 'Quiet', action = "store_true",
help = "Reduce output messages")
parser.add_argument ("--debug", dest = 'Debug', type = int, metavar = '[0-9]', choices = range (0, 10), default = 0,
help = "Set debug level")
#
# Parse command line arguments
#
args = parser.parse_args ()
#
# Create GitHub object authenticated using GitHub Token for the webhook
#
try:
Hub = Github (GITHUB_TOKEN)
except:
print ('can not access GitHub APIs')
sys.exit(1)
try:
application.run(debug=False, host='localhost', port=GITHUB_WEBHOOK_PORT_NUMBER, threaded=False)
except:
print ('can not create listener for GitHub HTTP requests')
sys.exit(1)
| 42.207709
| 179
| 0.564735
|
2e1b3a03db82d659335a28f85104b595bca2a2f2
| 331
|
py
|
Python
|
algorithms-on-graph/week5_mst/1_connecting_points/connecting_points.py
|
yiping-wang/data-structures-and-algorithms-coursera
|
fc0ba34399c27b358ce52a323a52ab2eb095f156
|
[
"MIT"
] | 7
|
2017-04-01T17:18:35.000Z
|
2022-01-12T05:23:23.000Z
|
algorithms-on-graph/week5_mst/1_connecting_points/connecting_points.py
|
yiping-wang/data-structures-and-algorithms-coursera
|
fc0ba34399c27b358ce52a323a52ab2eb095f156
|
[
"MIT"
] | 6
|
2020-05-24T13:36:50.000Z
|
2022-02-15T06:44:20.000Z
|
algorithms-on-graph/week5_mst/1_connecting_points/connecting_points.py
|
yiping-wang/data-structures-and-algorithms-coursera
|
fc0ba34399c27b358ce52a323a52ab2eb095f156
|
[
"MIT"
] | 2
|
2018-09-20T01:07:39.000Z
|
2019-02-22T14:55:38.000Z
|
#Uses python3
import sys
import math
def minimum_distance(x, y):
result = 0.
#write your code here
return result
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
x = data[1::2]
y = data[2::2]
print("{0:.9f}".format(minimum_distance(x, y)))
| 18.388889
| 51
| 0.598187
|
0ffd131cf8343dfa577d9cdc91864271c9ce86b9
| 283
|
py
|
Python
|
itez/beneficiary/migrations/0032_merge_20211220_1405.py
|
Olipa776/itez
|
d6bba80c74d5ce385ae8a304e56546d927d24624
|
[
"MIT"
] | 1
|
2021-12-12T11:46:47.000Z
|
2021-12-12T11:46:47.000Z
|
itez/beneficiary/migrations/0032_merge_20211220_1405.py
|
Olipa776/itez
|
d6bba80c74d5ce385ae8a304e56546d927d24624
|
[
"MIT"
] | 11
|
2021-11-03T10:46:00.000Z
|
2021-12-20T11:35:00.000Z
|
itez/beneficiary/migrations/0032_merge_20211220_1405.py
|
Olipa776/itez
|
d6bba80c74d5ce385ae8a304e56546d927d24624
|
[
"MIT"
] | 7
|
2021-11-01T15:03:02.000Z
|
2021-11-06T13:06:10.000Z
|
# Generated by Django 3.1.13 on 2021-12-20 14:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('beneficiary', '0031_merge_20211220_0030'),
('beneficiary', '0030_auto_20211215_1130'),
]
operations = [
]
| 18.866667
| 52
| 0.660777
|
fca3871cfc6a6b80e9216c7fa169f01a78cdb86c
| 6,922
|
py
|
Python
|
qa/rpc-tests/llmq-chainlocks.py
|
IcomTechRevolution/icomtech-coin
|
bb770b827ac9e65f3e535d72c9b7812d9b4e785a
|
[
"MIT"
] | 1
|
2019-10-28T03:45:14.000Z
|
2019-10-28T03:45:14.000Z
|
qa/rpc-tests/llmq-chainlocks.py
|
icomtech/coin
|
e522302a3d16aac5666893d52114027fae6c330b
|
[
"MIT"
] | 1
|
2020-05-12T16:08:44.000Z
|
2020-05-12T16:08:44.000Z
|
qa/rpc-tests/llmq-chainlocks.py
|
icomtech/coin
|
e522302a3d16aac5666893d52114027fae6c330b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import IcomTechTestFramework
from test_framework.util import *
from time import *
'''
llmq-chainlocks.py
Checks LLMQs based ChainLocks
'''
class LLMQChainLocksTest(IcomTechTestFramework):
def __init__(self):
super().__init__(6, 5, [], fast_dip3_enforcement=True)
def run_test(self):
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(10)
sync_blocks(self.nodes, timeout=60*5)
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.wait_for_sporks_same()
for i in range(4):
self.mine_quorum()
# mine single block, wait for chainlock
self.nodes[0].generate(1)
self.wait_for_chainlock_tip_all_nodes()
# mine many blocks, wait for chainlock
self.nodes[0].generate(20)
self.wait_for_chainlock_tip_all_nodes()
# assert that all blocks up until the tip are chainlocked
for h in range(1, self.nodes[0].getblockcount()):
block = self.nodes[0].getblock(self.nodes[0].getblockhash(h))
assert(block['chainlock'])
# Isolate node, mine on another, and reconnect
isolate_node(self.nodes[0])
node0_tip = self.nodes[0].getbestblockhash()
self.nodes[1].generate(5)
self.wait_for_chainlock_tip(self.nodes[1])
assert(self.nodes[0].getbestblockhash() == node0_tip)
reconnect_isolated_node(self.nodes[0], 1)
self.nodes[1].generate(1)
self.wait_for_chainlock(self.nodes[0], self.nodes[1].getbestblockhash())
# Isolate node, mine on both parts of the network, and reconnect
isolate_node(self.nodes[0])
self.nodes[0].generate(5)
self.nodes[1].generate(1)
good_tip = self.nodes[1].getbestblockhash()
self.wait_for_chainlock_tip(self.nodes[1])
assert(not self.nodes[0].getblock(self.nodes[0].getbestblockhash())["chainlock"])
reconnect_isolated_node(self.nodes[0], 1)
self.nodes[1].generate(1)
self.wait_for_chainlock(self.nodes[0], self.nodes[1].getbestblockhash())
assert(self.nodes[0].getblock(self.nodes[0].getbestblockhash())["previousblockhash"] == good_tip)
assert(self.nodes[1].getblock(self.nodes[1].getbestblockhash())["previousblockhash"] == good_tip)
# Keep node connected and let it try to reorg the chain
good_tip = self.nodes[0].getbestblockhash()
# Restart it so that it forgets all the chainlocks from the past
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 1)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Now try to reorg the chain
self.nodes[0].generate(2)
sleep(6)
assert(self.nodes[1].getbestblockhash() == good_tip)
self.nodes[0].generate(2)
sleep(6)
assert(self.nodes[1].getbestblockhash() == good_tip)
# Now let the node which is on the wrong chain reorg back to the locked chain
self.nodes[0].reconsiderblock(good_tip)
assert(self.nodes[0].getbestblockhash() != good_tip)
self.nodes[1].generate(1)
self.wait_for_chainlock(self.nodes[0], self.nodes[1].getbestblockhash())
assert(self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash())
# Enable LLMQ bases InstantSend, which also enables checks for "safe" transactions
self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
self.wait_for_sporks_same()
# Isolate a node and let it create some transactions which won't get IS locked
isolate_node(self.nodes[0])
txs = []
for i in range(3):
txs.append(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
txs += self.create_chained_txs(self.nodes[0], 1)
# Assert that after block generation these TXs are NOT included (as they are "unsafe")
self.nodes[0].generate(1)
for txid in txs:
tx = self.nodes[0].getrawtransaction(txid, 1)
assert("confirmations" not in tx)
sleep(1)
assert(not self.nodes[0].getblock(self.nodes[0].getbestblockhash())["chainlock"])
# Disable LLMQ based InstantSend for a very short time (this never gets propagated to other nodes)
self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 4070908800)
# Now the TXs should be included
self.nodes[0].generate(1)
self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
# Assert that TXs got included now
for txid in txs:
tx = self.nodes[0].getrawtransaction(txid, 1)
assert("confirmations" in tx and tx["confirmations"] > 0)
# Enable network on first node again, which will cause the blocks to propagate and IS locks to happen retroactively
# for the mined TXs, which will then allow the network to create a CLSIG
reconnect_isolated_node(self.nodes[0], 1)
self.wait_for_chainlock(self.nodes[0], self.nodes[1].getbestblockhash())
def wait_for_chainlock_tip_all_nodes(self):
for node in self.nodes:
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock_tip(self, node):
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock(self, node, block_hash):
t = time()
while time() - t < 15:
try:
block = node.getblock(block_hash)
if block["confirmations"] > 0 and block["chainlock"]:
return
except:
# block might not be on the node yet
pass
sleep(0.1)
raise AssertionError("wait_for_chainlock timed out")
def create_chained_txs(self, node, amount):
txid = node.sendtoaddress(node.getnewaddress(), amount)
tx = node.getrawtransaction(txid, 1)
inputs = []
valueIn = 0
for txout in tx["vout"]:
inputs.append({"txid": txid, "vout": txout["n"]})
valueIn += txout["value"]
outputs = {
node.getnewaddress(): round(float(valueIn) - 0.0001, 6)
}
rawtx = node.createrawtransaction(inputs, outputs)
rawtx = node.signrawtransaction(rawtx)
rawtxid = node.sendrawtransaction(rawtx["hex"])
return [txid, rawtxid]
if __name__ == '__main__':
LLMQChainLocksTest().main()
| 41.449102
| 123
| 0.645912
|
375e72a3c340e920a21bd6b0b4d02ca1b2f59c3d
| 990
|
py
|
Python
|
ceph_deploy/lib/vendor/remoto/log.py
|
scibian/ceph-deploy
|
abc2626b8a797bbd1b313c38e1a79dbee8edd408
|
[
"MIT"
] | null | null | null |
ceph_deploy/lib/vendor/remoto/log.py
|
scibian/ceph-deploy
|
abc2626b8a797bbd1b313c38e1a79dbee8edd408
|
[
"MIT"
] | null | null | null |
ceph_deploy/lib/vendor/remoto/log.py
|
scibian/ceph-deploy
|
abc2626b8a797bbd1b313c38e1a79dbee8edd408
|
[
"MIT"
] | null | null | null |
def reporting(conn, result, timeout=None):
timeout = timeout or conn.global_timeout # -1 a.k.a. wait for ever
log_map = {
'debug': conn.logger.debug,
'error': conn.logger.error,
'warning': conn.logger.warning
}
while True:
try:
received = result.receive(timeout)
level_received, message = list(received.items())[0]
if not isinstance(message, str):
message = message.decode('utf-8')
log_map[level_received](message.strip('\n'))
except EOFError:
break
except Exception as err:
# the things we need to do here :(
# because execnet magic, we cannot catch this as
# `except TimeoutError`
if err.__class__.__name__ == 'TimeoutError':
msg = 'No data was received after %s seconds, disconnecting...' % timeout
conn.logger.warning(msg)
break
raise
| 34.137931
| 89
| 0.555556
|
fdd0b16b15c71a6de1fb623296ceefd885fce3ef
| 73,466
|
py
|
Python
|
src/transformers/models/roberta/modeling_roberta.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/roberta/modeling_roberta.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/roberta/modeling_roberta.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model."""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...adapters.composition import adjust_tensors_for_parallel
from ...adapters.context import ForwardContext
from ...adapters.lora import Linear as LoRALinear
from ...adapters.mixins.bert import BertModelAdaptersMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin
from ...adapters.model_mixin import ModelWithHeadsAdaptersMixin
from ...adapters.prefix_tuning import PrefixTuningShim
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, location_key: Optional[str] = None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = LoRALinear(config.hidden_size, self.all_head_size, "selfattn", config)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = LoRALinear(config.hidden_size, self.all_head_size, "selfattn", config)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.prefix_tuning = PrefixTuningShim(location_key + "_prefix" if location_key else None, config)
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
key_layer, value_layer, attention_mask = self.prefix_tuning(key_layer, value_layer, attention_mask)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.modeling_bert.BertSelfOutput
class RobertaSelfOutput(BertSelfOutputAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self._init_adapter_modules()
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.adapter_layer_forward(hidden_states, input_tensor, self.LayerNorm)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, location_key: Optional[str] = None):
super().__init__()
self.self = RobertaSelfAttention(
config, position_embedding_type=position_embedding_type, location_key=location_key
)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = LoRALinear(config.hidden_size, config.intermediate_size, "intermediate", config)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(BertOutputAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = LoRALinear(config.intermediate_size, config.hidden_size, "output", config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self._init_adapter_modules()
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.adapter_layer_forward(hidden_states, input_tensor, self.LayerNorm)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config, location_key="self")
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = RobertaAttention(config, position_embedding_type="absolute", location_key="cross")
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
(attention_mask,) = adjust_tensors_for_parallel(hidden_states, attention_mask)
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RobertaEncoder):
module.gradient_checkpointing = value
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`RobertaConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(BertModelAdaptersMixin, RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
.. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self._init_adapter_modules()
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
@ForwardContext.wrap
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
embedding_output = self.invertible_adapters_forward(embedding_output)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
Example:
```python
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained("roberta-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(
sequence_output,
inv_lang_adapter=self.roberta.get_invertible_adapter(),
)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
expected_output="' Paris'",
expected_loss=0.1,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(
sequence_output,
inv_lang_adapter=self.roberta.get_invertible_adapter(),
)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, inv_lang_adapter=None, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
if inv_lang_adapter:
x = inv_lang_adapter(x, rev=True)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="cardiffnlp/twitter-roberta-base-emotion",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output="'optimism'",
expected_loss=0.08,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="Jean-Baptiste/roberta-large-ner-english",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
expected_loss=0.01,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="deepset/roberta-base-squad2",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
expected_output="' puppet'",
expected_loss=0.86,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
| 45.659416
| 198
| 0.677592
|
0056b1dcadca5cdddaebfcaf911c53a91bdad8b8
| 2,575
|
py
|
Python
|
python/experiments/eval_train_test.py
|
Etienne-Meunier/davis
|
8f8d9b6d93ea0454b9754ba8f8bbfdc7f0e57ab6
|
[
"BSD-3-Clause"
] | 135
|
2016-04-17T06:59:38.000Z
|
2022-01-27T03:46:04.000Z
|
python/experiments/eval_train_test.py
|
Etienne-Meunier/davis
|
8f8d9b6d93ea0454b9754ba8f8bbfdc7f0e57ab6
|
[
"BSD-3-Clause"
] | 13
|
2016-08-06T15:13:33.000Z
|
2020-11-13T09:32:52.000Z
|
python/experiments/eval_train_test.py
|
Etienne-Meunier/davis
|
8f8d9b6d93ea0454b9754ba8f8bbfdc7f0e57ab6
|
[
"BSD-3-Clause"
] | 52
|
2016-05-14T14:13:24.000Z
|
2021-01-08T16:30:22.000Z
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# S Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
#-----------------------------------------------------------------------------
# Copyright (c) 2016 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi
# ----------------------------------------------------------------------------
"""
Peform evaluation on separately on training and test set.
EXAMPLE:
python tools/eval_all.py
"""
import sys
import h5py
import glob
import argparse
import numpy as np
import os.path as osp
from davis import cfg,log
from davis.dataset import *
from prettytable import PrettyTable as ptable
import matplotlib.pylab as plt
def parse_args():
parser = argparse.ArgumentParser(
description='Split evaluation between training and test set.')
parser.add_argument('--measure', dest='measure',default='J',
help='Measure selected for evaluation.')
parser.add_argument('--statistic', dest='statistic',default='M',
help='Measure statistics: [M]ean,[R]ecall,[D]ecay.')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Parse command-line arguments
args = parse_args()
db_info = db_read_info()
db_techniques = db_read_techniques()
attributes = db_info.attributes
distr = []
S = []
for t_set in db_info.sets:
log.info("Filtering techniques in: %s"%(t_set))
# Filter sequences tagged with set=`t_set`
X = []
db_sequences = filter(
lambda s: t_set == s.set ,db_info.sequences)
for s in db_sequences:
X.append([1 if attr in s.attributes else 0for attr in attributes ])
distr.append(np.round(np.sum(X,axis=0).astype(np.float32)/np.sum(X),3))
db_eval_dict = db_read_eval(sequence=[s.name for s in db_sequences],
measure=args.measure,raw_eval=False)
statistics_to_id = {'M':0,'O':1,'D':2}
R = []
for t in db_techniques:
R.append(np.vstack(db_eval_dict[t.name][
args.measure].values())[:,statistics_to_id[args.statistic]])
S.append(np.average(np.array(R).T,axis=0))
print "\nAttributes Distribution"
table = ptable(["Set"] + attributes)
for attr,row in zip(db_info.sets,distr):
table.add_row([attr] + \
['{: .2f}'.format(np.round(r,2)) for r in row])
print table
table = ptable(["Set"] +
[t.name for t in db_techniques])
print "\nEvaluation (%s)"%args.measure
for attr,row in zip(db_info.sets,S):
table.add_row([attr] + \
['{: .2f}'.format(np.round(r,2)) for r in row])
print table
| 26.010101
| 78
| 0.636117
|
38a608c0569c842f362154e8ecde65fffa31daa9
| 29,293
|
py
|
Python
|
eslearn/stylesheets/PyQt5_stylesheets/pyqt5_style_DarkOrange_rc.py
|
lichao312214129/easylearn
|
e77b51b26e0c75b3a4d59dd5a71cf1b63ac4347d
|
[
"MIT"
] | 19
|
2020-02-29T06:00:18.000Z
|
2022-01-24T01:30:14.000Z
|
stylesheets/PyQt5_stylesheets/pyqt5_style_DarkOrange_rc.py
|
easylearn-fmri/easylearn
|
102ff264a7672b246244a489e0fbde8e3897c52f
|
[
"MIT"
] | 7
|
2020-04-02T03:05:21.000Z
|
2020-11-11T11:45:05.000Z
|
stylesheets/PyQt5_stylesheets/pyqt5_style_DarkOrange_rc.py
|
easylearn-fmri/easylearn
|
102ff264a7672b246244a489e0fbde8e3897c52f
|
[
"MIT"
] | 11
|
2020-03-03T03:02:15.000Z
|
2020-11-11T14:09:55.000Z
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.9)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x08\x77\
\x00\
\x00\x2e\xc8\x78\x9c\xdd\x5a\x6d\x6f\xdb\x36\x10\xfe\x1e\x20\xff\
\x81\x45\xbe\x34\x45\x9c\x48\x8a\xed\x24\xca\xa7\x76\x5d\xbb\x01\
\x1b\xb6\xae\xc1\xfa\xb1\xa0\x25\xd9\xd6\xa2\x88\xaa\x24\x2f\x49\
\x8b\xfd\xf7\xdd\x91\xe2\x9b\x44\xc9\x76\xe2\x14\xd9\x42\xa0\x49\
\xf9\x7a\x7c\xee\xee\xe1\xf1\xa8\x0f\x57\x8c\x65\x57\x69\xb1\xbf\
\xf7\x6d\x7f\x8f\xe0\xcf\x8c\x95\x71\x52\x86\xc4\x2f\xee\x48\xc5\
\xb2\x34\x26\xb3\x8c\x46\xd7\x97\xb2\x19\xfe\x5e\x94\x6c\x95\xc7\
\xa3\x88\x65\x0c\x3a\x1e\xcc\xe7\xd4\x0b\xe6\xb2\x43\x41\xe3\x38\
\xcd\x17\x7c\x82\x4b\x6b\xce\x51\x49\xe3\x74\x55\x85\xe4\x54\xb7\
\xb0\x82\x46\x69\x7d\x0f\xbd\x3d\x0f\xea\xfe\xd9\xdf\xdb\xdf\xfb\
\xf0\x29\x8d\x17\x49\xad\x44\x92\xeb\xcc\x7c\x2c\xcd\x48\x87\x1c\
\xa7\x01\x16\x35\xcb\x55\x99\x24\x7f\xa6\xc9\xed\x11\xf9\xf0\x4b\
\x5a\xd5\xf8\xa7\x9a\xb2\x3b\xba\x4a\xb3\xbf\x93\xb2\x99\xfc\x86\
\x96\x8b\x34\x1f\x65\xc9\xbc\x0e\xc9\x84\x4b\x6b\x4a\x16\xa6\x75\
\x72\x13\x2e\x19\x0c\x18\x98\x11\x56\xcd\x13\x5a\xbe\xc7\x5d\x27\
\x79\xfd\x92\xdc\xf9\x21\xf1\x8e\xc8\xbd\xf8\x75\x17\x88\xff\xc1\
\x2f\xff\x88\x54\x35\x2b\xe0\xff\x12\x4c\x59\xe1\x93\x83\x88\x7a\
\x53\xff\xe2\xf0\xd2\xc6\xc2\xe3\x3f\x4e\xb9\xaa\x24\x4b\xa2\x3a\
\x89\x9f\x5e\xb4\xf8\xec\xdc\xf3\xe9\xa1\x92\xe2\xd7\x24\x5f\xbd\
\xa1\x65\xc8\xe5\x70\x2c\x1f\x92\xba\xa4\x79\x55\xd0\x12\x16\x75\
\x8f\x1a\x92\xbe\x3d\xdc\x6d\xad\x28\x26\x35\xa0\xb1\xa7\x2f\xca\
\xa4\xaa\x7a\x66\x3f\x18\x8f\xc7\xfd\xd3\x0a\xb8\x37\xc3\xb3\x31\
\x6e\xf8\x01\x60\x05\xae\xde\x91\x51\x19\x84\x02\x5e\xdf\xa8\xe4\
\xa8\x02\xa8\x81\x8f\xa5\xdd\xe0\x1d\x8f\xc1\xc0\xc7\x58\x4e\x5e\
\x75\x1b\x03\xd9\xd8\x6d\xf2\x25\x22\xaf\x4e\x44\xdb\xa1\x6d\xe4\
\x33\x56\xd7\xec\x26\x1c\x69\x77\x6d\x3c\x58\xb6\xf8\x86\xfd\x23\
\x98\x1a\xbb\x3e\x90\x74\xdf\x96\x29\x28\x6e\x08\x60\x48\xe0\xe1\
\x3f\xcd\x1f\x8e\x51\x5d\x53\x18\x36\xfe\x38\xad\xe8\x2c\x73\x74\
\x3f\xf7\xb0\x6c\xcc\x1b\xaf\x67\x15\xd8\x59\x54\xff\x0c\x32\xac\
\x21\x8d\x87\xf9\xd1\x38\xc6\xa2\x2a\x50\x43\xd3\x31\x16\xc3\xb5\
\x26\x31\x96\xc3\xf6\x1e\xe7\x2c\x5a\x55\x4a\xa0\x93\x57\x5d\x15\
\xc4\xb4\x04\x21\xe9\xfd\x25\xaa\x5b\x8c\x45\x21\x7f\x8c\xd3\xfa\
\xa9\x37\x32\xb8\x8d\x9e\xa3\xa1\x39\x19\xaa\xfa\x3e\x4b\x42\xb1\
\x85\x7e\x17\xf4\x13\x2c\xf6\x48\x79\xa6\x4c\x14\x54\xbf\xaf\xaa\
\xe5\x9b\x15\x98\x6e\xbe\xed\x09\xf2\x30\x14\x26\x53\x2c\x96\x3a\
\x27\x01\x16\x5d\x35\x01\xac\x12\x2c\xba\xea\x02\xaa\x28\x16\x03\
\x2d\x8e\xde\xf4\xd0\xde\xe0\x6d\x1a\xd7\x4b\x07\x64\x72\x4f\x2e\
\x50\x7a\xe1\x54\x70\x4d\xdb\x2a\xd1\x67\xf2\x9c\xe5\xf5\xa8\x4a\
\xbf\xc2\x04\xfe\x79\x87\x14\xcc\x33\xd1\xac\x2f\xd3\xc5\xd2\x6a\
\xb8\x01\x6e\x69\x64\x1f\x9b\x0e\xae\xd5\x33\x40\xc6\x8f\xd3\x48\
\x10\x63\xb1\x34\x12\xcc\xb0\x58\x1a\x09\x2e\xb0\x58\x1a\x09\xce\
\xb1\x18\x1a\xe1\x7a\x9c\x68\x37\xfc\x81\xdd\xcc\xd8\x1b\x76\xa7\
\x44\x16\x1c\x95\x32\xe0\x51\x67\x60\x44\xb7\x38\x35\x9e\x85\xb9\
\xed\xd2\x13\x25\x5a\x22\x54\x3a\x32\x55\xdf\x0a\x9e\x9a\x15\x02\
\xb5\xc2\xd3\x44\x28\xa6\x4c\x06\x3d\x48\x1b\xe6\xa3\x4e\x7b\x4c\
\x7e\xac\x1d\xf0\x19\x9b\xe9\xe6\x36\x69\xab\x88\x0c\x1c\x7b\x1d\
\xe5\xa8\x33\x66\xfd\x7a\x4f\x14\x69\x2a\x2d\x86\x71\xc9\x8a\x51\
\xcc\x6e\xb5\x36\x49\xb5\x9a\x45\xc0\x61\x25\xcb\x46\x0c\x68\x29\
\xcd\x43\xa9\xca\xcb\x6e\x8f\x82\x55\x29\xca\x0e\xc1\x25\x2b\x08\
\x67\x31\xd9\x4b\x32\xaf\xe0\x34\xfb\x2a\x83\x36\x21\xe9\xcd\xeb\
\x5c\x74\x78\x6b\x83\x40\x0b\x2c\xab\x8b\xe5\x6c\x70\x9c\x93\xbf\
\x56\x55\x4d\x28\x5c\x46\xf2\x45\x96\x90\x0c\xb0\x23\x32\x6c\x93\
\x03\x41\x4c\x41\xb6\xd6\x75\x0a\x07\x57\xf4\x26\x21\xa2\x92\xd0\
\x8a\xd4\xcb\x84\x68\xfd\xb6\xa7\x11\xc1\x9d\x63\x26\xe8\xd6\xc1\
\x18\xe0\x1d\xd1\xb2\x64\xda\x28\x48\x7a\x43\x17\x20\xfa\xaa\xcc\
\x5e\x86\x27\x5f\xaa\xea\x73\x0a\x88\x56\x27\x6f\x61\xb7\xbf\x41\
\x94\xbe\x48\x4e\x70\xd4\x67\x3e\xea\xb8\xc8\x17\x5a\x77\xef\xc1\
\x40\x0a\x93\x43\x07\x82\x18\x2b\x52\x15\xc6\x60\x9e\x25\x72\xaa\
\x56\x5c\x34\x38\x61\x73\x39\x4c\xee\x6a\x8c\x89\xb6\x1f\xf9\x31\
\x02\xb3\xc9\xf0\x46\xb1\x04\xeb\xfa\x0a\x66\x44\x33\xd2\x7f\x79\
\x3e\x08\xf8\x4f\xf7\xfa\xfc\x30\xdf\x38\x06\xef\xe0\x17\x04\x83\
\xee\x03\x07\x23\x8c\xcf\xb1\x48\x46\x20\xcb\x44\x1c\xcf\x67\xda\
\x54\x05\xa8\xdc\x7a\x89\x3f\x85\x7f\x3c\xfe\xcb\xb1\xcf\x70\x49\
\xf3\x18\xec\x54\xef\x57\x9b\xc1\xb6\x1b\xf2\xc5\x86\xbc\x5e\x67\
\xe7\x9c\x27\xdc\xdd\xd8\x8e\xe8\xa3\xb6\xc3\x83\x0b\xb9\xa7\xc0\
\xf0\xbf\xf6\x59\x14\xb8\x37\x04\x6c\x30\x42\xe7\x72\xa9\xd0\x79\
\xdc\x61\xd8\x78\x31\xbc\xc8\xd3\xc0\x61\x71\x1f\xb1\x68\x69\x6c\
\x2c\xeb\x24\x34\x8b\xcc\x5c\xac\x28\x4c\xc0\x05\x10\x74\xfe\x5f\
\x01\xe4\xc4\x07\x29\xd8\xd1\x61\x3d\x3a\x82\x34\x39\xb5\x19\x00\
\x1d\x11\xb3\x0f\xe7\xf7\x76\x17\xd3\x6f\x86\xd3\x6c\xa4\x13\xf6\
\x6b\x27\xf6\x7b\xe0\xbc\x5d\xc2\xb5\xb9\xcf\xdc\x0b\xa4\xeb\x3e\
\x61\x51\xdd\xad\x0e\x3d\x2e\x9e\xb3\xdc\xb5\x04\x04\x73\x75\x1a\
\xed\x94\x18\x1e\xc8\x74\x12\x38\x83\xe9\x14\xd5\x19\x34\x47\xbc\
\xcb\x01\x73\x56\x9c\xdd\x47\x85\x8f\xdf\xef\xda\xa8\xe7\xfb\x12\
\xa1\x6b\x43\xdf\xd1\xcd\xb7\x8b\x01\x5b\xee\xb0\x96\x08\x45\xac\
\xf3\x48\x26\x7c\x7e\x08\xad\x35\x8e\xcd\x11\x82\x19\x1e\x06\xcf\
\xaa\x68\x48\x4e\xc2\x63\x13\x8b\x8e\x1c\x37\xc3\x6f\xb7\x1c\xd8\
\xc3\x82\x6e\x51\x15\x07\xae\xf1\x6d\x8b\x01\x65\x18\x39\x90\xc2\
\x38\x08\xc6\x58\x74\xfe\x23\xa3\x69\xfe\x80\x71\x3f\x25\x14\xc0\
\xc2\x9b\x19\xc8\x2a\xee\x5b\x5b\x24\x4e\x8c\x3c\xb4\x99\x7e\x6e\
\x92\xc4\xe4\x60\xea\x63\xb1\xb8\x67\xe2\x61\xd1\x55\x53\xe0\x5a\
\x9e\x64\x3e\x52\xb9\x6a\x9e\x8a\x98\xb4\xde\x26\xa4\x0a\xb0\xaa\
\xf7\x0a\xdd\xf5\x9c\x69\x84\x45\xdf\xf0\x96\x49\x74\x8d\xa1\xbd\
\x95\xd6\x95\xd8\x8c\x7d\x2c\xaa\xf3\x5b\x16\x5d\x37\x19\xd2\xb0\
\x4e\xeb\x2c\x51\xc8\xd4\x00\xf4\x88\x66\xe9\x02\xac\x38\x02\x20\
\xd4\x13\x4f\x85\x0f\x4f\x32\xf1\xc5\x6f\x4f\xa2\x82\xcc\x92\xfa\
\x36\x49\x72\x82\x29\xe8\x8a\xa4\x39\xbf\x47\xd5\x8c\x65\x80\x72\
\xa9\xae\x51\x8f\x47\x5c\x64\x9d\xed\xab\x3e\xd7\xb8\x86\x57\x74\
\x39\x74\x6e\x33\xca\x58\x95\x8c\x66\x3c\x9b\x02\x86\x6c\x36\xcd\
\x33\x46\xeb\xa6\x69\x0b\x20\xfc\xff\x3e\x10\x4d\xa2\xa9\x1f\x8e\
\xde\x47\xbb\xae\xbf\xf5\xae\xd1\x64\x2d\x87\x56\x69\x27\x36\xcd\
\xc4\x37\x19\xa9\x7f\xac\x97\x15\x60\x85\x4f\x69\x0e\x84\x89\xee\
\x5d\xd0\x92\xd6\x6c\x9b\xd7\xc5\x75\x28\x73\xff\x9e\x5a\x28\xfb\
\x13\x2c\x96\x83\x37\x8f\x4f\x1a\x78\xee\xf1\xbb\x71\xf0\x71\x84\
\xe5\xf1\xee\x37\x80\x97\xa1\xde\x5d\xe1\x66\x1d\xb2\x1c\xb6\xd9\
\x64\x1a\xf9\x67\x0a\x22\xfb\xd0\xdd\x11\x07\xee\x04\x22\x7c\xda\
\x37\x82\x56\x23\x41\xf6\x38\xee\xb3\xdc\x66\x20\x01\x24\x96\xb5\
\x93\x3f\xe2\x79\xaf\x6b\xe0\x2a\x86\xdd\x22\xcb\xfa\xcc\x4c\xde\
\xfa\x68\xc0\xf7\xda\xf5\xd6\x03\x49\x13\x0a\x94\x6c\x81\x4c\x01\
\x4a\x1a\xc8\xb6\x42\x97\x7b\xcb\x64\x74\xa6\x5d\xad\xe1\x64\xf7\
\xce\x22\x40\x64\xcb\x55\x7e\x3d\x14\x74\x08\x73\x6f\x66\x6d\xe2\
\xaf\xe0\xd8\x9f\xb4\x76\xc3\xe1\x34\x76\x72\x45\x67\x7c\xfe\x9a\
\xce\x64\x9e\xc0\xfd\xe6\xe6\xa0\x85\xd6\xa3\xbb\xcc\x4c\x36\xb9\
\xd1\x26\xdc\xea\x11\x57\x3d\xdd\x76\x15\x63\x68\xa0\xf5\x46\xe5\
\x68\xe9\xc9\xfc\x37\x2f\xe0\x86\x55\xda\xca\x1c\xf9\x36\x06\xf2\
\x30\x28\x68\x9e\x90\x6f\x1b\x6c\x59\x44\xee\x6e\x20\xc3\x8c\x56\
\x3a\x40\xb4\x17\xf6\xb8\xdb\xa2\x77\x62\x27\x22\x5f\xca\x09\xe2\
\xbf\xa4\x15\xa0\x56\x2f\xd1\xa1\x6b\x46\x90\x11\x33\x5a\x80\x32\
\xeb\x25\x61\xc2\xa5\xf9\x2c\xfa\x08\x1f\x4a\x2c\xbb\x04\x9b\xa7\
\x65\x55\x87\x2f\xec\xf7\x79\xcb\xfc\xbd\x86\x58\x76\x20\xa1\xa2\
\x72\x2d\x25\xcf\xad\xac\x13\xf2\x45\xef\xe7\x03\x0e\x8b\x6c\x19\
\x9d\xf9\xfa\x65\x26\x9f\x4f\x77\xc5\x4e\x7e\x8b\x77\xf4\xd7\x1d\
\x87\xce\xcd\x74\xbf\x8a\x19\x44\x63\x03\xad\x1a\x5b\x93\x56\xee\
\xad\x43\xb2\x15\x3d\xc9\xcf\x0f\x04\x36\x9a\xb0\xec\x77\xcf\xb6\
\x2f\xe1\xf2\x0e\xcb\x7b\xf8\x2e\x76\xac\x0a\xe3\x4b\x1b\x55\x13\
\x74\x6a\xd4\x57\x35\x5a\x5f\x7f\xc0\x6a\xac\x79\xde\x0c\x21\x32\
\x81\x3b\x24\x86\x24\x11\x5e\x64\x78\xb8\xd8\xd3\x61\x95\x37\x5d\
\x06\xcd\x74\x0d\xf7\x39\x58\xc6\x65\xe6\xea\xf9\xdf\x50\xf5\xb0\
\xdc\x03\x87\xc5\x17\x9c\x8d\x66\x8b\xee\x57\x4f\xd1\x1d\x3f\x20\
\x8e\x48\x74\x2f\xfe\xd0\x4d\x73\xd9\x34\xef\x34\x49\xe1\xfc\x63\
\xaf\xfd\x19\x13\xe6\xfe\x26\x12\x72\x47\xe3\xa9\x84\x43\x34\x19\
\x0f\x85\xf2\x1a\xa9\x77\xf6\x94\x30\x37\x27\xe6\x85\x32\x4e\x19\
\xd7\x5c\xac\x05\xbc\xed\xda\x5b\xa8\x4a\xde\x7a\x1c\x9b\xed\x79\
\x68\xef\xff\x5c\xce\x35\x47\xdb\x14\xc4\xdb\xdf\x40\xe4\xc7\x07\
\xcc\xd8\x9d\x1d\xfb\xb9\xa6\x96\xb7\xfb\x7e\xf7\xe8\x7c\xd6\xd5\
\x7b\xa0\xaa\x9c\x0f\xd4\x41\x87\x10\x14\x09\x9b\x77\x3c\x61\x38\
\x66\x38\x7d\x7d\x7a\x71\x7a\xd1\x52\xda\xb9\x83\x63\xf0\xa2\xe8\
\xf9\xef\xfc\x77\xad\x78\x28\x30\x72\xca\x43\xb9\xd7\x46\xb6\xce\
\x93\x1a\xe9\xb8\xd9\x76\x69\x41\x95\xbd\x53\xd9\x73\xf1\x45\xab\
\x95\x3c\xa7\xe7\x58\x8c\x8c\xe1\x59\x80\xe5\xb0\xdf\xba\x2d\x5c\
\xba\x0f\x2b\x8e\x0c\xa3\x04\x64\x34\xde\x0e\x91\x46\x5b\x32\xfb\
\xb6\xa9\xae\x1a\x99\xb6\x51\x95\x67\xbc\xd8\x6f\xae\xaa\x8e\x60\
\x8f\x7f\xc9\x6d\x34\xd4\x56\xdd\x77\x56\x94\xc7\x55\xb5\x1e\x0f\
\xf9\x81\xc8\xc7\x22\xcd\xf1\x8b\x02\x3b\xb7\xa1\x82\x80\xb5\x21\
\xf4\xe0\x1b\xfb\x80\x14\x5c\x66\xfd\x51\xd9\xc4\x08\x57\xf6\xf7\
\xfe\x05\x9a\x6c\x73\xdd\
\x00\x00\x01\x57\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\
\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\
\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\
\x46\x00\x00\x00\xdd\x49\x44\x41\x54\x78\xda\x5c\x8e\xb1\x4e\x84\
\x40\x18\x84\x67\xef\x4c\x2c\xc8\xd9\x2c\x0d\x58\x50\x1b\x0b\xc3\
\xfa\x24\x77\xbd\x0d\x85\x4f\x40\x0b\xbb\xcb\x3b\xd0\x68\x41\x72\
\xc5\xd2\x28\x4f\x02\xcf\xb1\x97\x40\x61\xd4\xc2\xc4\x62\x2c\xbc\
\x4d\xd0\x49\xfe\xbf\xf8\x32\xff\x3f\x23\x48\xc2\x5a\x3b\x00\x80\
\xd6\xfa\x80\xb3\xac\xb5\x03\x49\x18\x63\x0e\x5b\x21\xc4\x90\xe7\
\xf9\x3e\x49\x92\x9b\xbe\xef\xef\xca\xb2\x7c\xf5\xde\xbf\x04\xe6\
\x9c\xbb\xbd\x20\xf9\x19\xae\x95\x52\xfb\x2c\xcb\xbe\xa5\x94\x01\
\x81\xe4\x9b\x38\xbf\x3c\x2a\xa5\x1e\xf0\x4f\xe3\x38\x3e\x37\x4d\
\xf3\x28\x48\x02\x00\xba\xae\x7b\x97\x52\xee\x82\x61\x59\x96\x8f\
\xa2\x28\xae\x00\x60\x03\x00\xc6\x98\xe3\xda\x00\x00\x71\x1c\xef\
\xb4\xd6\x4f\x00\xb0\x05\xf0\x27\x6a\x9e\x67\x44\x51\x04\x00\x48\
\xd3\xf4\xde\x39\x77\xbd\x21\xf9\xb5\xea\x70\x6a\xdb\xf6\x72\x9a\
\xa6\xd3\xaa\xf8\xef\xaa\xeb\xda\x57\x55\xe5\x49\x22\xcc\x9a\xfd\
\x0c\x00\x24\xab\x6e\xfa\x96\x21\xfc\xb8\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x15\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x06\x08\x06\x00\x00\x00\xe0\xcc\xef\x48\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0a\x4f\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x9d\x53\x67\x54\x53\xe9\x16\x3d\xf7\xde\
\xf4\x42\x4b\x88\x80\x94\x4b\x6f\x52\x15\x08\x20\x52\x42\x8b\x80\
\x14\x91\x26\x2a\x21\x09\x10\x4a\x88\x21\xa1\xd9\x15\x51\xc1\x11\
\x45\x45\x04\x1b\xc8\xa0\x88\x03\x8e\x8e\x80\x8c\x15\x51\x2c\x0c\
\x8a\x0a\xd8\x07\xe4\x21\xa2\x8e\x83\xa3\x88\x8a\xca\xfb\xe1\x7b\
\xa3\x6b\xd6\xbc\xf7\xe6\xcd\xfe\xb5\xd7\x3e\xe7\xac\xf3\x9d\xb3\
\xcf\x07\xc0\x08\x0c\x96\x48\x33\x51\x35\x80\x0c\xa9\x42\x1e\x11\
\xe0\x83\xc7\xc4\xc6\xe1\xe4\x2e\x40\x81\x0a\x24\x70\x00\x10\x08\
\xb3\x64\x21\x73\xfd\x23\x01\x00\xf8\x7e\x3c\x3c\x2b\x22\xc0\x07\
\xbe\x00\x01\x78\xd3\x0b\x08\x00\xc0\x4d\x9b\xc0\x30\x1c\x87\xff\
\x0f\xea\x42\x99\x5c\x01\x80\x84\x01\xc0\x74\x91\x38\x4b\x08\x80\
\x14\x00\x40\x7a\x8e\x42\xa6\x00\x40\x46\x01\x80\x9d\x98\x26\x53\
\x00\xa0\x04\x00\x60\xcb\x63\x62\xe3\x00\x50\x2d\x00\x60\x27\x7f\
\xe6\xd3\x00\x80\x9d\xf8\x99\x7b\x01\x00\x5b\x94\x21\x15\x01\xa0\
\x91\x00\x20\x13\x65\x88\x44\x00\x68\x3b\x00\xac\xcf\x56\x8a\x45\
\x00\x58\x30\x00\x14\x66\x4b\xc4\x39\x00\xd8\x2d\x00\x30\x49\x57\
\x66\x48\x00\xb0\xb7\x00\xc0\xce\x10\x0b\xb2\x00\x08\x0c\x00\x30\
\x51\x88\x85\x29\x00\x04\x7b\x00\x60\xc8\x23\x23\x78\x00\x84\x99\
\x00\x14\x46\xf2\x57\x3c\xf1\x2b\xae\x10\xe7\x2a\x00\x00\x78\x99\
\xb2\x3c\xb9\x24\x39\x45\x81\x5b\x08\x2d\x71\x07\x57\x57\x2e\x1e\
\x28\xce\x49\x17\x2b\x14\x36\x61\x02\x61\x9a\x40\x2e\xc2\x79\x99\
\x19\x32\x81\x34\x0f\xe0\xf3\xcc\x00\x00\xa0\x91\x15\x11\xe0\x83\
\xf3\xfd\x78\xce\x0e\xae\xce\xce\x36\x8e\xb6\x0e\x5f\x2d\xea\xbf\
\x06\xff\x22\x62\x62\xe3\xfe\xe5\xcf\xab\x70\x40\x00\x00\xe1\x74\
\x7e\xd1\xfe\x2c\x2f\xb3\x1a\x80\x3b\x06\x80\x6d\xfe\xa2\x25\xee\
\x04\x68\x5e\x0b\xa0\x75\xf7\x8b\x66\xb2\x0f\x40\xb5\x00\xa0\xe9\
\xda\x57\xf3\x70\xf8\x7e\x3c\x3c\x45\xa1\x90\xb9\xd9\xd9\xe5\xe4\
\xe4\xd8\x4a\xc4\x42\x5b\x61\xca\x57\x7d\xfe\x67\xc2\x5f\xc0\x57\
\xfd\x6c\xf9\x7e\x3c\xfc\xf7\xf5\xe0\xbe\xe2\x24\x81\x32\x5d\x81\
\x47\x04\xf8\xe0\xc2\xcc\xf4\x4c\xa5\x1c\xcf\x92\x09\x84\x62\xdc\
\xe6\x8f\x47\xfc\xb7\x0b\xff\xfc\x1d\xd3\x22\xc4\x49\x62\xb9\x58\
\x2a\x14\xe3\x51\x12\x71\x8e\x44\x9a\x8c\xf3\x32\xa5\x22\x89\x42\
\x92\x29\xc5\x25\xd2\xff\x64\xe2\xdf\x2c\xfb\x03\x3e\xdf\x35\x00\
\xb0\x6a\x3e\x01\x7b\x91\x2d\xa8\x5d\x63\x03\xf6\x4b\x27\x10\x58\
\x74\xc0\xe2\xf7\x00\x00\xf2\xbb\x6f\xc1\xd4\x28\x08\x03\x80\x68\
\x83\xe1\xcf\x77\xff\xef\x3f\xfd\x47\xa0\x25\x00\x80\x66\x49\x92\
\x71\x00\x00\x5e\x44\x24\x2e\x54\xca\xb3\x3f\xc7\x08\x00\x00\x44\
\xa0\x81\x2a\xb0\x41\x1b\xf4\xc1\x18\x2c\xc0\x06\x1c\xc1\x05\xdc\
\xc1\x0b\xfc\x60\x36\x84\x42\x24\xc4\xc2\x42\x10\x42\x0a\x64\x80\
\x1c\x72\x60\x29\xac\x82\x42\x28\x86\xcd\xb0\x1d\x2a\x60\x2f\xd4\
\x40\x1d\x34\xc0\x51\x68\x86\x93\x70\x0e\x2e\xc2\x55\xb8\x0e\x3d\
\x70\x0f\xfa\x61\x08\x9e\xc1\x28\xbc\x81\x09\x04\x41\xc8\x08\x13\
\x61\x21\xda\x88\x01\x62\x8a\x58\x23\x8e\x08\x17\x99\x85\xf8\x21\
\xc1\x48\x04\x12\x8b\x24\x20\xc9\x88\x14\x51\x22\x4b\x91\x35\x48\
\x31\x52\x8a\x54\x20\x55\x48\x1d\xf2\x3d\x72\x02\x39\x87\x5c\x46\
\xba\x91\x3b\xc8\x00\x32\x82\xfc\x86\xbc\x47\x31\x94\x81\xb2\x51\
\x3d\xd4\x0c\xb5\x43\xb9\xa8\x37\x1a\x84\x46\xa2\x0b\xd0\x64\x74\
\x31\x9a\x8f\x16\xa0\x9b\xd0\x72\xb4\x1a\x3d\x8c\x36\xa1\xe7\xd0\
\xab\x68\x0f\xda\x8f\x3e\x43\xc7\x30\xc0\xe8\x18\x07\x33\xc4\x6c\
\x30\x2e\xc6\xc3\x42\xb1\x38\x2c\x09\x93\x63\xcb\xb1\x22\xac\x0c\
\xab\xc6\x1a\xb0\x56\xac\x03\xbb\x89\xf5\x63\xcf\xb1\x77\x04\x12\
\x81\x45\xc0\x09\x36\x04\x77\x42\x20\x61\x1e\x41\x48\x58\x4c\x58\
\x4e\xd8\x48\xa8\x20\x1c\x24\x34\x11\xda\x09\x37\x09\x03\x84\x51\
\xc2\x27\x22\x93\xa8\x4b\xb4\x26\xba\x11\xf9\xc4\x18\x62\x32\x31\
\x87\x58\x48\x2c\x23\xd6\x12\x8f\x13\x2f\x10\x7b\x88\x43\xc4\x37\
\x24\x12\x89\x43\x32\x27\xb9\x90\x02\x49\xb1\xa4\x54\xd2\x12\xd2\
\x46\xd2\x6e\x52\x23\xe9\x2c\xa9\x9b\x34\x48\x1a\x23\x93\xc9\xda\
\x64\x6b\xb2\x07\x39\x94\x2c\x20\x2b\xc8\x85\xe4\x9d\xe4\xc3\xe4\
\x33\xe4\x1b\xe4\x21\xf2\x5b\x0a\x9d\x62\x40\x71\xa4\xf8\x53\xe2\
\x28\x52\xca\x6a\x4a\x19\xe5\x10\xe5\x34\xe5\x06\x65\x98\x32\x41\
\x55\xa3\x9a\x52\xdd\xa8\xa1\x54\x11\x35\x8f\x5a\x42\xad\xa1\xb6\
\x52\xaf\x51\x87\xa8\x13\x34\x75\x9a\x39\xcd\x83\x16\x49\x4b\xa5\
\xad\xa2\x95\xd3\x1a\x68\x17\x68\xf7\x69\xaf\xe8\x74\xba\x11\xdd\
\x95\x1e\x4e\x97\xd0\x57\xd2\xcb\xe9\x47\xe8\x97\xe8\x03\xf4\x77\
\x0c\x0d\x86\x15\x83\xc7\x88\x67\x28\x19\x9b\x18\x07\x18\x67\x19\
\x77\x18\xaf\x98\x4c\xa6\x19\xd3\x8b\x19\xc7\x54\x30\x37\x31\xeb\
\x98\xe7\x99\x0f\x99\x6f\x55\x58\x2a\xb6\x2a\x7c\x15\x91\xca\x0a\
\x95\x4a\x95\x26\x95\x1b\x2a\x2f\x54\xa9\xaa\xa6\xaa\xde\xaa\x0b\
\x55\xf3\x55\xcb\x54\x8f\xa9\x5e\x53\x7d\xae\x46\x55\x33\x53\xe3\
\xa9\x09\xd4\x96\xab\x55\xaa\x9d\x50\xeb\x53\x1b\x53\x67\xa9\x3b\
\xa8\x87\xaa\x67\xa8\x6f\x54\x3f\xa4\x7e\x59\xfd\x89\x06\x59\xc3\
\x4c\xc3\x4f\x43\xa4\x51\xa0\xb1\x5f\xe3\xbc\xc6\x20\x0b\x63\x19\
\xb3\x78\x2c\x21\x6b\x0d\xab\x86\x75\x81\x35\xc4\x26\xb1\xcd\xd9\
\x7c\x76\x2a\xbb\x98\xfd\x1d\xbb\x8b\x3d\xaa\xa9\xa1\x39\x43\x33\
\x4a\x33\x57\xb3\x52\xf3\x94\x66\x3f\x07\xe3\x98\x71\xf8\x9c\x74\
\x4e\x09\xe7\x28\xa7\x97\xf3\x7e\x8a\xde\x14\xef\x29\xe2\x29\x1b\
\xa6\x34\x4c\xb9\x31\x65\x5c\x6b\xaa\x96\x97\x96\x58\xab\x48\xab\
\x51\xab\x47\xeb\xbd\x36\xae\xed\xa7\x9d\xa6\xbd\x45\xbb\x59\xfb\
\x81\x0e\x41\xc7\x4a\x27\x5c\x27\x47\x67\x8f\xce\x05\x9d\xe7\x53\
\xd9\x53\xdd\xa7\x0a\xa7\x16\x4d\x3d\x3a\xf5\xae\x2e\xaa\x6b\xa5\
\x1b\xa1\xbb\x44\x77\xbf\x6e\xa7\xee\x98\x9e\xbe\x5e\x80\x9e\x4c\
\x6f\xa7\xde\x79\xbd\xe7\xfa\x1c\x7d\x2f\xfd\x54\xfd\x6d\xfa\xa7\
\xf5\x47\x0c\x58\x06\xb3\x0c\x24\x06\xdb\x0c\xce\x18\x3c\xc5\x35\
\x71\x6f\x3c\x1d\x2f\xc7\xdb\xf1\x51\x43\x5d\xc3\x40\x43\xa5\x61\
\x95\x61\x97\xe1\x84\x91\xb9\xd1\x3c\xa3\xd5\x46\x8d\x46\x0f\x8c\
\x69\xc6\x5c\xe3\x24\xe3\x6d\xc6\x6d\xc6\xa3\x26\x06\x26\x21\x26\
\x4b\x4d\xea\x4d\xee\x9a\x52\x4d\xb9\xa6\x29\xa6\x3b\x4c\x3b\x4c\
\xc7\xcd\xcc\xcd\xa2\xcd\xd6\x99\x35\x9b\x3d\x31\xd7\x32\xe7\x9b\
\xe7\x9b\xd7\x9b\xdf\xb7\x60\x5a\x78\x5a\x2c\xb6\xa8\xb6\xb8\x65\
\x49\xb2\xe4\x5a\xa6\x59\xee\xb6\xbc\x6e\x85\x5a\x39\x59\xa5\x58\
\x55\x5a\x5d\xb3\x46\xad\x9d\xad\x25\xd6\xbb\xad\xbb\xa7\x11\xa7\
\xb9\x4e\x93\x4e\xab\x9e\xd6\x67\xc3\xb0\xf1\xb6\xc9\xb6\xa9\xb7\
\x19\xb0\xe5\xd8\x06\xdb\xae\xb6\x6d\xb6\x7d\x61\x67\x62\x17\x67\
\xb7\xc5\xae\xc3\xee\x93\xbd\x93\x7d\xba\x7d\x8d\xfd\x3d\x07\x0d\
\x87\xd9\x0e\xab\x1d\x5a\x1d\x7e\x73\xb4\x72\x14\x3a\x56\x3a\xde\
\x9a\xce\x9c\xee\x3f\x7d\xc5\xf4\x96\xe9\x2f\x67\x58\xcf\x10\xcf\
\xd8\x33\xe3\xb6\x13\xcb\x29\xc4\x69\x9d\x53\x9b\xd3\x47\x67\x17\
\x67\xb9\x73\x83\xf3\x88\x8b\x89\x4b\x82\xcb\x2e\x97\x3e\x2e\x9b\
\x1b\xc6\xdd\xc8\xbd\xe4\x4a\x74\xf5\x71\x5d\xe1\x7a\xd2\xf5\x9d\
\x9b\xb3\x9b\xc2\xed\xa8\xdb\xaf\xee\x36\xee\x69\xee\x87\xdc\x9f\
\xcc\x34\x9f\x29\x9e\x59\x33\x73\xd0\xc3\xc8\x43\xe0\x51\xe5\xd1\
\x3f\x0b\x9f\x95\x30\x6b\xdf\xac\x7e\x4f\x43\x4f\x81\x67\xb5\xe7\
\x23\x2f\x63\x2f\x91\x57\xad\xd7\xb0\xb7\xa5\x77\xaa\xf7\x61\xef\
\x17\x3e\xf6\x3e\x72\x9f\xe3\x3e\xe3\x3c\x37\xde\x32\xde\x59\x5f\
\xcc\x37\xc0\xb7\xc8\xb7\xcb\x4f\xc3\x6f\x9e\x5f\x85\xdf\x43\x7f\
\x23\xff\x64\xff\x7a\xff\xd1\x00\xa7\x80\x25\x01\x67\x03\x89\x81\
\x41\x81\x5b\x02\xfb\xf8\x7a\x7c\x21\xbf\x8e\x3f\x3a\xdb\x65\xf6\
\xb2\xd9\xed\x41\x8c\xa0\xb9\x41\x15\x41\x8f\x82\xad\x82\xe5\xc1\
\xad\x21\x68\xc8\xec\x90\xad\x21\xf7\xe7\x98\xce\x91\xce\x69\x0e\
\x85\x50\x7e\xe8\xd6\xd0\x07\x61\xe6\x61\x8b\xc3\x7e\x0c\x27\x85\
\x87\x85\x57\x86\x3f\x8e\x70\x88\x58\x1a\xd1\x31\x97\x35\x77\xd1\
\xdc\x43\x73\xdf\x44\xfa\x44\x96\x44\xde\x9b\x67\x31\x4f\x39\xaf\
\x2d\x4a\x35\x2a\x3e\xaa\x2e\x6a\x3c\xda\x37\xba\x34\xba\x3f\xc6\
\x2e\x66\x59\xcc\xd5\x58\x9d\x58\x49\x6c\x4b\x1c\x39\x2e\x2a\xae\
\x36\x6e\x6c\xbe\xdf\xfc\xed\xf3\x87\xe2\x9d\xe2\x0b\xe3\x7b\x17\
\x98\x2f\xc8\x5d\x70\x79\xa1\xce\xc2\xf4\x85\xa7\x16\xa9\x2e\x12\
\x2c\x3a\x96\x40\x4c\x88\x4e\x38\x94\xf0\x41\x10\x2a\xa8\x16\x8c\
\x25\xf2\x13\x77\x25\x8e\x0a\x79\xc2\x1d\xc2\x67\x22\x2f\xd1\x36\
\xd1\x88\xd8\x43\x5c\x2a\x1e\x4e\xf2\x48\x2a\x4d\x7a\x92\xec\x91\
\xbc\x35\x79\x24\xc5\x33\xa5\x2c\xe5\xb9\x84\x27\xa9\x90\xbc\x4c\
\x0d\x4c\xdd\x9b\x3a\x9e\x16\x9a\x76\x20\x6d\x32\x3d\x3a\xbd\x31\
\x83\x92\x91\x90\x71\x42\xaa\x21\x4d\x93\xb6\x67\xea\x67\xe6\x66\
\x76\xcb\xac\x65\x85\xb2\xfe\xc5\x6e\x8b\xb7\x2f\x1e\x95\x07\xc9\
\x6b\xb3\x90\xac\x05\x59\x2d\x0a\xb6\x42\xa6\xe8\x54\x5a\x28\xd7\
\x2a\x07\xb2\x67\x65\x57\x66\xbf\xcd\x89\xca\x39\x96\xab\x9e\x2b\
\xcd\xed\xcc\xb3\xca\xdb\x90\x37\x9c\xef\x9f\xff\xed\x12\xc2\x12\
\xe1\x92\xb6\xa5\x86\x4b\x57\x2d\x1d\x58\xe6\xbd\xac\x6a\x39\xb2\
\x3c\x71\x79\xdb\x0a\xe3\x15\x05\x2b\x86\x56\x06\xac\x3c\xb8\x8a\
\xb6\x2a\x6d\xd5\x4f\xab\xed\x57\x97\xae\x7e\xbd\x26\x7a\x4d\x6b\
\x81\x5e\xc1\xca\x82\xc1\xb5\x01\x6b\xeb\x0b\x55\x0a\xe5\x85\x7d\
\xeb\xdc\xd7\xed\x5d\x4f\x58\x2f\x59\xdf\xb5\x61\xfa\x86\x9d\x1b\
\x3e\x15\x89\x8a\xae\x14\xdb\x17\x97\x15\x7f\xd8\x28\xdc\x78\xe5\
\x1b\x87\x6f\xca\xbf\x99\xdc\x94\xb4\xa9\xab\xc4\xb9\x64\xcf\x66\
\xd2\x66\xe9\xe6\xde\x2d\x9e\x5b\x0e\x96\xaa\x97\xe6\x97\x0e\x6e\
\x0d\xd9\xda\xb4\x0d\xdf\x56\xb4\xed\xf5\xf6\x45\xdb\x2f\x97\xcd\
\x28\xdb\xbb\x83\xb6\x43\xb9\xa3\xbf\x3c\xb8\xbc\x65\xa7\xc9\xce\
\xcd\x3b\x3f\x54\xa4\x54\xf4\x54\xfa\x54\x36\xee\xd2\xdd\xb5\x61\
\xd7\xf8\x6e\xd1\xee\x1b\x7b\xbc\xf6\x34\xec\xd5\xdb\x5b\xbc\xf7\
\xfd\x3e\xc9\xbe\xdb\x55\x01\x55\x4d\xd5\x66\xd5\x65\xfb\x49\xfb\
\xb3\xf7\x3f\xae\x89\xaa\xe9\xf8\x96\xfb\x6d\x5d\xad\x4e\x6d\x71\
\xed\xc7\x03\xd2\x03\xfd\x07\x23\x0e\xb6\xd7\xb9\xd4\xd5\x1d\xd2\
\x3d\x54\x52\x8f\xd6\x2b\xeb\x47\x0e\xc7\x1f\xbe\xfe\x9d\xef\x77\
\x2d\x0d\x36\x0d\x55\x8d\x9c\xc6\xe2\x23\x70\x44\x79\xe4\xe9\xf7\
\x09\xdf\xf7\x1e\x0d\x3a\xda\x76\x8c\x7b\xac\xe1\x07\xd3\x1f\x76\
\x1d\x67\x1d\x2f\x6a\x42\x9a\xf2\x9a\x46\x9b\x53\x9a\xfb\x5b\x62\
\x5b\xba\x4f\xcc\x3e\xd1\xd6\xea\xde\x7a\xfc\x47\xdb\x1f\x0f\x9c\
\x34\x3c\x59\x79\x4a\xf3\x54\xc9\x69\xda\xe9\x82\xd3\x93\x67\xf2\
\xcf\x8c\x9d\x95\x9d\x7d\x7e\x2e\xf9\xdc\x60\xdb\xa2\xb6\x7b\xe7\
\x63\xce\xdf\x6a\x0f\x6f\xef\xba\x10\x74\xe1\xd2\x45\xff\x8b\xe7\
\x3b\xbc\x3b\xce\x5c\xf2\xb8\x74\xf2\xb2\xdb\xe5\x13\x57\xb8\x57\
\x9a\xaf\x3a\x5f\x6d\xea\x74\xea\x3c\xfe\x93\xd3\x4f\xc7\xbb\x9c\
\xbb\x9a\xae\xb9\x5c\x6b\xb9\xee\x7a\xbd\xb5\x7b\x66\xf7\xe9\x1b\
\x9e\x37\xce\xdd\xf4\xbd\x79\xf1\x16\xff\xd6\xd5\x9e\x39\x3d\xdd\
\xbd\xf3\x7a\x6f\xf7\xc5\xf7\xf5\xdf\x16\xdd\x7e\x72\x27\xfd\xce\
\xcb\xbb\xd9\x77\x27\xee\xad\xbc\x4f\xbc\x5f\xf4\x40\xed\x41\xd9\
\x43\xdd\x87\xd5\x3f\x5b\xfe\xdc\xd8\xef\xdc\x7f\x6a\xc0\x77\xa0\
\xf3\xd1\xdc\x47\xf7\x06\x85\x83\xcf\xfe\x91\xf5\x8f\x0f\x43\x05\
\x8f\x99\x8f\xcb\x86\x0d\x86\xeb\x9e\x38\x3e\x39\x39\xe2\x3f\x72\
\xfd\xe9\xfc\xa7\x43\xcf\x64\xcf\x26\x9e\x17\xfe\xa2\xfe\xcb\xae\
\x17\x16\x2f\x7e\xf8\xd5\xeb\xd7\xce\xd1\x98\xd1\xa1\x97\xf2\x97\
\x93\xbf\x6d\x7c\xa5\xfd\xea\xc0\xeb\x19\xaf\xdb\xc6\xc2\xc6\x1e\
\xbe\xc9\x78\x33\x31\x5e\xf4\x56\xfb\xed\xc1\x77\xdc\x77\x1d\xef\
\xa3\xdf\x0f\x4f\xe4\x7c\x20\x7f\x28\xff\x68\xf9\xb1\xf5\x53\xd0\
\xa7\xfb\x93\x19\x93\x93\xff\x04\x03\x98\xf3\xfc\x63\x33\x2d\xdb\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\x46\x00\x00\x00\x40\
\x49\x44\x41\x54\x78\xda\x5c\x8c\x31\x11\x00\x30\x08\xc4\x42\x2d\
\x20\x03\xfc\x2b\x61\x45\x02\x1a\xe8\x54\xae\x6d\xc6\xcf\x7d\xc4\
\xcc\x1a\x20\x22\x84\x8b\x05\x90\x99\xa8\x6a\xdf\x42\xba\x7b\xc6\
\xaa\x92\x47\x1c\xdc\x7d\xb2\x8b\x8f\x93\x7d\x1e\xc0\x64\xf7\x00\
\xf5\x9f\x1d\xd3\x02\x88\xef\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x04\x00\x00\x00\x23\x93\x3e\x53\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x03\x18\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x63\x60\x60\x9e\xe0\xe8\xe2\xe4\xca\x24\
\xc0\xc0\x50\x50\x54\x52\xe4\x1e\xe4\x18\x19\x11\x19\xa5\xc0\x7e\
\x9e\x81\x8d\x81\x99\x81\x81\x81\x81\x81\x21\x31\xb9\xb8\xc0\x31\
\x20\xc0\x87\x81\x81\x81\x21\x2f\x3f\x2f\x95\x01\x15\x30\x32\x30\
\x7c\xbb\xc6\xc0\xc8\xc0\xc0\xc0\x70\x59\xd7\xd1\xc5\xc9\x95\x81\
\x34\xc0\x9a\x5c\x50\x54\xc2\xc0\xc0\x70\x80\x81\x81\xc1\x28\x25\
\xb5\x38\x99\x81\x81\xe1\x0b\x03\x03\x43\x7a\x79\x49\x41\x09\x03\
\x03\x63\x0c\x03\x03\x83\x48\x52\x76\x41\x09\x03\x03\x63\x01\x03\
\x03\x83\x48\x76\x48\x90\x33\x03\x03\x63\x0b\x03\x03\x13\x4f\x49\
\x6a\x45\x09\x03\x03\x03\x83\x73\x7e\x41\x65\x51\x66\x7a\x46\x89\
\x82\xa1\xa5\xa5\xa5\x82\x63\x4a\x7e\x52\xaa\x42\x70\x65\x71\x49\
\x6a\x6e\xb1\x82\x67\x5e\x72\x7e\x51\x41\x7e\x51\x62\x49\x6a\x0a\
\x03\x03\x03\xd4\x0e\x06\x06\x06\x06\x5e\x97\xfc\x12\x05\xf7\xc4\
\xcc\x3c\x05\x23\x03\x55\x06\x2a\x83\x88\xc8\x28\x05\x08\x0b\x11\
\x3e\x08\x31\x04\x48\x2e\x2d\x2a\x83\x07\x25\x03\x83\x00\x83\x02\
\x83\x01\x83\x03\x43\x00\x43\x22\x43\x3d\xc3\x02\x86\xa3\x0c\x6f\
\x18\xc5\x19\x5d\x18\x4b\x19\x57\x30\xde\x63\x12\x63\x0a\x62\x9a\
\xc0\x74\x81\x59\x98\x39\x92\x79\x21\xf3\x1b\x16\x4b\x96\x0e\x96\
\x5b\xac\x7a\xac\xad\xac\xf7\xd8\x2c\xd9\xa6\xb1\x7d\x63\x0f\x67\
\xdf\xcd\xa1\xc4\xd1\xc5\xf1\x85\x33\x91\xf3\x02\x97\x23\xd7\x16\
\x6e\x4d\xee\x05\x3c\x52\x3c\x53\x79\x85\x78\x27\xf1\x09\xf3\x4d\
\xe3\x97\xe1\x5f\x2c\xa0\x23\xb0\x43\xd0\x55\xf0\x8a\x50\xaa\xd0\
\x0f\xe1\x5e\x11\x15\x91\xbd\xa2\xe1\xa2\x5f\xc4\x26\x89\x1b\x89\
\x5f\x91\xa8\x90\x94\x93\x3c\x26\x95\x2f\x2d\x2d\x7d\x42\xa6\x4c\
\x56\x5d\xf6\x96\x5c\x9f\xbc\x8b\xfc\x1f\x85\xad\x8a\x85\x4a\x7a\
\x4a\x6f\x95\xd7\xaa\x14\xa8\x9a\xa8\xfe\x54\x3b\xa8\xde\xa5\x11\
\xaa\xa9\xa4\xf9\x41\xeb\x80\xf6\x24\x9d\x54\x5d\x2b\x3d\x41\xbd\
\x57\xfa\x47\x0c\x16\x18\xd6\x1a\xc5\x18\xdb\x9a\xc8\x9b\x32\x9b\
\xbe\x34\xbb\x60\xbe\xd3\x62\x89\xe5\x04\xab\x3a\xeb\x5c\x9b\x38\
\xdb\x40\x3b\x57\x7b\x6b\x07\x63\x47\x1d\x27\x35\x67\x25\x17\x05\
\x57\x79\x37\x05\x77\x65\x0f\x75\x4f\x5d\x2f\x13\x6f\x1b\x1f\x77\
\xdf\x60\xbf\x04\xff\xfc\x80\xfa\xc0\x89\x41\x4b\x83\x77\x85\x5c\
\x0c\x7d\x19\xce\x14\x21\x17\x69\x15\x15\x11\x5d\x11\x33\x33\x76\
\x4f\xdc\x83\x04\xb6\x44\xdd\xa4\xb0\xe4\x86\x94\x35\xa9\x37\xd3\
\x39\x32\x2c\x32\x33\xb3\xe6\x66\x5f\xcc\x65\xcf\xb3\xcf\xaf\x28\
\xd8\x54\xf8\xae\x58\xbb\x24\xab\x74\x55\xd9\x9b\x0a\xfd\xca\x92\
\xaa\x5d\x35\x8c\xb5\x5e\x75\x53\xeb\x1f\x36\xea\x35\xd5\x34\x9f\
\x6d\x95\x6b\x2b\x6c\x3f\xda\x29\xdd\x55\xd4\x7d\xba\x57\xb5\xaf\
\xb1\xff\xee\x44\x9b\x49\xb3\x27\xff\x9d\x1a\x3f\xed\xf0\x0c\x8d\
\x99\xfd\xb3\xbe\xcf\x49\x98\x7b\x7a\xbe\xf9\x82\xa5\x8b\x44\x16\
\xb7\x2e\xf9\xb6\x2c\x73\xf9\xbd\x95\x21\xab\x4e\xaf\x71\x59\xbb\
\x6f\xbd\xe5\x86\x6d\x9b\x4c\x36\x6f\xd9\x6a\xb2\x6d\xfb\x0e\xab\
\x9d\xfb\x77\xbb\xee\x39\xbb\x2f\x6c\xff\x83\x83\x39\x87\x7e\x1e\
\x69\x3f\x26\x7e\x7c\xc5\x49\xeb\x53\xe7\xce\x24\x9f\xfd\x75\x7e\
\xd2\x45\xed\x4b\x47\xaf\x24\x5e\xfd\x77\x7d\xce\x4d\x9b\x5b\x77\
\xef\xd4\xdf\x53\xbe\x7f\xe2\x61\xde\x63\xb1\x27\xfb\x9f\x65\xbe\
\x10\x79\x79\xf0\x75\xfe\x5b\xf9\x77\x17\x3e\x34\x7d\x32\xfd\xfc\
\xea\xeb\x82\xef\xe1\x3f\x05\x7e\x9d\xfa\xd3\xfa\xcf\xf1\xff\x7f\
\x00\x0d\x00\x0f\x34\xfa\x96\xf1\x5d\x00\x00\x00\x20\x63\x48\x52\
\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\
\xe9\x00\x00\x75\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\
\x6f\x92\x5f\xc5\x46\x00\x00\x00\x52\x49\x44\x41\x54\x78\xda\x62\
\x58\xf5\xe9\xca\x3f\x18\x5c\xfe\x9e\x21\xd3\xff\xc4\x8f\xab\xbf\
\xaf\xfe\xbe\xfa\xfb\xd0\x97\x68\x63\x86\xff\x0c\x85\x6b\xf7\x7e\
\xdc\xfb\x71\xf3\x87\xcc\xbc\xff\x0c\x0c\xff\x19\x18\x98\x73\xce\
\xce\xbd\x1f\x39\xff\x3f\xc3\x7f\x06\x86\xff\x0c\xff\x19\x14\xdd\
\x2c\xb6\xfe\x67\xf8\xcf\xf0\x9f\x01\x30\x00\x6a\x5f\x2c\x67\x74\
\xda\xec\xfb\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x09\
\x09\x5f\x97\x13\
\x00\x71\
\x00\x73\x00\x73\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x11\
\x0b\x14\x5d\x13\
\x00\x50\
\x00\x79\x00\x51\x00\x74\x00\x35\x00\x5f\x00\x73\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x73\
\
\x00\x14\
\x05\xce\x7b\xe3\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x5f\x00\x44\x00\x61\x00\x72\x00\x6b\x00\x4f\x00\x72\x00\x61\x00\x6e\x00\x67\x00\x65\x00\x2e\
\x00\x71\x00\x73\x00\x73\
\x00\x0d\
\x05\x5f\xba\xa3\
\x00\x44\
\x00\x61\x00\x72\x00\x6b\x00\x4f\x00\x72\x00\x61\x00\x6e\x00\x67\x00\x65\x00\x5f\x00\x72\x00\x63\
\x00\x0c\
\x04\x56\x23\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0b\x2d\x87\xc7\
\x00\x68\
\x00\x61\x00\x6e\x00\x64\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x40\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x6e\x00\x02\x00\x00\x00\x03\x00\x00\x00\x05\
\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x08\x7b\
\x00\x00\x00\xc6\x00\x00\x00\x00\x00\x01\x00\x00\x14\xef\
\x00\x00\x00\xac\x00\x00\x00\x00\x00\x01\x00\x00\x09\xd6\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x75\xb7\x3c\x4d\xb6\
\x00\x00\x00\x6e\x00\x02\x00\x00\x00\x03\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x08\x7b\
\x00\x00\x01\x75\xb7\x3c\x4d\x3b\
\x00\x00\x00\xc6\x00\x00\x00\x00\x00\x01\x00\x00\x14\xef\
\x00\x00\x01\x75\xb7\x3c\x4d\x3b\
\x00\x00\x00\xac\x00\x00\x00\x00\x00\x01\x00\x00\x09\xd6\
\x00\x00\x01\x75\xb7\x3c\x4d\x3f\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 58.703407
| 129
| 0.726214
|
11e490a14d1481903ebea580206283464b776d4f
| 1,734
|
py
|
Python
|
rummage/lib/gui/dialogs/error_text_dialog.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 55
|
2015-02-15T08:17:55.000Z
|
2022-03-11T11:55:39.000Z
|
rummage/lib/gui/dialogs/error_text_dialog.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 264
|
2015-01-29T20:27:40.000Z
|
2022-03-03T04:08:48.000Z
|
rummage/lib/gui/dialogs/error_text_dialog.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 12
|
2017-08-30T22:54:20.000Z
|
2022-03-21T01:05:50.000Z
|
"""
Error Text Dialog.
Licensed under MIT
Copyright (c) 2013 - 2015 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
from ..localization import _
from .. import gui
class ErrorTextDialog(gui.ErrorTextDialog):
"""Error text dialog."""
def __init__(self, parent, text):
"""Initialize."""
super().__init__(parent)
self.localize()
self.refresh_localization()
self.m_error_textbox.SetValue(text)
self.m_error_text_panel.Fit()
self.Fit()
self.Centre()
def localize(self):
"""Translate strings."""
self.ERROR = _("Error")
def refresh_localization(self):
"""Localize the dialog."""
self.SetTitle(self.ERROR)
| 35.387755
| 113
| 0.72895
|
5d46332fe2befdc35c2eb293bd6e52f26a734691
| 1,050
|
py
|
Python
|
download_dataset.py
|
PytaichukBohdan/law-generator
|
8bf998c5c3138a540f68b3bf2d68907bd48a9699
|
[
"MIT"
] | null | null | null |
download_dataset.py
|
PytaichukBohdan/law-generator
|
8bf998c5c3138a540f68b3bf2d68907bd48a9699
|
[
"MIT"
] | null | null | null |
download_dataset.py
|
PytaichukBohdan/law-generator
|
8bf998c5c3138a540f68b3bf2d68907bd48a9699
|
[
"MIT"
] | null | null | null |
import os
import sys
import requests
from tqdm import tqdm
subdir = 'data'
if not os.path.exists(subdir):
os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows
for ds in [
'webtext',
'small-117M', 'small-117M-k40',
'medium-345M', 'medium-345M-k40',
'large-762M', 'large-762M-k40',
'xl-1542M', 'xl-1542M-k40',
]:
for split in ['train', 'valid', 'test']:
filename = ds + "." + split + '.jsonl'
r = requests.get("https://storage.googleapis.com/gpt-2/output-dataset/v1/" + filename, stream=True)
with open(os.path.join(subdir, filename), 'wb') as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
| 36.206897
| 107
| 0.6
|
67b88d6ecff416e6dc687822e4800ad75b4aa8c4
| 708
|
py
|
Python
|
src/reset_counter.py
|
MatuMikey/gfg-reddit-bot
|
0690bb92a2bb3f6f78b5e7ee9ac90208a652ca34
|
[
"MIT"
] | null | null | null |
src/reset_counter.py
|
MatuMikey/gfg-reddit-bot
|
0690bb92a2bb3f6f78b5e7ee9ac90208a652ca34
|
[
"MIT"
] | null | null | null |
src/reset_counter.py
|
MatuMikey/gfg-reddit-bot
|
0690bb92a2bb3f6f78b5e7ee9ac90208a652ca34
|
[
"MIT"
] | null | null | null |
import common.api as api
from common.models.fixture import Fixture
from common.models.result import Result
from common.models.league import League
from common.models.stat import Stat
from common.models.user import User
from common.config import config
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def reset_counter():
logger.info("Resetting API counter...")
api_call_stat = list(Stat.query("api-call"))[0]
api_stat_dict = api_call_stat.stat_value.as_dict()
api_stat_dict["count"] = 0
api_call_stat.stat_value = api_stat_dict
api_call_stat.save()
def lambda_handler(event, context):
reset_counter()
if __name__ == "__main__":
reset_counter()
| 25.285714
| 54
| 0.761299
|
b7cdbca9a4520d8c42d79710431a3f28a8eceb81
| 85,050
|
py
|
Python
|
scripts/dts/edtlib.py
|
xiongyihui/zephyr
|
0ca84c0eb378fc2f126cdbb51d7195220746f36d
|
[
"Apache-2.0"
] | null | null | null |
scripts/dts/edtlib.py
|
xiongyihui/zephyr
|
0ca84c0eb378fc2f126cdbb51d7195220746f36d
|
[
"Apache-2.0"
] | 3
|
2021-08-11T18:14:01.000Z
|
2021-08-11T22:41:10.000Z
|
scripts/dts/edtlib.py
|
xiongyihui/zephyr
|
0ca84c0eb378fc2f126cdbb51d7195220746f36d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 Nordic Semiconductor ASA
# Copyright (c) 2019 Linaro Limited
# SPDX-License-Identifier: BSD-3-Clause
# Tip: You can view just the documentation with 'pydoc3 edtlib'
"""
Library for working with devicetrees at a higher level compared to dtlib. Like
dtlib, this library presents a tree of devicetree nodes, but the nodes are
augmented with information from bindings and include some interpretation of
properties.
Bindings are files that describe devicetree nodes. Devicetree nodes are usually
mapped to bindings via their 'compatible = "..."' property, but a binding can
also come from a 'child-binding:' key in the binding for the parent devicetree
node.
Each devicetree node (dtlib.Node) gets a corresponding edtlib.Node instance,
which has all the information related to the node.
The top-level entry point of the library is the EDT class. EDT.__init__() takes
a .dts file to parse and a list of paths to directories containing bindings.
"""
# NOTE: testedtlib.py is the test suite for this library. It can be run
# directly as a script:
#
# ./testedtlib.py
# Implementation notes
# --------------------
#
# A '_' prefix on an identifier in Python is a convention for marking it private.
# Please do not access private things. Instead, think of what API you need, and
# add it.
#
# This module is not meant to have any global state. It should be possible to
# create several EDT objects with independent binding paths and flags. If you
# need to add a configuration parameter or the like, store it in the EDT
# instance, and initialize it e.g. with a constructor argument.
#
# This library is layered on top of dtlib, and is not meant to expose it to
# clients. This keeps the header generation script simple.
#
# General biased advice:
#
# - Consider using @property for APIs that don't need parameters. It makes
# functions look like attributes, which is less awkward in clients, and makes
# it easy to switch back and forth between variables and functions.
#
# - Think about the data type of the thing you're exposing. Exposing something
# as e.g. a list or a dictionary is often nicer and more flexible than adding
# a function.
#
# - Avoid get_*() prefixes on functions. Name them after the thing they return
# instead. This often makes the code read more naturally in callers.
#
# Also, consider using @property instead of get_*().
#
# - Don't expose dtlib stuff directly.
#
# - Add documentation for any new APIs you add.
#
# The convention here is that docstrings (quoted strings) are used for public
# APIs, and "doc comments" for internal functions.
#
# @properties are documented in the class docstring, as if they were
# variables. See the existing @properties for a template.
#
# - Please use ""-quoted strings instead of ''-quoted strings, just to make
# things consistent (''-quoting is more common otherwise in Python)
from collections import OrderedDict, defaultdict
import os
import re
import sys
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# This makes e.g. gen_defines.py more than twice as fast.
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from dtlib import DT, DTError, to_num, to_nums, TYPE_EMPTY, TYPE_NUMS, \
TYPE_PHANDLE, TYPE_PHANDLES_AND_NUMS
from grutils import Graph
#
# Public classes
#
class EDT:
"""
Represents a devicetree augmented with information from bindings.
These attributes are available on EDT objects:
nodes:
A list of Node objects for the nodes that appear in the devicetree
compat2enabled:
A collections.defaultdict that maps each 'compatible' string that appears
on some enabled Node to a list of enabled Nodes.
For example, edt.compat2enabled["bar"] would include the 'foo' and 'bar'
nodes below.
foo {
compatible = "bar";
status = "okay";
...
};
bar {
compatible = "foo", "bar", "baz";
status = "okay";
...
};
This exists only for the sake of gen_legacy_defines.py. It will probably
be removed following the Zephyr 2.3 release.
compat2nodes:
A collections.defaultdict that maps each 'compatible' string that appears
on some Node to a list of Nodes with that compatible.
compat2okay:
Like compat2nodes, but just for nodes with status 'okay'.
label2node:
A collections.OrderedDict that maps a node label to the node with
that label.
chosen_nodes:
A collections.OrderedDict that maps the properties defined on the
devicetree's /chosen node to their values. 'chosen' is indexed by
property name (a string), and values are converted to Node objects.
Note that properties of the /chosen node which can't be converted
to a Node are not included in the value.
dts_path:
The .dts path passed to __init__()
dts_source:
The final DTS source code of the loaded devicetree after merging nodes
and processing /delete-node/ and /delete-property/, as a string
bindings_dirs:
The bindings directory paths passed to __init__()
"""
def __init__(self, dts, bindings_dirs, warn_file=None,
warn_reg_unit_address_mismatch=True,
default_prop_types=True):
"""
EDT constructor. This is the top-level entry point to the library.
dts:
Path to devicetree .dts file
bindings_dirs:
List of paths to directories containing bindings, in YAML format.
These directories are recursively searched for .yaml files.
warn_file (default: None):
'file' object to write warnings to. If None, sys.stderr is used.
warn_reg_unit_address_mismatch (default: True):
If True, a warning is printed if a node has a 'reg' property where
the address of the first entry does not match the unit address of the
node
default_prop_types (default: True):
If True, default property types will be used when a node has no
bindings.
"""
# Do this indirection with None in case sys.stderr is deliberately
# overridden
self._warn_file = sys.stderr if warn_file is None else warn_file
self._warn_reg_unit_address_mismatch = warn_reg_unit_address_mismatch
self._default_prop_types = default_prop_types
self.dts_path = dts
self.bindings_dirs = bindings_dirs
self._dt = DT(dts)
_check_dt(self._dt)
self._init_compat2binding(bindings_dirs)
self._init_nodes()
self._init_luts()
self._define_order()
def get_node(self, path):
"""
Returns the Node at the DT path or alias 'path'. Raises EDTError if the
path or alias doesn't exist.
"""
try:
return self._node2enode[self._dt.get_node(path)]
except DTError as e:
_err(e)
@property
def chosen_nodes(self):
ret = OrderedDict()
try:
chosen = self._dt.get_node("/chosen")
except DTError:
return ret
for name, prop in chosen.props.items():
try:
node = prop.to_path()
except DTError:
# DTS value is not phandle or string, or path doesn't exist
continue
ret[name] = self._node2enode[node]
return ret
def chosen_node(self, name):
"""
Returns the Node pointed at by the property named 'name' in /chosen, or
None if the property is missing
"""
return self.chosen_nodes.get(name)
@property
def dts_source(self):
return f"{self._dt}"
def __repr__(self):
return "<EDT for '{}', binding directories '{}'>".format(
self.dts_path, self.bindings_dirs)
def scc_order(self):
"""
Returns a list of lists of Nodes where all elements of each list
depend on each other, and the Nodes in any list do not depend
on any Node in a subsequent list. Each list defines a Strongly
Connected Component (SCC) of the graph.
For an acyclic graph each list will be a singleton. Cycles
will be represented by lists with multiple nodes. Cycles are
not expected to be present in devicetree graphs.
"""
try:
return self._graph.scc_order()
except Exception as e:
raise EDTError(e)
def _define_order(self):
# Constructs a graph of dependencies between Node instances,
# then calculates a partial order over the dependencies. The
# algorithm supports detecting dependency loops.
self._graph = Graph()
for node in self.nodes:
# A Node always depends on its parent.
for child in node.children.values():
self._graph.add_edge(child, node)
# A Node depends on any Nodes present in 'phandle',
# 'phandles', or 'phandle-array' property values.
for prop in node.props.values():
if prop.type == 'phandle':
self._graph.add_edge(node, prop.val)
elif prop.type == 'phandles':
for phandle_node in prop.val:
self._graph.add_edge(node, phandle_node)
elif prop.type == 'phandle-array':
for cd in prop.val:
self._graph.add_edge(node, cd.controller)
# A Node depends on whatever supports the interrupts it
# generates.
for intr in node.interrupts:
self._graph.add_edge(node, intr.controller)
# Calculate an order that ensures no node is before any node
# it depends on. This sets the dep_ordinal field in each
# Node.
self.scc_order()
def _init_compat2binding(self, bindings_dirs):
# Creates self._compat2binding. This is a dictionary that maps
# (<compatible>, <bus>) tuples (both strings) to (<binding>, <path>)
# tuples. <binding> is the binding in parsed PyYAML format, and <path>
# the path to the binding (nice for binding-related error messages).
#
# For example, self._compat2binding["company,dev", "can"] contains the
# binding/path for the 'company,dev' device, when it appears on the CAN
# bus.
#
# For bindings that don't specify a bus, <bus> is None, so that e.g.
# self._compat2binding["company,notonbus", None] contains the binding.
#
# Only bindings for 'compatible' strings that appear in the devicetree
# are loaded.
dt_compats = _dt_compats(self._dt)
# Searches for any 'compatible' string mentioned in the devicetree
# files, with a regex
dt_compats_search = re.compile(
"|".join(re.escape(compat) for compat in dt_compats)
).search
self._binding_paths = _binding_paths(bindings_dirs)
self._compat2binding = {}
for binding_path in self._binding_paths:
with open(binding_path, encoding="utf-8") as f:
contents = f.read()
# As an optimization, skip parsing files that don't contain any of
# the .dts 'compatible' strings, which should be reasonably safe
if not dt_compats_search(contents):
continue
# Load the binding and check that it actually matches one of the
# compatibles. Might get false positives above due to comments and
# stuff.
try:
# Parsed PyYAML output (Python lists/dictionaries/strings/etc.,
# representing the file)
binding = yaml.load(contents, Loader=_BindingLoader)
except yaml.YAMLError as e:
self._warn("'{}' appears in binding directories but isn't "
"valid YAML: {}".format(binding_path, e))
continue
binding_compat = self._binding_compat(binding, binding_path)
if binding_compat not in dt_compats:
# Either not a binding (binding_compat is None -- might be a
# binding fragment or a spurious file), or a binding whose
# compatible does not appear in the devicetree (picked up via
# some unrelated text in the binding file that happened to
# match a compatible)
continue
# It's a match. Merge in the included bindings, do sanity checks,
# and register the binding.
binding = self._merge_included_bindings(binding, binding_path)
self._check_binding(binding, binding_path)
on_bus = _on_bus_from_binding(binding)
# Do not allow two different bindings to have the same
# 'compatible:'/'on-bus:' combo
old_binding = self._compat2binding.get((binding_compat, on_bus))
if old_binding:
msg = "both {} and {} have 'compatible: {}'".format(
old_binding[1], binding_path, binding_compat)
if on_bus is not None:
msg += " and 'on-bus: {}'".format(on_bus)
_err(msg)
self._compat2binding[binding_compat, on_bus] = (binding, binding_path)
def _binding_compat(self, binding, binding_path):
# Returns the string listed in 'compatible:' in 'binding', or None if
# no compatible is found. Only takes 'self' for the sake of
# self._warn().
#
# Also searches for legacy compatibles on the form
#
# properties:
# compatible:
# constraint: <string>
def new_style_compat():
# New-style 'compatible: "foo"' compatible
if binding is None or "compatible" not in binding:
# Empty file, binding fragment, spurious file, or old-style
# compat
return None
compatible = binding["compatible"]
if not isinstance(compatible, str):
_err("malformed 'compatible: {}' field in {} - "
"should be a string, not {}"
.format(compatible, binding_path,
type(compatible).__name__))
return compatible
def old_style_compat():
# Old-style 'constraint: "foo"' compatible
try:
return binding["properties"]["compatible"]["constraint"]
except Exception:
return None
new_compat = new_style_compat()
old_compat = old_style_compat()
if old_compat:
self._warn("The 'properties: compatible: constraint: ...' way of "
"specifying the compatible in {} is deprecated. Put "
"'compatible: \"{}\"' at the top level of the binding "
"instead.".format(binding_path, old_compat))
if new_compat:
_err("compatibles for {} should be specified with either "
"'compatible:' at the top level or with the legacy "
"'properties: compatible: constraint: ...' field, not "
"both".format(binding_path))
return old_compat
return new_compat
def _merge_included_bindings(self, binding, binding_path):
# Merges any bindings listed in the 'include:' section of 'binding'
# into the top level of 'binding'. Also supports the legacy
# 'inherits: !include ...' syntax for including bindings.
#
# Properties in 'binding' take precedence over properties from included
# bindings.
fnames = []
if "include" in binding:
include = binding.pop("include")
if isinstance(include, str):
fnames.append(include)
elif isinstance(include, list):
if not all(isinstance(elm, str) for elm in include):
_err("all elements in 'include:' in {} should be strings"
.format(binding_path))
fnames += include
else:
_err("'include:' in {} should be a string or a list of strings"
.format(binding_path))
if "child-binding" in binding and "include" in binding["child-binding"]:
self._merge_included_bindings(binding["child-binding"], binding_path)
# Legacy syntax
if "inherits" in binding:
self._warn("the 'inherits:' syntax in {} is deprecated and will "
"be removed - please use 'include: foo.yaml' or "
"'include: [foo.yaml, bar.yaml]' instead"
.format(binding_path))
inherits = binding.pop("inherits")
if not isinstance(inherits, list) or \
not all(isinstance(elm, str) for elm in inherits):
_err("malformed 'inherits:' in " + binding_path)
fnames += inherits
if not fnames:
return binding
# Got a list of included files in 'fnames'. Now we need to merge them
# together and then merge them into 'binding'.
# First, merge the included files together. If more than one included
# file has a 'required:' for a particular property, OR the values
# together, so that 'required: true' wins.
merged_included = self._load_binding(fnames[0])
for fname in fnames[1:]:
included = self._load_binding(fname)
_merge_props(merged_included, included, None, binding_path,
check_required=False)
# Next, merge the merged included files into 'binding'. Error out if
# 'binding' has 'required: false' while the merged included files have
# 'required: true'.
_merge_props(binding, merged_included, None, binding_path,
check_required=True)
return binding
def _load_binding(self, fname):
# Returns the contents of the binding given by 'fname' after merging
# any bindings it lists in 'include:' into it. 'fname' is just the
# basename of the file, so we check that there aren't multiple
# candidates.
paths = [path for path in self._binding_paths
if os.path.basename(path) == fname]
if not paths:
_err("'{}' not found".format(fname))
if len(paths) > 1:
_err("multiple candidates for included file '{}': {}"
.format(fname, ", ".join(paths)))
with open(paths[0], encoding="utf-8") as f:
return self._merge_included_bindings(
yaml.load(f, Loader=_BindingLoader),
paths[0])
def _init_nodes(self):
# Creates a list of edtlib.Node objects from the dtlib.Node objects, in
# self.nodes
# Maps each dtlib.Node to its corresponding edtlib.Node
self._node2enode = {}
self.nodes = []
for dt_node in self._dt.node_iter():
# Warning: We depend on parent Nodes being created before their
# children. This is guaranteed by node_iter().
node = Node()
node.edt = self
node._node = dt_node
node.bus_node = node._bus_node()
node._init_binding()
node._init_regs()
self.nodes.append(node)
self._node2enode[dt_node] = node
for node in self.nodes:
# These depend on all Node objects having been created, because
# they (either always or sometimes) reference other nodes, so we
# run them separately
node._init_props(default_prop_types=self._default_prop_types)
node._init_interrupts()
node._init_pinctrls()
def _init_luts(self):
# Initialize node lookup tables (LUTs).
self.label2node = OrderedDict()
self.compat2enabled = defaultdict(list)
self.compat2nodes = defaultdict(list)
self.compat2okay = defaultdict(list)
for node in self.nodes:
for label in node.labels:
self.label2node[label] = node
for compat in node.compats:
self.compat2nodes[compat].append(node)
if node.enabled:
self.compat2enabled[compat].append(node)
if node.status == "okay":
self.compat2okay[compat].append(node)
def _check_binding(self, binding, binding_path):
# Does sanity checking on 'binding'. Only takes 'self' for the sake of
# self._warn().
if "title" in binding:
# This message is the message that people copy-pasting the old
# format will see in practice
self._warn("'title:' in {} is deprecated and will be removed (and "
"was never used). Just put a 'description:' that "
"describes the device instead. Use other bindings as "
"a reference, and note that all bindings were updated "
"recently. Think about what information would be "
"useful to other people (e.g. explanations of "
"acronyms, or datasheet links), and put that in as "
"well. The description text shows up as a comment "
"in the generated header. See yaml-multiline.info for "
"how to deal with multiple lines. You probably want "
"'description: |'.".format(binding_path))
if "description" not in binding:
_err("missing 'description' property in " + binding_path)
for prop in "title", "description":
if prop in binding and (not isinstance(binding[prop], str) or
not binding[prop]):
_err("malformed or empty '{}' in {}"
.format(prop, binding_path))
ok_top = {"title", "description", "compatible", "properties", "#cells",
"bus", "on-bus", "parent-bus", "child-bus", "parent", "child",
"child-binding", "sub-node"}
for prop in binding:
if prop not in ok_top and not prop.endswith("-cells"):
_err("unknown key '{}' in {}, expected one of {}, or *-cells"
.format(prop, binding_path, ", ".join(ok_top)))
for bus_key in "bus", "on-bus":
if bus_key in binding and \
not isinstance(binding[bus_key], str):
_err("malformed '{}:' value in {}, expected string"
.format(bus_key, binding_path))
# There are two legacy syntaxes for 'bus:' and 'on-bus:':
#
# child/parent-bus: foo
# child/parent: bus: foo
#
# We support both, with deprecation warnings.
for pc in "parent", "child":
# Legacy 'parent/child-bus:' keys
bus_key = pc + "-bus"
if bus_key in binding:
self._warn("'{}:' in {} is deprecated and will be removed - "
"please use a top-level '{}:' key instead (see "
"binding-template.yaml)"
.format(bus_key, binding_path,
"bus" if bus_key == "child-bus" else "on-bus"))
if not isinstance(binding[bus_key], str):
_err("malformed '{}:' value in {}, expected string"
.format(bus_key, binding_path))
# Legacy 'child/parent: bus: ...' keys
if pc in binding:
self._warn("'{}: bus: ...' in {} is deprecated and will be "
"removed - please use a top-level '{}' key instead "
"(see binding-template.yaml)"
.format(pc, binding_path,
"bus" if pc == "child" else "on-bus:"))
# Just 'bus:' is expected
if binding[pc].keys() != {"bus"}:
_err("expected (just) 'bus:' in '{}:' in {}"
.format(pc, binding_path))
if not isinstance(binding[pc]["bus"], str):
_err("malformed '{}: bus:' value in {}, expected string"
.format(pc, binding_path))
self._check_binding_properties(binding, binding_path)
if "child-binding" in binding:
if not isinstance(binding["child-binding"], dict):
_err("malformed 'child-binding:' in {}, expected a binding "
"(dictionary with keys/values)".format(binding_path))
self._check_binding(binding["child-binding"], binding_path)
if "sub-node" in binding:
self._warn("'sub-node: properties: ...' in {} is deprecated and "
"will be removed - please give a full binding for the "
"child node in 'child-binding:' instead (see "
"binding-template.yaml)".format(binding_path))
if binding["sub-node"].keys() != {"properties"}:
_err("expected (just) 'properties:' in 'sub-node:' in {}"
.format(binding_path))
self._check_binding_properties(binding["sub-node"], binding_path)
if "#cells" in binding:
self._warn('"#cells:" in {} is deprecated and will be removed - '
"please put 'interrupt-cells:', 'pwm-cells:', "
"'gpio-cells:', etc., instead. The name should match "
"the name of the corresponding phandle-array property "
"(see binding-template.yaml)".format(binding_path))
def ok_cells_val(val):
# Returns True if 'val' is an okay value for '*-cells:' (or the
# legacy '#cells:')
return isinstance(val, list) and \
all(isinstance(elm, str) for elm in val)
for key, val in binding.items():
if key.endswith("-cells") or key == "#cells":
if not ok_cells_val(val):
_err("malformed '{}:' in {}, expected a list of strings"
.format(key, binding_path))
def _check_binding_properties(self, binding, binding_path):
# _check_binding() helper for checking the contents of 'properties:'.
# Only takes 'self' for the sake of self._warn().
if "properties" not in binding:
return
ok_prop_keys = {"description", "type", "required", "category",
"constraint", "enum", "const", "default"}
for prop_name, options in binding["properties"].items():
for key in options:
if key == "category":
self._warn(
"please put 'required: {}' instead of 'category: {}' "
"in properties: {}: ...' in {} - 'category' will be "
"removed".format(
"true" if options["category"] == "required"
else "false",
options["category"], prop_name, binding_path))
if key not in ok_prop_keys:
_err("unknown setting '{}' in 'properties: {}: ...' in {}, "
"expected one of {}".format(
key, prop_name, binding_path,
", ".join(ok_prop_keys)))
_check_prop_type_and_default(
prop_name, options.get("type"),
options.get("required") or options.get("category") == "required",
options.get("default"), binding_path)
if "required" in options and not isinstance(options["required"], bool):
_err("malformed 'required:' setting '{}' for '{}' in 'properties' "
"in {}, expected true/false"
.format(options["required"], prop_name, binding_path))
if "description" in options and \
not isinstance(options["description"], str):
_err("missing, malformed, or empty 'description' for '{}' in "
"'properties' in {}".format(prop_name, binding_path))
if "enum" in options and not isinstance(options["enum"], list):
_err("enum in {} for property '{}' is not a list"
.format(binding_path, prop_name))
if "const" in options and not isinstance(options["const"], (int, str)):
_err("const in {} for property '{}' is not a scalar"
.format(binding_path, prop_name))
def _warn(self, msg):
print("warning: " + msg, file=self._warn_file)
class Node:
"""
Represents a devicetree node, augmented with information from bindings, and
with some interpretation of devicetree properties. There's a one-to-one
correspondence between devicetree nodes and Nodes.
These attributes are available on Node objects:
edt:
The EDT instance this node is from
name:
The name of the node
unit_addr:
An integer with the ...@<unit-address> portion of the node name,
translated through any 'ranges' properties on parent nodes, or None if
the node name has no unit-address portion
description:
The description string from the binding for the node, or None if the node
has no binding. Leading and trailing whitespace (including newlines) is
removed.
path:
The devicetree path of the node
label:
The text from the 'label' property on the node, or None if the node has
no 'label'
labels:
A list of all of the devicetree labels for the node, in the same order
as the labels appear, but with duplicates removed.
This corresponds to the actual devicetree source labels, unlike the
"label" attribute, which is the value of a devicetree property named
"label".
parent:
The Node instance for the devicetree parent of the Node, or None if the
node is the root node
children:
A dictionary with the Node instances for the devicetree children of the
node, indexed by name
dep_ordinal:
A non-negative integer value such that the value for a Node is
less than the value for all Nodes that depend on it.
The ordinal is defined for all Nodes including those that are not
'enabled', and is unique among nodes in its EDT 'nodes' list.
required_by:
A list with the nodes that directly depend on the node
depends_on:
A list with the nodes that the node directly depends on
status:
The node's status property value, as a string, or "okay" if the node
has no status property set. If the node's status property is "ok",
it is converted to "okay" for consistency.
enabled:
True unless the node has 'status = "disabled"'
This exists only for the sake of gen_legacy_defines.py. It will probably
be removed following the Zephyr 2.3 release.
read_only:
True if the node has a 'read-only' property, and False otherwise
matching_compat:
The 'compatible' string for the binding that matched the node, or None if
the node has no binding
binding_path:
The path to the binding file for the node, or None if the node has no
binding
compats:
A list of 'compatible' strings for the node, in the same order that
they're listed in the .dts file
regs:
A list of Register objects for the node's registers
props:
A collections.OrderedDict that maps property names to Property objects.
Property objects are created for all devicetree properties on the node
that are mentioned in 'properties:' in the binding.
aliases:
A list of aliases for the node. This is fetched from the /aliases node.
interrupts:
A list of ControllerAndData objects for the interrupts generated by the
node. The list is empty if the node does not generate interrupts.
pinctrls:
A list of PinCtrl objects for the pinctrl-<index> properties on the
node, sorted by index. The list is empty if the node does not have any
pinctrl-<index> properties.
bus:
If the node is a bus node (has a 'bus:' key in its binding), then this
attribute holds the bus type, e.g. "i2c" or "spi". If the node is not a
bus node, then this attribute is None.
on_bus:
The bus the node appears on, e.g. "i2c" or "spi". The bus is determined
by searching upwards for a parent node whose binding has a 'bus:' key,
returning the value of the first 'bus:' key found. If none of the node's
parents has a 'bus:' key, this attribute is None.
bus_node:
Like on_bus, but contains the Node for the bus controller, or None if the
node is not on a bus.
flash_controller:
The flash controller for the node. Only meaningful for nodes representing
flash partitions.
spi_cs_gpio:
The device's SPI GPIO chip select as a ControllerAndData instance, if it
exists, and None otherwise. See
Documentation/devicetree/bindings/spi/spi-controller.yaml in the Linux kernel.
"""
@property
def name(self):
"See the class docstring"
return self._node.name
@property
def unit_addr(self):
"See the class docstring"
# TODO: Return a plain string here later, like dtlib.Node.unit_addr?
if "@" not in self.name:
return None
try:
addr = int(self.name.split("@", 1)[1], 16)
except ValueError:
_err("{!r} has non-hex unit address".format(self))
addr = _translate(addr, self._node)
# Matches the simple_bus_reg warning in dtc
if self.edt._warn_reg_unit_address_mismatch and \
self.regs and self.regs[0].addr != addr:
self.edt._warn("unit address and first address in 'reg' "
f"(0x{self.regs[0].addr:x}) don't match for "
f"{self.path}")
return addr
@property
def description(self):
"See the class docstring."
if self._binding and "description" in self._binding:
return self._binding["description"].strip()
return None
@property
def path(self):
"See the class docstring"
return self._node.path
@property
def label(self):
"See the class docstring"
if "label" in self._node.props:
return self._node.props["label"].to_string()
return None
@property
def labels(self):
"See the class docstring"
return self._node.labels
@property
def parent(self):
"See the class docstring"
return self.edt._node2enode.get(self._node.parent)
@property
def children(self):
"See the class docstring"
# Could be initialized statically too to preserve identity, but not
# sure if needed. Parent nodes being initialized before their children
# would need to be kept in mind.
return OrderedDict((name, self.edt._node2enode[node])
for name, node in self._node.nodes.items())
@property
def required_by(self):
"See the class docstring"
return self.edt._graph.required_by(self)
@property
def depends_on(self):
"See the class docstring"
return self.edt._graph.depends_on(self)
@property
def status(self):
"See the class docstring"
status = self._node.props.get("status")
if status is None:
as_string = "okay"
else:
as_string = status.to_string()
if as_string == "ok":
as_string = "okay"
return as_string
@property
def enabled(self):
"See the class docstring"
return "status" not in self._node.props or self.status != "disabled"
@property
def read_only(self):
"See the class docstring"
return "read-only" in self._node.props
@property
def aliases(self):
"See the class docstring"
return [alias for alias, node in self._node.dt.alias2node.items()
if node is self._node]
@property
def bus(self):
"See the class docstring"
binding = self._binding
if not binding:
return None
if "bus" in binding:
return binding["bus"]
# Legacy key
if "child-bus" in binding:
return binding["child-bus"]
# Legacy key
if "child" in binding:
# _check_binding() has checked that the "bus" key exists
return binding["child"]["bus"]
return None
@property
def on_bus(self):
"See the class docstring"
bus_node = self.bus_node
return bus_node.bus if bus_node else None
@property
def flash_controller(self):
"See the class docstring"
# The node path might be something like
# /flash-controller@4001E000/flash@0/partitions/partition@fc000. We go
# up two levels to get the flash and check its compat. The flash
# controller might be the flash itself (for cases like NOR flashes).
# For the case of 'soc-nv-flash', we assume the controller is the
# parent of the flash node.
if not self.parent or not self.parent.parent:
_err("flash partition {!r} lacks parent or grandparent node"
.format(self))
controller = self.parent.parent
if controller.matching_compat == "soc-nv-flash":
return controller.parent
return controller
@property
def spi_cs_gpio(self):
"See the class docstring"
if not (self.on_bus == "spi" and "cs-gpios" in self.bus_node.props):
return None
if not self.regs:
_err("{!r} needs a 'reg' property, to look up the chip select index "
"for SPI".format(self))
parent_cs_lst = self.bus_node.props["cs-gpios"].val
# cs-gpios is indexed by the unit address
cs_index = self.regs[0].addr
if cs_index >= len(parent_cs_lst):
_err("index from 'regs' in {!r} ({}) is >= number of cs-gpios "
"in {!r} ({})".format(
self, cs_index, self.bus_node, len(parent_cs_lst)))
return parent_cs_lst[cs_index]
def __repr__(self):
return "<Node {} in '{}', {}>".format(
self.path, self.edt.dts_path,
"binding " + self.binding_path if self.binding_path
else "no binding")
def _init_binding(self):
# Initializes Node.matching_compat, Node._binding, and
# Node.binding_path.
#
# Node._binding holds the data from the node's binding file, in the
# format returned by PyYAML (plain Python lists, dicts, etc.), or None
# if the node has no binding.
# This relies on the parent of the node having already been
# initialized, which is guaranteed by going through the nodes in
# node_iter() order.
if "compatible" in self._node.props:
self.compats = self._node.props["compatible"].to_strings()
on_bus = self.on_bus
for compat in self.compats:
if (compat, on_bus) in self.edt._compat2binding:
# Binding found
self.matching_compat = compat
self._binding, self.binding_path = \
self.edt._compat2binding[compat, on_bus]
return
else:
# No 'compatible' property. See if the parent binding has a
# 'child-binding:' key that gives the binding (or a legacy
# 'sub-node:' key).
self.compats = []
binding_from_parent = self._binding_from_parent()
if binding_from_parent:
self._binding = binding_from_parent
self.binding_path = self.parent.binding_path
self.matching_compat = self.parent.matching_compat
return
# No binding found
self._binding = self.binding_path = self.matching_compat = None
def _binding_from_parent(self):
# Returns the binding from 'child-binding:' in the parent node's
# binding (or from the legacy 'sub-node:' key), or None if missing
if not self.parent:
return None
pbinding = self.parent._binding
if not pbinding:
return None
if "child-binding" in pbinding:
return pbinding["child-binding"]
# Backwards compatibility
if "sub-node" in pbinding:
return {"title": pbinding["title"],
"description": pbinding["description"],
"properties": pbinding["sub-node"]["properties"]}
return None
def _bus_node(self):
# Returns the value for self.bus_node. Relies on parent nodes being
# initialized before their children.
if not self.parent:
# This is the root node
return None
if self.parent.bus:
# The parent node is a bus node
return self.parent
# Same bus node as parent (possibly None)
return self.parent.bus_node
def _init_props(self, default_prop_types=False):
# Creates self.props. See the class docstring. Also checks that all
# properties on the node are declared in its binding.
self.props = OrderedDict()
node = self._node
if self._binding:
binding_props = self._binding.get("properties")
else:
binding_props = None
# Initialize self.props
if binding_props:
for name, options in binding_props.items():
self._init_prop(name, options)
self._check_undeclared_props()
elif default_prop_types:
for name in node.props:
if name in _DEFAULT_PROP_TYPES:
prop_type = _DEFAULT_PROP_TYPES[name]
val = self._prop_val(name, prop_type, False, None)
prop = Property()
prop.node = self
prop.name = name
prop.description = None
prop.val = val
prop.type = prop_type
# We don't set enum_index for "compatible"
prop.enum_index = None
self.props[name] = prop
def _init_prop(self, name, options):
# _init_props() helper for initializing a single property
prop_type = options.get("type")
if not prop_type:
_err("'{}' in {} lacks 'type'".format(name, self.binding_path))
val = self._prop_val(
name, prop_type,
options.get("required") or options.get("category") == "required",
options.get("default"))
if val is None:
# 'required: false' property that wasn't there, or a property type
# for which we store no data.
return
enum = options.get("enum")
if enum and val not in enum:
_err("value of property '{}' on {} in {} ({!r}) is not in 'enum' "
"list in {} ({!r})"
.format(name, self.path, self.edt.dts_path, val,
self.binding_path, enum))
const = options.get("const")
if const is not None and val != const:
_err("value of property '{}' on {} in {} ({!r}) is different from "
"the 'const' value specified in {} ({!r})"
.format(name, self.path, self.edt.dts_path, val,
self.binding_path, const))
# Skip properties that start with '#', like '#size-cells', and mapping
# properties like 'gpio-map'/'interrupt-map'
if name[0] == "#" or name.endswith("-map"):
return
prop = Property()
prop.node = self
prop.name = name
prop.description = options.get("description")
if prop.description:
prop.description = prop.description.strip()
prop.val = val
prop.type = prop_type
prop.enum_index = None if enum is None else enum.index(val)
self.props[name] = prop
def _prop_val(self, name, prop_type, required, default):
# _init_prop() helper for getting the property's value
#
# name:
# Property name from binding
#
# prop_type:
# Property type from binding (a string like "int")
#
# optional:
# True if the property isn't required to exist
#
# default:
# Default value to use when the property doesn't exist, or None if
# the binding doesn't give a default value
node = self._node
prop = node.props.get(name)
if not prop:
if required and self.enabled:
_err("'{}' is marked as required in 'properties:' in {}, but "
"does not appear in {!r}".format(
name, self.binding_path, node))
if default is not None:
# YAML doesn't have a native format for byte arrays. We need to
# convert those from an array like [0x12, 0x34, ...]. The
# format has already been checked in
# _check_prop_type_and_default().
if prop_type == "uint8-array":
return bytes(default)
return default
return False if prop_type == "boolean" else None
if prop_type == "boolean":
if prop.type is not TYPE_EMPTY:
_err("'{0}' in {1!r} is defined with 'type: boolean' in {2}, "
"but is assigned a value ('{3}') instead of being empty "
"('{0};')".format(name, node, self.binding_path, prop))
return True
if prop_type == "int":
return prop.to_num()
if prop_type == "array":
return prop.to_nums()
if prop_type == "uint8-array":
return prop.to_bytes()
if prop_type == "string":
return prop.to_string()
if prop_type == "string-array":
return prop.to_strings()
if prop_type == "phandle":
return self.edt._node2enode[prop.to_node()]
if prop_type == "phandles":
return [self.edt._node2enode[node] for node in prop.to_nodes()]
if prop_type == "phandle-array":
# This type is a bit high-level for dtlib as it involves
# information from bindings and *-names properties, so there's no
# to_phandle_array() in dtlib. Do the type check ourselves.
if prop.type not in (TYPE_PHANDLE, TYPE_PHANDLES_AND_NUMS):
_err("expected property '{0}' in {1} in {2} to be assigned "
"with '{0} = < &foo 1 2 ... &bar 3 4 ... >' (a mix of "
"phandles and numbers), not '{3}'"
.format(name, node.path, node.dt.filename, prop))
return self._standard_phandle_val_list(prop)
if prop_type == "path":
return self.edt._node2enode[prop.to_path()]
# prop_type == "compound". We have already checked that the 'type:'
# value is valid, in _check_binding().
#
# 'compound' is a dummy type for properties that don't fit any of the
# patterns above, so that we can require all entries in 'properties:'
# to have a 'type: ...'. No Property object is created for it.
return None
def _check_undeclared_props(self):
# Checks that all properties are declared in the binding
if "properties" in self._binding:
declared_props = self._binding["properties"].keys()
else:
declared_props = set()
for prop_name in self._node.props:
# Allow a few special properties to not be declared in the binding
if prop_name.endswith("-controller") or \
prop_name.startswith("#") or \
prop_name.startswith("pinctrl-") or \
prop_name in {
"compatible", "status", "ranges", "phandle",
"interrupt-parent", "interrupts-extended", "device_type"}:
continue
if prop_name not in declared_props:
_err("'{}' appears in {} in {}, but is not declared in "
"'properties:' in {}"
.format(prop_name, self._node.path, self.edt.dts_path,
self.binding_path))
def _init_regs(self):
# Initializes self.regs
node = self._node
self.regs = []
if "reg" not in node.props:
return
address_cells = _address_cells(node)
size_cells = _size_cells(node)
for raw_reg in _slice(node, "reg", 4*(address_cells + size_cells),
"4*(<#address-cells> (= {}) + <#size-cells> (= {}))"
.format(address_cells, size_cells)):
reg = Register()
reg.node = self
if address_cells == 0:
reg.addr = None
else:
reg.addr = _translate(to_num(raw_reg[:4*address_cells]), node)
if size_cells == 0:
reg.size = None
else:
reg.size = to_num(raw_reg[4*address_cells:])
if size_cells != 0 and reg.size == 0:
_err("zero-sized 'reg' in {!r} seems meaningless (maybe you "
"want a size of one or #size-cells = 0 instead)"
.format(self._node))
self.regs.append(reg)
_add_names(node, "reg", self.regs)
def _init_pinctrls(self):
# Initializes self.pinctrls from any pinctrl-<index> properties
node = self._node
# pinctrl-<index> properties
pinctrl_props = [prop for name, prop in node.props.items()
if re.match("pinctrl-[0-9]+", name)]
# Sort by index
pinctrl_props.sort(key=lambda prop: prop.name)
# Check indices
for i, prop in enumerate(pinctrl_props):
if prop.name != "pinctrl-" + str(i):
_err("missing 'pinctrl-{}' property on {!r} - indices should "
"be contiguous and start from zero".format(i, node))
self.pinctrls = []
for prop in pinctrl_props:
pinctrl = PinCtrl()
pinctrl.node = self
pinctrl.conf_nodes = [
self.edt._node2enode[node] for node in prop.to_nodes()
]
self.pinctrls.append(pinctrl)
_add_names(node, "pinctrl", self.pinctrls)
def _init_interrupts(self):
# Initializes self.interrupts
node = self._node
self.interrupts = []
for controller_node, data in _interrupts(node):
interrupt = ControllerAndData()
interrupt.node = self
interrupt.controller = self.edt._node2enode[controller_node]
interrupt.data = self._named_cells(interrupt.controller, data,
"interrupt")
self.interrupts.append(interrupt)
_add_names(node, "interrupt", self.interrupts)
def _standard_phandle_val_list(self, prop):
# Parses a property like
#
# <name>s = <phandle value phandle value ...>
# (e.g., pwms = <&foo 1 2 &bar 3 4>)
#
# , where each phandle points to a node that has a
#
# #<name>-cells = <size>
#
# property that gives the number of cells in the value after the
# phandle. These values are given names in *-cells in the binding for
# the controller.
#
# Also parses any
#
# <name>-names = "...", "...", ...
#
# Returns a list of ControllerAndData instances.
if prop.name.endswith("gpios"):
# There's some slight special-casing for *-gpios properties in that
# e.g. foo-gpios still maps to #gpio-cells rather than
# #foo-gpio-cells
basename = "gpio"
else:
# Strip -s. We've already checked that the property names end in -s
# in _check_binding().
basename = prop.name[:-1]
res = []
for controller_node, data in _phandle_val_list(prop, basename):
mapped_controller, mapped_data = \
_map_phandle_array_entry(prop.node, controller_node, data,
basename)
entry = ControllerAndData()
entry.node = self
entry.controller = self.edt._node2enode[mapped_controller]
entry.data = self._named_cells(entry.controller, mapped_data,
basename)
res.append(entry)
_add_names(self._node, basename, res)
return res
def _named_cells(self, controller, data, basename):
# Returns a dictionary that maps <basename>-cells names given in the
# binding for 'controller' to cell values. 'data' is the raw data, as a
# byte array.
if not controller._binding:
_err("{} controller {!r} for {!r} lacks binding"
.format(basename, controller._node, self._node))
if basename + "-cells" in controller._binding:
cell_names = controller._binding[basename + "-cells"]
elif "#cells" in controller._binding:
# Backwards compatibility
cell_names = controller._binding["#cells"]
else:
# Treat no *-cells in the binding the same as an empty *-cells, so
# that bindings don't have to have e.g. an empty 'clock-cells:' for
# '#clock-cells = <0>'.
cell_names = []
data_list = to_nums(data)
if len(data_list) != len(cell_names):
_err("unexpected '{}-cells:' length in binding for {!r} - {} "
"instead of {}"
.format(basename, controller._node, len(cell_names),
len(data_list)))
return OrderedDict(zip(cell_names, data_list))
class Register:
"""
Represents a register on a node.
These attributes are available on Register objects:
node:
The Node instance this register is from
name:
The name of the register as given in the 'reg-names' property, or None if
there is no 'reg-names' property
addr:
The starting address of the register, in the parent address space, or None
if #address-cells is zero. Any 'ranges' properties are taken into account.
size:
The length of the register in bytes
"""
def __repr__(self):
fields = []
if self.name is not None:
fields.append("name: " + self.name)
if self.addr is not None:
fields.append("addr: " + hex(self.addr))
if self.size is not None:
fields.append("size: " + hex(self.size))
return "<Register, {}>".format(", ".join(fields))
class ControllerAndData:
"""
Represents an entry in an 'interrupts' or 'type: phandle-array' property
value, e.g. <&ctrl-1 4 0> in
cs-gpios = <&ctrl-1 4 0 &ctrl-2 3 4>;
These attributes are available on ControllerAndData objects:
node:
The Node instance the property appears on
controller:
The Node instance for the controller (e.g. the controller the interrupt
gets sent to for interrupts)
data:
A dictionary that maps names from the *-cells key in the binding for the
controller to data values, e.g. {"pin": 4, "flags": 0} for the example
above.
'interrupts = <1 2>' might give {"irq": 1, "level": 2}.
name:
The name of the entry as given in
'interrupt-names'/'gpio-names'/'pwm-names'/etc., or None if there is no
*-names property
"""
def __repr__(self):
fields = []
if self.name is not None:
fields.append("name: " + self.name)
fields.append("controller: {}".format(self.controller))
fields.append("data: {}".format(self.data))
return "<ControllerAndData, {}>".format(", ".join(fields))
class PinCtrl:
"""
Represents a pin control configuration for a set of pins on a device,
e.g. pinctrl-0 or pinctrl-1.
These attributes are available on PinCtrl objects:
node:
The Node instance the pinctrl-* property is on
name:
The name of the configuration, as given in pinctrl-names, or None if
there is no pinctrl-names property
conf_nodes:
A list of Node instances for the pin configuration nodes, e.g.
the nodes pointed at by &state_1 and &state_2 in
pinctrl-0 = <&state_1 &state_2>;
"""
def __repr__(self):
fields = []
if self.name is not None:
fields.append("name: " + self.name)
fields.append("configuration nodes: " + str(self.conf_nodes))
return "<PinCtrl, {}>".format(", ".join(fields))
class Property:
"""
Represents a property on a Node, as set in its DT node and with
additional info from the 'properties:' section of the binding.
Only properties mentioned in 'properties:' get created. Properties of type
'compound' currently do not get Property instances, as I'm not sure what
information to store for them.
These attributes are available on Property objects:
node:
The Node instance the property is on
name:
The name of the property
description:
The description string from the property as given in the binding, or None
if missing. Leading and trailing whitespace (including newlines) is
removed.
type:
A string with the type of the property, as given in the binding.
val:
The value of the property, with the format determined by the 'type:' key
from the binding.
- For 'type: int/array/string/string-array', 'val' is what you'd expect
(a Python integer or string, or a list of them)
- For 'type: phandle' and 'type: path', 'val' is the pointed-to Node
instance
- For 'type: phandles', 'val' is a list of the pointed-to Node
instances
- For 'type: phandle-array', 'val' is a list of ControllerAndData
instances. See the documentation for that class.
enum_index:
The index of the property's value in the 'enum:' list in the binding, or
None if the binding has no 'enum:'
"""
def __repr__(self):
fields = ["name: " + self.name,
# repr() to deal with lists
"type: " + self.type,
"value: " + repr(self.val)]
if self.enum_index is not None:
fields.append("enum index: {}".format(self.enum_index))
return "<Property, {}>".format(", ".join(fields))
class EDTError(Exception):
"Exception raised for devicetree- and binding-related errors"
#
# Private global functions
#
def _dt_compats(dt):
# Returns a set() with all 'compatible' strings in the devicetree
# represented by dt (a dtlib.DT instance)
return {compat
for node in dt.node_iter()
if "compatible" in node.props
for compat in node.props["compatible"].to_strings()}
def _binding_paths(bindings_dirs):
# Returns a list with the paths to all bindings (.yaml files) in
# 'bindings_dirs'
binding_paths = []
for bindings_dir in bindings_dirs:
for root, _, filenames in os.walk(bindings_dir):
for filename in filenames:
if filename.endswith(".yaml"):
binding_paths.append(os.path.join(root, filename))
return binding_paths
def _on_bus_from_binding(binding):
# Returns the bus specified by 'on-bus:' in the binding (or the
# legacy 'parent-bus:' and 'parent: bus:'), or None if missing
if not binding:
return None
if "on-bus" in binding:
return binding["on-bus"]
# Legacy key
if "parent-bus" in binding:
return binding["parent-bus"]
# Legacy key
if "parent" in binding:
# _check_binding() has checked that the "bus" key exists
return binding["parent"]["bus"]
return None
def _binding_inc_error(msg):
# Helper for reporting errors in the !include implementation
raise yaml.constructor.ConstructorError(None, None, "error: " + msg)
def _merge_props(to_dict, from_dict, parent, binding_path, check_required):
# Recursively merges 'from_dict' into 'to_dict', to implement 'include:'.
#
# If 'from_dict' and 'to_dict' contain a 'required:' key for the same
# property, then the values are ORed together.
#
# If 'check_required' is True, then an error is raised if 'from_dict' has
# 'required: true' while 'to_dict' has 'required: false'. This prevents
# bindings from "downgrading" requirements from bindings they include,
# which might help keep bindings well-organized.
#
# It's an error for most other keys to appear in both 'from_dict' and
# 'to_dict'. When it's not an error, the value in 'to_dict' takes
# precedence.
#
# 'parent' is the name of the parent key containing 'to_dict' and
# 'from_dict', and 'binding_path' is the path to the top-level binding.
# These are used to generate errors for sketchy property overwrites.
for prop in from_dict:
if isinstance(to_dict.get(prop), dict) and \
isinstance(from_dict[prop], dict):
_merge_props(to_dict[prop], from_dict[prop], prop, binding_path,
check_required)
elif prop not in to_dict:
to_dict[prop] = from_dict[prop]
elif _bad_overwrite(to_dict, from_dict, prop, check_required):
_err("{} (in '{}'): '{}' from included file overwritten "
"('{}' replaced with '{}')".format(
binding_path, parent, prop, from_dict[prop],
to_dict[prop]))
elif prop == "required":
# Need a separate check here, because this code runs before
# _check_binding()
if not (isinstance(from_dict["required"], bool) and
isinstance(to_dict["required"], bool)):
_err("malformed 'required:' setting for '{}' in 'properties' "
"in {}, expected true/false".format(parent, binding_path))
# 'required: true' takes precedence
to_dict["required"] = to_dict["required"] or from_dict["required"]
elif prop == "category":
# Legacy property key. 'category: required' takes precedence.
if "required" in (to_dict["category"], from_dict["category"]):
to_dict["category"] = "required"
def _bad_overwrite(to_dict, from_dict, prop, check_required):
# _merge_props() helper. Returns True in cases where it's bad that
# to_dict[prop] takes precedence over from_dict[prop].
if to_dict[prop] == from_dict[prop]:
return False
# These are overridden deliberately
if prop in {"title", "description", "compatible"}:
return False
if prop == "required":
if not check_required:
return False
return from_dict[prop] and not to_dict[prop]
# Legacy property key
if prop == "category":
if not check_required:
return False
return from_dict[prop] == "required" and to_dict[prop] == "optional"
return True
def _binding_include(loader, node):
# Implements !include, for backwards compatibility. '!include [foo, bar]'
# just becomes [foo, bar].
if isinstance(node, yaml.ScalarNode):
# !include foo.yaml
return [loader.construct_scalar(node)]
if isinstance(node, yaml.SequenceNode):
# !include [foo.yaml, bar.yaml]
return loader.construct_sequence(node)
_binding_inc_error("unrecognised node type in !include statement")
def _check_prop_type_and_default(prop_name, prop_type, required, default,
binding_path):
# _check_binding() helper. Checks 'type:' and 'default:' for the property
# named 'prop_name'
if prop_type is None:
_err("missing 'type:' for '{}' in 'properties' in {}"
.format(prop_name, binding_path))
ok_types = {"boolean", "int", "array", "uint8-array", "string",
"string-array", "phandle", "phandles", "phandle-array",
"path", "compound"}
if prop_type not in ok_types:
_err("'{}' in 'properties:' in {} has unknown type '{}', expected one "
"of {}".format(prop_name, binding_path, prop_type,
", ".join(ok_types)))
if prop_type == "phandle-array" and not prop_name.endswith("s"):
_err("'{}' in 'properties:' in {} is 'type: phandle-array', but its "
"name does not end in -s. This is required since property names "
"like '#pwm-cells' and 'pwm-names' get derived from 'pwms', for "
"example.".format(prop_name, binding_path))
# Check default
if default is None:
return
if prop_type in {"boolean", "compound", "phandle", "phandles",
"phandle-array", "path"}:
_err("'default:' can't be combined with 'type: {}' for '{}' in "
"'properties:' in {}".format(prop_type, prop_name, binding_path))
def ok_default():
# Returns True if 'default' is an okay default for the property's type
if prop_type == "int" and isinstance(default, int) or \
prop_type == "string" and isinstance(default, str):
return True
# array, uint8-array, or string-array
if not isinstance(default, list):
return False
if prop_type == "array" and \
all(isinstance(val, int) for val in default):
return True
if prop_type == "uint8-array" and \
all(isinstance(val, int) and 0 <= val <= 255 for val in default):
return True
# string-array
return all(isinstance(val, str) for val in default)
if not ok_default():
_err("'default: {}' is invalid for '{}' in 'properties:' in {}, which "
"has type {}".format(default, prop_name, binding_path, prop_type))
def _translate(addr, node):
# Recursively translates 'addr' on 'node' to the address space(s) of its
# parent(s), by looking at 'ranges' properties. Returns the translated
# address.
#
# node:
# dtlib.Node instance
if not node.parent or "ranges" not in node.parent.props:
# No translation
return addr
if not node.parent.props["ranges"].value:
# DT spec.: "If the property is defined with an <empty> value, it
# specifies that the parent and child address space is identical, and
# no address translation is required."
#
# Treat this the same as a 'range' that explicitly does a one-to-one
# mapping, as opposed to there not being any translation.
return _translate(addr, node.parent)
# Gives the size of each component in a translation 3-tuple in 'ranges'
child_address_cells = _address_cells(node)
parent_address_cells = _address_cells(node.parent)
child_size_cells = _size_cells(node)
# Number of cells for one translation 3-tuple in 'ranges'
entry_cells = child_address_cells + parent_address_cells + child_size_cells
for raw_range in _slice(node.parent, "ranges", 4*entry_cells,
"4*(<#address-cells> (= {}) + "
"<#address-cells for parent> (= {}) + "
"<#size-cells> (= {}))"
.format(child_address_cells, parent_address_cells,
child_size_cells)):
child_addr = to_num(raw_range[:4*child_address_cells])
raw_range = raw_range[4*child_address_cells:]
parent_addr = to_num(raw_range[:4*parent_address_cells])
raw_range = raw_range[4*parent_address_cells:]
child_len = to_num(raw_range)
if child_addr <= addr < child_addr + child_len:
# 'addr' is within range of a translation in 'ranges'. Recursively
# translate it and return the result.
return _translate(parent_addr + addr - child_addr, node.parent)
# 'addr' is not within range of any translation in 'ranges'
return addr
def _add_names(node, names_ident, objs):
# Helper for registering names from <foo>-names properties.
#
# node:
# edtlib.Node instance
#
# names-ident:
# The <foo> part of <foo>-names, e.g. "reg" for "reg-names"
#
# objs:
# list of objects whose .name field should be set
full_names_ident = names_ident + "-names"
if full_names_ident in node.props:
names = node.props[full_names_ident].to_strings()
if len(names) != len(objs):
_err("{} property in {} in {} has {} strings, expected {} strings"
.format(full_names_ident, node.path, node.dt.filename,
len(names), len(objs)))
for obj, name in zip(objs, names):
obj.name = name
else:
for obj in objs:
obj.name = None
def _interrupt_parent(node):
# Returns the node pointed at by the closest 'interrupt-parent', searching
# the parents of 'node'. As of writing, this behavior isn't specified in
# the DT spec., but seems to match what some .dts files except.
while node:
if "interrupt-parent" in node.props:
return node.props["interrupt-parent"].to_node()
node = node.parent
_err("{!r} has an 'interrupts' property, but neither the node nor any "
"of its parents has an 'interrupt-parent' property".format(node))
def _interrupts(node):
# Returns a list of (<controller>, <data>) tuples, with one tuple per
# interrupt generated by 'node'. <controller> is the destination of the
# interrupt (possibly after mapping through an 'interrupt-map'), and <data>
# the data associated with the interrupt (as a 'bytes' object).
# Takes precedence over 'interrupts' if both are present
if "interrupts-extended" in node.props:
prop = node.props["interrupts-extended"]
return [_map_interrupt(node, iparent, spec)
for iparent, spec in _phandle_val_list(prop, "interrupt")]
if "interrupts" in node.props:
# Treat 'interrupts' as a special case of 'interrupts-extended', with
# the same interrupt parent for all interrupts
iparent = _interrupt_parent(node)
interrupt_cells = _interrupt_cells(iparent)
return [_map_interrupt(node, iparent, raw)
for raw in _slice(node, "interrupts", 4*interrupt_cells,
"4*<#interrupt-cells>")]
return []
def _map_interrupt(child, parent, child_spec):
# Translates an interrupt headed from 'child' to 'parent' with data
# 'child_spec' through any 'interrupt-map' properties. Returns a
# (<controller>, <data>) tuple with the final destination after mapping.
if "interrupt-controller" in parent.props:
return (parent, child_spec)
def own_address_cells(node):
# Used for parents pointed at by 'interrupt-map'. We can't use
# _address_cells(), because it's the #address-cells property on 'node'
# itself that matters.
address_cells = node.props.get("#address-cells")
if not address_cells:
_err("missing #address-cells on {!r} (while handling interrupt-map)"
.format(node))
return address_cells.to_num()
def spec_len_fn(node):
# Can't use _address_cells() here, because it's the #address-cells
# property on 'node' itself that matters
return own_address_cells(node) + _interrupt_cells(node)
parent, raw_spec = _map(
"interrupt", child, parent, _raw_unit_addr(child) + child_spec,
spec_len_fn, require_controller=True)
# Strip the parent unit address part, if any
return (parent, raw_spec[4*own_address_cells(parent):])
def _map_phandle_array_entry(child, parent, child_spec, basename):
# Returns a (<controller>, <data>) tuple with the final destination after
# mapping through any '<basename>-map' (e.g. gpio-map) properties. See
# _map_interrupt().
def spec_len_fn(node):
prop_name = "#{}-cells".format(basename)
if prop_name not in node.props:
_err("expected '{}' property on {!r} (referenced by {!r})"
.format(prop_name, node, child))
return node.props[prop_name].to_num()
# Do not require <prefix>-controller for anything but interrupts for now
return _map(basename, child, parent, child_spec, spec_len_fn,
require_controller=False)
def _map(prefix, child, parent, child_spec, spec_len_fn, require_controller):
# Common code for mapping through <prefix>-map properties, e.g.
# interrupt-map and gpio-map.
#
# prefix:
# The prefix, e.g. "interrupt" or "gpio"
#
# child:
# The "sender", e.g. the node with 'interrupts = <...>'
#
# parent:
# The "receiver", e.g. a node with 'interrupt-map = <...>' or
# 'interrupt-controller' (no mapping)
#
# child_spec:
# The data associated with the interrupt/GPIO/etc., as a 'bytes' object,
# e.g. <1 2> for 'foo-gpios = <&gpio1 1 2>'.
#
# spec_len_fn:
# Function called on a parent specified in a *-map property to get the
# length of the parent specifier (data after phandle in *-map), in cells
#
# require_controller:
# If True, the final controller node after mapping is required to have
# to have a <prefix>-controller property.
map_prop = parent.props.get(prefix + "-map")
if not map_prop:
if require_controller and prefix + "-controller" not in parent.props:
_err("expected '{}-controller' property on {!r} "
"(referenced by {!r})".format(prefix, parent, child))
# No mapping
return (parent, child_spec)
masked_child_spec = _mask(prefix, child, parent, child_spec)
raw = map_prop.value
while raw:
if len(raw) < len(child_spec):
_err("bad value for {!r}, missing/truncated child data"
.format(map_prop))
child_spec_entry = raw[:len(child_spec)]
raw = raw[len(child_spec):]
if len(raw) < 4:
_err("bad value for {!r}, missing/truncated phandle"
.format(map_prop))
phandle = to_num(raw[:4])
raw = raw[4:]
# Parent specified in *-map
map_parent = parent.dt.phandle2node.get(phandle)
if not map_parent:
_err("bad phandle ({}) in {!r}".format(phandle, map_prop))
map_parent_spec_len = 4*spec_len_fn(map_parent)
if len(raw) < map_parent_spec_len:
_err("bad value for {!r}, missing/truncated parent data"
.format(map_prop))
parent_spec = raw[:map_parent_spec_len]
raw = raw[map_parent_spec_len:]
# Got one *-map row. Check if it matches the child data.
if child_spec_entry == masked_child_spec:
# Handle *-map-pass-thru
parent_spec = _pass_thru(
prefix, child, parent, child_spec, parent_spec)
# Found match. Recursively map and return it.
return _map(prefix, parent, map_parent, parent_spec, spec_len_fn,
require_controller)
_err("child specifier for {!r} ({}) does not appear in {!r}"
.format(child, child_spec, map_prop))
def _mask(prefix, child, parent, child_spec):
# Common code for handling <prefix>-mask properties, e.g. interrupt-mask.
# See _map() for the parameters.
mask_prop = parent.props.get(prefix + "-map-mask")
if not mask_prop:
# No mask
return child_spec
mask = mask_prop.value
if len(mask) != len(child_spec):
_err("{!r}: expected '{}-mask' in {!r} to be {} bytes, is {} bytes"
.format(child, prefix, parent, len(child_spec), len(mask)))
return _and(child_spec, mask)
def _pass_thru(prefix, child, parent, child_spec, parent_spec):
# Common code for handling <prefix>-map-thru properties, e.g.
# interrupt-pass-thru.
#
# parent_spec:
# The parent data from the matched entry in the <prefix>-map property
#
# See _map() for the other parameters.
pass_thru_prop = parent.props.get(prefix + "-map-pass-thru")
if not pass_thru_prop:
# No pass-thru
return parent_spec
pass_thru = pass_thru_prop.value
if len(pass_thru) != len(child_spec):
_err("{!r}: expected '{}-map-pass-thru' in {!r} to be {} bytes, is {} bytes"
.format(child, prefix, parent, len(child_spec), len(pass_thru)))
res = _or(_and(child_spec, pass_thru),
_and(parent_spec, _not(pass_thru)))
# Truncate to length of parent spec.
return res[-len(parent_spec):]
def _raw_unit_addr(node):
# _map_interrupt() helper. Returns the unit address (derived from 'reg' and
# #address-cells) as a raw 'bytes'
if 'reg' not in node.props:
_err("{!r} lacks 'reg' property (needed for 'interrupt-map' unit "
"address lookup)".format(node))
addr_len = 4*_address_cells(node)
if len(node.props['reg'].value) < addr_len:
_err("{!r} has too short 'reg' property (while doing 'interrupt-map' "
"unit address lookup)".format(node))
return node.props['reg'].value[:addr_len]
def _and(b1, b2):
# Returns the bitwise AND of the two 'bytes' objects b1 and b2. Pads
# with ones on the left if the lengths are not equal.
# Pad on the left, to equal length
maxlen = max(len(b1), len(b2))
return bytes(x & y for x, y in zip(b1.rjust(maxlen, b'\xff'),
b2.rjust(maxlen, b'\xff')))
def _or(b1, b2):
# Returns the bitwise OR of the two 'bytes' objects b1 and b2. Pads with
# zeros on the left if the lengths are not equal.
# Pad on the left, to equal length
maxlen = max(len(b1), len(b2))
return bytes(x | y for x, y in zip(b1.rjust(maxlen, b'\x00'),
b2.rjust(maxlen, b'\x00')))
def _not(b):
# Returns the bitwise not of the 'bytes' object 'b'
# ANDing with 0xFF avoids negative numbers
return bytes(~x & 0xFF for x in b)
def _phandle_val_list(prop, n_cells_name):
# Parses a '<phandle> <value> <phandle> <value> ...' value. The number of
# cells that make up each <value> is derived from the node pointed at by
# the preceding <phandle>.
#
# prop:
# dtlib.Property with value to parse
#
# n_cells_name:
# The <name> part of the #<name>-cells property to look for on the nodes
# the phandles point to, e.g. "gpio" for #gpio-cells.
#
# Returns a list of (<node>, <value>) tuples, where <node> is the node
# pointed at by <phandle>.
full_n_cells_name = "#{}-cells".format(n_cells_name)
res = []
raw = prop.value
while raw:
if len(raw) < 4:
# Not enough room for phandle
_err("bad value for " + repr(prop))
phandle = to_num(raw[:4])
raw = raw[4:]
node = prop.node.dt.phandle2node.get(phandle)
if not node:
_err("bad phandle in " + repr(prop))
if full_n_cells_name not in node.props:
_err("{!r} lacks {}".format(node, full_n_cells_name))
n_cells = node.props[full_n_cells_name].to_num()
if len(raw) < 4*n_cells:
_err("missing data after phandle in " + repr(prop))
res.append((node, raw[:4*n_cells]))
raw = raw[4*n_cells:]
return res
def _address_cells(node):
# Returns the #address-cells setting for 'node', giving the number of <u32>
# cells used to encode the address in the 'reg' property
if "#address-cells" in node.parent.props:
return node.parent.props["#address-cells"].to_num()
return 2 # Default value per DT spec.
def _size_cells(node):
# Returns the #size-cells setting for 'node', giving the number of <u32>
# cells used to encode the size in the 'reg' property
if "#size-cells" in node.parent.props:
return node.parent.props["#size-cells"].to_num()
return 1 # Default value per DT spec.
def _interrupt_cells(node):
# Returns the #interrupt-cells property value on 'node', erroring out if
# 'node' has no #interrupt-cells property
if "#interrupt-cells" not in node.props:
_err("{!r} lacks #interrupt-cells".format(node))
return node.props["#interrupt-cells"].to_num()
def _slice(node, prop_name, size, size_hint):
# Splits node.props[prop_name].value into 'size'-sized chunks, returning a
# list of chunks. Raises EDTError if the length of the property is not
# evenly divisible by 'size'. 'size_hint' is a string shown on errors that
# gives a hint on how 'size' was calculated.
raw = node.props[prop_name].value
if len(raw) % size:
_err("'{}' property in {!r} has length {}, which is not evenly "
"divisible by {} (= {}). Note that #*-cells "
"properties come either from the parent node or from the "
"controller (in the case of 'interrupts')."
.format(prop_name, node, len(raw), size, size_hint))
return [raw[i:i + size] for i in range(0, len(raw), size)]
def _check_dt(dt):
# Does devicetree sanity checks. dtlib is meant to be general and
# anything-goes except for very special properties like phandle, but in
# edtlib we can be pickier.
# Check that 'status' has one of the values given in the devicetree spec.
# Accept "ok" for backwards compatibility
ok_status = {"ok", "okay", "disabled", "reserved", "fail", "fail-sss"}
for node in dt.node_iter():
if "status" in node.props:
try:
status_val = node.props["status"].to_string()
except DTError as e:
# The error message gives the path
_err(str(e))
if status_val not in ok_status:
_err("unknown 'status' value \"{}\" in {} in {}, expected one "
"of {} (see the devicetree specification)"
.format(status_val, node.path, node.dt.filename,
", ".join(ok_status)))
ranges_prop = node.props.get("ranges")
if ranges_prop:
if ranges_prop.type not in (TYPE_EMPTY, TYPE_NUMS):
_err("expected 'ranges = < ... >;' in {} in {}, not '{}' "
"(see the devicetree specification)"
.format(node.path, node.dt.filename, ranges_prop))
def _err(msg):
raise EDTError(msg)
# Custom PyYAML binding loader class to avoid modifying yaml.Loader directly,
# which could interfere with YAML loading in clients
class _BindingLoader(Loader):
pass
# Add legacy '!include foo.yaml' handling
_BindingLoader.add_constructor("!include", _binding_include)
# Use OrderedDict instead of plain dict for YAML mappings, to preserve
# insertion order on Python 3.5 and earlier (plain dicts only preserve
# insertion order on Python 3.6+). This makes testing easier and avoids
# surprises.
#
# Adapted from
# https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts.
# Hopefully this API stays stable.
_BindingLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: OrderedDict(loader.construct_pairs(node)))
_DEFAULT_PROP_TYPES = {
"compatible": "string-array",
"status": "string",
"reg": "array",
"reg-names": "string-array",
"label": "string",
"interrupt": "array",
"interrupts-extended": "compound",
"interrupt-names": "string-array",
"interrupt-controller": "boolean",
}
| 36.346154
| 103
| 0.593216
|
60b069c93be56b6237f05c129061341d2a2dd8d2
| 4,356
|
py
|
Python
|
pcg_gazebo/parsers/types/custom.py
|
argenos/pcg_gazebo
|
643a2669b60419ce34d700d5c75fcca7273fd137
|
[
"ECL-2.0",
"Apache-2.0"
] | 40
|
2020-02-04T18:16:49.000Z
|
2022-02-22T11:36:34.000Z
|
pcg_gazebo/parsers/types/custom.py
|
argenos/pcg_gazebo
|
643a2669b60419ce34d700d5c75fcca7273fd137
|
[
"ECL-2.0",
"Apache-2.0"
] | 75
|
2020-01-23T13:40:50.000Z
|
2022-02-09T07:26:01.000Z
|
pcg_gazebo/parsers/types/custom.py
|
argenos/pcg_gazebo
|
643a2669b60419ce34d700d5c75fcca7273fd137
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2020-09-10T06:35:41.000Z
|
2022-02-20T19:08:17.000Z
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import XMLBase
from lxml.etree import Element, SubElement
class XMLCustom(XMLBase):
_NAME = ''
def __init__(self, default=dict()):
XMLBase.__init__(self)
assert isinstance(default, dict), 'The default input must be a dict'
self._default = default
self._value = default
def _set_value(self, value):
assert isinstance(
value, dict), 'Input value must be a dict,' \
' type={}, input={}'.format(
self._NAME, value)
self._value = value
def _get_elem_as_xml(self, xml_elem, value):
if isinstance(value, dict):
for tag in value:
if isinstance(value[tag], dict) and \
'attributes' in value[tag] and \
'value' in value[tag]:
child = Element(tag, attrib=value[tag]['attributes'])
self._get_elem_as_xml(child, value[tag]['value'])
xml_elem.append(child)
elif tag.startswith('@'):
xml_elem.set(tag.replace('@', ''), value[tag])
else:
child = Element(tag)
self._get_elem_as_xml(child, value[tag])
xml_elem.append(child)
elif isinstance(value, bool) or isinstance(value, int):
xml_elem.text = '{}'.format(int(value))
elif isinstance(value, float) or isinstance(value, str):
xml_elem.text = '{}'.format(value)
elif isinstance(value, list):
output_str = ' '.join(['{}'] * len(value))
xml_elem.text = output_str.format(*value)
return xml_elem
def reset(self, mode=None, with_optional_elements=False):
self._value = self._default
XMLBase.reset(self)
def is_valid(self):
if not isinstance(self._value, dict):
print('Value must be a dict')
return False
return True
def get_formatted_value_as_str(self):
assert self.is_valid(), 'Invalid scalar value'
return '{}'.format(self._value)
def to_xml(self, root=None, version='1.6'):
assert self.is_valid(), 'XML data is invalid'
if root is None:
base = Element(self._NAME, attrib=self.attributes)
else:
base = SubElement(root, self._NAME, attrib=self.attributes)
self._get_elem_as_xml(base, self._value)
return base
def is_value(self, tag):
return tag in self._value
def find_values(self, pattern):
output_tags = list()
for tag in self._value:
if pattern == self._value[tag]:
output_tags.append(tag)
return output_tags
def replace_parameter_value(self, old_value, new_value):
self._replace_value_in_dict(self._value, old_value, new_value)
def from_dict(self, sdf_data, ignore_tags=list()):
# For custom XML blocks that contain attributes but
# are not standard SDF/URDF elements
if 'attributes' in sdf_data and 'value' in sdf_data:
for tag in sdf_data['attributes']:
self._attributes[tag] = str(sdf_data['attributes'][tag])
# Set value
setattr(self, 'value', sdf_data['value'])
else:
super().from_dict(sdf_data, ignore_tags)
@staticmethod
def _replace_value_in_dict(data, old_value, new_value):
for tag in data:
if isinstance(data[tag], dict):
XMLCustom._replace_value_in_dict(
data[tag], old_value, new_value)
elif data[tag] == old_value:
data[tag] = new_value
| 36.915254
| 76
| 0.605601
|
68bdf01996f3bba2f10b0e63e9f397d5a946c651
| 570
|
py
|
Python
|
python/decorator/amount_with_properties.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/decorator/amount_with_properties.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/decorator/amount_with_properties.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
class Amount:
def __init__(self):
# 私有属性
self._amount = None
def get_amount(self):
return self._amount
def set_amount(self, value):
if isinstance(value, int) or isinstance(value, float):
self._amount = value
else:
print(f'值必须是int或float')
amount = property(get_amount, set_amount)
if __name__ == '__main__':
amt = Amount()
print(f'当前的amount值 {amt.amount}')
amt.amount = 'the'
print(f'当前的amount值 {amt.amount}')
amt.amount = 5.5
print(f'当前的amount值 {amt.amount}')
| 21.923077
| 62
| 0.598246
|
62b147580e59bc279ea79bedfb8fb0b6742faecc
| 12,097
|
py
|
Python
|
examples/experiments/freezeout/batch.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | 101
|
2017-06-05T07:33:54.000Z
|
2018-10-28T04:55:23.000Z
|
examples/experiments/freezeout/batch.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | 243
|
2018-11-29T02:03:55.000Z
|
2022-02-21T08:28:29.000Z
|
examples/experiments/freezeout/batch.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | 35
|
2019-01-29T14:26:14.000Z
|
2021-12-30T01:39:02.000Z
|
""" File with class batch with resnet network """
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer_conv2d as xavier
from batchflow import action, model, Batch # pylint: disable=no-name-in-module
def conv_block(input_tensor, kernel, filters, name, strides=(2, 2)):
""" Function to create block of ResNet network which include
three convolution layers and one skip-connection layer.
Args:
input_tensor: input tensorflow layer
kernel: tuple of kernel size in convolution layer
filters: list of nums filters in convolution layers
name: name of block
strides: typle of strides in convolution layer
Output:
x: Block output layer """
filters1, filters2, filters3 = filters
x = tf.layers.conv2d(input_tensor, filters1, (1, 1), strides, name='convfir' + name, activation=tf.nn.relu,\
kernel_initializer=xavier())
x = tf.layers.conv2d(x, filters2, kernel, name='convsec' + name, activation=tf.nn.relu, padding='SAME',\
kernel_initializer=xavier())
x = tf.layers.conv2d(x, filters3, (1, 1), name='convthr' + name,\
kernel_initializer=xavier())
shortcut = tf.layers.conv2d(input_tensor, filters3, (1, 1), strides, name='short' + name, \
kernel_initializer=xavier())
x = tf.concat([x, shortcut], axis=1)
x = tf.nn.relu(x)
return x
def identity_block(input_tensor, kernel, filters, name):
""" Function to create block of ResNet network which include
three convolution layers.
Args:
input_tensor: input tensorflow layer.
kernel: tuple of kernel size in convolution layer.
filters: list of nums filters in convolution layers.
name: name of block.
Output:
x: Block output layer """
filters1, filters2, filters3 = filters
x = tf.layers.conv2d(input_tensor, filters1, (1, 1), name='convfir' + name, activation=tf.nn.relu,\
kernel_initializer=xavier())
x = tf.layers.conv2d(x, filters2, kernel, name='convsec' + name, activation=tf.nn.relu, padding='SAME',\
kernel_initializer=xavier())
x = tf.layers.conv2d(x, filters3, (1, 1), name='convthr' + name,\
kernel_initializer=xavier())
x = tf.concat([x, input_tensor], axis=1)
x = tf.nn.relu(x)
return x
def create_train(opt, src, global_step, loss, it, global_it, learn, scaled):
""" Function for create optimizer to each layer.
Args:
src: name of layer which be optimize.
glogal_step: tenforflow Variable. Need to count train steps.
loss: loss function.
it: number of last iteraion for current layer.
global_it: number of last interation for all layers.
learn: Basic learning rate for current layer.
scaled: method of disable layers.
Output:
New optimizer. """
def learning_rate(last, src, global_it, learn, scaled):
""" Function for create step of changing learning rate.
Args:
last: number of last iteration.
src: mane of layer which be optimize.
global_it: number of last interation for all layers.
learn: Basic learning rate for current layer.
scaled: method of disable layers.
Output:
bound: list of bounders - number of iteration, after which learning rate will change.
values: list of new learnings rates.
var: name of optimize layers"""
last = int(last)
if scaled is True:
values = [0.5 * learn/last * (1 + np.cos(np.pi * i / last)) for i in range(2, last+1)] + [1e-2]
else:
values = [0.5 * learn * (1 + np.cos(np.pi * i / last)) for i in range(2, last+1)] + [1e-2]
bound = list(np.linspace(0, last, len(range(2, last+1)), dtype=np.int32)) + [global_it]
var = [i for i in tf.trainable_variables() if src in i.name or 'dense' in i.name]
return list(np.int32(bound)), list(np.float32(values)), var
b, val, var = learning_rate(it, src, global_it, learn, scaled)
learning_rate = tf.train.piecewise_constant(global_step, b, val)
return opt(learning_rate, 0.9, use_nesterov=True).minimize(loss, global_step, var)
class ResBatch(Batch):
""" Batch to train models with and without FreezeOut """
def __init__(self, index, *args, **kwargs):
""" Init function """
super().__init__(index, *args, **kwargs)
@property
def components(self):
""" Define componentis. """
return 'images', 'lables'
@model(mode='dynamic')
def freeznet(self, config=None):
""" Simple implementation of ResNet with FreezeOut method.
Args:
config: dict with params:
-iteartions: Total number iteration for train model.
-degree: 1 or 3.
-learning_rate: initial learning rate.
-scaled: True or False.
Outputs:
Method return list with len = 2 and some params:
[0][0]: indices - Plcaeholder which takes batch indices.
[0][1]: all_data - Placeholder which takes all images.
[0][2]; all_lables - Placeholder for lables.
[0][3]: loss - Value of loss function.
[0][4]: train - List of train optimizers.
[0][5]: prob - softmax output, need to prediction.
[1][0]: accuracy - Current accuracy
[1][1]: session - tf session """
iteration = config['iteration']
learning_rate = config['learning_rate']
scaled = config['scaled']
with tf.Graph().as_default():
indices = tf.placeholder(tf.int32, shape=[None, 1], name='indices')
all_data = tf.placeholder(tf.float32, shape=[50000, 28, 28], name='all_data')
input_batch = tf.gather_nd(all_data, indices, name='input_batch')
input_batch = tf.reshape(input_batch, shape=[-1, 28, 28, 1], name='x_to_tens')
net = tf.layers.conv2d(input_batch, 32, (7, 7), strides=(2, 2), padding='SAME', activation=tf.nn.relu, \
kernel_initializer=xavier(), name='1')
net = tf.layers.max_pooling2d(net, (2, 2), (2, 2), name='max_pool')
net = conv_block(net, 3, [32, 32, 128], name='2', strides=(1, 1))
net = identity_block(net, 3, [32, 32, 128], name='3')
net = conv_block(net, 3, [64, 64, 256], name='4', strides=(1, 1))
net = identity_block(net, 3, [64, 64, 256], name='5')
net = tf.layers.average_pooling2d(net, (7, 7), strides=(1, 1))
net = tf.contrib.layers.flatten(net)
with tf.variable_scope('dense'):
net = tf.layers.dense(net, 10, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='dense')
prob = tf.nn.softmax(net, name='soft')
all_labels = tf.placeholder(tf.float32, [None, 10], name='all_labels')
y = tf.gather_nd(all_labels, indices, name='y')
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y), name='loss')
global_steps = []
train = []
for i in range(1, 6):
global_steps.append(tf.Variable(0, trainable=False, name='var_{}'.format(i)))
train.append(create_train(tf.train.MomentumOptimizer, str(i), \
global_steps[-1], loss, iteration * (i / 10 + 0.5) ** config['degree'], \
iteration, learning_rate, scaled))
lables_hat = tf.cast(tf.argmax(net, axis=1), tf.float32, name='lables_hat')
lables = tf.cast(tf.argmax(y, axis=1), tf.float32, name='lables')
accuracy = tf.reduce_mean(tf.cast(tf.equal(lables_hat, lables), tf.float32, name='accuracy'))
session = tf.Session()
session.run(tf.global_variables_initializer())
return [[indices, all_data, all_labels, loss, train, prob], [accuracy, session]]
@action(model='freeznet')
def train_freez(self, models, train_loss, data, lables):
""" Function for traning ResNet with freezeout method.
Args:
sess: tensorflow session.
train_loss: list with info of train loss.
train_acc: list with info of train accuracy.
Output:
self """
indices, all_data, all_lables, loss, train, _ = models[0]
session = models[1][1]
loss, _ = session.run([loss, train], feed_dict={indices:self.indices.reshape(-1, 1), all_lables:lables, \
all_data:data})
train_loss.append(loss)
return self
@model(mode='dynamic')
def resnet(self):
""" Simple implementation of Resnet.
Args:
self
Outputs:
Method return list with len = 2 and some params:
[0][0]: indices - Placeholder which takes batch indices.
[0][1]: all_data - Placeholder which takes all images.
[0][2]; all_lables - Placeholder for lables.
[0][3]: loss - Value of loss function.
[0][4]: train - List of train optimizers.
[0][5]: prob - softmax output, need to prediction.
[1][0]: accuracy - Current accuracy
[1][1]: session - tf session """
with tf.Graph().as_default():
indices = tf.placeholder(tf.int32, shape=[None, 1])
all_data = tf.placeholder(tf.float32, shape=[50000, 28, 28])
input_batch = tf.gather_nd(all_data, indices)
x1_to_tens = tf.reshape(input_batch, shape=[-1, 28, 28, 1])
net1 = tf.layers.conv2d(x1_to_tens, 32, (7, 7), strides=(2, 2), padding='SAME', activation=tf.nn.relu, \
kernel_initializer=xavier(), name='11')
net1 = tf.layers.max_pooling2d(net1, (2, 2), (2, 2))
net1 = conv_block(net1, 3, [32, 32, 128], name='22', strides=(1, 1))
net1 = identity_block(net1, 3, [32, 32, 128], name='33')
net1 = conv_block(net1, 3, [64, 64, 256], name='53', strides=(1, 1))
net1 = identity_block(net1, 3, [64, 64, 256], name='63')
net1 = tf.layers.average_pooling2d(net1, (7, 7), strides=(1, 1))
net1 = tf.contrib.layers.flatten(net1)
with tf.variable_scope('dense3'):
net1 = tf.layers.dense(net1, 10, kernel_initializer=tf.contrib.layers.xavier_initializer())
prob1 = tf.nn.softmax(net1)
all_lables = tf.placeholder(tf.float32, [None, 10])
y = tf.gather_nd(all_lables, indices)
loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net1, labels=y), name='loss3')
train1 = tf.train.MomentumOptimizer(0.03, 0.8, use_nesterov=True).minimize(loss1)
lables_hat1 = tf.cast(tf.argmax(net1, axis=1), tf.float32, name='lables_3at')
lables1 = tf.cast(tf.argmax(y, axis=1), tf.float32, name='labl3es')
accuracy1 = tf.reduce_mean(tf.cast(tf.equal(lables_hat1, lables1), tf.float32, name='a3ccuracy'))
session = tf.Session()
session.run(tf.global_variables_initializer())
return [[indices, all_data, all_lables, loss1, train1, prob1], [accuracy1, session]]
@action(model='resnet')
def train_res(self, models, train_loss, data, lables):
""" Function for traning ResNet.
Args:
sess: tensorflow session.
train_loss: list with info of train loss.
train_acc: list with info of train accuracy.
Output:
self """
session = models[1][1]
indices, all_data, all_lables, loss, train, _ = models[0]
loss, _ = session.run([loss, train], feed_dict={indices:self.indices.reshape(-1, 1),\
all_lables:lables, all_data:data})
train_loss.append(loss)
return self
| 43.203571
| 119
| 0.592874
|
7482ad770d17f5d76af28b6be399539b42e00895
| 294
|
py
|
Python
|
ukpsummarizer-be/cplex/python/docplex/docplex/mp/sktrans/__init__.py
|
avineshpvs/vldb2018-sherlock
|
5e116f42f44c50bcb289be3c4b4b76e29b238c18
|
[
"Apache-2.0"
] | 2
|
2019-01-13T08:41:00.000Z
|
2021-03-27T22:55:10.000Z
|
ukpsummarizer-be/cplex/python/docplex/docplex/mp/sktrans/__init__.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | null | null | null |
ukpsummarizer-be/cplex/python/docplex/docplex/mp/sktrans/__init__.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | 4
|
2018-11-06T16:12:55.000Z
|
2019-08-21T13:22:32.000Z
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2017
# --------------------------------------------------------------------------
| 58.8
| 77
| 0.343537
|
6d62173d80c07076f85258ceb8bd0e36a053c8b9
| 7,620
|
py
|
Python
|
apps/vote_summary.py
|
max-lutz/open-data-french-national-assembly
|
518c81618513a79a88200d392faa53d88c787c94
|
[
"MIT"
] | null | null | null |
apps/vote_summary.py
|
max-lutz/open-data-french-national-assembly
|
518c81618513a79a88200d392faa53d88c787c94
|
[
"MIT"
] | null | null | null |
apps/vote_summary.py
|
max-lutz/open-data-french-national-assembly
|
518c81618513a79a88200d392faa53d88c787c94
|
[
"MIT"
] | 1
|
2022-02-16T04:02:23.000Z
|
2022-02-16T04:02:23.000Z
|
import streamlit as st
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import os
from matplotlib.backends.backend_agg import RendererAgg
#Loading the data
@st.cache
def get_data_votes():
df = pd.read_csv(os.path.join(os.getcwd(), 'data', 'df_vote_descr.csv'))
df['year'] = df['date'].astype(str).str[0:4]
df['month'] = df['date'].astype(str).str[5:7]
df['day'] = df['date'].astype(str).str[8:10]
df['datetime'] = pd.to_datetime(df[['year', 'month', 'day']], errors = 'coerce')
df['percentage of votes in favor'] = 100*df['pour']/df['nb votants']
df['accepted'] = 'no'
df.loc[df['pour'] >= df['requis'], 'accepted'] = 'yes'
df = df.drop(columns=['date'])
df.columns = df.columns.str.replace('demandeur ', '')
return df
@st.cache
def get_data_deputies():
df = pd.read_csv(os.path.join(os.getcwd(), 'data', 'df_dep.csv'))
df = df.drop(columns=['family name', 'first name', 'date of birth'])
return df
@st.cache
def get_data_political_parties():
df = pd.read_csv(os.path.join(os.getcwd(), 'data', 'df_polpar.csv'))
df = df.drop(columns=['code'])
return df
def app():
#configuration of the page
#st.set_page_config(layout="wide")
matplotlib.use("agg")
_lock = RendererAgg.lock
SPACER = .2
ROW = 1
df_votes = get_data_votes()
df_polpar = get_data_political_parties()
# Sidebar
#selection box for the different features
st.sidebar.header('Select what to display')
nb_voters = st.sidebar.slider("Number of voters", int(df_votes['nb votants'].min()), int(df_votes['nb votants'].max()), (int(df_votes['nb votants'].min()), int(df_votes['nb votants'].max())), 1)
#creates masks from the sidebar selection widgets
mask_nb_voters = df_votes['nb votants'].between(nb_voters[0], nb_voters[1])
df_votes_selected = df_votes[mask_nb_voters]
title_spacer1, title, title_spacer_2 = st.beta_columns((.1,ROW,.1))
with title:
st.title('Vote vizualisation tool')
row0_spacer1, row0_1, row0_spacer2 = st.beta_columns((SPACER/2,ROW, SPACER/2))
with row0_1:
st.header('Data (all the votes from June 2017 to mid March 2021)')
#st.write(df_votes_selected)
### Vote repartition
row1_spacer1, row1_1, row1_spacer2, row1_2, row1_spacer3 = st.beta_columns((SPACER,ROW, SPACER,ROW, SPACER))
with row1_1, _lock:
st.header('Repartition of vote presence')
fig, ax = plt.subplots(figsize=(5, 5))
ax = sns.histplot(data=df_votes_selected, x="nb votants", hue="accepted", bins=40)
ax.set_xlabel('Number of deputies voting')
st.pyplot(fig)
with row1_2, _lock:
st.header('Repartition of votes in favor')
fig, ax = plt.subplots(figsize=(5, 5))
ax = sns.histplot(data=df_votes_selected, x="percentage of votes in favor", bins=40)
#ax = sns.scatterplot(data=df_votes_selected, x="nb votants", y="percentage of votes in favor")
st.pyplot(fig)
#heatmap (12;31) with a year selector and a data selector (nb of votes or presence)
title_spacer2, title_2, title_spacer_2 = st.beta_columns((.1,ROW,.1))
with title_2:
st.header('Heatmap of the votes during the year')
row2_spacer1, row2_1, row2_spacer2, row2_2, row2_spacer3 = st.beta_columns((SPACER,ROW, SPACER,ROW, SPACER))
with row2_1, _lock:
year_selected = st.selectbox('Select year', ['2017','2018','2019', '2020', '2021'], key='1')
with row2_2, _lock:
data_selected = st.selectbox('Select data', ['Nb of votes','Deputy presence'], key='2')
df_heatmap = df_votes_selected.drop(columns=['code', 'type', 'titre', 'demandeur', 'requis', 'pour', 'contre', 'abstentions', 'datetime'])
df_heatmap = df_heatmap.loc[df_heatmap['year'] == year_selected]
df_heatmap['count'] = 1
df_heatmap['nb votants'] = df_heatmap['nb votants']/574
df_heatmap['nb votants'] = (df_heatmap['nb votants']*100).astype(int)
df_heatmap = df_heatmap.groupby(['year','month','day']).agg({'nb votants':'mean','count':'sum'})
if(data_selected == 'Nb of votes'):
df_heatmap = df_heatmap.reset_index().pivot(index='month', columns='day', values='count')
heatmap_title = 'Number of votes at the national assembly on a particular day'
elif(data_selected == 'Deputy presence'):
df_heatmap = df_heatmap.reset_index().pivot(index='month', columns='day', values='nb votants').astype(float)
heatmap_title = 'Percentage of deputy presence at the national assembly on a particular day'
df_heatmap.fillna(0, inplace=True)
df_heatmap.columns = df_heatmap.columns.astype(int)
df_heatmap.index = df_heatmap.index.astype(int)
# Ensure all month in table
df_heatmap = df_heatmap.reindex(range(1,13), axis=0, fill_value=0)
# Ensure all days in table
df_heatmap = df_heatmap.reindex(range(1,32), axis=1, fill_value=0).astype(int)
row3_spacer1, row3_1, row3_spacer2 = st.beta_columns((SPACER, ROW, SPACER))
palette = sns.color_palette("Greens",12)
palette[0] = (1,1,1)
with row3_1, _lock:
fig, ax = plt.subplots(figsize=(10, 4))
ax = sns.heatmap(df_heatmap,
cmap=palette, # Choose a squential colormap
annot_kws={'fontsize':11}, # Reduce size of label to fit
fmt='', # Interpret labels as strings
square=True, # Force square cells
linewidth=0.01, # Add gridlines
linecolor="#222", # Adjust gridline color
robust = True
)
ax.set_title(heatmap_title)
ax.set_ylabel('Month of the year')
ax.set_xlabel('Days of the month')
plt.tight_layout()
st.pyplot(fig)
row4_spacer1, row4_1, row4_spacer2, row4_2, row4_spacer3 = st.beta_columns((SPACER,ROW, SPACER,ROW, SPACER))
#get the total number of demand from each party
df_demandeur = df_votes_selected.drop(df_votes_selected.columns[0:10], axis=1)
df_demandeur = df_demandeur.drop(df_demandeur.columns[-7:-1], axis=1)
df_demandeur = df_demandeur.sum(axis=0)
df_demandeur = df_demandeur.drop(labels=['ND', 'GOV', 'EELV', 'MODEM', 'CDP'])
#merge the dataframe with the number of demand with the polpar df to get colors and nb of members
df = df_polpar.set_index('abreviated_name').merge(pd.DataFrame(data = [df_demandeur.values], columns = df_demandeur.index).T, left_index=True, right_index=True)
df.columns = ['name', 'members', 'color', 'demand']
df['demand per deputy'] = df['demand']/df['members']
with row4_1, _lock:
st.header('Number of law propositions')
st.text('')
st.text('')
fig, ax = plt.subplots(figsize=(5, 5))
ax.pie(df['demand'], labels=(df.index + ' (' + df['demand'].map(str) + ')'),
wedgeprops = { 'linewidth' : 7, 'edgecolor' : 'white'}, colors=df['color'].to_list())
p = plt.gcf()
p.gca().add_artist(plt.Circle( (0,0), 0.7, color='white'))
st.pyplot(fig)
with row4_2, _lock:
st.header('Average number of law propositions per deputy')
st.text('')
fig, ax = plt.subplots(figsize=(5, 5))
ax.pie(df['demand per deputy'], labels=(df.index + ' (' + round(df['demand per deputy'].map(float)).map(str) + ')'),
wedgeprops = { 'linewidth' : 7, 'edgecolor' : 'white'}, colors=df['color'].to_list())
p = plt.gcf()
p.gca().add_artist(plt.Circle( (0,0), 0.7, color='white'))
st.pyplot(fig)
| 42.569832
| 198
| 0.639501
|
0b53eb2bfaa80d98e4b189f3be9447f4a1d055f1
| 1,232
|
py
|
Python
|
src/deliveroo_project/Deilveroo.py
|
jayjayspinks/DeliverooProject2
|
a7a5d2b03c6af4344308f924577ac3e7d0274126
|
[
"MIT"
] | null | null | null |
src/deliveroo_project/Deilveroo.py
|
jayjayspinks/DeliverooProject2
|
a7a5d2b03c6af4344308f924577ac3e7d0274126
|
[
"MIT"
] | null | null | null |
src/deliveroo_project/Deilveroo.py
|
jayjayspinks/DeliverooProject2
|
a7a5d2b03c6af4344308f924577ac3e7d0274126
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import *
class Game(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
headline_text = tk.Label(self, text="Meals Without Deliveroo!!", font=("Ariel",160), fg="red")
headline_text.pack()
self.label_var = tk.IntVar()
label = tk.Label(self, textvariable=self.label_var, font=("Ariel",600), fg="black") # Assigned that variable to the label
label.pack()
deliveroo_button = tk.Button(self, text="Deliveroo",
relief="groove", command=self.reset_var, width=20, height=20, font=("Ariel",70), fg="red")
deliveroo_button.pack(side=LEFT)
not_deliveroo = tk.Button(self, text="Not Deliveroo!!",
relief="groove", command=self.add_var, width=20, height=20, font=("Ariel",70), fg="green")
not_deliveroo.pack(side=RIGHT)
def add_var(self):
self.label_var.set(self.label_var.get() + 1)
def reset_var(self):
self.label_var.set(0)
root = tk.Tk()
root.title("Meals Without Deliveroo!!")
root.attributes('-fullscreen', True)
#root.resizable()
#root.geometry("2500x1000")
game = Game(root)
game.pack()
root.mainloop()
| 34.222222
| 131
| 0.627435
|
558f88cd519e1cd45eb1f2e4ca92f4221ac031e3
| 1,285
|
py
|
Python
|
src/api_v1/viewsets/system.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/api_v1/viewsets/system.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/api_v1/viewsets/system.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import viewsets
from api_v1.serializers.system import (
Charakter_FormalnySerializer,
Typ_KBNSerializer,
JezykSerializer,
Dyscyplina_NaukowaSerializer,
KonferencjaSerializer,
Seria_WydawniczaSerializer,
)
from bpp.models import (
Charakter_Formalny,
Typ_KBN,
Jezyk,
Dyscyplina_Naukowa,
Konferencja,
Seria_Wydawnicza,
)
class Seria_WydawniczaViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Seria_Wydawnicza.objects.all()
serializer_class = Seria_WydawniczaSerializer
class KonferencjaViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Konferencja.objects.all()
serializer_class = KonferencjaSerializer
class Dyscyplina_NaukowaViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Dyscyplina_Naukowa.objects.all()
serializer_class = Dyscyplina_NaukowaSerializer
class Charakter_FormalnyViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Charakter_Formalny.objects.all()
serializer_class = Charakter_FormalnySerializer
class Typ_KBNViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Typ_KBN.objects.all()
serializer_class = Typ_KBNSerializer
class JezykViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Jezyk.objects.all()
serializer_class = JezykSerializer
| 26.22449
| 63
| 0.796109
|
b28f7ecf27bc96f1e292831431fe03d1e16a3554
| 11,744
|
py
|
Python
|
pywraps/py_nalt.py
|
joeleong/idapython
|
d99a89369741ce272ba792d6f087d0739a2f8ac7
|
[
"BSD-3-Clause"
] | null | null | null |
pywraps/py_nalt.py
|
joeleong/idapython
|
d99a89369741ce272ba792d6f087d0739a2f8ac7
|
[
"BSD-3-Clause"
] | null | null | null |
pywraps/py_nalt.py
|
joeleong/idapython
|
d99a89369741ce272ba792d6f087d0739a2f8ac7
|
[
"BSD-3-Clause"
] | null | null | null |
#<pycode(py_nalt)>
import _ida_idaapi
SWI_SPARSE = 0x1
"""sparse switch ( value table present ) otherwise lowcase present"""
SWI_V32 = 0x2
"""32-bit values in table"""
SWI_J32 = 0x4
"""32-bit jump offsets"""
SWI_VSPLIT = 0x8
"""value table is split (only for 32-bit values)"""
SWI_DEFAULT = 0x10
"""default case is present"""
SWI_DEF_IN_TBL = 0x20
"""default case is an entry in the jump table.
This flag is only applicable in the case of a sparse
nonindirect switch (i.e. a switch with a values table).
<jump table size> == <value table size> + 1.
The default case entry is the last one in the table
(or the first one in the case of an inversed jump table)."""
SWI_JMP_INV = 0x40
"""jumptable is inversed (last entry is for first entry in values table)"""
SWI_SHIFT_MASK = 0x180
"""use formula (element*shift + elbase) to find jump targets"""
SWI_ELBASE = 0x200
"""elbase is present (if not and shift!=0, endof(jumpea) is used)"""
SWI_JSIZE = 0x400
"""jump offset expansion bit"""
SWI_VSIZE = 0x800
"""value table element size expansion bit"""
SWI_SEPARATE = 0x1000
"""do not create an array of individual dwords"""
SWI_SIGNED = 0x2000
"""jump table entries are signed"""
SWI_CUSTOM = 0x4000
"""custom jump table.
\ph{create_switch_xrefs} will be called to create code xrefs for the
table. Custom jump table must be created by the module
(see also #SWI_STDTBL)"""
SWI_INDIRECT = 0x00010000
"""value table elements are used as indexes into the jump table"""
SWI_SUBTRACT = 0x00020000
"""table values are subtracted from the elbase instead of being addded"""
SWI_HXNOLOWCASE = 0x00040000
"""lowcase value should not be used by the decompiler (internal flag)"""
SWI_STDTBL = 0x00080000
"""custom jump table with standard table formatting.
ATM IDA doesn't use SWI_CUSTOM for switches with standard
table formatting. So this flag can be considered as obsolete."""
SWI_DEFRET = 0x00100000
"""return in the default case (defjump==BADADDR)"""
# --------------------------------------------------------------------------
class switch_info_t(ida_idaapi.py_clinked_object_t):
def __init__(self, lnk = None):
ida_idaapi.py_clinked_object_t.__init__(self, lnk)
self.bc695_api = False
def _create_clink(self):
return _ida_nalt.switch_info_t_create()
def _del_clink(self, lnk):
return _ida_nalt.switch_info_t_destroy(lnk)
def assign(self, other):
return _ida_nalt.switch_info_t_assign(self, other)
def is_indirect(self):
return (self.flags & SWI_INDIRECT) != 0
def is_subtract(self):
return (self.flags & SWI_SUBTRACT) != 0
def get_jtable_size(self):
return self.jcases if self.is_indirect() else self.ncases
def get_lowcase(self):
return self.ind_lowcase if self.is_indirect() else self.lowcase
def set_expr(self, r, dt):
self.regnum = r
self.regdtype = dt
def get_shift(self):
return (self.flags & SWI_SHIFT_MASK) >> 7
def set_shift(self, shift):
self.flags &= ~SWI_SHIFT_MASK
self.flags |= ((shift & 3) << 7)
def get_jtable_element_size(self):
code = self.flags & (SWI_J32|SWI_JSIZE)
if code == 0: return 2
elif code == SWI_J32: return 4
elif code == SWI_JSIZE: return 1
else: return 8
def set_jtable_element_size(self, size):
self.flags &= ~(SWI_J32|SWI_JSIZE)
if size == 4: self.flags |= SWI_J32
elif size == 1: self.flags |= SWI_JSIZE
elif size == 8: self.flags |= SWI_J32|SWI_JSIZE
elif size != 2: return False
return True
def get_vtable_element_size(self):
code = self.flags & (SWI_V32|SWI_VSIZE)
if code == 0: return 2
elif code == SWI_V32: return 4
elif code == SWI_VSIZE: return 1
return 8
def set_vtable_element_size(self, size):
self.flags &= ~SWI_V32|SWI_VSIZE
if size == 4: self.flags |= SWI_V32
elif size == 1: self.flags |= SWI_VSIZE
elif size == 8: self.flags |= SWI_V32|SWI_VSIZE
elif size != 2: return False
return True
#
# Autogenerated
#
def __get_regdtype__(self):
return _ida_nalt.switch_info_t_get_regdtype(self)
def __set_regdtype__(self, v):
_ida_nalt.switch_info_t_set_regdtype(self, v)
def __get_jcases__(self):
return _ida_nalt.switch_info_t_get_jcases(self)
def __set_jcases__(self, v):
_ida_nalt.switch_info_t_set_jcases(self, v)
def __get_regnum__(self):
return _ida_nalt.switch_info_t_get_regnum(self)
def __set_regnum__(self, v):
_ida_nalt.switch_info_t_set_regnum(self, v)
def __get_flags__(self):
return _ida_nalt.switch_info_t_get_flags(self)
def __set_flags__(self, v):
_ida_nalt.switch_info_t_set_flags(self, v)
def __get_ncases__(self):
return _ida_nalt.switch_info_t_get_ncases(self)
def __set_ncases__(self, v):
_ida_nalt.switch_info_t_set_ncases(self, v)
def __get_defjump__(self):
return _ida_nalt.switch_info_t_get_defjump(self)
def __set_defjump__(self, v):
_ida_nalt.switch_info_t_set_defjump(self, v)
def __get_jumps__(self):
return _ida_nalt.switch_info_t_get_jumps(self)
def __set_jumps__(self, v):
_ida_nalt.switch_info_t_set_jumps(self, v)
def __get_elbase__(self):
return _ida_nalt.switch_info_t_get_elbase(self)
def __set_elbase__(self, v):
_ida_nalt.switch_info_t_set_elbase(self, v)
def __get_startea__(self):
return _ida_nalt.switch_info_t_get_startea(self)
def __set_startea__(self, v):
_ida_nalt.switch_info_t_set_startea(self, v)
def __get_custom__(self):
return _ida_nalt.switch_info_t_get_custom(self)
def __set_custom__(self, v):
_ida_nalt.switch_info_t_set_custom(self, v)
def __get_ind_lowcase__(self):
return _ida_nalt.switch_info_t_get_ind_lowcase(self)
def __set_ind_lowcase__(self, v):
_ida_nalt.switch_info_t_set_ind_lowcase(self, v)
def __get_values_lowcase__(self):
return _ida_nalt.switch_info_t_get_values_lowcase(self)
def __set_values_lowcase__(self, v):
_ida_nalt.switch_info_t_set_values_lowcase(self, v)
regdtype = property(__get_regdtype__, __set_regdtype__)
"""size of the switch expression register as dtype"""
jcases = property(__get_jcases__, __set_jcases__)
"""number of entries in the jump table (SWI_INDIRECT)"""
regnum = property(__get_regnum__, __set_regnum__)
"""the switch expression as a register number"""
flags = property(__get_flags__, __set_flags__)
"""switch info flags"""
ncases = property(__get_ncases__, __set_ncases__)
"""number of cases (excluding default)"""
defjump = property(__get_defjump__, __set_defjump__)
"""default jump address"""
jumps = property(__get_jumps__, __set_jumps__)
"""jump table address"""
elbase = property(__get_elbase__, __set_elbase__)
"""element base"""
startea = property(__get_startea__, __set_startea__)
"""start of switch idiom"""
custom = property(__get_custom__, __set_custom__)
"""information for custom tables (filled and used by modules)"""
ind_lowcase = property(__get_ind_lowcase__, __set_ind_lowcase__)
values = property(__get_values_lowcase__, __set_values_lowcase__)
lowcase = property(__get_values_lowcase__, __set_values_lowcase__)
#</pycode(py_nalt)>
#<pycode_BC695(py_nalt)>
ASCSTR_LAST=7
ASCSTR_LEN2=STRTYPE_LEN2
ASCSTR_LEN4=STRTYPE_LEN4
ASCSTR_PASCAL=STRTYPE_PASCAL
ASCSTR_TERMCHR=STRTYPE_TERMCHR
ASCSTR_ULEN2=STRTYPE_LEN2_16
ASCSTR_ULEN4=STRTYPE_LEN4_16
ASCSTR_UNICODE=STRTYPE_C_16
ASCSTR_UTF16=STRTYPE_C_16
ASCSTR_UTF32=STRTYPE_C_32
REF_VHIGH=V695_REF_VHIGH
REF_VLOW=V695_REF_VLOW
SWI_END_IN_TBL=SWI_DEF_IN_TBL
SWI_EXTENDED=0x8000
SWI2_INDIRECT=SWI_INDIRECT >> 16
SWI2_SUBTRACT=SWI_SUBTRACT >> 16
import ida_netnode
RIDX_AUTO_PLUGINS=ida_netnode.BADNODE
change_encoding_name=rename_encoding
@bc695redef
def del_tinfo2(ea, n=None):
if n is not None:
return del_op_tinfo(ea, n)
else:
return del_tinfo(ea)
get_encodings_count=get_encoding_qty
def get_op_tinfo(*args):
import ida_typeinf
if isinstance(args[2], ida_typeinf.tinfo_t): # 6.95: ea, n, tinfo_t
ea, n, tif = args
else: # 7.00: tinfo_t, ea, n
tif, ea, n = args
return _ida_nalt.get_op_tinfo(tif, ea, n)
get_op_tinfo2=get_op_tinfo
@bc695redef
def is_unicode(strtype):
return (strtype & STRWIDTH_MASK) > 0
set_op_tinfo2=set_op_tinfo
set_tinfo2=set_tinfo
switch_info_t.regdtyp = switch_info_t.regdtype
def get_tinfo(*args):
import ida_typeinf
if isinstance(args[1], ida_typeinf.tinfo_t): # 6.95: ea, tinfo_t
ea, tif = args
else: # 7.00: tinfo_t, ea
tif, ea = args
return _ida_nalt.get_tinfo(tif, ea)
get_tinfo2=get_tinfo
def get_refinfo(*args):
if isinstance(args[2], refinfo_t): # 6.95: ea, n, refinfo_t
ea, n, ri = args
else: # 7.00: refinfo_t, ea, n
ri, ea, n = args
return _ida_nalt.get_refinfo(ri, ea, n)
get_switch_info_ex=get_switch_info
set_switch_info_ex=set_switch_info
del_switch_info_ex=del_switch_info
switch_info_ex_t_assign=switch_info_t_assign
switch_info_ex_t_create=switch_info_t_create
switch_info_ex_t_destroy=switch_info_t_destroy
switch_info_ex_t_get_custom=switch_info_t_get_custom
switch_info_ex_t_get_defjump=switch_info_t_get_defjump
switch_info_ex_t_get_elbase=switch_info_t_get_elbase
switch_info_ex_t_get_flags=switch_info_t_get_flags
switch_info_ex_t_get_ind_lowcase=switch_info_t_get_ind_lowcase
switch_info_ex_t_get_jcases=switch_info_t_get_jcases
switch_info_ex_t_get_jumps=switch_info_t_get_jumps
switch_info_ex_t_get_ncases=switch_info_t_get_ncases
switch_info_ex_t_get_regdtyp=switch_info_t_get_regdtype
switch_info_ex_t_get_regnum=switch_info_t_get_regnum
switch_info_ex_t_get_startea=switch_info_t_get_startea
switch_info_ex_t_get_values_lowcase=switch_info_t_get_values_lowcase
switch_info_ex_t_set_custom=switch_info_t_set_custom
switch_info_ex_t_set_defjump=switch_info_t_set_defjump
switch_info_ex_t_set_elbase=switch_info_t_set_elbase
switch_info_ex_t_set_flags=switch_info_t_set_flags
switch_info_ex_t_set_ind_lowcase=switch_info_t_set_ind_lowcase
switch_info_ex_t_set_jcases=switch_info_t_set_jcases
switch_info_ex_t_set_jumps=switch_info_t_set_jumps
switch_info_ex_t_set_ncases=switch_info_t_set_ncases
switch_info_ex_t_set_regdtyp=switch_info_t_set_regdtype
switch_info_ex_t_set_regnum=switch_info_t_set_regnum
switch_info_ex_t_set_startea=switch_info_t_set_startea
switch_info_ex_t_set_values_lowcase=switch_info_t_set_values_lowcase
def __switch_info_t_get_flags__(instance):
return switch_info_t.__get_flags__(instance) | SWI_EXTENDED
def __switch_info_t_set_flags__(instance, v):
if instance.bc695_api:
v |= (switch_info_t.__get_flags__(instance) & 0xFFFF0000)
switch_info_t.__set_flags__(instance, v)
switch_info_t.flags = property(__switch_info_t_get_flags__, __switch_info_t_set_flags__)
def __switch_info_t_get_flags2__(instance):
instance.bc695_api = True
return switch_info_t.__get_flags__(instance) >> 16
def __switch_info_t_set_flags2__(instance, v):
instance.bc695_api = True
flags = switch_info_t.__get_flags__(instance)
instance.flags = (flags & 0xFFFF) | (v << 16)
switch_info_t.flags2 = property(__switch_info_t_get_flags2__, __switch_info_t_set_flags2__)
switch_info_ex_t=switch_info_t
#</pycode_BC695(py_nalt)>
| 36.47205
| 91
| 0.731012
|
8a884c1199d0d5aaf00c11bebcc0ccd3757378b9
| 23,464
|
py
|
Python
|
lxbuildenv.py
|
xobs/spibone
|
4666e36a6bd63980be7a3555dca32373e29fdf6c
|
[
"Apache-2.0"
] | 11
|
2019-08-30T13:17:56.000Z
|
2022-03-16T17:39:37.000Z
|
lxbuildenv.py
|
xobs/spibone
|
4666e36a6bd63980be7a3555dca32373e29fdf6c
|
[
"Apache-2.0"
] | 4
|
2019-11-13T22:38:17.000Z
|
2019-11-13T23:20:48.000Z
|
lxbuildenv.py
|
xobs/spibone
|
4666e36a6bd63980be7a3555dca32373e29fdf6c
|
[
"Apache-2.0"
] | 1
|
2021-10-01T01:45:57.000Z
|
2021-10-01T01:45:57.000Z
|
#!/usr/bin/env python3
# This script enables easy, cross-platform building without the need
# to install third-party Python modules.
LXBUILDENV_VERSION = '2019.8.19.1'
import sys
import os
import subprocess
import argparse
DEPS_DIR = "deps"
DEFAULT_DEPS = {
'migen': 'https://github.com/m-labs/migen.git',
'litex': 'https://github.com/enjoy-digital/litex.git',
'litedram': 'https://github.com/enjoy-digital/litedram.git',
'litex_boards': 'https://github.com/litex-hub/litex-boards.git',
'litescope': 'https://github.com/enjoy-digital/litescope.git',
'pyserial': 'https://github.com/pyserial/pyserial.git',
}
OPTIONAL_DEPS = {
'liteeth': 'https://github.com/enjoy-digital/liteeth.git',
'liteusb': 'https://github.com/enjoy-digital/liteusb.git',
'litepcie': 'https://github.com/enjoy-digital/litepcie.git',
'litesdcard': 'https://github.com/enjoy-digital/litesdcard.git',
'liteiclink': 'https://github.com/enjoy-digital/liteiclink.git',
'litevideo': 'https://github.com/enjoy-digital/litevideo.git',
'usb': 'https://github.com/pyusb/pyusb.git',
}
# Obtain the path to this script, plus a trailing separator. This will
# be used later on to construct various environment variables for paths
# to a variety of support directories.
script_path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
def lxprint(s):
print("lxbuildenv: " + s)
# Look through the specified file for known variables to get the dependency list
def read_configuration(filename, args):
import ast
# Always check the Python version
dependencies = {
'python': 1
}
configuration = {
'skip-git': False
}
main_src = ""
try:
with open(sys.argv[0], 'r') as f:
main_src = f.read()
main_ast = ast.parse(main_src, filename=filename)
except:
configuration['dependencies'] = list(dependencies.keys())
return configuration
# Iterate through the top-level nodes looking for variables named
# LX_DEPENDENCIES or LX_DEPENDENCY and get the values that are
# assigned to them.
for node in ast.iter_child_nodes(main_ast):
if isinstance(node, ast.Assign):
value = node.value
for target in node.targets:
if isinstance(target, ast.Name):
if target.id == "LX_DEPENDENCIES" or target.id == "LX_DEPENDENCY":
if isinstance(value, (ast.List, ast.Tuple)):
for elt in value.elts:
if isinstance(elt, ast.Str):
dependencies[elt.s] = 1
elif isinstance(value, ast.Str):
dependencies[value.s] = 1
elif target.id == "LX_CONFIGURATION" or target.id == "LX_CONFIG":
if isinstance(value, (ast.List, ast.Tuple)):
for elt in value.elts:
if isinstance(elt, ast.Str):
configuration[elt.s] = True
elif isinstance(value, ast.Str):
configuration[value.s] = True
# Set up sub-dependencies
if 'riscv' in dependencies:
dependencies['make'] = 1
if args.lx_check_git or (not configuration['skip-git'] and not args.lx_ignore_git):
dependencies['git'] = 1
configuration['dependencies'] = list(dependencies.keys())
return configuration
def get_python_path(script_path, args):
# Python has no concept of a local dependency path, such as the C `-I``
# switch, or the nodejs `node_modules` path, or the rust cargo registry.
# Instead, it relies on an environment variable to append to the search
# path.
# Construct this variable by adding each subdirectory under the `deps/`
# directory to the PYTHONPATH environment variable.
python_path = []
if os.path.isdir(script_path + DEPS_DIR):
for dep in os.listdir(script_path + DEPS_DIR):
dep = script_path + DEPS_DIR + os.path.sep + dep
if os.path.isdir(dep):
python_path.append(dep)
return python_path
def fixup_env(script_path, args):
os.environ["PYTHONPATH"] = os.pathsep.join(get_python_path(script_path, 0))
# Set the "LXBUILDENV_REEXEC" variable to prevent the script from continuously
# reinvoking itself.
os.environ["LXBUILDENV_REEXEC"] = "1"
# Python randomizes the order in which it traverses hashes, and Migen uses
# hashes an awful lot when bringing together modules. As such, the order
# in which Migen generates its output Verilog will change with every run,
# and the addresses for various modules will change.
# Make builds deterministic so that the generated Verilog code won't change
# across runs.
os.environ["PYTHONHASHSEED"] = "1"
# Some Makefiles are invoked as part of the build process, and those Makefiles
# occasionally have calls to Python. Ensure those Makefiles use the same
# interpreter that this script is using.
os.environ["PYTHON"] = sys.executable
# Set the environment variable "V" to 1. This causes Makefiles to print
# the commands they run, which makes them easier to debug.
if "lx_verbose" in args and args.lx_verbose:
os.environ["V"] = "1"
# If the user just wanted to print the environment variables, do that and quit.
if args.lx_print_env:
print("PYTHONPATH={}".format(os.environ["PYTHONPATH"]))
print("PYTHONHASHSEED={}".format(os.environ["PYTHONHASHSEED"]))
print("PYTHON={}".format(sys.executable))
print("LXBUILDENV_REEXEC={}".format(os.environ["LXBUILDENV_REEXEC"]))
sys.exit(0)
# Equivalent to the powershell Get-Command, and kinda like `which`
def get_command(cmd):
if os.name == 'nt':
path_ext = os.environ["PATHEXT"].split(os.pathsep)
else:
path_ext = [""]
for ext in path_ext:
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(path + os.path.sep + cmd + ext):
return path + os.path.sep + cmd + ext
return None
def check_python_version(args):
import platform
# Litex / Migen require Python 3.5 or newer. Ensure we're running
# under a compatible version of Python.
if sys.version_info[:3] < (3, 5):
return (False,
"python: You need Python 3.5+ (version {} found)".format(sys.version_info[:3]))
return (True, "python 3.5+: ok (Python {} found)".format(platform.python_version()))
def check_vivado(args):
vivado_path = get_command("vivado")
if vivado_path == None:
# Look for the default Vivado install directory
if os.name == 'nt':
base_dir = r"C:\Xilinx\Vivado"
else:
base_dir = "/opt/Xilinx/Vivado"
if os.path.exists(base_dir):
for file in os.listdir(base_dir):
bin_dir = base_dir + os.path.sep + file + os.path.sep + "bin"
if os.path.exists(bin_dir + os.path.sep + "vivado"):
os.environ["PATH"] += os.pathsep + bin_dir
vivado_path = bin_dir
break
if vivado_path == None:
return (False, "toolchain not found in your PATH", "download it from https://www.xilinx.com/support/download.html")
return (True, "found at {}".format(vivado_path))
def check_cmd(args, cmd, name=None, fix=None):
if name is None:
name = cmd
path = get_command(cmd)
if path == None:
return (False, name + " not found in your PATH", fix)
return (True, "found at {}".format(path))
def check_make(args):
return check_cmd(args, "make", "GNU Make")
def check_riscv(args):
riscv64 = check_cmd(args, "riscv64-unknown-elf-gcc", "riscv toolchain", "download it from https://www.sifive.com/boards/")
if riscv64[0] == True:
return riscv64
riscv32 = check_cmd(args, "riscv32-unknown-elf-gcc", "riscv toolchain", "download it from https://www.sifive.com/boards/")
if riscv32[0] == True:
return riscv32
return riscv64
def check_yosys(args):
return check_cmd(args, "yosys")
def check_arachne(args):
return check_cmd(args, "arachne-pnr")
def check_git(args):
return check_cmd(args, "git")
def check_icestorm(args):
return check_cmd(args, "icepack")
def check_nextpnr_ice40(args):
return check_cmd(args, "nextpnr-ice40")
def check_nextpnr_ecp5(args):
return check_cmd(args, "nextpnr-ecp5")
dependency_checkers = {
'python': check_python_version,
'vivado': check_vivado,
'make': check_make,
'git': check_git,
'riscv': check_riscv,
'yosys': check_yosys,
'arachne-pnr': check_arachne,
'icestorm': check_icestorm,
'nextpnr-ice40': check_nextpnr_ice40,
'nextpnr-ecp5': check_nextpnr_ecp5,
}
# Validate that the required dependencies (Vivado, compilers, etc.)
# have been installed.
def check_dependencies(args, dependency_list):
dependency_errors = 0
for dependency_name in dependency_list:
if not dependency_name in dependency_checkers:
lxprint('WARNING: Unrecognized dependency "{}"'.format(dependency_name))
continue
result = dependency_checkers[dependency_name](args)
if result[0] == False:
if len(result) > 2:
lxprint('{}: {} -- {}'.format(dependency_name, result[1], result[2]))
else:
lxprint('{}: {}'.format(dependency_name, result[1]))
dependency_errors = dependency_errors + 1
elif args.lx_check_deps or args.lx_verbose:
lxprint('dependency: {}: {}'.format(dependency_name, result[1]))
if dependency_errors > 0:
if args.lx_ignore_deps:
if not args.lx_quiet:
lxprint('{} missing dependencies were found but continuing anyway'.format(dependency_errors))
else:
if not args.lx_quiet:
lxprint('To ignore dependencies, re-run with "--lx-ignore-deps"')
raise SystemExit(str(dependency_errors) +
" missing dependencies were found")
if args.lx_check_deps:
sys.exit(0)
# Return True if the given tree needs to be initialized
def check_module_recursive(root_path, depth, verbose=False, breadcrumbs=[]):
if verbose:
lxprint('git-dep: checking if "{}" requires updating (depth: {})...'.format(root_path, depth))
# If the directory isn't a valid git repo, initialization is required
git_dir_cmd = subprocess.Popen(["git", "rev-parse", "--show-toplevel"],
cwd=root_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(git_stdout, _) = git_dir_cmd.communicate()
if git_dir_cmd.wait() != 0:
if verbose:
lxprint('git-dep: missing git directory, starting update...')
return True
git_dir = git_stdout.decode().strip()
if git_dir in breadcrumbs:
if verbose:
lxprint('git-dep: root path {} is not in git path'.format(root_path))
return True
breadcrumbs.append(git_dir)
if not os.path.exists(git_dir + os.path.sep + '.git'):
if verbose:
lxprint('git-dep: .git not found in "{}"'.format(git_dir))
return True
# If there are no submodules, no initialization needs to be done
if not os.path.isfile(git_dir + os.path.sep + '.gitmodules'):
if verbose:
lxprint('git-dep: .gitmodules not found in "{}", so not updating'.format(git_dir))
return False
# Loop through the gitmodules to check all submodules
gitmodules = open(git_dir + os.path.sep + '.gitmodules', 'r')
for line in gitmodules:
parts = line.split("=", 2)
if parts[0].strip() == "path":
path = parts[1].strip()
if check_module_recursive(git_dir + os.path.sep + path, depth + 1, verbose=verbose, breadcrumbs=breadcrumbs):
return True
return False
# Determine whether we need to invoke "git submodules init --recurse"
def check_submodules(script_path, args):
if check_module_recursive(script_path, 0, verbose=args.lx_verbose):
if not args.lx_quiet:
lxprint("Missing git submodules -- updating")
lxprint("To ignore git issues, re-run with --lx-ignore-git")
subprocess.Popen(["git", "submodule", "update",
"--init", "--recursive"], cwd=script_path).wait()
elif args.lx_verbose:
if not args.lx_quiet:
lxprint("Submodule check: Submodules found")
def lx_git(cmd, *args):
import subprocess
git_cmd = ["git", cmd]
if args is not None:
for arg in args:
git_cmd = git_cmd + [arg]
subprocess.call(git_cmd)
def lx_print_deps():
lxprint('Supported dependencies:')
for dep in dependency_checkers.keys():
lxprint(' {}'.format(dep))
lxprint('To define a dependency, add a variable inside {} at the top level called LX_DEPENDENCIES and assign it a list or tuple.'.format(sys.argv[0]))
lxprint('For example:')
lxprint('LX_DEPENDENCIES = ("riscv", "vivado")')
def lx_main(args):
if args.lx_print_env:
fixup_env(script_path, args)
elif args.lx_print_deps:
lx_print_deps()
elif args.lx_run is not None:
script_name=args.lx_run[0]
config = read_configuration(script_name, args)
fixup_env(script_path, args)
if not config['skip-git']:
check_submodules(script_path, args)
try:
sys.exit(subprocess.Popen(
[sys.executable] + [script_name] + args.lx_run[1:]).wait())
except:
sys.exit(1)
elif args.init:
if args.main is None:
main_name = os.getcwd().split(os.path.sep)[-1] + '.py'
new_main_name = input('lxbuildenv: What would you like your main program to be called? [' + main_name + '] ')
if new_main_name is not None and new_main_name != "":
main_name = new_main_name
else:
main_name = args.main
if not main_name.endswith('.py'):
main_name = main_name + '.py'
if args.no_git:
lxprint("skipping git initialization")
else:
if not os.path.exists(DEPS_DIR):
os.mkdir(DEPS_DIR)
if not os.path.exists(".git"):
lxprint("initializing git repository")
lx_git('init')
else:
lxprint("using existing git repository")
lx_git('add', str(__file__))
for dep_name, dep_url in DEFAULT_DEPS.items():
dest_path = '{}{}{}'.format(DEPS_DIR, '/', dep_name)
if not os.path.exists(dest_path):
lx_git('submodule', 'add', dep_url, dest_path)
lx_git('add', dest_path)
lx_git('submodule', 'update', '--init', '--recursive')
if args.no_bin:
lxprint("skipping bin/ initialization")
elif os.path.exists("bin"):
lxprint("bin/ directory exists -- remove bin/ directory to re-initialize")
else:
bin_tools = {
'mkmscimg': 'litex.soc.software.mkmscimg',
'litex_term': 'litex.tools.litex_term',
'litex_server': 'litex.tools.litex_server',
'litex_sim': 'litex.tools.litex_sim',
'litex_read_verilog': 'litex.tools.litex_read_verilog',
'litex_simple': 'litex.boards.targets.simple',
}
bin_template = """#!/usr/bin/env python3
import sys
import os
# This script lives in the "bin" directory, but uses a helper script in the parent
# directory. Obtain the current path so we can get the absolute parent path.
script_path = os.path.dirname(os.path.realpath(
__file__)) + os.path.sep + os.path.pardir + os.path.sep
sys.path.insert(0, script_path)
import lxbuildenv
"""
lxprint("Creating binaries")
os.mkdir("bin")
for bin_name, python_module in bin_tools.items():
with open('bin' + os.path.sep + bin_name, 'w', newline='\n') as new_bin:
new_bin.write(bin_template)
new_bin.write('from ' + python_module + ' import main\n')
new_bin.write('main()\n')
import stat
os.chmod('bin' + os.path.sep + bin_name, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if not args.no_git:
lx_git('add', '--chmod=+x', 'bin' + os.path.sep + bin_name)
if os.path.exists(main_name):
lxprint("skipping creation of {}: file exists".format(main_name))
else:
lxprint("creating main program {}".format(main_name))
with open(main_name, 'w') as m:
program_template = """#!/usr/bin/env python3
# This variable defines all the external programs that this module
# relies on. lxbuildenv reads this variable in order to ensure
# the build will finish without exiting due to missing third-party
# programs.
LX_DEPENDENCIES = ["riscv", "vivado"]
# Import lxbuildenv to integrate the deps/ directory
import lxbuildenv
# Disable pylint's E1101, which breaks completely on migen
#pylint:disable=E1101
from migen import *
from litex.build.xilinx import VivadoProgrammer, XilinxPlatform
from litex.build.generic_platform import Pins, IOStandard
from litex.soc.integration import SoCSDRAM
from litex.soc.integration.builder import Builder
from litex.soc.integration.soc_core import csr_map_update
_io = [
("clk50", 0, Pins("J19"), IOStandard("LVCMOS33")),
]
class Platform(XilinxPlatform):
def __init__(self, toolchain="vivado", programmer="vivado", part="35"):
part = "xc7a" + part + "t-fgg484-2"
def create_programmer(self):
if self.programmer == "vivado":
return VivadoProgrammer(flash_part="n25q128-3.3v-spi-x1_x2_x4")
else:
raise ValueError("{} programmer is not supported"
.format(self.programmer))
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
class BaseSoC(SoCSDRAM):
csr_peripherals = [
"ddrphy",
# "dna",
"xadc",
"cpu_or_bridge",
]
csr_map_update(SoCSDRAM.csr_map, csr_peripherals)
def __init__(self, platform, **kwargs):
clk_freq = int(100e6)
def main():
platform = Platform()
soc = BaseSoC(platform)
builder = Builder(soc, output_dir="build", csr_csv="test/csr.csv")
vns = builder.build()
soc.do_exit(vns)
if __name__ == "__main__":
main()
"""
m.write(program_template)
if not args.no_git:
lx_git("add", main_name)
else:
return False
return True
# For the main command, parse args and hand it off to main()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Wrap Python code to enable quickstart",
add_help=False)
parser.add_argument(
"-h", "--help", '--lx-help', help="show this help message and exit", action="help"
)
parser.add_argument(
'-i', '--init', '--lx-init', help='initialize a new project', action="store_true"
)
parser.add_argument(
'-m', '--main', '--lx-main', help='name of main project'
)
parser.add_argument(
'--no-bin', '--lx-no-bin', help="don't create a bin/ directory"
)
parser.add_argument(
'--no-git', '--lx-no-git', help="Don't create a git repository"
)
parser.add_argument(
'-e', '--print-env', '--lx-print-env', dest="lx_print_env", help="print environment variable listing for pycharm, vscode, or bash", action="store_true"
)
parser.add_argument(
'-d', '--print-deps', '--lx-print-deps', dest="lx_print_deps", help="print all possible dependencies and then exit", action="store_true"
)
parser.add_argument(
"--lx-verbose", help="increase verboseness of some processes", action="store_true"
)
parser.add_argument(
'-r', '--run', '--lx-run', dest='lx_run', help="run the given script under lxbuildenv", nargs=argparse.REMAINDER
)
args = parser.parse_args()
if not lx_main(args):
parser.print_help()
elif not os.path.isfile(sys.argv[0]):
lxprint("lxbuildenv doesn't operate while in interactive mode")
elif "LXBUILDENV_REEXEC" not in os.environ:
parser = argparse.ArgumentParser(
description="Wrap Python code to enable quickstart",
add_help=False)
parser.add_argument(
"--lx-verbose", help="increase verboseness of some processes", action="store_true"
)
parser.add_argument(
"--lx-quiet", help="decrease verboseness of some processes", action="store_true"
)
parser.add_argument(
"--lx-print-env", help="print environment variable listing for pycharm, vscode, or bash", action="store_true"
)
parser.add_argument(
"--lx-check-deps", help="check build environment for dependencies such as compiler and fpga tools and then exit", action="store_true"
)
parser.add_argument(
"--lx-print-deps", help="print all possible dependencies and then exit", action="store_true"
)
parser.add_argument(
"--lx-help", action="help"
)
parser.add_argument(
"--lx-ignore-deps", help="try building even if dependencies are missing", action="store_true"
)
parser.add_argument(
"--lx-ignore-git", help="don't do a check of the git repo", action="store_true"
)
parser.add_argument(
"--lx-check-git", help="force a git check even if it's otherwise disabled", action="store_true"
)
(args, rest) = parser.parse_known_args()
if not args.lx_quiet:
lxprint("v{} (run {} --lx-help for help)".format(LXBUILDENV_VERSION, sys.argv[0]))
if args.lx_print_deps:
lx_print_deps()
sys.exit(0)
config = read_configuration(sys.argv[0], args)
deps = config['dependencies']
fixup_env(script_path, args)
check_dependencies(args, deps)
if args.lx_check_git:
check_submodules(script_path, args)
elif config['skip-git']:
if not args.lx_quiet:
lxprint('Skipping git configuration because "skip-git" was found in LX_CONFIGURATION')
lxprint('To fetch from git, run {} --lx-check-git'.format(" ".join(sys.argv)))
elif args.lx_ignore_git:
if not args.lx_quiet:
lxprint('Skipping git configuration because "--lx-ignore-git" Was specified')
else:
check_submodules(script_path, args)
try:
sys.exit(subprocess.Popen(
[sys.executable] + [sys.argv[0]] + rest).wait())
except:
sys.exit(1)
else:
# Overwrite the deps directory.
# Because we're running with a predefined PYTHONPATH, you'd think that
# the DEPS_DIR would be first.
# Unfortunately, setuptools causes the sitewide packages to take precedence
# over the PYTHONPATH variable.
# Work around this bug by inserting paths into the first index.
for path in get_python_path(script_path, None):
sys.path.insert(0, path)
| 38.277325
| 159
| 0.622954
|
7d9d9e3b97ff5f1809e7f55be3c5665bf1ca28e0
| 1,869
|
py
|
Python
|
tools/harness-automation/cases_R140/ed_6_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 2,962
|
2016-05-11T15:06:06.000Z
|
2022-03-27T20:06:16.000Z
|
tools/harness-automation/cases_R140/ed_6_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 5,899
|
2016-05-11T19:21:49.000Z
|
2022-03-31T18:17:20.000Z
|
tools/harness-automation/cases_R140/ed_6_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 1,113
|
2016-05-11T15:37:42.000Z
|
2022-03-31T09:37:04.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class ED_6_1_6(HarnessCase):
role = HarnessCase.ROLE_ED
case = '6 1 6'
golden_devices_required = 3
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| 40.630435
| 77
| 0.769395
|
ad2480c80d071a86a442b037bb0843f01bee1126
| 97
|
py
|
Python
|
bench/bench_long_empty_string.py
|
janaknat/markupsafe
|
770b029aac97da76d306a45a6f33998393a0835a
|
[
"BSD-3-Clause"
] | 415
|
2016-03-29T13:11:21.000Z
|
2022-03-30T16:25:27.000Z
|
bench/bench_long_empty_string.py
|
janaknat/markupsafe
|
770b029aac97da76d306a45a6f33998393a0835a
|
[
"BSD-3-Clause"
] | 116
|
2016-03-31T15:09:01.000Z
|
2022-03-18T01:46:10.000Z
|
bench/bench_long_empty_string.py
|
janaknat/markupsafe
|
770b029aac97da76d306a45a6f33998393a0835a
|
[
"BSD-3-Clause"
] | 124
|
2016-03-29T15:55:23.000Z
|
2022-03-24T23:16:04.000Z
|
from markupsafe import escape
def run():
string = "Hello World!" * 1000
escape(string)
| 13.857143
| 34
| 0.659794
|
82cf720edafd07719a19f852b78640086064242a
| 6,180
|
py
|
Python
|
p1_navigation/dqn_agent.py
|
aamonten/deep-reinforcement-learning
|
201969ea1cc829e57a1ccb79123279e6e4c44ca7
|
[
"MIT"
] | 1
|
2021-11-21T19:09:10.000Z
|
2021-11-21T19:09:10.000Z
|
p1_navigation/dqn_agent.py
|
aamonten/deep-reinforcement-learning
|
201969ea1cc829e57a1ccb79123279e6e4c44ca7
|
[
"MIT"
] | null | null | null |
p1_navigation/dqn_agent.py
|
aamonten/deep-reinforcement-learning
|
201969ea1cc829e57a1ccb79123279e6e4c44ca7
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
# max predicted
q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# q targets
q_targets = rewards + (gamma * q_targets_next * (1 - dones))
q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(q_expected, q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
| 39.363057
| 127
| 0.620227
|
602d20dc9145d9f0d1361da1ac3d1ff086e48546
| 1,670
|
py
|
Python
|
checkerboard.py
|
balajirama/checkerboard
|
7882d604e90ab93914a4c8af798e5c4d95894251
|
[
"MIT"
] | null | null | null |
checkerboard.py
|
balajirama/checkerboard
|
7882d604e90ab93914a4c8af798e5c4d95894251
|
[
"MIT"
] | null | null | null |
checkerboard.py
|
balajirama/checkerboard
|
7882d604e90ab93914a4c8af798e5c4d95894251
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect, session
from colour import Color
app = Flask(__name__)
app.secret_key = 'darksecret'
@app.route("/")
def checker_8_by_8():
return render_template("checker.html", num_rows=8, num_cols=8, colors=["salmon", "black"])
@app.route("/<x>")
def checker_x_by_8(x):
try:
return render_template("checker.html", num_rows=int(x), num_cols=8,colors=["salmon", "black"])
except:
return checker_8_by_8()
@app.route("/<x>/<y>")
def checker_x_by_y(x,y):
try:
return render_template("checker.html", num_rows=int(x), num_cols=int(y),colors=["salmon", "black"])
except:
return checker_x_by_8(x)
def is_valid_color(str):
try:
c = Color(str)
return True
except:
return False
@app.route("/<x>/<y>/<color1>")
def checker_with_light_color(x,y,color1):
if color1 == "black":
color1="salmon"
if not is_valid_color(color1):
color1="salmon"
try:
return render_template("checker.html", num_rows=int(x), num_cols=int(y),colors=[color1,"black"])
except:
return checker_x_by_8(x)
@app.route("/<x>/<y>/<color1>/<color2>")
def checker_with_colors(x,y,color1,color2):
if not is_valid_color(color1):
color1="salmon"
if not is_valid_color(color2):
color2="black"
if color1 == color2:
color2 = "black"
if color1 == "black":
color1 = "salmon"
try:
return render_template("checker.html", num_rows=int(x), num_cols=int(y),colors=[color1,color2])
except:
return checker_x_by_8(x)
if __name__ == '__main__':
app.run(debug=True)
| 27.377049
| 107
| 0.634731
|
4604248dbc9e7b356cd820735a1f0eb4a36817fe
| 10,258
|
py
|
Python
|
hsdata/utils.py
|
youfou/hsdata
|
49866116eaeb78516829da59df59a2c0b1141c08
|
[
"Apache-2.0"
] | 115
|
2016-10-20T10:03:48.000Z
|
2022-01-17T04:18:06.000Z
|
hsdata/utils.py
|
youfou/hs_data
|
49866116eaeb78516829da59df59a2c0b1141c08
|
[
"Apache-2.0"
] | 3
|
2016-10-25T14:07:57.000Z
|
2020-03-26T10:35:53.000Z
|
hsdata/utils.py
|
youfou/hs_data
|
49866116eaeb78516829da59df59a2c0b1141c08
|
[
"Apache-2.0"
] | 27
|
2016-10-26T05:31:54.000Z
|
2022-03-17T10:31:52.000Z
|
#!/usr/bin/env python3
# coding: utf-8
"""
一些实用的小功能
"""
import csv
import logging
import os
from collections import Counter
from datetime import datetime, timedelta
from .core import (
MODE_STANDARD,
Decks,
days_ago,
Career, CAREERS, Cards)
from .hearthstats import HearthStatsDecks
from .hsbox import HSBoxDecks
def diff_decks(*decks):
"""
卡组对比
:param decks: 两个或以上的卡组
:return: 返回每个卡组特有的部分
"""
intersection = decks[0].cards & decks[1].cards
for deck in decks[2:]:
intersection &= deck.cards
differs = dict(intersection=intersection)
for deck in decks:
differs[deck] = deck.cards - intersection
return differs
def decks_expired(decks, expired=timedelta(days=1)):
"""
检查 Decks 是否已过期
:param decks: Decks 对象
:param expired: 有效期长度
:return: 若已过期则返回True
"""
if os.path.isfile(decks.json_path):
m_time = os.path.getmtime(decks.json_path)
if datetime.fromtimestamp(m_time) < (datetime.today() - expired):
return True
else:
return False
return True
def cards_value(decks, mode=MODE_STANDARD):
"""
区分职业的单卡价值排名,可在纠结是否合成或拆解时作为参考
decks: 所在卡组数量
games: 所在卡组游戏次数总和
wins: 所在卡组获胜次数总和
win_rate: 所在卡组平均胜率 (wins/games)
*_rank: 在当前职业所有卡牌中的 * 排名
*_rank%: 在当前职业所有卡牌中的 * 排名百分比 (排名/卡牌数)
:param decks: 卡组合集,作为分析数据源
:param mode: 模式
:return: 单卡价值排名数据
"""
if not isinstance(decks, Decks):
raise TypeError('from_decks 须为 Decks 对象')
total = 'total'
ranked_keys = 'decks', 'games', 'wins', 'win_rate'
rpf = '_rank'
ppf = '%'
stats = dict()
stats[total] = dict()
for deck in decks.search(mode=mode):
career = deck.career
if career not in stats:
stats[career] = dict()
for card, count in deck.cards.items():
for k in total, career:
if card not in stats[k]:
stats[k][card] = dict(
decks=0, games=0, wins=0, count=0)
stats[k][card]['decks'] += 1
stats[k][card]['games'] += deck.games or 0
stats[k][card]['wins'] += deck.wins or 0
stats[k][card]['count'] += count
for k in stats:
for c in stats[k]:
try:
stats[k][c]['win_rate'] = stats[k][c]['wins'] / stats[k][c]['games']
except ZeroDivisionError:
stats[k][c]['win_rate'] = None
stats[k][c]['avg_count'] = stats[k][c]['count'] / stats[k][c]['decks']
rkvl = dict()
for k in stats:
if k not in rkvl:
rkvl[k] = dict()
for rk in ranked_keys:
vl = [s[rk] for c, s in stats[k].items()]
vl = list(filter(lambda x: x, vl))
vl.sort(reverse=True)
rkvl[k][rk] = vl
for k in stats:
for c in stats[k]:
for rk in ranked_keys:
if stats[k][c][rk]:
rank = rkvl[k][rk].index(stats[k][c][rk]) + 1
stats[k][c][rk + rpf] = rank
stats[k][c][rk + rpf + ppf] = rank / len(stats[k])
else:
stats[k][c][rk + rpf] = None
stats[k][c][rk + ppf] = None
return stats
def get_all_decks(
hsn_email=None, hsn_password=None,
hsn_min_games=300, hsn_created_after=days_ago(30),
expired=timedelta(days=1)
):
"""
获得获取所有卡组数据
:param hsn_email: Hearthstats 的登陆邮箱
:param hsn_password: Hearthstats 的登陆密码
:param hsn_min_games: Hearthstats 的搜索参数 最少游戏次数
:param hsn_created_after: Hearthstats 最早更新时间
:param expired: 过期时间,若载入的数据是次时间前获得的,则重新获取新数据
:return: 返回 Decks 对象,包含所有数据源的卡组
"""
decks = Decks()
hsb = HSBoxDecks()
if decks_expired(hsb, expired):
hsb.update()
decks.extend(hsb)
if hsn_email and hsn_password:
hsn = HearthStatsDecks()
if decks_expired(hsn, expired):
hsn.login(hsn_email, hsn_password)
hsn.search_online(min_games=hsn_min_games, created_after=hsn_created_after)
decks.extend(hsn)
return decks
class DeckGenerator:
def __init__(
self,
career, decks,
include=None, exclude=None,
mode=MODE_STANDARD):
"""
通过若干包含游戏次数和胜率的卡组合集,找出其中高价值的卡牌,生成新的卡组(.cards)
:param career: 指定职业
:param decks: 来源卡组合集
:param include: 生成的新卡组中将包含这些卡,应为 dict 对象,key为卡牌,value为数量
:param exclude: 生成的新卡组中将排除这些卡,应为 dict 对象,key为卡牌,value为数量
:param mode: 指定模式
"""
self._career = None
self.cards_stats = None
self.top_decks = None
self.career = career
if decks and not isinstance(decks, list):
raise TypeError('decks 应为 list')
self.decks = decks or list()
if include and not isinstance(include, dict):
raise TypeError('include 应为 dict')
self.include = include or Counter()
if exclude and not isinstance(exclude, dict):
raise TypeError('exclude 应为 dict')
self.exclude = exclude or Counter()
self.mode = mode
self.top_decks_total_games = None
self._gen_cards_stats()
@property
def cards(self):
cards = Counter(self.include)
exclude = Counter(self.exclude)
for card, stats in self.cards_stats:
count = 2 if stats['avg_count'] > 1.5 else 1
if cards.get(card, 0) > count:
count = 1 if card.rarity == 'LEGENDARY' else 2
if card in exclude:
count -= exclude.get(card)
if count < 1:
logging.info('排除卡牌: {}'.format(card.name))
continue
games_percentage = stats['total_games'] / self.top_decks_total_games
if card not in self.include and games_percentage < 0.1:
logging.info('排除冷门卡牌: {} (使用率 {:.2%})'.format(
card.name, games_percentage))
continue
cards[card] = count
cards_count = sum(list(cards.values()))
if cards_count == 30:
break
elif cards_count > 30:
cards.subtract([card])
break
total_count = sum(cards.values())
if total_count < 30:
logging.warning('推荐卡牌数量不足,仅为 {} 张!'.format(total_count))
return Counter(dict(filter(lambda x: x[1] > 0, cards.items())))
@property
def career(self):
return self._career
@career.setter
# TODO: 考虑做成公共的
def career(self, value):
if not value:
raise ValueError('career 不可为空')
if isinstance(value, Career):
career = value
elif isinstance(value, str):
career = CAREERS.search(value)
else:
raise TypeError('career 不支持 {} 类型的数值'.format(type(value).__name__))
if career in (CAREERS.get('NEUTRAL'), CAREERS.get('DREAM')):
raise ValueError('不能为该职业: {}'.format(career.name))
if not career:
raise ValueError('未找到该职业: {}'.format(value))
self._career = career
logging.info('设置职业为: {}'.format(career.name))
def __setattr__(self, key, value):
super(DeckGenerator, self).__setattr__(key, value)
if key in ('career', 'decks', 'mode') and self.cards_stats:
self._gen_cards_stats()
def _gen_cards_stats(self):
decks = list(filter(lambda x: x.games, self.decks))
self.decks = Decks(decks)
cards_stats, self.top_decks = self.decks.career_cards_stats(
career=self.career, mode=self.mode, top_win_rate_percentage=0.1)
self.top_decks_total_games = sum(map(lambda x: x.games, self.top_decks))
self.cards_stats = list(cards_stats.items())
self.cards_stats.sort(key=lambda x: x[1]['avg_win_rate'], reverse=True)
def add_include(self, card, count=1):
self.include.update({card: count})
def add_exclude(self, card, count=1):
self.exclude.update({card: count})
def remove_include(self, card, count=1):
self.include.subtract({card: count})
def remove_exclude(self, card, count=1):
self.exclude.subtract({card: count})
def print_cards(cards, return_text_only=False, sep=' ', rarity=True):
"""
但法力值从小到大打印卡牌列表
:param cards: 卡牌 list 或 Counter
:param return_text_only: 选项,仅返回文本
:param sep: 卡牌名称和数量之间的分隔符
"""
if isinstance(cards, list):
cards = Counter(cards)
elif not isinstance(cards, Counter):
raise TypeError('cards 参数应为 list 或 Counter 类型')
cards = list(cards.items())
cards.sort(key=lambda x: x[0].name)
cards.sort(key=lambda x: x[0].cost or 0)
text = list()
for card, count in cards:
line = '{}{}{}'.format(card.name, sep, count)
if rarity and card.rarity not in ('FREE', 'COMMON'):
line = '({}) {}'.format(card.rarity[0], line)
text.append(line)
text = '\n'.join(text)
if return_text_only:
return text
else:
print(text)
def cards_to_csv(save_path, cards=None):
"""
将卡牌保存为 CSV 文件,方便使用 Excel 等工具进行分析
:param cards: cards 对象
:param save_path: 保存路径,例如 cards.csv
"""
if cards is None:
cards = Cards()
# 仅列出相对常用的字段
fields = [
'id', 'name', 'text', 'cost', 'overload', 'type', 'race',
'careers', 'multiClassGroup', 'set', 'collectible',
'rarity', 'dust', 'howToEarn', 'howToEarnGolden',
'health', 'attack', 'durability', 'spellDamage',
]
with open(save_path, 'w') as f:
writer = csv.writer(f)
writer.writerow(fields)
for card in cards:
row = list()
for field in fields:
field = getattr(card, field)
if isinstance(field, (list, tuple, set)):
field = ', '.join(list(map(str, field)))
elif isinstance(field, type(None)):
field = ''
elif not isinstance(field, (str, int, float)):
field = str(field)
row.append(field)
writer.writerow(row)
| 28.181319
| 87
| 0.56629
|
667c019dc0058486f74704eeb9eb88e840a157dc
| 40,260
|
py
|
Python
|
tests/cfngin/providers/aws/test_default.py
|
animaxcg/runway
|
dfe38107126b61a7f297874f235f68a045a5ca09
|
[
"Apache-2.0"
] | null | null | null |
tests/cfngin/providers/aws/test_default.py
|
animaxcg/runway
|
dfe38107126b61a7f297874f235f68a045a5ca09
|
[
"Apache-2.0"
] | null | null | null |
tests/cfngin/providers/aws/test_default.py
|
animaxcg/runway
|
dfe38107126b61a7f297874f235f68a045a5ca09
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for runway.cfngin.providers.aws.default."""
# pylint: disable=too-many-lines
import copy
import os.path
import random
import string
import threading
import unittest
from datetime import datetime
import boto3
from botocore.exceptions import ClientError, UnStubbedResponseError
from botocore.stub import Stubber
from mock import MagicMock, patch
from runway.cfngin import exceptions
from runway.cfngin.actions.diff import DictValue
from runway.cfngin.providers.aws import default
from runway.cfngin.providers.aws.default import (DEFAULT_CAPABILITIES,
MAX_TAIL_RETRIES, Provider,
ask_for_approval,
create_change_set,
generate_cloudformation_args,
output_full_changeset,
requires_replacement,
summarize_params_diff,
wait_till_change_set_complete)
from runway.cfngin.providers.base import Template
from runway.cfngin.session_cache import get_session
from runway.cfngin.stack import Stack
def random_string(length=12):
"""Return a random string of variable length.
Args:
length (int): The # of characters to use in the random string.
Returns:
str: The random string.
"""
return ''.join(
[random.choice(string.ascii_letters) for _ in range(length)])
def generate_describe_stacks_stack(stack_name,
creation_time=None,
stack_status="CREATE_COMPLETE",
tags=None):
"""Generate describe stacks stack."""
tags = tags or []
return {
"StackName": stack_name,
"StackId": stack_name,
"CreationTime": creation_time or datetime(2015, 1, 1),
"StackStatus": stack_status,
"Tags": tags
}
def generate_get_template(file_name='cfn_template.json',
stages_available=None):
"""Generate get template."""
fixture_dir = os.path.join(os.path.dirname(__file__), '../../fixtures')
with open(os.path.join(fixture_dir, file_name), 'r') as _file:
return {
"StagesAvailable": stages_available or ['Original'],
"TemplateBody": _file.read()
}
def generate_stack_object(stack_name, outputs=None):
"""Generate stack object."""
mock_stack = MagicMock(['name', 'fqn', 'blueprint'])
if not outputs:
outputs = {
"FakeOutput": {
"Value": {"Ref": "FakeResource"}
}
}
mock_stack.name = stack_name
mock_stack.fqn = stack_name
mock_stack.blueprint = MagicMock(['get_output_definitions'])
mock_stack.blueprint.get_output_definitions = MagicMock(
return_value=outputs
)
return mock_stack
def generate_resource_change(replacement=True):
"""Generate resource change."""
resource_change = {
"Action": "Modify",
"Details": [],
"LogicalResourceId": "Fake",
"PhysicalResourceId": "arn:aws:fake",
"Replacement": "True" if replacement else "False",
"ResourceType": "AWS::Fake",
"Scope": ["Properties"],
}
return {
"ResourceChange": resource_change,
"Type": "Resource",
}
def generate_change_set_response(status, execution_status="AVAILABLE",
changes=None, status_reason="FAKE"):
"""Generate change set response."""
return {
"ChangeSetName": "string",
"ChangeSetId": "string",
"StackId": "string",
"StackName": "string",
"Description": "string",
"Parameters": [
{
"ParameterKey": "string",
"ParameterValue": "string",
"UsePreviousValue": False
},
],
"CreationTime": datetime(2015, 1, 1),
"ExecutionStatus": execution_status,
"Status": status,
"StatusReason": status_reason,
"NotificationARNs": [
"string",
],
"Capabilities": [
"CAPABILITY_NAMED_IAM",
"CAPABILITY_AUTO_EXPAND"
],
"Tags": [
{
"Key": "string",
"Value": "string"
},
],
"Changes": changes or [],
"NextToken": "string"
}
def generate_change(action="Modify", resource_type="EC2::Instance",
replacement="False", requires_recreation="Never"):
"""Generate a minimal change for a changeset."""
return {
"Type": "Resource",
"ResourceChange": {
"Action": action,
"LogicalResourceId": random_string(),
"PhysicalResourceId": random_string(),
"ResourceType": resource_type,
"Replacement": replacement,
"Scope": ["Properties"],
"Details": [
{
"Target": {
"Attribute": "Properties",
"Name": random_string(),
"RequiresRecreation": requires_recreation
},
"Evaluation": "Static",
"ChangeSource": "ResourceReference",
"CausingEntity": random_string()
},
]
}
}
class TestMethods(unittest.TestCase):
"""Tests for runway.cfngin.providers.aws.default."""
def setUp(self):
"""Run before tests."""
self.cfn = boto3.client("cloudformation")
self.stubber = Stubber(self.cfn)
def test_requires_replacement(self):
"""Test requires replacement."""
changeset = [
generate_resource_change(),
generate_resource_change(replacement=False),
generate_resource_change(),
]
replacement = requires_replacement(changeset)
self.assertEqual(len(replacement), 2)
for resource in replacement:
self.assertEqual(resource["ResourceChange"]["Replacement"], "True")
def test_summarize_params_diff(self):
"""Test summarize params diff."""
unmodified_param = DictValue("ParamA", "new-param-value",
"new-param-value")
modified_param = DictValue("ParamB", "param-b-old-value",
"param-b-new-value-delta")
added_param = DictValue("ParamC", None, "param-c-new-value")
removed_param = DictValue("ParamD", "param-d-old-value", None)
params_diff = [
unmodified_param,
modified_param,
added_param,
removed_param,
]
self.assertEqual(summarize_params_diff([]), "")
self.assertEqual(summarize_params_diff(params_diff), '\n'.join([
"Parameters Added: ParamC",
"Parameters Removed: ParamD",
"Parameters Modified: ParamB\n",
]))
only_modified_params_diff = [modified_param]
self.assertEqual(summarize_params_diff(only_modified_params_diff),
"Parameters Modified: ParamB\n")
only_added_params_diff = [added_param]
self.assertEqual(summarize_params_diff(only_added_params_diff),
"Parameters Added: ParamC\n")
only_removed_params_diff = [removed_param]
self.assertEqual(summarize_params_diff(only_removed_params_diff),
"Parameters Removed: ParamD\n")
def test_ask_for_approval(self):
"""Test ask for approval."""
get_input_path = "runway.cfngin.ui.get_raw_input"
with patch(get_input_path, return_value="y"):
self.assertIsNone(ask_for_approval([], [], None))
for v in ("n", "N", "x", "\n"):
with patch(get_input_path, return_value=v):
with self.assertRaises(exceptions.CancelExecution):
ask_for_approval([], [])
with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input:
with patch(
'runway.cfngin.providers.aws.default.output_full_changeset'
) as mock_full_changeset:
with self.assertRaises(exceptions.CancelExecution):
ask_for_approval([], [], True)
self.assertEqual(mock_full_changeset.call_count, 1)
self.assertEqual(mock_get_input.call_count, 2)
def test_ask_for_approval_with_params_diff(self):
"""Test ask for approval with params diff."""
get_input_path = "runway.cfngin.ui.get_raw_input"
params_diff = [
DictValue('ParamA', None, 'new-param-value'),
DictValue('ParamB', 'param-b-old-value', 'param-b-new-value-delta')
]
with patch(get_input_path, return_value="y"):
self.assertIsNone(ask_for_approval([], params_diff, None))
for v in ("n", "N", "x", "\n"):
with patch(get_input_path, return_value=v):
with self.assertRaises(exceptions.CancelExecution):
ask_for_approval([], params_diff)
with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input:
with patch(
'runway.cfngin.providers.aws.default.output_full_changeset'
) as mock_full_changeset:
with self.assertRaises(exceptions.CancelExecution):
ask_for_approval([], params_diff, True)
self.assertEqual(mock_full_changeset.call_count, 1)
self.assertEqual(mock_get_input.call_count, 2)
@patch("runway.cfngin.providers.aws.default.format_params_diff")
@patch('runway.cfngin.providers.aws.default.yaml.safe_dump')
def test_output_full_changeset(self, mock_safe_dump, patched_format):
"""Test output full changeset."""
get_input_path = "runway.cfngin.ui.get_raw_input"
safe_dump_counter = 0
for v in ['y', 'v', 'Y', 'V']:
with patch(get_input_path, return_value=v) as prompt:
self.assertIsNone(output_full_changeset(full_changeset=[],
params_diff=[],
fqn=None))
self.assertEqual(prompt.call_count, 1)
safe_dump_counter += 1
self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)
self.assertEqual(patched_format.call_count, 0)
for v in ['n', 'N']:
with patch(get_input_path, return_value=v) as prompt:
output_full_changeset(full_changeset=[], params_diff=[],
answer=None, fqn=None)
self.assertEqual(prompt.call_count, 1)
self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)
self.assertEqual(patched_format.call_count, 0)
with self.assertRaises(exceptions.CancelExecution):
output_full_changeset(full_changeset=[], params_diff=[],
answer='x', fqn=None)
output_full_changeset(full_changeset=[], params_diff=['mock'],
answer='y', fqn=None)
safe_dump_counter += 1
self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)
self.assertEqual(patched_format.call_count, 1)
def test_wait_till_change_set_complete_success(self):
"""Test wait till change set complete success."""
self.stubber.add_response(
"describe_change_set",
generate_change_set_response("CREATE_COMPLETE")
)
with self.stubber:
wait_till_change_set_complete(self.cfn, "FAKEID")
self.stubber.add_response(
"describe_change_set",
generate_change_set_response("FAILED")
)
with self.stubber:
wait_till_change_set_complete(self.cfn, "FAKEID")
def test_wait_till_change_set_complete_failed(self):
"""Test wait till change set complete failed."""
# Need 2 responses for try_count
for _ in range(2):
self.stubber.add_response(
"describe_change_set",
generate_change_set_response("CREATE_PENDING")
)
with self.stubber:
with self.assertRaises(exceptions.ChangesetDidNotStabilize):
wait_till_change_set_complete(self.cfn, "FAKEID", try_count=2,
sleep_time=.1)
def test_create_change_set_stack_did_not_change(self):
"""Test create change set stack did not change."""
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
"FAILED", status_reason="Stack didn't contain changes."
)
)
self.stubber.add_response(
"delete_change_set",
{},
expected_params={"ChangeSetName": "CHANGESETID"}
)
with self.stubber:
with self.assertRaises(exceptions.StackDidNotChange):
create_change_set(
cfn_client=self.cfn, fqn="my-fake-stack",
template=Template(url="http://fake.template.url.com/"),
parameters=[], tags=[]
)
def test_create_change_set_unhandled_failed_status(self):
"""Test create change set unhandled failed status."""
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
"FAILED", status_reason="Some random bad thing."
)
)
with self.stubber:
with self.assertRaises(exceptions.UnhandledChangeSetStatus):
create_change_set(
cfn_client=self.cfn, fqn="my-fake-stack",
template=Template(url="http://fake.template.url.com/"),
parameters=[], tags=[]
)
def test_create_change_set_bad_execution_status(self):
"""Test create change set bad execution status."""
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="UNAVAILABLE",
)
)
with self.stubber:
with self.assertRaises(exceptions.UnableToExecuteChangeSet):
create_change_set(
cfn_client=self.cfn, fqn="my-fake-stack",
template=Template(url="http://fake.template.url.com/"),
parameters=[], tags=[]
)
def test_generate_cloudformation_args(self):
"""Test generate cloudformation args."""
stack_name = "mystack"
template_url = "http://fake.s3url.com/blah.json"
template_body = '{"fake_body": "woot"}'
std_args = {
"stack_name": stack_name, "parameters": [], "tags": [],
"template": Template(url=template_url)}
std_return = {"StackName": stack_name, "Parameters": [], "Tags": [],
"Capabilities": DEFAULT_CAPABILITIES,
"TemplateURL": template_url}
result = generate_cloudformation_args(**std_args)
self.assertEqual(result, std_return)
result = generate_cloudformation_args(service_role="FakeRole",
**std_args)
service_role_result = copy.deepcopy(std_return)
service_role_result["RoleARN"] = "FakeRole"
self.assertEqual(result, service_role_result)
result = generate_cloudformation_args(change_set_name="MyChanges",
**std_args)
change_set_result = copy.deepcopy(std_return)
change_set_result["ChangeSetName"] = "MyChanges"
self.assertEqual(result, change_set_result)
# Check stack policy
stack_policy = Template(body="{}")
result = generate_cloudformation_args(stack_policy=stack_policy,
**std_args)
stack_policy_result = copy.deepcopy(std_return)
stack_policy_result["StackPolicyBody"] = "{}"
self.assertEqual(result, stack_policy_result)
# If not TemplateURL is provided, use TemplateBody
std_args["template"] = Template(body=template_body)
template_body_result = copy.deepcopy(std_return)
del template_body_result["TemplateURL"]
template_body_result["TemplateBody"] = template_body
result = generate_cloudformation_args(**std_args)
self.assertEqual(result, template_body_result)
class TestProviderDefaultMode(unittest.TestCase):
"""Tests for runway.cfngin.providers.aws.default default mode."""
def setUp(self):
"""Run before tests."""
region = "us-east-1"
self.session = get_session(region=region)
self.provider = Provider(
self.session, region=region, recreate_failed=False)
self.stubber = Stubber(self.provider.cloudformation)
def test_destroy_stack(self):
"""Test destroy stack."""
stack = {'StackName': 'MockStack'}
self.stubber.add_response('delete_stack', {}, stack)
with self.stubber:
self.assertIsNone(self.provider.destroy_stack(stack))
self.stubber.assert_no_pending_responses()
def test_get_stack_stack_does_not_exist(self):
"""Test get stack stack does not exist."""
stack_name = "MockStack"
self.stubber.add_client_error(
"describe_stacks",
service_error_code="ValidationError",
service_message="Stack with id %s does not exist" % stack_name,
expected_params={"StackName": stack_name}
)
with self.assertRaises(exceptions.StackDoesNotExist):
with self.stubber:
self.provider.get_stack(stack_name)
def test_get_stack_stack_exists(self):
"""Test get stack stack exists."""
stack_name = "MockStack"
stack_response = {
"Stacks": [generate_describe_stacks_stack(stack_name)]
}
self.stubber.add_response(
"describe_stacks",
stack_response,
expected_params={"StackName": stack_name}
)
with self.stubber:
response = self.provider.get_stack(stack_name)
self.assertEqual(response["StackName"], stack_name)
def test_select_destroy_method(self):
"""Test select destroy method."""
for i in [[{'force_interactive': False},
self.provider.noninteractive_destroy_stack],
[{'force_interactive': True},
self.provider.interactive_destroy_stack]]:
self.assertEqual(self.provider.select_destroy_method(**i[0]),
i[1])
def test_select_update_method(self):
"""Test select update method."""
for i in [[{'force_interactive': True,
'force_change_set': False},
self.provider.interactive_update_stack],
[{'force_interactive': False,
'force_change_set': False},
self.provider.default_update_stack],
[{'force_interactive': False,
'force_change_set': True},
self.provider.noninteractive_changeset_update],
[{'force_interactive': True,
'force_change_set': True},
self.provider.interactive_update_stack]]:
self.assertEqual(
self.provider.select_update_method(**i[0]),
i[1]
)
def test_prepare_stack_for_update_completed(self):
"""Test prepare stack for update completed."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="UPDATE_COMPLETE")
with self.stubber:
self.assertTrue(
self.provider.prepare_stack_for_update(stack, []))
def test_prepare_stack_for_update_in_progress(self):
"""Test prepare stack for update in progress."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="UPDATE_IN_PROGRESS")
with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:
with self.stubber:
self.provider.prepare_stack_for_update(stack, [])
self.assertIn('in-progress', str(raised.exception))
def test_prepare_stack_for_update_non_recreatable(self):
"""Test prepare stack for update non recreatable."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="REVIEW_IN_PROGRESS")
with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:
with self.stubber:
self.provider.prepare_stack_for_update(stack, [])
self.assertIn('Unsupported state', str(raised.exception))
def test_prepare_stack_for_update_disallowed(self):
"""Test prepare stack for update disallowed."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="ROLLBACK_COMPLETE")
with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:
with self.stubber:
self.provider.prepare_stack_for_update(stack, [])
self.assertIn('re-creation is disabled', str(raised.exception))
# Ensure we point out to the user how to enable re-creation
self.assertIn('--recreate-failed', str(raised.exception))
def test_prepare_stack_for_update_bad_tags(self):
"""Test prepare stack for update bad tags."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="ROLLBACK_COMPLETE")
self.provider.recreate_failed = True
with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:
with self.stubber:
self.provider.prepare_stack_for_update(
stack,
tags=[{'Key': 'cfngin_namespace', 'Value': 'test'}])
self.assertIn('tags differ', str(raised.exception).lower())
def test_prepare_stack_for_update_recreate(self):
"""Test prepare stack for update recreate."""
stack_name = "MockStack"
stack = generate_describe_stacks_stack(
stack_name, stack_status="ROLLBACK_COMPLETE")
self.stubber.add_response(
"delete_stack",
{},
expected_params={"StackName": stack_name}
)
self.provider.recreate_failed = True
with self.stubber:
self.assertFalse(
self.provider.prepare_stack_for_update(stack, []))
def test_noninteractive_changeset_update_no_stack_policy(self):
"""Test noninteractive changeset update no stack policy."""
stack_name = "MockStack"
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("execute_change_set", {})
with self.stubber:
self.provider.noninteractive_changeset_update(
fqn=stack_name,
template=Template(url="http://fake.template.url.com/"),
old_parameters=[],
parameters=[], stack_policy=None, tags=[],
)
def test_noninteractive_changeset_update_with_stack_policy(self):
"""Test noninteractive changeset update with stack policy."""
stack_name = "MockStack"
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("set_stack_policy", {})
self.stubber.add_response("execute_change_set", {})
with self.stubber:
self.provider.noninteractive_changeset_update(
fqn=stack_name,
template=Template(url="http://fake.template.url.com/"),
old_parameters=[],
parameters=[], stack_policy=Template(body="{}"), tags=[],
)
@patch('runway.cfngin.providers.aws.default.output_full_changeset')
def test_get_stack_changes_update(self, mock_output_full_cs):
"""Test get stack changes update."""
stack_name = "MockStack"
mock_stack = generate_stack_object(stack_name)
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(stack_name)]}
)
self.stubber.add_response(
'get_template',
generate_get_template('cfn_template.yaml')
)
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': stack_name}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("delete_change_set", {})
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(stack_name)]}
)
with self.stubber:
result = self.provider.get_stack_changes(
stack=mock_stack, template=Template(
url="http://fake.template.url.com/"
), parameters=[], tags=[])
mock_output_full_cs.assert_called_with(full_changeset=changes,
params_diff=[],
fqn=stack_name,
answer='y')
expected_outputs = {
'FakeOutput': '<inferred-change: MockStack.FakeOutput={}>'.format(
str({"Ref": "FakeResource"})
)
}
self.assertEqual(self.provider.get_outputs(stack_name),
expected_outputs)
self.assertEqual(result, expected_outputs)
@patch('runway.cfngin.providers.aws.default.output_full_changeset')
def test_get_stack_changes_create(self, mock_output_full_cs):
"""Test get stack changes create."""
stack_name = "MockStack"
mock_stack = generate_stack_object(stack_name)
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(
stack_name, stack_status='REVIEW_IN_PROGRESS'
)]}
)
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': stack_name}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("delete_change_set", {})
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(
stack_name, stack_status='REVIEW_IN_PROGRESS'
)]}
)
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(
stack_name, stack_status='REVIEW_IN_PROGRESS'
)]}
)
self.stubber.add_response("delete_stack", {})
with self.stubber:
self.provider.get_stack_changes(
stack=mock_stack, template=Template(
url="http://fake.template.url.com/"
), parameters=[], tags=[])
mock_output_full_cs.assert_called_with(full_changeset=changes,
params_diff=[],
fqn=stack_name,
answer='y')
def test_tail_stack_retry_on_missing_stack(self):
"""Test tail stack retry on missing stack."""
stack_name = "SlowToCreateStack"
stack = MagicMock(spec=Stack)
stack.fqn = "my-namespace-{}".format(stack_name)
default.TAIL_RETRY_SLEEP = .01
# Ensure the stack never appears before we run out of retries
for i in range(MAX_TAIL_RETRIES + 5):
self.stubber.add_client_error(
"describe_stack_events",
service_error_code="ValidationError",
service_message="Stack [{}] does not exist".format(stack_name),
http_status_code=400,
response_meta={"attempt": i + 1},
)
with self.stubber:
try:
self.provider.tail_stack(stack, threading.Event())
except ClientError as exc:
self.assertEqual(
exc.response["ResponseMetadata"]["attempt"],
MAX_TAIL_RETRIES
)
def test_tail_stack_retry_on_missing_stack_eventual_success(self):
"""Test tail stack retry on missing stack eventual success."""
stack_name = "SlowToCreateStack"
stack = MagicMock(spec=Stack)
stack.fqn = "my-namespace-{}".format(stack_name)
default.TAIL_RETRY_SLEEP = .01
default.GET_EVENTS_SLEEP = .01
received_events = []
def mock_log_func(event):
received_events.append(event)
def valid_event_response(stack, event_id):
return {
"StackEvents": [
{
"StackId": stack.fqn + "12345",
"EventId": event_id,
"StackName": stack.fqn,
"Timestamp": datetime.now()
},
]
}
# Ensure the stack never appears before we run out of retries
for i in range(3):
self.stubber.add_client_error(
"describe_stack_events",
service_error_code="ValidationError",
service_message="Stack [{}] does not exist".format(stack_name),
http_status_code=400,
response_meta={"attempt": i + 1},
)
self.stubber.add_response(
"describe_stack_events",
valid_event_response(stack, "InitialEvents")
)
self.stubber.add_response(
"describe_stack_events",
valid_event_response(stack, "Event1")
)
with self.stubber:
try:
self.provider.tail_stack(stack, threading.Event(),
log_func=mock_log_func)
except UnStubbedResponseError:
# Eventually we run out of responses - could not happen in
# regular execution
# normally this would just be dealt with when the threads were
# shutdown, but doing so here is a little difficult because
# we can't control the `tail_stack` loop
pass
self.assertEqual(received_events[0]["EventId"], "Event1")
class TestProviderInteractiveMode(unittest.TestCase):
"""Tests for runway.cfngin.providers.aws.default interactive mode."""
def setUp(self):
"""Run before tests."""
region = "us-east-1"
self.session = get_session(region=region)
self.provider = Provider(
self.session, interactive=True, recreate_failed=True)
self.stubber = Stubber(self.provider.cloudformation)
@patch('runway.cfngin.ui.get_raw_input')
def test_destroy_stack(self, patched_input):
"""Test destroy stack."""
stack = {'StackName': 'MockStack'}
patched_input.return_value = 'y'
self.stubber.add_response('delete_stack', {}, stack)
with self.stubber:
self.assertIsNone(self.provider.destroy_stack(stack))
self.stubber.assert_no_pending_responses()
@patch('runway.cfngin.ui.get_raw_input')
def test_destroy_stack_canceled(self, patched_input):
"""Test destroy stack canceled."""
stack = {'StackName': 'MockStack'}
patched_input.return_value = 'n'
with self.assertRaises(exceptions.CancelExecution):
self.provider.destroy_stack(stack)
def test_successful_init(self):
"""Test successful init."""
replacements = True
provider = Provider(self.session, interactive=True,
replacements_only=replacements)
self.assertEqual(provider.replacements_only, replacements)
@patch("runway.cfngin.providers.aws.default.ask_for_approval")
def test_update_stack_execute_success_no_stack_policy(self,
patched_approval):
"""Test update stack execute success no stack policy."""
stack_name = "my-fake-stack"
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("execute_change_set", {})
with self.stubber:
self.provider.update_stack(
fqn=stack_name,
template=Template(url="http://fake.template.url.com/"),
old_parameters=[],
parameters=[], tags=[]
)
patched_approval.assert_called_with(full_changeset=changes,
params_diff=[],
include_verbose=True,
fqn=stack_name)
self.assertEqual(patched_approval.call_count, 1)
@patch("runway.cfngin.providers.aws.default.ask_for_approval")
def test_update_stack_execute_success_with_stack_policy(self,
patched_approval):
"""Test update stack execute success with stack policy."""
stack_name = "my-fake-stack"
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': 'STACKID'}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("set_stack_policy", {})
self.stubber.add_response("execute_change_set", {})
with self.stubber:
self.provider.update_stack(
fqn=stack_name,
template=Template(url="http://fake.template.url.com/"),
old_parameters=[],
parameters=[], tags=[],
stack_policy=Template(body="{}"),
)
patched_approval.assert_called_with(full_changeset=changes,
params_diff=[],
include_verbose=True,
fqn=stack_name)
self.assertEqual(patched_approval.call_count, 1)
def test_select_destroy_method(self):
"""Test select destroy method."""
for i in [[{'force_interactive': False},
self.provider.interactive_destroy_stack],
[{'force_interactive': True},
self.provider.interactive_destroy_stack]]:
self.assertEqual(self.provider.select_destroy_method(**i[0]),
i[1])
def test_select_update_method(self):
"""Test select update method."""
for i in [[{'force_interactive': False,
'force_change_set': False},
self.provider.interactive_update_stack],
[{'force_interactive': True,
'force_change_set': False},
self.provider.interactive_update_stack],
[{'force_interactive': False,
'force_change_set': True},
self.provider.interactive_update_stack],
[{'force_interactive': True,
'force_change_set': True},
self.provider.interactive_update_stack]]:
self.assertEqual(
self.provider.select_update_method(**i[0]),
i[1]
)
@patch('runway.cfngin.providers.aws.default.output_full_changeset')
@patch('runway.cfngin.providers.aws.default.output_summary')
def test_get_stack_changes_interactive(self, mock_output_summary,
mock_output_full_cs):
"""Test get stack changes interactive."""
stack_name = "MockStack"
mock_stack = generate_stack_object(stack_name)
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(stack_name)]}
)
self.stubber.add_response(
'get_template',
generate_get_template('cfn_template.yaml')
)
self.stubber.add_response(
"create_change_set",
{'Id': 'CHANGESETID', 'StackId': stack_name}
)
changes = []
changes.append(generate_change())
self.stubber.add_response(
"describe_change_set",
generate_change_set_response(
status="CREATE_COMPLETE", execution_status="AVAILABLE",
changes=changes,
)
)
self.stubber.add_response("delete_change_set", {})
self.stubber.add_response(
'describe_stacks',
{'Stacks': [generate_describe_stacks_stack(stack_name)]}
)
with self.stubber:
self.provider.get_stack_changes(
stack=mock_stack, template=Template(
url="http://fake.template.url.com/"
), parameters=[], tags=[])
mock_output_summary.assert_called_with(stack_name, 'changes',
changes, [],
replacements_only=False)
mock_output_full_cs.assert_called_with(full_changeset=changes,
params_diff=[],
fqn=stack_name)
| 37.909605
| 79
| 0.573249
|
3085559a0626fb866660d4f39f77bd9ee1a99e6f
| 4,001
|
py
|
Python
|
semantic_release/ci_checks.py
|
TheTwitchy/python-semantic-release
|
6048843ea1642c67fcfbbd6fd402f0f3ebdabdb2
|
[
"MIT"
] | null | null | null |
semantic_release/ci_checks.py
|
TheTwitchy/python-semantic-release
|
6048843ea1642c67fcfbbd6fd402f0f3ebdabdb2
|
[
"MIT"
] | null | null | null |
semantic_release/ci_checks.py
|
TheTwitchy/python-semantic-release
|
6048843ea1642c67fcfbbd6fd402f0f3ebdabdb2
|
[
"MIT"
] | null | null | null |
"""CI Checks
"""
import os
from typing import Callable
from semantic_release.errors import CiVerificationError
def checker(func: Callable) -> Callable:
"""
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
"""
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except AssertionError:
raise CiVerificationError(
"The verification check for the environment did not pass."
)
return func_wrapper
@checker
def travis(branch: str):
"""
Performs necessary checks to ensure that the travis build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get("TRAVIS_BRANCH") == branch
assert os.environ.get("TRAVIS_PULL_REQUEST") == "false"
@checker
def semaphore(branch: str):
"""
Performs necessary checks to ensure that the semaphore build is successful,
on the correct branch and not a pull-request.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get("BRANCH_NAME") == branch
assert os.environ.get("PULL_REQUEST_NUMBER") is None
assert os.environ.get("SEMAPHORE_THREAD_RESULT") != "failed"
@checker
def frigg(branch: str):
"""
Performs necessary checks to ensure that the frigg build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get("FRIGG_BUILD_BRANCH") == branch
assert not os.environ.get("FRIGG_PULL_REQUEST")
@checker
def circle(branch: str):
"""
Performs necessary checks to ensure that the circle build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get("CIRCLE_BRANCH") == branch
assert not os.environ.get("CI_PULL_REQUEST")
@checker
def gitlab(branch: str):
"""
Performs necessary checks to ensure that the gitlab build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
# assert os.environ.get("CI_COMMIT_REF_NAME") == branch
# TODO - don't think there's a merge request indicator variable
@checker
def bitbucket(branch: str):
"""
Performs necessary checks to ensure that the bitbucket build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get("BITBUCKET_BRANCH") == branch
assert not os.environ.get("BITBUCKET_PR_ID")
@checker
def jenkins(branch: str):
"""
Performs necessary checks to ensure that the jenkins build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
branch_name = os.environ.get("BRANCH_NAME") or os.environ.get("GIT_BRANCH")
assert os.environ.get("JENKINS_URL") is not None
assert branch_name == branch
assert not os.environ.get("CHANGE_ID") # pull request id
def check(branch: str = "master"):
"""
Detects the current CI environment, if any, and performs necessary
environment checks.
:param branch: The branch that should be the current branch.
"""
if os.environ.get("TRAVIS") == "true":
travis(branch)
elif os.environ.get("SEMAPHORE") == "true":
semaphore(branch)
elif os.environ.get("FRIGG") == "true":
frigg(branch)
elif os.environ.get("CIRCLECI") == "true":
circle(branch)
elif os.environ.get("GITLAB_CI") == "true":
gitlab(branch)
elif os.environ.get("JENKINS_URL") is not None:
jenkins(branch)
elif "BITBUCKET_BUILD_NUMBER" in os.environ:
bitbucket(branch)
| 28.784173
| 88
| 0.683329
|
e29861b76e765132df1542aa3b1385e9bf77ca6f
| 6,800
|
py
|
Python
|
python/hmaps_postgres2es.py
|
jvanulde/opendrr-api
|
ab45369363d33f361ec7a2bc0ddbea7f293545aa
|
[
"MIT"
] | null | null | null |
python/hmaps_postgres2es.py
|
jvanulde/opendrr-api
|
ab45369363d33f361ec7a2bc0ddbea7f293545aa
|
[
"MIT"
] | 6
|
2021-06-03T16:30:22.000Z
|
2022-03-27T15:31:17.000Z
|
python/hmaps_postgres2es.py
|
jvanulde/opendrr-api
|
ab45369363d33f361ec7a2bc0ddbea7f293545aa
|
[
"MIT"
] | null | null | null |
# =================================================================
#
# Authors: Drew Rotheram <drew.rotheram@gmail.com>
#
# =================================================================
import json
import os
import sys
import psycopg2
import configparser
import logging
import argparse
import decimal
from elasticsearch import Elasticsearch
from elasticsearch import helpers
'''
Script to convert hmap indicator views to ElasticSearch Index
Can be run from the command line with mandatory arguments
Run this script with a command like:
python3 hmap_postgres2es.py --province=${PT}
'''
# Main Function
def main():
logFileName = '{}.log'.format(os.path.splitext(sys.argv[0])[0])
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[logging.FileHandler(logFileName),
logging.StreamHandler()])
auth = get_config_params('config.ini')
args = parse_args()
view = "psra_{province}_hmaps".format(**{
'province': args.province.lower()})
limit = 10000
offset = 0
# create index
es = Elasticsearch([auth.get('es', 'es_endpoint')],
http_auth=(auth.get('es', 'es_un'),
auth.get('es', 'es_pw')))
if es.indices.exists(view):
es.indices.delete(view)
# if args.idField == 'sauid':
# id_field = 'Sauid'
# settings = {
# 'settings': {
# 'number_of_shards': 1,
# 'number_of_replicas': 0
# },
# 'mappings': {
# 'properties': {
# 'geometry': {
# 'type': 'geo_shape'
# }
# }
# }
# }
# elif args.idField == 'building':
# id_field = 'AssetID'
settings = {
'settings': {
'number_of_shards': 1,
'number_of_replicas': 0
},
'mappings': {
'properties': {
'coordinates': {
'type': 'geo_point'
},
'geometry': {
'type': 'geo_shape'
}
}
}
}
es.indices.create(index=view, body=settings, request_timeout=90)
while True:
# if args.idField == 'sauid':
# id_field = 'Sauid'
# sqlquerystring = 'SELECT *, ST_AsGeoJSON(geom_poly) \
# FROM results_hmap_{eqScenario}.{view} \
# ORDER BY {view}."Sauid" \
# LIMIT {limit} \
# OFFSET {offset}'.format(**{'eqScenario': args.eqScenario,
# 'view': view,
# 'limit': limit,
# 'offset': offset})
# elif args.idField == 'building':
# id_field = 'AssetID'
sqlquerystring = 'SELECT *, ST_AsGeoJSON(geom) \
FROM results_psra_{province}.{view} \
ORDER BY {view}."geom" \
LIMIT {limit} \
OFFSET {offset}'.format(**{'province': args.province.lower(),
'view': view,
'limit': limit,
'offset': offset})
offset += limit
connection = None
try:
# Connect to the PostGIS database
connection = psycopg2.connect(user=auth.get('rds',
'postgres_un'),
password=auth.get('rds',
'postgres_pw'),
host=auth.get('rds',
'postgres_host'),
port=auth.get('rds',
'postgres_port'),
database=auth.get('rds',
'postgres_db'))
# Query the entire view with the geometries in geojson format
cur = connection.cursor()
cur.execute(sqlquerystring)
rows = cur.fetchall()
if rows:
columns = [name[0] for name in cur.description]
geomIndex = columns.index('st_asgeojson')
feature_collection = {'type': 'FeatureCollection',
'features': []}
# Format table into a geojson format for ES/Kibana consumption
for row in rows:
coordinates = json.loads(row[geomIndex])['coordinates']
feature = {
'type': 'Feature',
'geometry': json.loads(row[geomIndex]),
'coordinates': coordinates,
'properties': {},
}
for index, column in enumerate(columns):
if column != "st_asgeojson":
value = row[index]
feature['properties'][column] = value
feature_collection['features'].append(feature)
geojsonobject = json.dumps(feature_collection,
indent=2,
default=decimal_default)
d = json.loads(geojsonobject)
helpers.bulk(es,
gendata(d, view),
raise_on_error=False)
else:
if(connection):
# cursor.close()
connection.close()
return
except (Exception, psycopg2.Error) as error:
logging.error(error)
def gendata(data, view):
for item in data['features']:
yield {
"_index": view,
"_source": item
}
# Function to handle decimal encoder error
def decimal_default(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
def get_config_params(args):
"""
Parse Input/Output columns from supplied *.ini file
"""
configParseObj = configparser.ConfigParser()
configParseObj.read(args)
return configParseObj
def parse_args():
parser = argparse.ArgumentParser(description="script description")
parser.add_argument("--province",
type=str,
help="Two letters only",
required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| 34.517766
| 78
| 0.446765
|
78c789bc439b97e169369d5c3ef8e8a44f77c192
| 1,798
|
py
|
Python
|
keybase_piazza_bot/keybase.py
|
andrew-fuchs/keybase-piazza-bot
|
7559120896c602ea398b7bb43640e8c273c03fe5
|
[
"MIT"
] | null | null | null |
keybase_piazza_bot/keybase.py
|
andrew-fuchs/keybase-piazza-bot
|
7559120896c602ea398b7bb43640e8c273c03fe5
|
[
"MIT"
] | null | null | null |
keybase_piazza_bot/keybase.py
|
andrew-fuchs/keybase-piazza-bot
|
7559120896c602ea398b7bb43640e8c273c03fe5
|
[
"MIT"
] | null | null | null |
from pykeybasebot import Bot, KbEvent
from pykeybasebot.types import chat1
async def chat_reply(bot: Bot, channel: chat1.ChatChannel,
message_id: chat1.MessageID, message: str
) -> chat1.SendRes:
await bot.ensure_initialized()
# TODO do we need to handle `chat1.ConvIDStr` in `channel`?
result = await bot.chat.execute({
"method": "send",
"params": {
"options": {
"channel": {
"name": channel.name
},
"message": {
"body": message,
},
"reply_to": message_id,
},
},
})
return chat1.SendRes.from_dict(result)
async def create_message(bot: Bot, channel: chat1.ChatChannel):
send_result = await bot.chat.send(channel, '[Piazza Post Notification Text Goes Here]')
await bot.chat.react(channel, send_result.message_id, ':speech_baloon:')
def kb_quote(in_str: str):
return in_str.replace('\n', '\n>')
def escape_chat_chars(escape_str: str):
# replace formatting chars with escaped versions
# # TODO I doubt this is comprehensive
# # maybe escape anything not in [a-zA-Z0-9]?
# escape_str = escape_str.replace('*', '\\*')
# escape_str = escape_str.replace('_', '\\_')
# escape_str = escape_str.replace('`', '\\`')
# escape_str = escape_str.replace('>', '\\>')
# escape_str = escape_str.replace('@', '\\@')
# escape_str = escape_str.replace('#', '\\#')
# return escape_str
# escape anything that is not a letter ([a-zA-Z]), number ([0-9]), or whitespace
out_str = ''
for ch in escape_str:
if ch.isalnum() or ch.isspace():
out_str += ch
else:
out_str += '\\' + ch
return out_str
| 32.107143
| 91
| 0.573415
|
eedb27a6bbc1c2a1fbad4890111e7f45aa23c2bb
| 9,650
|
py
|
Python
|
brian2/tests/features/speed.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | 1
|
2021-06-10T15:28:51.000Z
|
2021-06-10T15:28:51.000Z
|
brian2/tests/features/speed.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/tests/features/speed.py
|
awillats/brian2
|
e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Check the speed of different Brian 2 configurations
'''
from brian2 import *
from brian2.tests.features import SpeedTest
__all__ = ['LinearNeuronsOnly',
'HHNeuronsOnly',
'CUBAFixedConnectivity',
'COBAHHFixedConnectivity',
'VerySparseMediumRateSynapsesOnly',
'SparseMediumRateSynapsesOnly',
'DenseMediumRateSynapsesOnly',
'SparseLowRateSynapsesOnly',
'SparseHighRateSynapsesOnly',
'STDP'
]
class LinearNeuronsOnly(SpeedTest):
category = "Neurons only"
name = "Linear 1D"
tags = ["Neurons"]
n_range = [10, 100, 1000, 10000, 100000, 1000000]
n_label = 'Num neurons'
# configuration options
duration = 10 * second
def run(self):
self.tau = tau = 1 * second
self.v_init = linspace(0.1, 1, self.n)
G = self.G = NeuronGroup(self.n, 'dv/dt=-v/tau:1')
self.G.v = self.v_init
self.timed_run(self.duration)
class HHNeuronsOnly(SpeedTest):
category = "Neurons only"
name = "Hodgkin-Huxley"
tags = ["Neurons"]
n_range = [10, 100, 1000, 10000, 100000]
n_label = 'Num neurons'
# configuration options
duration = 1 * second
def run(self):
num_neurons = self.n
# Parameters
area = 20000 * umetre**2
Cm = 1 * ufarad * cm**-2 * area
gl = 5e-5 * siemens * cm**-2 * area
El = -65 * mV
EK = -90 * mV
ENa = 50 * mV
g_na = 100 * msiemens * cm**-2 * area
g_kd = 30 * msiemens * cm**-2 * area
VT = -63 * mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v) - g_na*(m*m*m)*h*(v-ENa) - g_kd*(n*n*n*n)*(v-EK) + I)/Cm : volt
dm/dt = 0.32*(mV**-1)*(13.*mV-v+VT)/
(exp((13.*mV-v+VT)/(4.*mV))-1.)/ms*(1-m)-0.28*(mV**-1)*(v-VT-40.*mV)/
(exp((v-VT-40.*mV)/(5.*mV))-1.)/ms*m : 1
dn/dt = 0.032*(mV**-1)*(15.*mV-v+VT)/
(exp((15.*mV-v+VT)/(5.*mV))-1.)/ms*(1.-n)-.5*exp((10.*mV-v+VT)/(40.*mV))/ms*n : 1
dh/dt = 0.128*exp((17.*mV-v+VT)/(18.*mV))/ms*(1.-h)-4./(1+exp((40.*mV-v+VT)/(5.*mV)))/ms*h : 1
I : amp
''')
# Threshold and refractoriness are only used for spike counting
group = NeuronGroup(num_neurons, eqs,
threshold='v > -40*mV',
refractory='v > -40*mV')
group.v = El
group.I = '0.7*nA * i / num_neurons'
self.timed_run(self.duration)
class CUBAFixedConnectivity(SpeedTest):
category = "Full examples"
name = "CUBA fixed connectivity"
tags = ["Neurons", "Synapses", "SpikeMonitor"]
n_range = [10, 100, 1000, 10000, 100000]
n_label = 'Num neurons'
# configuration options
duration = 1 * second
def run(self):
N = self.n
Ne = int(.8 * N)
taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
eqs = '''
dv/dt = (ge+gi-(v-El))/taum : volt (unless refractory)
dge/dt = -ge/taue : volt (unless refractory)
dgi/dt = -gi/taui : volt (unless refractory)
'''
P = NeuronGroup(
N, eqs, threshold='v>Vt', reset='v = Vr', refractory=5 * ms)
P.v = 'Vr + rand() * (Vt - Vr)'
P.ge = 0 * mV
P.gi = 0 * mV
we = (60 * 0.27 / 10) * mV # excitatory synaptic weight (voltage)
wi = (-20 * 4.5 / 10) * mV # inhibitory synaptic weight
Ce = Synapses(P, P, on_pre='ge += we')
Ci = Synapses(P, P, on_pre='gi += wi')
Ce.connect('i<Ne', p=80. / N)
Ci.connect('i>=Ne', p=80. / N)
s_mon = SpikeMonitor(P)
self.timed_run(self.duration)
class COBAHHFixedConnectivity(SpeedTest):
category = "Full examples"
name = "COBAHH fixed connectivity"
tags = ["Neurons", "Synapses", "SpikeMonitor"]
n_range = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000]
n_label = 'Num neurons'
# configuration options
duration = 1 * second
def run(self):
N = self.n
area = 20000 * umetre ** 2
Cm = (1 * ufarad * cm ** -2) * area
gl = (5e-5 * siemens * cm ** -2) * area
El = -60 * mV
EK = -90 * mV
ENa = 50 * mV
g_na = (100 * msiemens * cm ** -2) * area
g_kd = (30 * msiemens * cm ** -2) * area
VT = -63 * mV
# Time constants
taue = 5 * ms
taui = 10 * ms
# Reversal potentials
Ee = 0 * mV
Ei = -80 * mV
we = 6 * nS # excitatory synaptic weight
wi = 67 * nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
dge/dt = -ge*(1./taue) : siemens
dgi/dt = -gi*(1./taui) : siemens
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
P = NeuronGroup(N, model=eqs, threshold='v>-20*mV',
refractory=3 * ms,
method='exponential_euler')
P.v = 'El + (randn() * 5 - 5)*mV'
P.ge = '(randn() * 1.5 + 4) * 10.*nS'
P.gi = '(randn() * 12 + 20) * 10.*nS'
Pe = P[:int(0.8*N)]
Pi = P[int(0.8 * N):]
Ce = Synapses(Pe, P, on_pre='ge+=we')
Ci = Synapses(Pi, P, on_pre='gi+=wi')
Ce.connect(p=80.0/N)
Ci.connect(p=80.0/N)
s_mon = SpikeMonitor(P)
self.timed_run(self.duration)
class STDP(SpeedTest):
category = "Full examples"
name = "STDP with Poisson input"
tags = ["Neurons", "Synapses", "SpikeMonitor", "PoissonGroup"]
n_range = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000]
n_label = 'Num neurons'
# configuration options
duration = 1 * second
def run(self):
N = self.n
taum = 10 * ms
taupre = 20 * ms
taupost = taupre
Ee = 0 * mV
vt = -54 * mV
vr = -60 * mV
El = -74 * mV
taue = 5 * ms
F = 15 * Hz
gmax = .01
dApre = .01
dApost = -dApre * taupre / taupost * 1.05
dApost *= gmax
dApre *= gmax
eqs_neurons = '''
dv/dt = (ge * (Ee-vr) + El - v) / taum : volt
dge/dt = -ge / taue : 1
'''
poisson_input = PoissonGroup(N, rates=F)
neurons = NeuronGroup(1, eqs_neurons, threshold='v>vt', reset='v = vr',
method='exact')
S = Synapses(poisson_input, neurons,
'''w : 1
dApre/dt = -Apre / taupre : 1 (event-driven)
dApost/dt = -Apost / taupost : 1 (event-driven)''',
on_pre='''ge += w
Apre += dApre
w = clip(w + Apost, 0, gmax)''',
on_post='''Apost += dApost
w = clip(w + Apre, 0, gmax)''',
)
S.connect()
S.w = 'rand() * gmax'
s_mon = SpikeMonitor(poisson_input)
self.timed_run(self.duration)
class SynapsesOnly(object):
category = "Synapses only"
tags = ["Synapses"]
n_range = [10, 100, 1000, 10000]
n_label = 'Num neurons'
duration = 1 * second
# memory usage will be approximately p**2*rate*dt*N**2*bytes_per_synapse/1024**3 GB
# for CPU, bytes_per_synapse appears to be around 40?
def run(self):
N = self.n
rate = self.rate
M = int(rate * N * defaultclock.dt)
if M <= 0:
M = 1
G = NeuronGroup(M, 'v:1', threshold='True')
H = NeuronGroup(N, 'w:1')
S = Synapses(G, H, on_pre='w += 1.0')
S.connect(True, p=self.p)
#M = SpikeMonitor(G)
self.timed_run(self.duration,
# report='text',
)
#plot(M.t/ms, M.i, ',k')
class VerySparseMediumRateSynapsesOnly(SynapsesOnly, SpeedTest):
name = "Very sparse, medium rate (10s duration)"
rate = 10 * Hz
p = 0.02
n_range = [10, 100, 1000, 10000, 100000]
duration = 10 * second
class SparseMediumRateSynapsesOnly(SynapsesOnly, SpeedTest):
name = "Sparse, medium rate (1s duration)"
rate = 10 * Hz
p = 0.2
n_range = [10, 100, 1000, 10000, 100000]
class DenseMediumRateSynapsesOnly(SynapsesOnly, SpeedTest):
name = "Dense, medium rate (1s duration)"
rate = 10 * Hz
p = 1.0
n_range = [10, 100, 1000, 10000, 40000]
class SparseLowRateSynapsesOnly(SynapsesOnly, SpeedTest):
name = "Sparse, low rate (10s duration)"
rate = 1 * Hz
p = 0.2
n_range = [10, 100, 1000, 10000, 100000]
duration = 10 * second
class SparseHighRateSynapsesOnly(SynapsesOnly, SpeedTest):
name = "Sparse, high rate (1s duration)"
rate = 100 * Hz
p = 0.2
n_range = [10, 100, 1000, 10000]
if __name__ == '__main__':
#prefs.codegen.target = 'numpy'
VerySparseMediumRateSynapsesOnly(100000).run()
show()
| 29.968944
| 102
| 0.503731
|
389de58652834e7d67dd37cfade26eb8869cde8c
| 15,954
|
py
|
Python
|
Colorizing-with-GANs-master/src/models.py
|
adityajain28/Colorizing-with-GANs
|
7657da967ee29e53a8344b554b3d817bea215e69
|
[
"Apache-2.0"
] | null | null | null |
Colorizing-with-GANs-master/src/models.py
|
adityajain28/Colorizing-with-GANs
|
7657da967ee29e53a8344b554b3d817bea215e69
|
[
"Apache-2.0"
] | null | null | null |
Colorizing-with-GANs-master/src/models.py
|
adityajain28/Colorizing-with-GANs
|
7657da967ee29e53a8344b554b3d817bea215e69
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from abc import abstractmethod
from .networks import Generator, Discriminator
from .dataset import Places365Dataset, Cifar10Dataset
from .ops import pixelwise_accuracy, preprocess, postprocess
from .ops import COLORSPACE_RGB, COLORSPACE_LAB
from .utils import stitch_images, turingtest, imshow, visualize
import matplotlib.pyplot as plt
class BaseModel:
def __init__(self, sess, options):
self.sess = sess
self.options = options
self.name = options.name
self.samples_dir = os.path.join(options.checkpoints_path, 'samples')
self.test_log_file = os.path.join(options.checkpoints_path, 'log_test.dat')
self.train_log_file = os.path.join(options.checkpoints_path, 'log_train.dat')
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.dataset_train = self.create_dataset(True)
self.dataset_test = self.create_dataset(False)
self.sample_generator = self.dataset_test.generator(options.sample_size, True)
self.iteration = 0
self.epoch = 0
self.is_built = False
def train(self):
self.build()
total = len(self.dataset_train)
for epoch in range(self.options.epochs):
lr_rate = self.sess.run(self.learning_rate)
print('Training epoch: %d' % (epoch + 1) + " - learning rate: " + str(lr_rate))
self.epoch = epoch + 1
self.iteration = 0
generator = self.dataset_train.generator(self.options.batch_size)
progbar = keras.utils.Progbar(total, stateful_metrics=['epoch', 'iteration', 'step'])
for input_rgb in generator:
feed_dic = {self.input_rgb: input_rgb}
self.iteration = self.iteration + 1
self.sess.run([self.dis_train], feed_dict=feed_dic)
self.sess.run([self.gen_train, self.accuracy], feed_dict=feed_dic)
self.sess.run([self.gen_train, self.accuracy], feed_dict=feed_dic)
lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step = self.eval_outputs(feed_dic=feed_dic)
progbar.add(len(input_rgb), values=[
("epoch", epoch + 1),
("iteration", self.iteration),
("step", step),
("D loss", lossD),
("D fake", lossD_fake),
("D real", lossD_real),
("G loss", lossG),
("G L1", lossG_l1),
("G gan", lossG_gan),
("accuracy", acc)
])
# # log model at checkpoints
# if self.options.log and step % self.options.log_interval == 0:
# with open(self.train_log_file, 'a') as f:
# f.write('%d %d %f %f %f %f %f %f %f\n' % (self.epoch, step, lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc))
# if self.options.visualize:
# visualize(self.train_log_file, self.test_log_file, self.options.visualize_window, self.name)
# # sample model at checkpoints
# if self.options.sample and step % self.options.sample_interval == 0:
# self.sample(show=False)
# # evaluate model at checkpoints
# if self.options.validate and self.options.validate_interval > 0 and step % self.options.validate_interval == 0:
# self.evaluate()
# # save model at checkpoints
# if self.options.save and step % self.options.save_interval == 0:
# self.save()
if self.options.save and step % self.options.save_interval == 0:
self.save()
if self.options.sample and step % self.options.sample_interval == 0:
self.sample(show=False)
if self.options.log and step % self.options.log_interval == 0:
print("Logging")
with open(self.train_log_file, 'a') as f:
f.write('%d %d %f %f %f %f %f %f %f\n' % (self.epoch, step, lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc))
if self.options.visualize:
visualize(self.train_log_file, self.test_log_file, self.options.visualize_window, self.name)
#if self.options.validate:
# self.evaluate()
def evaluate(self):
print('\n\nEvaluating epoch: %d' % self.epoch)
test_total = len(self.dataset_test)
test_generator = self.dataset_test.generator(self.options.batch_size)
progbar = keras.utils.Progbar(test_total)
result = []
for input_rgb in test_generator:
feed_dic = {self.input_rgb: input_rgb}
self.sess.run([self.dis_loss, self.gen_loss, self.accuracy], feed_dict=feed_dic)
# returns (D loss, D_fake loss, D_real loss, G loss, G_L1 loss, G_gan loss, accuracy, step)
result.append(self.eval_outputs(feed_dic=feed_dic))
progbar.add(len(input_rgb))
result = np.mean(np.array(result), axis=0)
print('Results: D loss: %f - D fake: %f - D real: %f - G loss: %f - G L1: %f - G gan: %f - accuracy: %f'
% (result[0], result[1], result[2], result[3], result[4], result[5], result[6]))
if self.options.log:
with open(self.test_log_file, 'a') as f:
# (epoch, step, lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc)
f.write('%d %d %f %f %f %f %f %f %f\n' % (self.epoch, result[7], result[0], result[1], result[2], result[3], result[4], result[5], result[6]))
print('\n')
def sample(self, show=True):
self.build()
input_rgb = next(self.sample_generator)
feed_dic = {self.input_rgb: input_rgb}
step, rate = self.sess.run([self.global_step, self.learning_rate])
fake_image, input_gray = self.sess.run([self.sampler, self.input_gray], feed_dict=feed_dic)
fake_image = postprocess(tf.convert_to_tensor(fake_image), colorspace_in=self.options.color_space, colorspace_out=COLORSPACE_RGB)
img = stitch_images(input_gray, input_rgb, fake_image.eval())
if not os.path.exists(self.samples_dir):
os.makedirs(self.samples_dir)
sample = self.options.dataset + "_" + str(step).zfill(5) + ".png"
if show:
imshow(np.array(img), self.name)
else:
print('\nsaving sample ' + sample + ' - learning rate: ' + str(rate))
img.save(os.path.join(self.samples_dir, sample))
def turing_test(self):
batch_size = self.options.batch_size
gen = self.dataset_test.generator(batch_size, True)
count = 0
score = 0
fig = plt.figure()
while count < self.options.test_size:
input_rgb = next(gen)
feed_dic = {self.input_rgb: input_rgb}
fake_image = self.sess.run(self.sampler, feed_dict=feed_dic)
fake_image = postprocess(tf.convert_to_tensor(fake_image), colorspace_in=self.options.color_space, colorspace_out=COLORSPACE_RGB)
for i in range(np.min([batch_size, self.options.test_size - count])):
res = turingtest( self.options.dataset_path, count, input_rgb[i], fake_image.eval()[i], self.options.test_delay)
count += 1
score += res
print('success: %d - fail: %d - rate: %f' % (score, count - score, (count - score) / count))
def build(self):
if self.is_built:
return
self.is_built = True
gen_factory = self.create_generator()
dis_factory = self.create_discriminator()
smoothing = 0.9 if self.options.label_smoothing else 1
seed = seed = self.options.seed
kernel = self.options.kernel_size
self.input_rgb = tf.placeholder(tf.float32, shape=(None, None, None, 3), name='input_rgb')
self.input_gray = tf.image.rgb_to_grayscale(self.input_rgb)
self.input_color = preprocess(self.input_rgb, colorspace_in=COLORSPACE_RGB, colorspace_out=self.options.color_space)
gen = gen_factory.create(self.input_gray, kernel, seed)
dis_real = dis_factory.create(tf.concat([self.input_gray, self.input_color], 3), kernel, seed)
dis_fake = dis_factory.create(tf.concat([self.input_gray, gen], 3), kernel, seed, reuse_variables=True)
gen_ce = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_fake, labels=tf.ones_like(dis_fake))
dis_real_ce = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_real, labels=tf.ones_like(dis_real) * smoothing)
dis_fake_ce = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_fake, labels=tf.zeros_like(dis_fake))
self.dis_loss_real = tf.reduce_mean(dis_real_ce)
self.dis_loss_fake = tf.reduce_mean(dis_fake_ce)
self.dis_loss = tf.reduce_mean(dis_real_ce + dis_fake_ce)
self.gen_loss_gan = tf.reduce_mean(gen_ce)
self.gen_loss_l1 = tf.reduce_mean(tf.abs(self.input_color - gen)) * self.options.l1_weight
self.gen_loss = self.gen_loss_gan + self.gen_loss_l1
self.sampler = gen_factory.create(self.input_gray, kernel, seed, reuse_variables=True)
self.accuracy = pixelwise_accuracy(self.input_color, gen, self.options.color_space, self.options.acc_thresh)
self.learning_rate = tf.constant(self.options.lr)
# learning rate decay
if self.options.lr_decay_rate > 0:
self.learning_rate = tf.maximum(1e-8, tf.train.exponential_decay(
learning_rate=self.options.lr,
global_step=self.global_step,
decay_steps=self.options.lr_decay_steps,
decay_rate=self.options.lr_decay_rate))
# generator optimizaer
self.gen_train = tf.train.AdamOptimizer(
learning_rate=self.learning_rate,
beta1=self.options.beta1
).minimize(self.gen_loss, var_list=gen_factory.var_list)
# discriminator optimizaer
self.dis_train = tf.train.AdamOptimizer(
learning_rate=self.learning_rate,
beta1=self.options.beta1
).minimize(self.dis_loss, var_list=dis_factory.var_list, global_step=self.global_step)
self.saver = tf.train.Saver()
def load(self):
ckpt = tf.train.get_checkpoint_state(self.options.checkpoints_path)
if ckpt is not None:
print('loading model...\n')
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.options.checkpoints_path, ckpt_name))
return True
return False
def save(self):
print('saving model...\n')
self.saver.save(self.sess, os.path.join(self.options.checkpoints_path, 'CGAN_' + self.options.dataset), write_meta_graph=False)
def eval_outputs(self, feed_dic):
'''
evaluates the loss and accuracy
returns (D loss, D_fake loss, D_real loss, G loss, G_L1 loss, G_gan loss, accuracy, step)
'''
lossD_fake = self.dis_loss_fake.eval(feed_dict=feed_dic)
lossD_real = self.dis_loss_real.eval(feed_dict=feed_dic)
lossD = self.dis_loss.eval(feed_dict=feed_dic)
lossG_l1 = self.gen_loss_l1.eval(feed_dict=feed_dic)
lossG_gan = self.gen_loss_gan.eval(feed_dict=feed_dic)
lossG = lossG_l1 + lossG_gan
acc = self.accuracy.eval(feed_dict=feed_dic)
step = self.sess.run(self.global_step)
return lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step
@abstractmethod
def create_generator(self):
raise NotImplementedError
@abstractmethod
def create_discriminator(self):
raise NotImplementedError
@abstractmethod
def create_dataset(self, training):
raise NotImplementedError
class Cifar10Model(BaseModel):
def __init__(self, sess, options):
super(Cifar10Model, self).__init__(sess, options)
def create_generator(self):
kernels_gen_encoder = [
(64, 1, 0), # [batch, 32, 32, ch] => [batch, 32, 32, 64]
(128, 2, 0), # [batch, 32, 32, 64] => [batch, 16, 16, 128]
(256, 2, 0), # [batch, 16, 16, 128] => [batch, 8, 8, 256]
(512, 2, 0), # [batch, 8, 8, 256] => [batch, 4, 4, 512]
(512, 2, 0), # [batch, 4, 4, 512] => [batch, 2, 2, 512]
]
kernels_gen_decoder = [
(512, 2, 0.5), # [batch, 2, 2, 512] => [batch, 4, 4, 512]
(256, 2, 0.5), # [batch, 4, 4, 512] => [batch, 8, 8, 256]
(128, 2, 0), # [batch, 8, 8, 256] => [batch, 16, 16, 128]
(64, 2, 0), # [batch, 16, 16, 128] => [batch, 32, 32, 64]
]
return Generator('gen', kernels_gen_encoder, kernels_gen_decoder)
def create_discriminator(self):
kernels_dis = [
(64, 2, 0), # [batch, 32, 32, ch] => [batch, 16, 16, 64]
(128, 2, 0), # [batch, 16, 16, 64] => [batch, 8, 8, 128]
(256, 2, 0), # [batch, 8, 8, 128] => [batch, 4, 4, 256]
(512, 1, 0), # [batch, 4, 4, 256] => [batch, 4, 4, 512]
]
return Discriminator('dis', kernels_dis)
def create_dataset(self, training=True):
return Cifar10Dataset(
path=self.options.dataset_path,
training=training,
augment=self.options.augment)
class Places365Model(BaseModel):
def __init__(self, sess, options):
super(Places365Model, self).__init__(sess, options)
def create_generator(self):
kernels_gen_encoder = [
(64, 1, 0), # [batch, 256, 256, ch] => [batch, 256, 256, 64]
(64, 2, 0), # [batch, 256, 256, 64] => [batch, 128, 128, 64]
(128, 2, 0), # [batch, 128, 128, 64] => [batch, 64, 64, 128]
(256, 2, 0), # [batch, 64, 64, 128] => [batch, 32, 32, 256]
(512, 2, 0), # [batch, 32, 32, 256] => [batch, 16, 16, 512]
(512, 2, 0), # [batch, 16, 16, 512] => [batch, 8, 8, 512]
(512, 2, 0), # [batch, 8, 8, 512] => [batch, 4, 4, 512]
(512, 2, 0) # [batch, 4, 4, 512] => [batch, 2, 2, 512]
]
kernels_gen_decoder = [
(512, 2, 0.5), # [batch, 2, 2, 512] => [batch, 4, 4, 512]
(512, 2, 0.5), # [batch, 4, 4, 512] => [batch, 8, 8, 512]
(512, 2, 0.5), # [batch, 8, 8, 512] => [batch, 16, 16, 512]
(256, 2, 0), # [batch, 16, 16, 512] => [batch, 32, 32, 256]
(128, 2, 0), # [batch, 32, 32, 256] => [batch, 64, 64, 128]
(64, 2, 0), # [batch, 64, 64, 128] => [batch, 128, 128, 64]
(64, 2, 0) # [batch, 128, 128, 64] => [batch, 256, 256, 64]
]
return Generator('gen', kernels_gen_encoder, kernels_gen_decoder)
def create_discriminator(self):
kernels_dis = [
(64, 2, 0), # [batch, 256, 256, ch] => [batch, 128, 128, 64]
(128, 2, 0), # [batch, 128, 128, 64] => [batch, 64, 64, 128]
(256, 2, 0), # [batch, 64, 64, 128] => [batch, 32, 32, 256]
(512, 2, 0), # [batch, 32, 32, 256] => [batch, 16, 16, 512]
(512, 2, 0), # [batch, 16, 16, 512] => [batch, 8, 8, 512]
(512, 2, 0), # [batch, 8, 8, 512] => [batch, 4, 4, 512]
(512, 1, 0), # [batch, 4, 4, 512] => [batch, 4, 4, 512]
]
return Discriminator('dis', kernels_dis)
def create_dataset(self, training=True):
return Places365Dataset(
path=self.options.dataset_path,
training=training,
augment=self.options.augment)
| 43.950413
| 158
| 0.586499
|
8d18e54ec36fbc8cbfabd72d0aa2c4531df69717
| 5,227
|
py
|
Python
|
src/data_setup.py
|
sukumargv/hurricane
|
98e792f94f6924e0b3fcfb8aef32a6638a551cd2
|
[
"MIT"
] | null | null | null |
src/data_setup.py
|
sukumargv/hurricane
|
98e792f94f6924e0b3fcfb8aef32a6638a551cd2
|
[
"MIT"
] | 2
|
2016-09-26T16:45:53.000Z
|
2016-09-26T17:11:35.000Z
|
src/data_setup.py
|
sukumargv/hurricane
|
98e792f94f6924e0b3fcfb8aef32a6638a551cd2
|
[
"MIT"
] | null | null | null |
import numpy as np
# For loading the data into Dataframes
import pandas as pd
# For string literal
import ast
# import packages related to gdal
from osgeo import gdal
import pyproj
# For time Zone Conversions
import pytz
# for plots
import matplotlib.pyplot as plt
with open('../data_collection/track_data/r2_Queen_of_Nanaimo.json') as f:
for line in f:
data = line
# Imported data now is in string format
# Convert it into list of lists which we know the data structure of imported file.
data = ast.literal_eval(data)
# Covnert into numpy array
np.array(data)
# Assigning datatypes
mt_data = np.array([tuple(x) for x in data], dtype = [('lon', 'f8'), ('lat', 'f8'), ('speed', 'i4'), ('course', 'i4'), ('heading', 'i4'), ('timestamp', 'M8[s]'), ('ut', 'i8'), ('station', 'i8'), ('gap','i4')])
mt_data = pd.DataFrame(mt_data)
mt_data = mt_data.sort_values(by='timestamp')
tz_est = pytz.timezone('UTC')
tz_pst = "US/Pacific"
# Convert timezone of the data to Pacific
mt_data['timestamp'] = mt_data['timestamp'].dt.tz_localize(tz_est).dt.tz_convert(tz_pst)
bc_data = pd.read_pickle('bc_data.pkl')
#lon lat to cord
def proj_transform(df):
bng = pyproj.Proj(init="epsg:4326")
wgs84 = pyproj.Proj(init="epsg:3857")
cart_x = pd.Series()
cart_y = pd.Series()
for idx, val in enumerate(df['lon']):
lon, lat = pyproj.transform(bng,wgs84,df['lon'][idx], df['lat'][idx])
cart_x.set_value(idx, lat)
cart_y.set_value(idx, lon)
df['cart_x'] = cart_y
df['cart_y'] = cart_x
return df
mt_data = proj_transform(mt_data)
mt_data['cart_x'] = mt_data['cart_x'].astype(float)
mt_data['cart_y'] = mt_data['cart_y'].astype(float)
# pixels to coordinates conversion funciton can only be applied upon
def pixels_to_coordinates(bc_row,route_tif,index, **kwargs):
if ('affine' in kwargs):
pass
else:
# load in the route image
ds = gdal.Open(route_tif)
# unravel GDAL affine transform parameters
c, a, b, f, d, e = ds.GetGeoTransform()
def pixel2coord(col, row):
# Returns global coordinates to pixel center using base-0 raster index
xp = a * col + b * row + a * (-5.5) + b * (-5.5) + c
yp = d * col + e * row + d * (-5.5) + e * (-5.5) + f
return xp, yp
cart_cord = pixel2coord(bc_row['cx'],bc_row['cy'])
# Converting coordinates from EPSG 3857 to 4326
inProj = pyproj.Proj(init='epsg:3857')
outProj = pyproj.Proj(init='epsg:4326')
coordinates = pyproj.transform(inProj, outProj, cart_cord[0], cart_cord[1])
local_dict = {'lat': coordinates[1], 'lon': coordinates[0], 'cart_cord_x': cart_cord[0], 'cart_cord_y': cart_cord[1] }
if index==0:
return local_dict['lat']
if index==1:
return local_dict['lon']
if index==2:
return local_dict['cart_cord_x']
if index==3:
return local_dict['cart_cord_y']
# Route file location
route_file = "../qgis_approach/route2.tif"
# Filter specific route based on timestamp
mt_data_min_time = mt_data['timestamp'].min()
mt_data_max_time = mt_data['timestamp'].max()
# Localize time_Zone of Bc_Data to Pacific time.
bc_data['Time'] = bc_data['Time'].dt.tz_localize(tz_pst)
# Need to modify it so that can be supplied from input
vessel_name = 'Queen of Nanaimo'
bc_route_data = bc_data[(bc_data['Time'] >= mt_data_min_time) & (bc_data['Time'] <= mt_data_max_time) & (bc_data['Vessel'] == vessel_name)]
bc_route_data['lon'] = bc_route_data.apply(lambda x: pixels_to_coordinates(x,route_file,0), axis = 1)
bc_route_data['lat'] = bc_route_data.apply(lambda x: pixels_to_coordinates(x,route_file,1), axis = 1)
bc_route_data['cart_x'] = bc_route_data.apply(lambda x: pixels_to_coordinates(x,route_file,2), axis = 1)
bc_route_data['cart_y'] = bc_route_data.apply(lambda x: pixels_to_coordinates(x,route_file,3), axis = 1)
bc_route_data['dup_key'] = bc_route_data['cx'] + bc_route_data['cy']
no_dups_bc_route_data = bc_route_data.drop_duplicates('dup_key')
# Plot of all the route data
# plt.plot(mt_data['lon'], mt_data['lat'],'-',linewidth=0.5, color = 'blue')
# plt.plot(bc_route_data['lat'], bc_route_data['lon'],'-',linewidth=0.5, color ='green')
#plt.plot(mt_data['cart_x'][:100], mt_data['cart_y'][:100],'-',linewidth=0.5, color = 'red')
#plt.plot(bc_route_data['cart_x'][:200], bc_route_data['cart_y'][:200],'-',linewidth=0.5, color ='black')
#plt.show()
'''
# determining control points
filter_1 = mt_data[['cart_x', 'cart_y','timestamp','speed']][(mt_data['speed']==0)]
filter_1_ = bc_route_data[['cart_x', 'cart_y','Time', 'Status']][(bc_route_data['Status'] == 'In Port')]
plt.scatter(filter_1['cart_x'], filter_1['cart_y'], color ='blue')
plt.scatter(filter_1_['cart_x'],filter_1_['cart_y'], color = 'green')
plt.show()
'''
'''
Algorithm to solve for affine parameters
def solve_affine(s1,s2,s3,t1,t2,t3):
Source_S = np.array([[Xs1, Ys1, 1, 0, 0, 0], [Xs2, Ys2, 1, 0, 0, 0], [Xs3, Ys3, 1, 0, 0, 0], [0, 0, 0, Xs1, Ys1, 1], [0, 0, 0, Xs2, Ys2, 1], [0, 0, 0, Xs3, Ys3, 1]])
Target_T = np.array([Xt1, Xt2, Xt2, Yt1, Yt2, Yt3])
affine = np.linalg.solve(Source_S,Target_T)
return affine
'''
| 34.388158
| 209
| 0.6608
|
e5b238f11c698b7ab8f6fc664daf095bf1d7164b
| 1,568
|
py
|
Python
|
app/latches.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | 2
|
2021-04-25T17:38:03.000Z
|
2022-03-20T20:48:50.000Z
|
app/latches.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | 1
|
2016-11-30T22:25:00.000Z
|
2017-01-16T22:43:39.000Z
|
app/latches.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implementation of REST API for latch creation
"""
from flask import Blueprint, request
from .utils import typename_to_type
from .contexts import contexts
lr = Blueprint('latches', __name__)
@lr.route('', methods=['GET'])
def list_latches():
"""
Lists the available latches
"""
context = request.args.get('context')
if context is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
return {'latches': [key for key, _ in ctx.latches.items()]}, 200
@lr.route('/create', methods=['POST'])
def create_latch():
"""
Creates a new latch
"""
context = request.get_json()['context']
typ = request.get_json()['type']
if context is None or typ is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
name = '__l{}'.format(len(ctx.latches.items()))
ctx.mk_latch(name, typename_to_type(ctx, typ))
return {'result': name}, 201
@lr.route('/initnext', methods=['PUT'])
def set_latch_init_next():
"""
Sets the initial and next value of a latch
"""
context = request.get_json()['context']
latch = request.get_json()['latch']
init = request.get_json()['init']
nex = request.get_json()['next']
if context is None or latch is None or init is None or nex is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
latch_net = ctx.nets[latch]
init_net = ctx.nets[init]
next_net = ctx.nets[nex]
ctx.set_latch_init_next(latch_net, init_net, next_net)
return {'result': 'ok'}, 200
| 30.153846
| 71
| 0.639031
|
2da61cb367b97ce8b6d3bbbce3ec8c5bb46ca4d8
| 992
|
py
|
Python
|
isi_sdk_8_2_1/test/test_compatibilities_class_active.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/test/test_compatibilities_class_active.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/test/test_compatibilities_class_active.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.compatibilities_class_active import CompatibilitiesClassActive # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestCompatibilitiesClassActive(unittest.TestCase):
"""CompatibilitiesClassActive unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCompatibilitiesClassActive(self):
"""Test CompatibilitiesClassActive"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.compatibilities_class_active.CompatibilitiesClassActive() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.195122
| 110
| 0.731855
|
871e4e2eb0be1ca002d7ca224cc085f0e0502f70
| 2,040
|
py
|
Python
|
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddLiveStreamTranscodeRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddLiveStreamTranscodeRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddLiveStreamTranscodeRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddLiveStreamTranscodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddLiveStreamTranscode','live')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Template(self):
return self.get_query_params().get('Template')
def set_Template(self,Template):
self.add_query_param('Template',Template)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_App(self):
return self.get_query_params().get('App')
def set_App(self,App):
self.add_query_param('App',App)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
| 33.442623
| 82
| 0.758333
|
3cd4b3f07265683633cdb6bc1e8f6be4215c5aee
| 255
|
py
|
Python
|
docs/examples/compute/internetsolutions/instantiate_driver.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/compute/internetsolutions/instantiate_driver.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1
|
2021-12-06T12:29:13.000Z
|
2021-12-06T12:29:13.000Z
|
docs/examples/compute/internetsolutions/instantiate_driver.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1
|
2019-08-05T10:12:02.000Z
|
2019-08-05T10:12:02.000Z
|
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.INTERNETSOLUTIONS)
driver = cls("my username", "my password", region="is-af")
pprint(driver.list_nodes())
| 25.5
| 58
| 0.796078
|
dd590419697592b5ea0aa11d7ec66dc35becbb63
| 105
|
py
|
Python
|
PyWC3/__init__.py
|
Blimba/PyWC3
|
16d519bbb98e7593b8d14d14d9b81b6d6932ef0c
|
[
"MIT"
] | 14
|
2020-02-16T14:25:02.000Z
|
2021-12-07T13:57:34.000Z
|
PyWC3/__init__.py
|
sylvainSUPINTERNET/PyWC3
|
16d519bbb98e7593b8d14d14d9b81b6d6932ef0c
|
[
"MIT"
] | 3
|
2020-04-20T02:31:31.000Z
|
2022-02-25T17:06:12.000Z
|
PyWC3/__init__.py
|
sylvainSUPINTERNET/PyWC3
|
16d519bbb98e7593b8d14d14d9b81b6d6932ef0c
|
[
"MIT"
] | 2
|
2021-03-17T13:15:32.000Z
|
2021-09-26T09:24:21.000Z
|
from .PyWC3.map import Map
from .PyWC3.jass import Jass
from .PyWC3.obj import ObjFile, DataFile, DooFile
| 35
| 49
| 0.8
|
0e9457f9915d83fd71054beb322f6bfb59f2319e
| 4,627
|
py
|
Python
|
api/__init__.py
|
lyy321/BAR_API
|
82bcd83c72b6873d6cc9778214d1b41318200411
|
[
"MIT"
] | null | null | null |
api/__init__.py
|
lyy321/BAR_API
|
82bcd83c72b6873d6cc9778214d1b41318200411
|
[
"MIT"
] | null | null | null |
api/__init__.py
|
lyy321/BAR_API
|
82bcd83c72b6873d6cc9778214d1b41318200411
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_restx import Api
from flask_cors import CORS
from flask_caching import Cache
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from sqlalchemy import MetaData
import os
def create_app():
"""Initialize the app factory based on the official Flask documentation"""
bar_app = Flask(__name__)
CORS(bar_app)
# Load configuration
if os.environ.get("CI"):
# Travis
print("We are now loading configuration.")
bar_app.config.from_pyfile(os.getcwd() + "/config/BAR_API.cfg", silent=True)
if bar_app.config.get("ADMIN_ENCRYPT_KEY"):
os.environ["ADMIN_ENCRYPT_KEY"] = bar_app.config.get("ADMIN_ENCRYPT_KEY")
if bar_app.config.get("ADMIN_PASSWORD_FILE"):
os.environ["ADMIN_PASSWORD_FILE"] = bar_app.config.get(
"ADMIN_PASSWORD_FILE"
)
elif os.environ.get("BAR"):
# The BAR
bar_app.config.from_pyfile(os.environ.get("BAR_API_PATH"), silent=True)
else:
# The localhost
bar_app.config.from_pyfile(
os.path.expanduser("~") + "/.config/BAR_API.cfg", silent=True
)
# Load environment variables
if bar_app.config.get("ADMIN_ENCRYPT_KEY"):
os.environ["ADMIN_ENCRYPT_KEY"] = bar_app.config.get("ADMIN_ENCRYPT_KEY")
if bar_app.config.get("ADMIN_PASSWORD_FILE"):
os.environ["ADMIN_PASSWORD_FILE"] = bar_app.config.get(
"ADMIN_PASSWORD_FILE"
)
if bar_app.config.get("PHENIX"):
os.environ["PHENIX"] = bar_app.config.get("PHENIX")
if bar_app.config.get("PHENIX_VERSION"):
os.environ["PHENIX_VERSION"] = bar_app.config.get("PHENIX_VERSION")
if bar_app.config.get("PATH"):
os.environ["PATH"] = (
bar_app.config.get("PATH") + ":/usr/local/phenix-1.18.2-3874/build/bin"
)
# Initialize the databases
annotations_lookup_db.init_app(bar_app)
eplant2_db.init_app(bar_app)
eplant_poplar_db.init_app(bar_app)
eplant_tomato_db.init_app(bar_app)
poplar_nssnp_db.init_app(bar_app)
tomato_nssnp_db.init_app(bar_app)
tomato_seq_db.init_app(bar_app)
single_cell_db.init_app(bar_app)
summarization_db.init_app(bar_app)
# Initialize the cache
cache.init_app(bar_app)
# Initialize rate limiter
limiter.init_app(bar_app)
# Configure the Swagger UI
bar_api = Api(
title="BAR API",
version="0.0.1",
description="API for the Bio-Analytic Resource",
)
# Now add routes
from api.resources.gene_information import gene_information
from api.resources.rnaseq_gene_expression import rnaseq_gene_expression
from api.resources.summarization_gene_expression import (
summarization_gene_expression,
)
from api.resources.api_manager import api_manager
from api.resources.proxy import bar_proxy
from api.resources.thalemine import thalemine
from api.resources.snps import snps
from api.resources.sequence import sequence
from api.resources.gene_annotation import gene_annotation
bar_api.add_namespace(gene_information)
bar_api.add_namespace(rnaseq_gene_expression)
bar_api.add_namespace(summarization_gene_expression)
bar_api.add_namespace(api_manager)
bar_api.add_namespace(bar_proxy)
bar_api.add_namespace(thalemine)
bar_api.add_namespace(snps)
bar_api.add_namespace(sequence)
bar_api.add_namespace(gene_annotation)
bar_api.init_app(bar_app)
return bar_app
# Initialize database system
# This is needed because multiple databases have the same database name
# Metadata cannot have multiple tables with the same name
annotations_lookup_db = SQLAlchemy(metadata=MetaData())
eplant2_db = SQLAlchemy(metadata=MetaData())
eplant_poplar_db = SQLAlchemy(metadata=MetaData())
eplant_tomato_db = SQLAlchemy(metadata=MetaData())
poplar_nssnp_db = SQLAlchemy(metadata=MetaData())
tomato_nssnp_db = SQLAlchemy(metadata=MetaData())
tomato_seq_db = SQLAlchemy(metadata=MetaData())
single_cell_db = SQLAlchemy(metadata=MetaData())
summarization_db = SQLAlchemy(metadata=MetaData())
# Initialize Redis
cache = Cache(
config={
"CACHE_TYPE": "flask_caching.backends.redis",
"CACHE_KEY_PREFIX": "BAR_API_",
"CACHE_REDIS_PASSWORD": os.environ.get("BAR_REDIS_PASSWORD"),
}
)
# Initialize Limiter
limiter = Limiter(key_func=get_remote_address)
# Now create the bar_app
app = create_app()
if __name__ == "__main__":
app.run()
| 34.789474
| 87
| 0.711692
|
9a810e32121034b6d588902f15d8068d6210319b
| 213
|
py
|
Python
|
aio_pika/patterns/__init__.py
|
aviramha/aio-pika
|
c480d2e62ac3f3e31a0714114e368bb6fd6eeb34
|
[
"Apache-2.0"
] | null | null | null |
aio_pika/patterns/__init__.py
|
aviramha/aio-pika
|
c480d2e62ac3f3e31a0714114e368bb6fd6eeb34
|
[
"Apache-2.0"
] | null | null | null |
aio_pika/patterns/__init__.py
|
aviramha/aio-pika
|
c480d2e62ac3f3e31a0714114e368bb6fd6eeb34
|
[
"Apache-2.0"
] | null | null | null |
from .master import Master, Worker, NackMessage, RejectMessage, JsonMaster
from .rpc import RPC, JsonRPC
__all__ = (
'Master', 'NackMessage', 'RejectMessage',
'RPC', 'Worker', 'JsonMaster', 'JsonRPC',
)
| 23.666667
| 74
| 0.694836
|
4b280fb8590d009795cfb8b0746a89edb18e751f
| 1,497
|
py
|
Python
|
adventofcode/2021/1/test/test_process_depth.py
|
bneradt/toy
|
982e80ec98f4e951f7275e5f22cb0197f8f86c08
|
[
"Apache-2.0"
] | null | null | null |
adventofcode/2021/1/test/test_process_depth.py
|
bneradt/toy
|
982e80ec98f4e951f7275e5f22cb0197f8f86c08
|
[
"Apache-2.0"
] | null | null | null |
adventofcode/2021/1/test/test_process_depth.py
|
bneradt/toy
|
982e80ec98f4e951f7275e5f22cb0197f8f86c08
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from process_depth import count_increases, IntFileIterator
from tempfile import NamedTemporaryFile
import unittest
class TestCountIncreasesWindowSize1(unittest.TestCase):
"""
Test count_increases with a window size of 1 (the default).
"""
def test_all_increases(self):
l = [-20, 1, 2, 10, 100, 103]
self.assertEqual(len(l) - 1, count_increases(l))
def test_all_decreases(self):
l = [120, 8, 5, 0, -1]
self.assertEqual(0, count_increases(l))
def test_mix(self):
l = [-30, -40, -20, -5, 0, 3, 2, -1, 5, 8, 7, 8, 8, 6]
self.assertEqual(7, count_increases(l))
class TestCountWindowIncreases(unittest.TestCase):
"""
Test count_increases with a window size greater than 1.
"""
def test_with_sample_input(self):
l = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
self.assertEqual(5, count_increases(l, 3))
class TestIntFileIterator(unittest.TestCase):
"""
Test the IntFileIterator.
"""
def setUp(self):
self.test_file = NamedTemporaryFile(mode='w+t')
def test_iterator_functionality(self):
input_list = [14, 24, 103, 2, -1, -1, 22, 22]
for input_int in input_list:
self.test_file.write(f'{input_int}\n')
file_ints = IntFileIterator(self.test_file)
for i, file_int in enumerate(file_ints):
self.assertEqual(input_list[i], file_int)
if __name__ == '__main__':
unittest.main()
| 29.352941
| 63
| 0.640615
|
89304c1cae1226fe56067fa18038e2c32da5aceb
| 32,162
|
py
|
Python
|
test/functional/fundrawtransaction.py
|
tubacle/tubacle
|
b699ee9667e0997fab5c0327173f959b26177ba6
|
[
"MIT"
] | null | null | null |
test/functional/fundrawtransaction.py
|
tubacle/tubacle
|
b699ee9667e0997fab5c0327173f959b26177ba6
|
[
"MIT"
] | null | null | null |
test/functional/fundrawtransaction.py
|
tubacle/tubacle
|
b699ee9667e0997fab5c0327173f959b26177ba6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid tubaclecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| 44.60749
| 223
| 0.569865
|
2c5d1cd03e9897ab66e80adaccba702e97f80b59
| 1,184
|
py
|
Python
|
setup.py
|
brunobeltran/nuc_chain
|
ef03d7285e35788ea86b467303dfd78e52525114
|
[
"MIT"
] | null | null | null |
setup.py
|
brunobeltran/nuc_chain
|
ef03d7285e35788ea86b467303dfd78e52525114
|
[
"MIT"
] | null | null | null |
setup.py
|
brunobeltran/nuc_chain
|
ef03d7285e35788ea86b467303dfd78e52525114
|
[
"MIT"
] | 1
|
2019-07-16T16:41:36.000Z
|
2019-07-16T16:41:36.000Z
|
import setuptools
import nuc_chain
long_description = nuc_chain.__doc__
setuptools.setup(
name="nuc_chain",
version=nuc_chain.__version__,
author="Bruno Belran",
author_email="brunobeltran0@gmail.com",
description="Modeling chromatin as a chain of nucleosomes",
long_description=long_description,
long_description_content_type="text/rst",
url="https://gitlab.com/brunobeltran/nuc_chain",
packages=setuptools.find_packages(),
install_requires=['numpy', 'scipy', 'matplotlib', 'pandas', 'seaborn',
'sympy'],
setup_requires=['sphinx', 'sphinx_rtd_theme'],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 1 - Planning",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: C",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Visualization",
),
)
| 35.878788
| 74
| 0.652872
|
b371f56c23b68bba34d5c6c6b9cf0b43dda6fcdf
| 8,181
|
py
|
Python
|
src/extensions/BIOS COMMANDS/SetPasswordCommand.py
|
tomy781107/python-redfish-utility
|
1639871df55233ba8553bbf250e5caaa23ebd247
|
[
"Apache-2.0"
] | null | null | null |
src/extensions/BIOS COMMANDS/SetPasswordCommand.py
|
tomy781107/python-redfish-utility
|
1639871df55233ba8553bbf250e5caaa23ebd247
|
[
"Apache-2.0"
] | 1
|
2021-09-17T21:23:50.000Z
|
2021-11-24T01:47:24.000Z
|
src/extensions/BIOS COMMANDS/SetPasswordCommand.py
|
tomy781107/python-redfish-utility
|
1639871df55233ba8553bbf250e5caaa23ebd247
|
[
"Apache-2.0"
] | 1
|
2021-10-18T08:36:53.000Z
|
2021-10-18T08:36:53.000Z
|
###
# Copyright 2016-2021 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
""" SetPassword Command for rdmc """
import getpass
from argparse import ArgumentParser, SUPPRESS
from rdmc_helper import ReturnCodes, InvalidCommandLineError, InvalidCommandLineErrorOPTS, \
Encryption, UnableToDecodeError
class SetPasswordCommand():
""" Set password class command """
def __init__(self):
self.ident = {
'name':'setpassword',
'usage': None,
'description':'Sets the admin password and power-on password\n'
'setpassword --newpassword <NEW_PASSWORD> --currentpassword <OLD_PASSWORD> [OPTIONS]\n\n\t'
'Setting the admin password with no previous password set.'
'\n\texample: setpassword --newpassword testnew --currentpassword None\n\n\tSetting the admin '
'password back to nothing.\n\texample: setpassword --newpassword None --currentpassword testnew '
'\n\n\tSetting the power on password.\n\texample: setpassword'
' --newpassword testnew --currentpassword None --poweron\n\tNote: '
'if it is empty password, send None as above.',
'summary':'Sets the admin password and power-on password',
'aliases': [],
'auxcommands': ['LoginCommand', 'SetCommand', 'SelectCommand',
'CommitCommand', 'RebootCommand', 'LogoutCommand']
}
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def run(self, line, help_disp=False):
""" Main set password worker function
:param line: string of arguments passed in
:type line: str.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
try:
(options, args) = self.rdmc.rdmc_parse_arglist(self, line)
if not line or line[0] == "help":
self.parser.print_help()
return ReturnCodes.SUCCESS
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.setpasswordvalidation(options)
#if not args:
# self.rdmc.ui.printer('Please input the current password.\n')
# tempoldpass = getpass.getpass()
# if tempoldpass and tempoldpass != '\r':
# tempoldpass = tempoldpass
# else:
# tempoldpass = '""'
# self.rdmc.ui.printer('Please input the new password.\n')
# tempnewpass = getpass.getpass()
# if tempnewpass and tempnewpass != '\r':
# tempnewpass = tempnewpass
# else:
# tempnewpass = '""'
# args.extend([tempnewpass, tempoldpass])
#if len(args) < 2:
# raise InvalidCommandLineError("Please pass both new password and old password.")
args = list()
args.append(options.newpassword)
args.append(options.currentpassword)
count = 0
for arg in args:
if arg:
if arg.lower() == 'none':
args[count] = None
elif len(arg) > 2:
if ('"' in arg[0] and '"' in arg[-1]) or ('\'' in arg[0] and '\'' in arg[-1]):
args[count] = arg[1:-1]
elif len(arg) == 2:
if (arg[0] == '"' and arg[1] == '"') or (arg[0] == '\'' and arg[1] == '\''):
args[count] = None
count += 1
if options.encode:
_args = []
for arg in args:
try:
arg = Encryption.decode_credentials(arg)
if isinstance(arg, bytes):
arg = arg.decode('utf-8')
_args.append(arg)
except UnableToDecodeError:
_args.append(arg)
args = _args
if self.rdmc.app.typepath.defs.isgen10:
bodydict = self.rdmc.app.get_handler(self.rdmc.app.typepath.defs.biospath,
service=True, silent=True).dict
for item in bodydict['Actions']:
if 'ChangePassword' in item:
path = bodydict['Actions'][item]['target']
break
if options.poweron:
body = {"PasswordName": "User", "OldPassword": args[1], "NewPassword": args[0]}
else:
body = {"PasswordName": "Administrator", "OldPassword": args[1],
"NewPassword": args[0]}
self.rdmc.app.post_handler(path, body)
else:
if options.poweron:
self.auxcommands['select'].run("HpBios.")
self.auxcommands['set'].run("PowerOnPassword=%s OldPowerOnPassword=%s" % (args[0], args[1]))
self.auxcommands['commit'].run("")
else:
self.auxcommands['select'].run("HpBios.")
self.auxcommands['set'].run("AdminPassword=%s OldAdminPassword=%s" % (args[0], args[1]))
self.auxcommands['commit'].run("")
self.rdmc.ui.printer('\nThe session will now be terminated.\n'
' login again with updated credentials in order to continue.\n')
self.auxcommands['logout'].run("")
if options:
if options.reboot:
self.auxcommands['reboot'].run(options.reboot)
self.cmdbase.logout_routine(self, options)
return ReturnCodes.SUCCESS
def setpasswordvalidation(self, options):
""" Results method validation function
:param options: command line options
:type options: list.
"""
self.cmdbase.login_select_validation(self, options)
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
self.cmdbase.add_login_arguments_group(customparser)
customparser.add_argument(
'--currentpassword',
dest='currentpassword',
help="Use this flag to provide current password.",
required=True,
)
customparser.add_argument(
'--newpassword',
dest='newpassword',
help="Use this flag to provide new password.",
required=True,
)
customparser.add_argument(
'--reboot',
dest='reboot',
help="Use this flag to perform a reboot command function after "\
"completion of operations. 'REBOOT' is a replaceable parameter "\
"that can have multiple values. For help with parameters and "\
"descriptions regarding the reboot flag, run help reboot.",
default=None,
)
customparser.add_argument(
'--poweron',
dest='poweron',
action="store_true",
help="""Use this flag to set power on password instead""",
default=None,
)
| 39.713592
| 118
| 0.538687
|
9c4fbe8a0e49f5ca98c21de9b202c245aa00306d
| 115
|
py
|
Python
|
apps/news/admin.py
|
kohhi/exist
|
c688a228ac9fee56ff29990a6d75b2a09f8457ca
|
[
"MIT"
] | 159
|
2019-03-15T10:46:19.000Z
|
2022-03-12T09:19:31.000Z
|
apps/news/admin.py
|
kohhi/exist
|
c688a228ac9fee56ff29990a6d75b2a09f8457ca
|
[
"MIT"
] | 6
|
2019-03-16T12:51:24.000Z
|
2020-07-09T02:25:42.000Z
|
apps/news/admin.py
|
kohhi/exist
|
c688a228ac9fee56ff29990a6d75b2a09f8457ca
|
[
"MIT"
] | 36
|
2019-03-16T10:37:14.000Z
|
2021-11-14T21:04:18.000Z
|
from django.contrib import admin
# Register your models here.
from .models import News
admin.site.register(News)
| 16.428571
| 32
| 0.791304
|
7ca0cfd0c9c38b26dc98545c16b0ff93ce8d6370
| 582
|
bzl
|
Python
|
third_party/darts_clone/workspace.bzl
|
jeongukjae/nori-clone
|
e0f8afb842499be4d55f1fc47292fbecdbca2a86
|
[
"Apache-2.0"
] | 38
|
2022-01-07T05:19:28.000Z
|
2022-03-27T13:44:53.000Z
|
third_party/darts_clone/workspace.bzl
|
jeongukjae/nori-clone
|
e0f8afb842499be4d55f1fc47292fbecdbca2a86
|
[
"Apache-2.0"
] | 19
|
2022-01-11T14:25:03.000Z
|
2022-02-18T14:24:19.000Z
|
third_party/darts_clone/workspace.bzl
|
jeongukjae/nori-clone
|
e0f8afb842499be4d55f1fc47292fbecdbca2a86
|
[
"Apache-2.0"
] | 3
|
2022-02-14T13:51:20.000Z
|
2022-03-28T06:55:38.000Z
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("//third_party:repo.bzl", "clean_dep", "clean_deps")
def configure_darts_clone():
http_archive(
name = "darts_clone",
build_file = clean_dep("//third_party/darts_clone:BUILD.bzl"),
sha256 = "c97f55d05c98da6fcaf7f9ecc6a6dc6bc5b18b8564465f77abff8879d446491c",
strip_prefix = "darts-clone-e40ce4627526985a7767444b6ed6893ab6ff8983",
# darts_clone 0.32h
url = "https://github.com/s-yata/darts-clone/archive/e40ce4627526985a7767444b6ed6893ab6ff8983.zip",
)
| 44.769231
| 107
| 0.726804
|
6a566073eda12f49a456b7099709ff281750e0a6
| 846
|
py
|
Python
|
setup.py
|
jakejack13/gachapy
|
78c0ca5cd14a618a433009f9220049bcfc507516
|
[
"MIT"
] | 5
|
2021-03-09T04:04:33.000Z
|
2021-05-27T03:28:15.000Z
|
setup.py
|
jakejack13/gachapy
|
78c0ca5cd14a618a433009f9220049bcfc507516
|
[
"MIT"
] | null | null | null |
setup.py
|
jakejack13/gachapy
|
78c0ca5cd14a618a433009f9220049bcfc507516
|
[
"MIT"
] | 3
|
2021-04-02T02:31:18.000Z
|
2021-04-20T03:03:04.000Z
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="gachapy",
version="1.0.0",
author="Jacob Kerr",
author_email="jck268@cornell.edu",
description="A gacha engine built in Python for developing gacha games",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jakejack13/gachapy",
project_urls={
"Bug Tracker": "https://github.com/jakejack13/gachapy/issues",
"Documentation": "https://gachapy.readthedocs.io/",
},
classifiers=[
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.10",
)
| 31.333333
| 76
| 0.658392
|
176519885e88d46b1a66e155e5a33a661059455b
| 7,648
|
py
|
Python
|
wot/graphics/plot_fates.py
|
Jabbath/wot
|
90b40f03ed1c5feb4018096b9e2c1294cf66f535
|
[
"BSD-3-Clause"
] | null | null | null |
wot/graphics/plot_fates.py
|
Jabbath/wot
|
90b40f03ed1c5feb4018096b9e2c1294cf66f535
|
[
"BSD-3-Clause"
] | null | null | null |
wot/graphics/plot_fates.py
|
Jabbath/wot
|
90b40f03ed1c5feb4018096b9e2c1294cf66f535
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
from matplotlib import patches
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import math
import wot.graphics
class Fate_Plotter():
def __init__(self, fate_ds):
"""
Parameters
----------
fate_ds: pandas.DataFrame
A df of cell fates as generated by wot.tmap.TransportMapModel.fates.
"""
self.fate_ds = fate_ds
def plot_triangle(self, name1, name2, day, filename=None):
"""
Plots cells in barycentric coordinates (2D) according to their fates.
Cells are placed by using the fates to generate a convex combination
of the triangle's vertices.
Parameters
----------
name1: str
The cell population whose fate will the first of the triangle's vertices.
name2: str
The cell population whose fate will the second of the triangle's vertices.
day: float
The timepoint from which we want to plot cells.
filename: str, optional
The name of the file to save the plot as. None to skip saving.
"""
figure = plt.figure(figsize=(8, 8))
#Get the fates for our two cell populations
fate1 = self.fate_ds[:,name1][self.fate_ds.obs['day']==day].X.flatten()
fate2 = self.fate_ds[:,name2][self.fate_ds.obs['day']==day].X.flatten()
#Take a convex combination of the triangle's vertices using the fates
Nrows = len(fate1)
x = np.zeros(Nrows)
y = np.zeros(Nrows)
P = np.array([[1,0],[np.cos(2*math.pi/3),math.sin(2*math.pi/3)],[math.cos(4*math.pi/3),math.sin(4*math.pi/3)]])
for i in range(0,Nrows):
ff = np.array([fate1[i],fate2[i],1-(fate1[i]+fate2[i])])
x[i] = (ff @ P)[0]
y[i] = (ff @ P)[1]
#Plot the triangle
t1 = plt.Polygon(P, color=(0,0,0,0.1))
plt.gca().add_patch(t1)
#Plot the vertices
vx = P[:,0]
vy = P[:,1]
plt.scatter(vx,vy)
#Plot cells and labels
plt.scatter(x, y, c='tab:blue')
plt.text(P[0,0]+.1, P[0,1], name1)
plt.text(P[1,0]-.1, P[1,1]+.1, name2)
plt.text(P[2,0]-.1, P[2,1]-.2, 'Other')
plt.axis('equal')
plt.axis('off')
plt.title('{} vs. {} on day {}'.format(name1, name2, day))
#Optionally save the figure
if filename is not None:
plt.savefig(filename)
def plot_tetrahedron(self, name1, name2, name3, day,
azimuth=40, elevation=20, filename=None):
"""
Plots cells in barycentric coordinates (3D) according to their fates.
Cells are placed by using the fates to generate a convex combination
of a tetrahedron's vertices. Works well with the "%matplotlib widget"
cell magic if used in Jupyter notebooks.
Parameters
----------
name1: str
The cell population whose fate will the first of the tetrahedron's vertices.
name2: str
The cell population whose fate will the second of the tetrahedron's vertices.
name3: str
The cell population whose fate will the third of the tetrahedron's vertices.
day: float
The timepoint at which we want to plot cells.
azimuth: float, optional
The angle in degrees by which to rotate the xy plane. The face made by
name1, name2, and name3 is normal to the xy plane.
elevation: float, optional
The angle in degrees by which to rotate the z plane.
filename: str, optional
The name of the file to save the plot as. None to skip saving.
"""
# Map the time to the closest time in the dataset
days = pd.unique(self.fate_ds.obs.day)
day = days[np.argmin(abs(days - day))]
fate1 = self.fate_ds[:,name1][self.fate_ds.obs['day']==day].X.flatten()
fate2 = self.fate_ds[:,name2][self.fate_ds.obs['day']==day].X.flatten()
fate3 = self.fate_ds[:,name3][self.fate_ds.obs['day']==day].X.flatten()
Nrows = len(fate1)
x = np.zeros(Nrows)
y = np.zeros(Nrows)
z = np.zeros(Nrows)
# Define coordinates for the vertices of the tetrahedron
T = np.array([[ math.sqrt(8/9), 0, -1/3],
[-math.sqrt(2/9), math.sqrt(2/3), -1/3],
[ -math.sqrt(2/9), -math.sqrt(2/3), -1/3],
[ 0, 0, 1]])
# Find the coordinates of each cell as a linear combination of the weight
# of the cell towards each vertex fate.
for i in range(0,Nrows):
ff = np.array([fate1[i],fate2[i], fate3[i], 1-(fate1[i]+fate2[i]+fate3[i])])
x[i] = (ff @ T)[0]
y[i] = (ff @ T)[1]
z[i] = (ff @ T)[2]
# Close the previous plot
plt.close()
# Create the plot and set axes settings
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
# Plot the cells
ax.scatter(xs=x, ys=y, zs=z, zdir='z', s=4, c='tab:blue', depthshade=True)
# Plot the vertices
ax.scatter(xs=T[:,0], ys=T[:,1], zs=T[:,2], zdir='z', s=20, c='orange', depthshade=True)
#Plot the labels, offsetting them from the vertices by a small factor
fate_names = [name1, name2, name3, 'Other']
for x,y,z,fate in zip(T[:,0] + 0.1*np.sign(T[:,0]),
T[:,1] + 0.05*np.sign(T[:,1]),
T[:,2] + 0.05*np.sign(T[:,2]), fate_names):
ax.text(x,y,z,fate,zdir=None)
# Plot the faces of the tetrahedron
ax.add_collection3d(Poly3DCollection(T[(0,1,2),:], color=(0.6,0.6,0.6), alpha=.05))
ax.add_collection3d(Poly3DCollection(T[(0,1,3),:], color=(0.6,0.6,0.6), alpha=.05))
ax.add_collection3d(Poly3DCollection(T[(0,2,3),:], color=(0.6,0.6,0.6), alpha=.05))
ax.add_collection3d(Poly3DCollection(T[(1,2,3),:], color=(0.6,0.6,0.6), alpha=.05))
# Set the angle of the plot
ax.view_init(elev=elevation, azim=azimuth)
# Turn off axes and trim extra space
plt.axis('off')
plt.title('{} vs. {} vs. {} on day {}'.format(name1, name2, name3, day))
plt.tight_layout()
#Optionally save the figure
if filename is not None:
plt.savefig(filename)
def plot_log_odds(self, name1, name2, filename=None):
"""
Displays log-odds for a pair of fates. This is the log of the
ratio of fate probabilities.
Parameters
----------
name1: str
The cell population whose fate will the numerator.
name2: str
The cell population whose fate will denominator.
filename: str, optional
The name of the file to save the plot as. None to skip saving.
"""
figure = plt.figure(figsize=(8, 8))
#Extract the fate probabilities for the two cell populations
fate1 = self.fate_ds[:, name1].X
fate2 = self.fate_ds[:, name2].X
#Calculate the log odds
p = np.log(1e-9 + np.divide(fate1, fate2, out=np.zeros_like(fate1), where=fate2 != 0))
#Plot log-odds by day
plt.scatter(self.fate_ds.obs['day'], p, s=4, marker=',', c='tab:blue')
plt.xlabel('Day')
plt.ylabel('Log Odds')
plt.title('{} vs. {}'.format(name1, name2))
#Optionally save the figure
if filename is not None:
plt.savefig(filename)
| 38.049751
| 119
| 0.565507
|
c6d40e861d662a648636cfea43df0215df59cb53
| 4,932
|
py
|
Python
|
saleor/graphql/app/tests/mutations/test_app_install.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/graphql/app/tests/mutations/test_app_install.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/graphql/app/tests/mutations/test_app_install.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
from unittest.mock import Mock
import graphene
from .....app.models import AppInstallation
from .....core import JobStatus
from ....core.enums import AppErrorCode, PermissionEnum
from ....tests.utils import get_graphql_content
INSTALL_APP_MUTATION = """
mutation AppInstall(
$app_name: String, $manifest_url: String, $permissions: [PermissionEnum]){
appInstall(
input:{appName: $app_name, manifestUrl: $manifest_url,
permissions:$permissions}){
appInstallation{
id
status
appName
manifestUrl
}
errors{
field
message
code
permissions
}
}
}
"""
def test_install_app_mutation(
permission_manage_apps,
permission_manage_orders,
staff_api_client,
staff_user,
monkeypatch,
):
mocked_task = Mock()
monkeypatch.setattr(
"saleor.graphql.app.mutations.install_app_task.delay", mocked_task
)
query = INSTALL_APP_MUTATION
staff_user.user_permissions.set([permission_manage_apps, permission_manage_orders])
variables = {
"app_name": "New external integration",
"manifest_url": "http://localhost:3000/manifest",
"permissions": [PermissionEnum.MANAGE_ORDERS.name],
}
response = staff_api_client.post_graphql(
query,
variables=variables,
)
content = get_graphql_content(response)
app_installation = AppInstallation.objects.get()
app_installation_data = content["data"]["appInstall"]["appInstallation"]
_, app_id = graphene.Node.from_global_id(app_installation_data["id"])
assert int(app_id) == app_installation.id
assert app_installation_data["status"] == JobStatus.PENDING.upper()
assert app_installation_data["manifestUrl"] == app_installation.manifest_url
mocked_task.assert_called_with(app_installation.pk, True)
def test_install_app_mutation_by_app(
permission_manage_apps, permission_manage_orders, app_api_client, monkeypatch
):
mocked_task = Mock()
monkeypatch.setattr(
"saleor.graphql.app.mutations.install_app_task.delay", mocked_task
)
query = INSTALL_APP_MUTATION
app_api_client.app.permissions.set(
[permission_manage_apps, permission_manage_orders]
)
variables = {
"app_name": "New external integration",
"manifest_url": "http://localhost:3000/manifest",
"permissions": [PermissionEnum.MANAGE_ORDERS.name],
}
response = app_api_client.post_graphql(
query,
variables=variables,
)
content = get_graphql_content(response)
app_installation = AppInstallation.objects.get()
app_installation_data = content["data"]["appInstall"]["appInstallation"]
_, app_id = graphene.Node.from_global_id(app_installation_data["id"])
assert int(app_id) == app_installation.id
assert app_installation_data["status"] == JobStatus.PENDING.upper()
assert app_installation_data["manifestUrl"] == app_installation.manifest_url
mocked_task.assert_called_with(app_installation.pk, True)
def test_app_install_mutation_out_of_scope_permissions(
permission_manage_apps, staff_api_client, staff_user
):
query = INSTALL_APP_MUTATION
staff_user.user_permissions.set([permission_manage_apps])
variables = {
"app_name": "New external integration",
"manifest_url": "http://localhost:3000/manifest",
"permissions": [PermissionEnum.MANAGE_ORDERS.name],
}
response = staff_api_client.post_graphql(
query,
variables=variables,
)
content = get_graphql_content(response)
data = content["data"]["appInstall"]
errors = data["errors"]
assert not data["appInstallation"]
assert len(errors) == 1
error = errors[0]
assert error["field"] == "permissions"
assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
assert error["permissions"] == [PermissionEnum.MANAGE_ORDERS.name]
def test_install_app_mutation_by_app_out_of_scope_permissions(
permission_manage_apps, app_api_client
):
query = INSTALL_APP_MUTATION
app_api_client.app.permissions.set([permission_manage_apps])
variables = {
"app_name": "New external integration",
"manifest_url": "http://localhost:3000/manifest",
"permissions": [PermissionEnum.MANAGE_ORDERS.name],
}
response = app_api_client.post_graphql(
query,
variables=variables,
)
content = get_graphql_content(response)
data = content["data"]["appInstall"]
errors = data["errors"]
assert not data["appInstallation"]
assert len(errors) == 1
error = errors[0]
assert error["field"] == "permissions"
assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
assert error["permissions"] == [PermissionEnum.MANAGE_ORDERS.name]
| 33.780822
| 87
| 0.687956
|
9f466a25cd20bded9b0dcebd8afcf228cd48edac
| 718
|
py
|
Python
|
onadata/libs/permissions.py
|
amks1/kobocat
|
0f36c75cc55345716558e8bece2ef48567896997
|
[
"BSD-2-Clause"
] | 87
|
2015-02-10T23:36:28.000Z
|
2022-01-31T00:51:10.000Z
|
onadata/libs/permissions.py
|
amks1/kobocat
|
0f36c75cc55345716558e8bece2ef48567896997
|
[
"BSD-2-Clause"
] | 443
|
2015-04-28T18:50:25.000Z
|
2022-03-31T03:29:43.000Z
|
onadata/libs/permissions.py
|
amks1/kobocat
|
0f36c75cc55345716558e8bece2ef48567896997
|
[
"BSD-2-Clause"
] | 122
|
2015-01-13T16:27:44.000Z
|
2022-01-24T09:35:56.000Z
|
# coding: utf-8
from guardian.shortcuts import get_users_with_perms
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'permissions': permissions} for user, permissions in
users_with_perms
]
return result
| 31.217391
| 77
| 0.679666
|
7c599f4e2533a2a68eaa819487a7bf1416e78801
| 30,376
|
py
|
Python
|
synphot/models.py
|
spacetelescope/pysynphot_DONOTUSE
|
2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc
|
[
"BSD-3-Clause"
] | 20
|
2016-09-08T22:01:21.000Z
|
2022-01-18T03:56:53.000Z
|
synphot/models.py
|
spacetelescope/pysynphot_DONOTUSE
|
2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc
|
[
"BSD-3-Clause"
] | 212
|
2016-04-13T21:30:38.000Z
|
2021-12-30T21:31:25.000Z
|
synphot/models.py
|
spacetelescope/pysynphot_DONOTUSE
|
2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc
|
[
"BSD-3-Clause"
] | 14
|
2016-06-12T17:49:57.000Z
|
2022-01-18T03:57:02.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Spectrum models not in `astropy.modeling`."""
# STDLIB
import math
import warnings
from copy import deepcopy
from functools import partial
# THIRD-PARTY
import numpy as np
# ASTROPY
from astropy import constants as const
from astropy import units as u
from astropy.modeling import Fittable1DModel, Model, Parameter
from astropy.modeling import models as _models
from astropy.modeling.models import Tabular1D
from astropy.stats.funcs import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm
from astropy.utils import metadata
from astropy.utils.exceptions import AstropyUserWarning
# LOCAL
from . import units
from .compat import ASTROPY_LT_4_0, ASTROPY_LT_4_3, ASTROPY_LT_5_0
from .exceptions import SynphotError
from .utils import merge_wavelengths
if ASTROPY_LT_4_0:
from astropy.modeling.core import _CompoundModel as CompoundModel
from astropy.modeling.models import MexicanHat1D as _RickerWavelet1D
else:
from astropy.modeling.core import CompoundModel
from astropy.modeling.models import RickerWavelet1D as _RickerWavelet1D
__all__ = ['BlackBody1D', 'BlackBodyNorm1D', 'Box1D', 'ConstFlux1D',
'Empirical1D', 'Gaussian1D', 'GaussianAbsorption1D',
'GaussianFlux1D', 'Lorentz1D', 'MexicanHat1D', 'RickerWavelet1D',
'PowerLawFlux1D', 'Trapezoid1D', 'get_waveset', 'get_metadata']
class BlackBody1D(Fittable1DModel):
"""Create a :ref:`blackbody spectrum <synphot-planck-law>`
model with given temperature.
Parameters
----------
temperature : float
Blackbody temperature in Kelvin.
"""
temperature = Parameter(default=5000)
def __init__(self, *args, **kwargs):
super(BlackBody1D, self).__init__(*args, **kwargs)
self.meta['expr'] = 'bb({0})'.format(self.temperature.value)
@property
def lambda_max(self):
"""Peak wavelength in Angstrom when the curve is expressed as
power density."""
return ((const.b_wien.value / self.temperature) * u.m).to_value(u.AA)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
.. math::
x_{\\mathrm{low}} = 0
x_{\\mathrm{high}} = \\log(\\lambda_{\\mathrm{max}} \\;\
(1 + \\mathrm{factor}))
Parameters
----------
factor : float
Used to calculate ``x_high``.
"""
w0 = self.lambda_max
return (w0 * 0, np.log10(w0 + factor * w0))
def sampleset(self, factor_bbox=10.0, num=1000):
"""Return ``x`` array that samples the feature.
Parameters
----------
factor_bbox : float
Factor for ``bounding_box`` calculations.
num : int
Number of points to generate.
"""
w1, w2 = self.bounding_box(factor=factor_bbox)
if self._n_models == 1:
w = np.logspace(w1, w2, num)
else:
w = list(map(partial(np.logspace, num=num), w1, w2))
return np.asarray(w)
@staticmethod
def evaluate(x, temperature):
"""Evaluate the model.
Parameters
----------
x : number or ndarray
Wavelengths in Angstrom.
temperature : number
Temperature in Kelvin.
Returns
-------
y : number or ndarray
Blackbody radiation in PHOTLAM per steradian.
"""
from synphot.blackbody import blackbody_nu
# Silence Numpy
old_np_err_cfg = np.seterr(all='ignore')
wave = np.ascontiguousarray(x) * u.AA
bbnu_flux = blackbody_nu(wave, temperature)
bbflux = (bbnu_flux * u.sr).to(
units.PHOTLAM, u.spectral_density(wave)) / u.sr # PHOTLAM/sr
# Restore Numpy settings
np.seterr(**old_np_err_cfg)
return bbflux.value
def integrate(self, *args):
with u.add_enabled_equivalencies(u.temperature()):
t = u.Quantity(self.temperature, u.K)
return (const.sigma_sb * t ** 4 / math.pi) # per steradian
class BlackBodyNorm1D(BlackBody1D):
"""Create a normalized :ref:`blackbody spectrum <synphot-planck-law>`
with given temperature.
It is normalized by multiplying `BlackBody1D` result with a solid angle,
:math:`\\Omega`, as defined below, where :math:`d` is 1 kpc:
.. math::
\\Omega = \\frac{\\pi R_{\\mathrm{Sun}}^{2}}{d^{2}}
Parameters
----------
temperature : float
Blackbody temperature in Kelvin.
"""
def __init__(self, *args, **kwargs):
super(BlackBodyNorm1D, self).__init__(*args, **kwargs)
self._omega = np.pi * (const.R_sun / const.kpc).value ** 2 # steradian
def evaluate(self, x, temperature):
"""Evaluate the model.
Parameters
----------
x : number or ndarray
Wavelengths in Angstrom.
temperature : number
Temperature in Kelvin.
Returns
-------
y : number or ndarray
Blackbody radiation in PHOTLAM.
"""
bbflux = super(BlackBodyNorm1D, self).evaluate(x, temperature)
return bbflux * self._omega
def integrate(self, *args):
return super().integrate(*args) * self._omega
class Box1D(_models.Box1D):
"""Same as `astropy.modeling.functional_models.Box1D`, except with
``sampleset`` defined.
"""
@staticmethod
def _calc_sampleset(w1, w2, step, minimal):
"""Calculate sampleset for each model."""
if minimal:
arr = [w1 - step, w1, w2, w2 + step]
else:
arr = np.arange(w1 - step, w2 + step + step, step)
return arr
def sampleset(self, step=0.01, minimal=False):
"""Return ``x`` array that samples the feature.
Parameters
----------
step : float
Distance of first and last points w.r.t. bounding box.
minimal : bool
Only return the minimal points needed to define the box;
i.e., box edges and a point outside on each side.
"""
if ASTROPY_LT_5_0:
w1, w2 = self.bounding_box
else:
w1, w2 = tuple(self.bounding_box.bounding_box())
if self._n_models == 1:
w = self._calc_sampleset(w1, w2, step, minimal)
else:
w = list(map(partial(
self._calc_sampleset, step=step, minimal=minimal), w1, w2))
return np.asarray(w)
def integrate(self, *args):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
w = u.Quantity(self.width, u.AA)
return self.amplitude * w
class ConstFlux1D(_models.Const1D):
"""One dimensional constant flux model.
Flux that is constant in a given unit might not be constant in
another unit. During evaluation, flux is always converted to PHOTLAM.
For multiple ``n_models``, this model only accepts amplitudes of the
same flux unit; e.g., ``[1, 2]`` or ``Quantity([1, 2], 'photlam')``.
Parameters
----------
amplitude : number or `~astropy.units.quantity.Quantity`
Value and unit of the constant function.
If not Quantity, assume the unit of PHOTLAM.
"""
def __init__(self, amplitude, **kwargs):
if not isinstance(amplitude, u.Quantity):
amplitude = amplitude * units.PHOTLAM
if amplitude.unit == u.STmag:
a = units.convert_flux(1, amplitude, units.FLAM)
elif amplitude.unit == u.ABmag:
a = units.convert_flux(1, amplitude, units.FNU)
elif (amplitude.unit.physical_type in
('spectral flux density', 'spectral flux density wav',
'photon flux density', 'photon flux density wav')):
a = amplitude
else:
raise NotImplementedError(
'{0} not supported.'.format(amplitude.unit))
self._flux_unit = a.unit
super(ConstFlux1D, self).__init__(amplitude=a.value, **kwargs)
def evaluate(self, x, *args):
"""One dimensional constant flux model function.
Parameters
----------
x : number or ndarray
Wavelengths in Angstrom.
Returns
-------
y : number or ndarray
Flux in PHOTLAM.
"""
a = (self.amplitude * np.ones_like(x)) * self._flux_unit
y = units.convert_flux(x, a, units.PHOTLAM)
return y.value
def integrate(self, x):
# TODO: Remove unit hardcoding when we use model with units natively.
# TODO: We do not handle wav_unit as wave number nor energy for now.
if ((ASTROPY_LT_4_3 and 'wav' in self._flux_unit.physical_type) or
(not ASTROPY_LT_4_3 and
any(['wav' in t for t in self._flux_unit.physical_type]))):
wav_unit = u.AA
else:
wav_unit = u.Hz
with u.add_enabled_equivalencies(u.spectral()):
x = u.Quantity(x, wav_unit)
amp = u.Quantity(self.amplitude, self._flux_unit)
return (max(x) - min(x)) * amp
class Empirical1D(Tabular1D):
"""Empirical (sampled) spectrum or bandpass model.
.. note::
This model requires `SciPy <https://www.scipy.org>`_ 0.14
or later to be installed.
Parameters
----------
keep_neg : bool
Convert negative ``lookup_table`` values to zeroes?
This is to be consistent with ASTROLIB PYSYNPHOT.
kwargs : dict
Keywords for `~astropy.modeling.tabular.Tabular1D` model
creation or :func:`~scipy.interpolate.interpn`.
When ``fill_value=np.nan`` is given, extrapolation is done
based on nearest end points on each end; This is the default
behavior.
"""
def __init__(self, **kwargs):
# Manually insert user metadata here to accomodate any warning
# from self._process_neg_flux()
meta = kwargs.pop('meta', {})
self.meta = meta
if 'warnings' not in self.meta:
self.meta['warnings'] = {}
x = kwargs['points']
y = kwargs['lookup_table']
# Points can only be ascending for interpn()
if x[-1] < x[0]:
x = x[::-1]
y = y[::-1]
kwargs['points'] = x
# Handle negative flux
keep_neg = kwargs.pop('keep_neg', False)
self._keep_neg = keep_neg
y = self._process_neg_flux(x, y)
kwargs['lookup_table'] = y
super(Empirical1D, self).__init__(**kwargs)
# Set non-default interpolation default values.
# For tapered model, just fill with zero;
# Otherwise, extrapolate like ASTROLIB PYSYNPHOT.
self.bounds_error = kwargs.get('bounds_error', False)
if self.is_tapered():
self.fill_value = kwargs.get('fill_value', 0)
else:
self.fill_value = kwargs.get('fill_value', np.nan)
def _process_neg_flux(self, x, y):
"""Remove negative flux."""
if self._keep_neg: # Nothing to do
return y
old_y = None
if np.isscalar(y): # pragma: no cover
if y < 0:
n_neg = 1
old_x = x
old_y = y
y = 0
else:
x = np.asarray(x) # In case input is just pure list
y = np.asarray(y)
i = np.where(y < 0)
n_neg = len(i[0])
if n_neg > 0:
old_x = x[i]
old_y = y[i]
y[i] = 0
if old_y is not None:
warn_str = ('{0} bin(s) contained negative flux or throughput'
'; it/they will be set to zero.'.format(n_neg))
warn_str += '\n points: {0}\n lookup_table: {1}'.format(
old_x, old_y) # Extra info
self.meta['warnings'].update({'NegativeFlux': warn_str})
warnings.warn(warn_str, AstropyUserWarning)
return y
def is_tapered(self):
return np.array_equal(
self.lookup_table[::self.lookup_table.size - 1], [0, 0])
def sampleset(self):
"""Return array that samples the feature."""
return np.squeeze(self.points)
def evaluate(self, inputs):
"""Evaluate the model.
Parameters
----------
inputs : number or ndarray
Wavelengths in same unit as ``points``.
Returns
-------
y : number or ndarray
Flux or throughput in same unit as ``lookup_table``.
"""
y = super(Empirical1D, self).evaluate(inputs)
# Assume NaN at both ends need to be extrapolated based on
# nearest end point.
if self.fill_value is np.nan:
# Cannot use sampleset() due to ExtinctionModel1D.
x = np.squeeze(self.points)
# np.squeeze may throw unit away.
if (isinstance(self.points, tuple) and
isinstance(self.points[0], u.Quantity) and
not isinstance(x, u.Quantity)):
x = x * self.points[0].unit
if np.isscalar(y): # pragma: no cover
if inputs < x[0]:
y = self.lookup_table[0]
elif inputs > x[-1]:
y = self.lookup_table[-1]
else:
y[inputs < x[0]] = self.lookup_table[0]
y[inputs > x[-1]] = self.lookup_table[-1]
return self._process_neg_flux(inputs, y)
class BaseGaussian1D(_models.Gaussian1D):
"""Same as `astropy.modeling.functional_models.BaseGaussian1D`, except with
``sampleset`` defined.
"""
_sqrt_2_pi = math.sqrt(2 * math.pi)
def sampleset(self, factor_step=0.1, **kwargs):
"""Return ``x`` array that samples the feature.
Parameters
----------
factor_step : float
Factor for sample step calculation. The step is calculated
using ``factor_step * self.stddev``.
kwargs : dict
Keyword(s) for ``bounding_box`` calculation.
Default ``factor`` is set to 5 to be compatible with
ASTROLIB PYSYNPHOT.
"""
if 'factor' not in kwargs:
kwargs['factor'] = 5.0
w1, w2 = self.bounding_box(**kwargs)
dw = factor_step * self.stddev
if self._n_models == 1:
w = np.arange(w1, w2, dw)
else:
w = list(map(np.arange, w1, w2, dw))
return np.asarray(w)
class Gaussian1D(BaseGaussian1D):
"""Same as `astropy.modeling.functional_models.Gaussian1D`, except with
``sampleset`` defined.
"""
def integrate(self, *args):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
stddev = u.Quantity(self.stddev, u.AA)
return self.amplitude * stddev * self._sqrt_2_pi
# TODO: Deprecate this?
# This is not really supported anymore but kept for backward compatibility.
class GaussianAbsorption1D(BaseGaussian1D):
"""Same as ``astropy.modeling.functional_models.GaussianAbsorption1D``,
except with ``sampleset`` defined.
"""
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
GaussianAbsorption1D model function.
"""
return 1.0 - Gaussian1D.evaluate(x, amplitude, mean, stddev)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
GaussianAbsorption1D model function derivatives.
"""
import operator
return list(map(
operator.neg, Gaussian1D.fit_deriv(x, amplitude, mean, stddev)))
class GaussianFlux1D(Gaussian1D):
"""Same as `Gaussian1D` but accepts extra keywords below.
Parameters
----------
amplitude : float
Amplitude of the Gaussian in PHOTLAM.
Also see ``total_flux``.
mean : float
Mean of the Gaussian in Angstrom.
stddev : float
Standard deviation of the Gaussian in Angstrom.
Also see ``fwhm``.
fwhm : float
Full width at half maximum of the Gaussian in Angstrom.
If given, this overrides ``stddev``.
total_flux : float
Total flux under the Gaussian in ``erg/s/cm^2``.
If given, this overrides ``amplitude``.
"""
def __init__(self, *args, **kwargs):
fwhm = kwargs.pop('fwhm', None)
total_flux = kwargs.pop('total_flux', None)
super(GaussianFlux1D, self).__init__(*args, **kwargs)
if fwhm is None:
fwhm = self.stddev * gaussian_sigma_to_fwhm
else:
self.stddev = fwhm * gaussian_fwhm_to_sigma
gaussian_amp_to_totflux = self._sqrt_2_pi * self.stddev
if total_flux is None:
u_str = 'PHOTLAM'
total_flux = self.amplitude * gaussian_amp_to_totflux
else:
u_str = 'FLAM'
# total_flux is passed in unaltered, any conversion error would
# happen here.
tf_unit = u.erg / (u.cm * u.cm * u.s)
if isinstance(total_flux, u.Quantity):
total_flux = total_flux.to(tf_unit)
else:
total_flux = total_flux * tf_unit
self.amplitude = (total_flux / (gaussian_amp_to_totflux * u.AA)).to_value(units.PHOTLAM, u.spectral_density(self.mean.value * u.AA)) # noqa
total_flux = total_flux.value
self.meta['expr'] = 'em({0:g}, {1:g}, {2:g}, {3})'.format(
self.mean.value, fwhm, total_flux, u_str)
def integrate(self, *args):
# TODO: Remove unit hardcoding when we use model with units natively.
return super(GaussianFlux1D, self).integrate(*args) * units.PHOTLAM
class Lorentz1D(_models.Lorentz1D):
"""Same as `astropy.modeling.functional_models.Lorentz1D`, except with
``sampleset`` defined.
"""
def sampleset(self, factor_step=0.05, **kwargs):
"""Return ``x`` array that samples the feature.
Parameters
----------
factor_step : float
Factor for sample step calculation. The step is calculated
using ``factor_step * self.fwhm``.
kwargs : dict
Keyword(s) for ``bounding_box`` calculation.
"""
w1, w2 = self.bounding_box(**kwargs)
dw = factor_step * self.fwhm
if self._n_models == 1:
w = np.arange(w1, w2, dw)
else:
w = list(map(np.arange, w1, w2, dw))
return np.asarray(w)
def integrate(self, x):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
x = u.Quantity(x, u.AA)
x_0 = u.Quantity(self.x_0, u.AA)
gamma = u.Quantity(self.fwhm, u.AA) * 0.5
a1 = np.arctan((min(x) - x_0) / gamma)
a2 = np.arctan((max(x) - x_0) / gamma)
da = (a2 - a1).to(u.dimensionless_unscaled, u.dimensionless_angles())
return self.amplitude * gamma * da
class RickerWavelet1D(_RickerWavelet1D):
"""Same as `astropy.modeling.functional_models.RickerWavelet1D`, except
with ``sampleset`` defined.
"""
def sampleset(self, factor_step=0.1, **kwargs):
"""Return ``x`` array that samples the feature.
Parameters
----------
factor_step : float
Factor for sample step calculation. The step is calculated
using ``factor_step * self.sigma``.
kwargs : dict
Keyword(s) for ``bounding_box`` calculation.
"""
w1, w2 = self.bounding_box(**kwargs)
dw = factor_step * self.sigma
if self._n_models == 1:
w = np.arange(w1, w2, dw)
else:
w = list(map(np.arange, w1, w2, dw))
return np.asarray(w)
def integrate(self, x):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
x = u.Quantity(x, u.AA)
x_0 = u.Quantity(self.x_0, u.AA)
sig = u.Quantity(self.sigma, u.AA)
# Roots, where y=0
root_left = x_0 - sig
root_right = x_0 + sig
x_min = min(x)
x_max = max(x)
if x_min >= root_left or x_max <= root_right:
raise NotImplementedError(
'Partial analytic integration not supported')
sig2 = sig * sig
def _int_subregion(xx1, xx2):
dx_min = xx1 - x_0
dx_max = xx2 - x_0
a1 = dx_min * np.exp(-0.5 * dx_min * dx_min / sig2)
a2 = dx_max * np.exp(-0.5 * dx_max * dx_max / sig2)
return abs(a2 - a1)
# Unsigned area
return self.amplitude * (_int_subregion(x_min, root_left) +
_int_subregion(root_left, root_right) +
_int_subregion(root_right, x_max))
# TODO: Emit proper deprecation warning.
# https://github.com/spacetelescope/synphot_refactor/issues/249
class MexicanHat1D(RickerWavelet1D):
"""This is the deprecated name for `RickerWavelet1D`."""
class PowerLawFlux1D(_models.PowerLaw1D):
"""One dimensional power law model with proper flux handling.
For multiple ``n_models``, this model only accepts parameters of the
same unit; e.g., ``amplitude=[1, 2]`` or
``amplitude=Quantity([1, 2], 'photlam')``.
Also see `~astropy.modeling.powerlaws.PowerLaw1D`.
Parameters
----------
amplitude : number or `~astropy.units.quantity.Quantity`
Model amplitude at the reference point.
If not Quantity, assume the unit of PHOTLAM.
x_0 : number or `~astropy.units.quantity.Quantity`
Reference point.
If not Quantity, assume the unit of Angstrom.
alpha : float
Power law index.
"""
def __init__(self, amplitude, x_0, alpha, **kwargs):
if not isinstance(amplitude, u.Quantity):
amplitude = amplitude * units.PHOTLAM
if (amplitude.unit.physical_type in
('spectral flux density', 'spectral flux density wav',
'photon flux density', 'photon flux density wav')):
self._flux_unit = amplitude.unit
else:
raise NotImplementedError(
'{0} not supported.'.format(amplitude.unit))
if isinstance(x_0, u.Quantity):
x_0 = x_0.to_value(u.AA, u.spectral())
super(PowerLawFlux1D, self).__init__(
amplitude=amplitude.value, x_0=x_0, alpha=alpha, **kwargs)
def evaluate(self, x, *args):
"""Return flux in PHOTLAM. Assume input wavelength is in Angstrom."""
xx = x / self.x_0
y = (self.amplitude * xx ** (-self.alpha)) * self._flux_unit
flux = units.convert_flux(x, y, units.PHOTLAM)
return flux.value
def integrate(self, x):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
x = u.Quantity(x, u.AA)
x_0 = u.Quantity(self.x_0, u.AA)
amp = u.Quantity(self.amplitude, self._flux_unit)
fac = 1 - self.alpha
denom = x_0 ** -self.alpha * fac
return amp * (max(x) ** fac - min(x) ** fac) / denom
class Trapezoid1D(_models.Trapezoid1D):
"""Same as `astropy.modeling.functional_models.Trapezoid1D`, except with
``sampleset`` defined.
"""
def sampleset(self):
"""Return ``x`` array that samples the feature."""
if ASTROPY_LT_5_0:
x1, x4 = self.bounding_box
else:
x1, x4 = tuple(self.bounding_box.bounding_box())
dw = self.width * 0.5
x2 = self.x_0 - dw
x3 = self.x_0 + dw
if self._n_models == 1:
w = [x1, x2, x3, x4]
else:
w = list(zip(x1, x2, x3, x4))
return np.asarray(w)
def integrate(self, *args):
# TODO: Remove unit hardcoding when we use model with units natively.
with u.add_enabled_equivalencies(u.spectral()):
width = u.Quantity(self.width, u.AA)
slope = u.Quantity(self.slope, 1 / u.AA)
return self.amplitude * (width + self.amplitude / slope)
# Functions below are for sampleset magic.
def _get_sampleset(model):
"""Return sampleset of a model or `None` if undefined."""
w = None
if isinstance(model, Model) and hasattr(model, 'sampleset'):
w = model.sampleset()
return w
def _model_tree_evaluate_sampleset(root):
# Not a CompoundModel, grab sampleset and be done.
if not hasattr(root, 'op'):
return _get_sampleset(root)
model1 = root.left
model2 = root.right
# model2 is redshifted, apply the redshift if applicable.
if isinstance(model1, _models.RedshiftScaleFactor):
val = _model_tree_evaluate_sampleset(model2)
if val is None:
w = val
else:
w = model1.inverse(val)
# This should not ever happen, so ignore the redshift.
elif isinstance(model2, _models.RedshiftScaleFactor):
w = _model_tree_evaluate_sampleset(model1)
# One of the models is scaled. Non-redshift scaling does
# not affect sampleset of the model.
elif isinstance(model1, _models.Scale):
w = _model_tree_evaluate_sampleset(model2)
elif isinstance(model2, _models.Scale):
w = _model_tree_evaluate_sampleset(model1)
# Combine sampleset from both models.
else:
w1 = _model_tree_evaluate_sampleset(model1)
w2 = _model_tree_evaluate_sampleset(model2)
w = merge_wavelengths(w1, w2)
return w
def _model_tree_evaluate_sampleset_compat(model):
"""_model_tree_evaluate_sampleset for astropy<4"""
def _get_sampleset_compat(model):
# Return sampleset of a model or `None` if undefined.
# Model could be a real model or evaluated sampleset.
if isinstance(model, Model):
if hasattr(model, 'sampleset'):
w = model.sampleset()
else:
w = None
else:
w = model # Already a sampleset
return w
def _merge_sampleset_compat(model1, model2):
# Simple merge of samplesets.
w1 = _get_sampleset_compat(model1)
w2 = _get_sampleset_compat(model2)
return merge_wavelengths(w1, w2)
def _shift_wavelengths_compat(model1, model2):
# One of the models is either ``RedshiftScaleFactor`` or ``Scale``.
# Possible combos::
# RedshiftScaleFactor | Model
# Scale | Model
# Model | Scale
if isinstance(model1, _models.RedshiftScaleFactor):
val = _get_sampleset_compat(model2)
if val is None:
w = val
else:
w = model1.inverse(val)
elif isinstance(model1, _models.Scale):
w = _get_sampleset_compat(model2)
else:
w = _get_sampleset_compat(model1)
return w
WAVESET_OPERATORS = {
'+': _merge_sampleset_compat,
'-': _merge_sampleset_compat,
'*': _merge_sampleset_compat,
'/': _merge_sampleset_compat,
'**': _merge_sampleset_compat,
'|': _shift_wavelengths_compat,
'&': _merge_sampleset_compat}
if isinstance(model, CompoundModel):
waveset = model._tree.evaluate(WAVESET_OPERATORS, getter=None)
else:
waveset = _get_sampleset_compat(model)
return waveset
def get_waveset(model):
"""Get optimal wavelengths for sampling a given model.
Parameters
----------
model : `~astropy.modeling.Model`
Model.
Returns
-------
waveset : array-like or `None`
Optimal wavelengths. `None` if undefined.
Raises
------
synphot.exceptions.SynphotError
Invalid model.
"""
if not isinstance(model, Model):
raise SynphotError('{0} is not a model.'.format(model))
if ASTROPY_LT_4_0:
waveset = _model_tree_evaluate_sampleset_compat(model)
else:
waveset = _model_tree_evaluate_sampleset(model)
return waveset
# Functions below are for meta magic.
def _get_meta(model):
"""Return metadata of a model."""
w = {}
if isinstance(model, Model):
w = model.meta
return w
def _model_tree_evaluate_metadata(root):
# Not a CompoundModel, grab metadata and be done.
if not hasattr(root, 'op'):
return _get_meta(root)
m1 = _model_tree_evaluate_metadata(root.left)
m2 = _model_tree_evaluate_metadata(root.right)
return metadata.merge(m1, m2, metadata_conflicts='silent')
def _model_tree_evaluate_metadata_compat(model):
"""_model_tree_evaluate_sampleset for astropy<4"""
from collections import defaultdict
def _get_meta_compat(model):
# Return metadata of a model.
# Model could be a real model or evaluated metadata.
if isinstance(model, Model):
w = model.meta
else:
w = model # Already metadata
return w
def _merge_meta_compat(model1, model2):
# Simple merge of samplesets.
m1 = _get_meta_compat(model1)
m2 = _get_meta_compat(model2)
return metadata.merge(m1, m2, metadata_conflicts='silent')
if isinstance(model, CompoundModel):
meta = model._tree.evaluate(
defaultdict(lambda: _merge_meta_compat), getter=None)
else:
meta = deepcopy(model.meta)
return meta
def get_metadata(model):
"""Get metadata for a given model.
Parameters
----------
model : `~astropy.modeling.Model`
Model.
Returns
-------
meta : dict
Metadata for the model.
Raises
------
synphot.exceptions.SynphotError
Invalid model.
"""
if not isinstance(model, Model):
raise SynphotError('{0} is not a model.'.format(model))
if ASTROPY_LT_4_0:
meta = _model_tree_evaluate_metadata_compat(model)
else:
# Deep copy to make sure modiyfing returned metadata
# does not modify input model metadata, especially
# if input model is not a compound model.
meta = deepcopy(_model_tree_evaluate_metadata(model))
return meta
| 30.713852
| 152
| 0.595931
|
57b6cefac16501044d746dd27f7eff7cfe7ef6a1
| 3,829
|
py
|
Python
|
robustcode/parsers/parser.py
|
david-maine/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 8
|
2020-08-16T23:26:37.000Z
|
2021-11-03T06:52:56.000Z
|
robustcode/parsers/parser.py
|
david-maine/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 2
|
2020-08-16T23:11:46.000Z
|
2021-03-30T02:12:23.000Z
|
robustcode/parsers/parser.py
|
eth-sri/robust-code
|
971b7336813bb856fafab368320be47a16919e4c
|
[
"Apache-2.0"
] | 2
|
2021-03-31T04:17:50.000Z
|
2021-10-21T20:49:27.000Z
|
#!/usr/bin/env python
import argparse
import collections
import json
import os
import random
import subprocess
import sys
import requests
from robustcode.analysis.graph import AstTree
def FindSubBinary(binary):
for path in sys.path:
abs_binary_path = os.path.join(os.path.abspath(path), binary)
if os.path.isfile(abs_binary_path):
return abs_binary_path
return binary
Parser = collections.namedtuple(
"Parser", ["language", "file_extension", "path", "prefix"]
)
parsers = [
Parser(
"typescript",
".ts",
FindSubBinary("robustcode/parsers/typescript/parser.js"),
[],
),
]
parsers_server = [] + [
Parser("typescript", ".ts", "http://localhost:{}/api/v1/parse".format(port), None)
for port in range(3000, 3016)
]
def print_ast(nodes, idx=0, depth=0):
print(" " * depth, "id: {}".format(idx), nodes[idx])
if "children" not in nodes[idx]:
return
for child_id in nodes[idx]["children"]:
print_ast(nodes, child_id, depth + 1)
def get_lang_for_filename(filename):
for parser in parsers:
if filename.endswith(parser.file_extension):
return parser.language
return None
def shuffle(data):
random.shuffle(data)
return data
def get_parser_for_filename(filename, server=False):
for parser in parsers if not server else shuffle(parsers_server):
if filename.endswith(parser.file_extension):
return parser
return None
def get_parser_by_name(name, server=False):
for parser in parsers if not server else shuffle(parsers_server):
if name == parser.language:
return parser
return None
"""
Avoid spawning new process whenever file is parsed
The server needs to be started manually before the first call
"""
def parse_file_server(filename, data=None, parser_name=None):
if parser_name is None:
parser = get_parser_for_filename(filename, server=True)
else:
parser = get_parser_by_name(parser_name, server=True)
if parser is None:
return None
headers = {"Content-type": "application/json"}
data = {} if data is None else data
data["filename"] = filename
r = requests.post(url=parser.path, data=json.dumps(data), headers=headers)
if r.text == "SyntaxError":
return None
return json.loads(r.text)
def parse_file(filename, args=None):
parser = get_parser_for_filename(filename)
if parser is None:
return None
args = [] if args is None else args
proc = subprocess.Popen(
parser.prefix + [parser.path, filename] + args, stdout=subprocess.PIPE
)
try:
outs, errs = proc.communicate(timeout=15)
if not outs:
return None
return json.loads(outs)
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
return None
def main():
parser = argparse.ArgumentParser()
parser.add_argument("file", help="File to parse")
parser.add_argument("--use_server", default=False, action="store_true")
parser.add_argument(
"--options", default=[], nargs="*", help="Options passed to the parser"
)
parser.add_argument("--pretty", default=False, action="store_true")
args = parser.parse_args()
if not os.path.isfile(args.file):
print("File '" + args.file + "' not found!")
exit(1)
if not args.use_server:
ast_json = parse_file(args.file, ["--{}".format(key) for key in args.options])
else:
ast_json = parse_file_server(args.file, {key: True for key in args.options})
if not args.pretty:
print(ast_json)
else:
tree = AstTree.fromJson(ast_json, fields=["target"])
print(tree.dumpFieldsAsString(fields=["target"]))
if __name__ == "__main__":
main()
| 25.871622
| 86
| 0.653173
|
19bab1a39fca4116e1e13f27ef5bd12e7243f4f6
| 1,669
|
py
|
Python
|
twitapp/twitter.py
|
bmulas1535/twit-app
|
8d438ed162ee6c0c8b2cccb0fdbc2455a6b7f5ab
|
[
"MIT"
] | null | null | null |
twitapp/twitter.py
|
bmulas1535/twit-app
|
8d438ed162ee6c0c8b2cccb0fdbc2455a6b7f5ab
|
[
"MIT"
] | 3
|
2021-06-08T20:43:12.000Z
|
2022-01-13T01:54:50.000Z
|
twitapp/twitter.py
|
bmulas1535/twit-app
|
8d438ed162ee6c0c8b2cccb0fdbc2455a6b7f5ab
|
[
"MIT"
] | null | null | null |
"""Retrieve tweets, embedding, save into database"""
import basilica
import tweepy
from decouple import config
from twitapp.models import db, Tweets, Tuser
TWITTER_AUTH = tweepy.OAuthHandler(config('TWITTER_CONSUMER_KEY'),
config('TWITTER_CONSUMER_SECRET'))
TWITTER_AUTH.set_access_token(config('TWITTER_ACCESS_TOKEN'),
config('TWITTER_ACCESS_TOKEN_SECRET'))
TWITTER = tweepy.API(TWITTER_AUTH)
BASILICA = basilica.Connection(config('BASILICA_KEY'))
def add_or_update_user(username):
"""Add or update a user and their Tweets, or else raise"""
try:
twitter_user = TWITTER.get_user(username)
db_user = (Tuser.query.get(twitter_user.id) or
Tuser(id=twitter_user.id, name=username))
db.session.add(db_user)
tweets = twitter_user.timeline(count=200, exclude_replies=True,
include_rts=False,
tweet_mode='extended',
since_id=db_user.newest_tweet_id)
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
embedding = BASILICA.embed_sentence(tweet.full_text,
model='twitter')
db_tweet = Tweets(id=tweet.id, content=tweet.full_text[:300],
embedding=embedding)
db_user.tweets.append(db_tweet)
db.session.add(db_tweet)
except Exception as e:
print('Error processing {}: {}'.format(username, e))
raise e
else:
db.session.commit()
| 36.282609
| 73
| 0.589575
|
1a54025f327306a6502bf22bdc5696fd382003b7
| 1,729
|
py
|
Python
|
functions/adjust_cases_functions.py
|
biomac-lab/covid19_forecast
|
6613064f8a6d8023ecbdaddbc2e7525b6ad0796f
|
[
"Apache-2.0"
] | null | null | null |
functions/adjust_cases_functions.py
|
biomac-lab/covid19_forecast
|
6613064f8a6d8023ecbdaddbc2e7525b6ad0796f
|
[
"Apache-2.0"
] | null | null | null |
functions/adjust_cases_functions.py
|
biomac-lab/covid19_forecast
|
6613064f8a6d8023ecbdaddbc2e7525b6ad0796f
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
# Define functions for model
def confirmed_to_onset(confirmed, p_delay, col_name='num_cases', min_onset_date=None):
min_onset_date = pd.to_datetime(min_onset_date)
# Reverse cases so that we convolve into the past
convolved = np.convolve(np.squeeze(confirmed.iloc[::-1].values), p_delay)
# Calculate the new date range
dr = pd.date_range(end=confirmed.index[-1],
periods=len(convolved))
# Flip the values and assign the date range
onset = pd.Series(np.flip(convolved), index=dr, name=col_name)
if min_onset_date:
onset = np.round(onset.loc[min_onset_date:])
else:
onset = np.round(onset.iloc[onset.values>=1])
onset.index.name = 'date'
return pd.DataFrame(onset)
# Smooths cases using a rolling window and gaussian sampling
def prepare_cases(daily_cases, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
daily_cases[out_col] = daily_cases[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2).round()
idx_start = np.searchsorted(daily_cases[out_col], cutoff)
daily_cases[out_col] = daily_cases[out_col].iloc[idx_start:]
return daily_cases
# Smooths cases using a rolling window and gaussian sampling
def smooth_1d(signal, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
signal[out_col] = signal[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2)
idx_start = np.searchsorted(signal[out_col], cutoff)
signal[out_col] = signal[out_col].iloc[idx_start:]
return signal
| 35.285714
| 86
| 0.688259
|
9718090de23d6b4ff645e48b2e25552827e7e4f5
| 1,773
|
py
|
Python
|
linux_metrics/test_mem_stat.py
|
bobf/linux-metrics
|
73e5e68580d4c4018323a2e595a03fff7c8cea35
|
[
"MIT"
] | 48
|
2015-02-06T11:51:59.000Z
|
2021-04-14T10:00:52.000Z
|
linux_metrics/test_mem_stat.py
|
bobf/linux-metrics
|
73e5e68580d4c4018323a2e595a03fff7c8cea35
|
[
"MIT"
] | 2
|
2015-08-25T16:56:17.000Z
|
2019-01-25T16:10:53.000Z
|
linux_metrics/test_mem_stat.py
|
bobf/linux-metrics
|
73e5e68580d4c4018323a2e595a03fff7c8cea35
|
[
"MIT"
] | 20
|
2015-05-01T04:04:20.000Z
|
2021-12-26T07:53:16.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2010-2013 Corey Goldberg (http://goldb.org)
#
# This file is part of linux-metrics
#
# License :: OSI Approved :: MIT License:
# http://www.opensource.org/licenses/mit-license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
from . import mem_stat
import unittest
class TestMemoryStats(unittest.TestCase):
def setUp(self):
(self.mem_active,
self.mem_total,
self.mem_cached,
self.mem_free,
self.swap_total,
self.swap_free) = mem_stat.mem_stats()
def test_mem_active(self):
self.assertTrue(self.mem_active > 0)
def test_mem_total(self):
self.assertTrue(self.mem_total > 0)
def test_mem_cached(self):
self.assertTrue(self.mem_cached > 0)
def test_mem_free(self):
self.assertTrue(self.mem_free > 0)
def test_swap_total(self):
self.assertTrue(self.swap_total > 0)
def test_swap_free(self):
self.assertTrue(self.swap_free > 0)
if __name__ == '__main__':
test_suite = unittest.TestLoader().loadTestsFromTestCase(TestMemoryStats)
unittest.TextTestRunner(verbosity=2).run(test_suite)
| 30.050847
| 84
| 0.681331
|
69c91890572456192e6dc8d3576551a20c36534c
| 100
|
py
|
Python
|
app/api/app/login_signup.py
|
kingyoungfan/v-mall-api
|
86ff05608162f6127d8af59c556ef5586bad8f68
|
[
"MIT"
] | null | null | null |
app/api/app/login_signup.py
|
kingyoungfan/v-mall-api
|
86ff05608162f6127d8af59c556ef5586bad8f68
|
[
"MIT"
] | null | null | null |
app/api/app/login_signup.py
|
kingyoungfan/v-mall-api
|
86ff05608162f6127d8af59c556ef5586bad8f68
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @author: yangyang
# @date 5/4/21 15:30
"""
登录注册接口,只有这个文件下的接口不需要登录验证
"""
| 11.111111
| 24
| 0.59
|
469ea60f51d89c38d596a47a5c98d28812beb465
| 101
|
py
|
Python
|
db_adapter/base/__init__.py
|
CUrW-SL/curw_db_adapter
|
9d9ef24f42080910e0bd251bc7f001b0a4b0ab31
|
[
"MIT"
] | 2
|
2019-04-26T07:50:33.000Z
|
2019-09-28T20:15:33.000Z
|
db_adapter/base/__init__.py
|
CUrW-SL/curw_db_adapter
|
9d9ef24f42080910e0bd251bc7f001b0a4b0ab31
|
[
"MIT"
] | 1
|
2019-04-03T09:30:38.000Z
|
2019-04-20T18:11:59.000Z
|
db_adapter/base/__init__.py
|
shadhini/curw_db_adapter
|
4db8e1ea8794ffbd0dce29ac954a13315e83d843
|
[
"MIT"
] | null | null | null |
from .pymysql_base import get_Pool, destroy_Pool, destroy_Pool, get_connection_for_iterable_cursor
| 25.25
| 98
| 0.871287
|
9ce2725327456b6ccf6f2c80794fc8d0cb5f8928
| 800
|
py
|
Python
|
steenzout/__init__.py
|
mikebohdan/python-object
|
9a757427b5950cda2883b1305a7866437360935a
|
[
"Apache-2.0"
] | 1
|
2015-04-18T01:35:54.000Z
|
2015-04-18T01:35:54.000Z
|
steenzout/__init__.py
|
mikebohdan/python-object
|
9a757427b5950cda2883b1305a7866437360935a
|
[
"Apache-2.0"
] | 21
|
2015-12-24T04:40:30.000Z
|
2019-10-05T22:21:05.000Z
|
steenzout/__init__.py
|
mikebohdan/python-object
|
9a757427b5950cda2883b1305a7866437360935a
|
[
"Apache-2.0"
] | 3
|
2016-08-15T15:46:14.000Z
|
2017-09-12T23:54:39.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Pedro Salgado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""steenzout namespace package."""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| 33.333333
| 74
| 0.74875
|
3007575572f347f07e88d520b286ae5d4b0e8911
| 2,987
|
py
|
Python
|
dbt_gen/py3env/lib/python3.5/site-packages/boto3/__init__.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/lib/python3.5/site-packages/boto3/__init__.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/lib/python3.5/site-packages/boto3/__init__.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.5.26'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
constructor. There is no need to call this unless you wish to pass custom
parameters, because a default session will be created for you.
"""
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
By default, this logs all boto3 messages to ``stdout``.
>>> import boto3
>>> boto3.set_stream_logger('boto3.resources', logging.INFO)
:type name: string
:param name: Log name
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type format_string: str
:param format_string: Log message format
"""
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
"""
Get the default session, creating one if needed.
:rtype: :py:class:`~boto3.session.Session`
:return: The default session
"""
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(*args, **kwargs):
"""
Create a low-level service client by name using the default session.
See :py:meth:`boto3.session.Session.client`.
"""
return _get_default_session().client(*args, **kwargs)
def resource(*args, **kwargs):
"""
Create a resource service client by name using the default session.
See :py:meth:`boto3.session.Session.resource`.
"""
return _get_default_session().resource(*args, **kwargs)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
| 29
| 81
| 0.705055
|
bfee66e86d8cfbfa37e012361f574917f81842d1
| 620
|
py
|
Python
|
Demo/sgi/audio_stdwin/vumeter.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Demo/sgi/audio_stdwin/vumeter.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Demo/sgi/audio_stdwin/vumeter.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
#! /usr/local/python
import audio
import stdwin
from VUMeter import VUMeter
from WindowParent import WindowParent
import MainLoop
NBUFS=20
BUFSIZE = NBUFS*48
SCALE=128
class MyVUMeter(VUMeter):
def init_reactivity(self):
self.parent.need_mouse(self)
def mouse_down(self, detail):
if self.enabled:
self.stop()
else:
self.start()
def mouse_move(self, detail): pass
def mouse_up(self, detail): pass
def main():
audio.setrate(3)
audio.setoutgain(0)
w = WindowParent().create('VU Meter', (200, 100))
v = MyVUMeter().define(w)
v.start()
w.realize()
while 1:
w.dispatch(stdwin.getevent())
main()
| 17.222222
| 50
| 0.719355
|
732469eb4e760dfdb14730412b65c8d2635ab6e0
| 18,960
|
py
|
Python
|
frappe/model/db_schema.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | null | null | null |
frappe/model/db_schema.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | 9
|
2020-03-24T18:09:18.000Z
|
2022-03-12T00:12:04.000Z
|
frappe/model/db_schema.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | 3
|
2018-01-16T17:59:55.000Z
|
2019-09-24T16:02:10.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Syncs a database table to the `DocType` (metadata)
.. note:: This module is only used internally
"""
import re
import os
import frappe
from frappe import _
from frappe.utils import cstr, cint, flt
# imports - third-party imports
import pymysql
from pymysql.constants import ER
class InvalidColumnName(frappe.ValidationError): pass
varchar_len = '140'
standard_varchar_columns = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')
type_map = {
'Currency': ('decimal', '18,6'),
'Int': ('int', '11'),
'Long Int': ('bigint', '20'), # convert int to bigint if length is more than 11
'Float': ('decimal', '18,6'),
'Percent': ('decimal', '18,6'),
'Check': ('int', '1'),
'Small Text': ('text', ''),
'Long Text': ('longtext', ''),
'Code': ('longtext', ''),
'Text Editor': ('longtext', ''),
'Date': ('date', ''),
'Datetime': ('datetime', '6'),
'Time': ('time', '6'),
'Text': ('text', ''),
'Data': ('varchar', varchar_len),
'Link': ('varchar', varchar_len),
'Dynamic Link': ('varchar', varchar_len),
'Password': ('varchar', varchar_len),
'Select': ('varchar', varchar_len),
'Read Only': ('varchar', varchar_len),
'Attach': ('text', ''),
'Attach Image': ('text', ''),
'Signature': ('longtext', ''),
'Color': ('varchar', varchar_len),
'Barcode': ('longtext', ''),
'Geolocation': ('longtext', '')
}
default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner',
'docstatus', 'parent', 'parentfield', 'parenttype', 'idx']
optional_columns = ["_user_tags", "_comments", "_assign", "_liked_by"]
default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
def updatedb(dt, meta=None):
"""
Syncs a `DocType` to the table
* creates if required
* updates columns
* updates indices
"""
res = frappe.db.sql("select issingle from tabDocType where name=%s", (dt,))
if not res:
raise Exception('Wrong doctype "%s" in updatedb' % dt)
if not res[0][0]:
tab = DbTable(dt, 'tab', meta)
tab.validate()
frappe.db.commit()
tab.sync()
frappe.db.begin()
class DbTable:
def __init__(self, doctype, prefix = 'tab', meta = None):
self.doctype = doctype
self.name = prefix + doctype
self.columns = {}
self.current_columns = {}
self.meta = meta
if not self.meta:
self.meta = frappe.get_meta(self.doctype)
# lists for change
self.add_column = []
self.change_type = []
self.add_index = []
self.drop_index = []
self.set_default = []
# load
self.get_columns_from_docfields()
def validate(self):
"""Check if change in varchar length isn't truncating the columns"""
if self.is_new():
return
self.get_columns_from_db()
columns = [frappe._dict({"fieldname": f, "fieldtype": "Data"}) for f in standard_varchar_columns]
columns += self.columns.values()
for col in columns:
if len(col.fieldname) >= 64:
frappe.throw(_("Fieldname is limited to 64 characters ({0})")
.format(frappe.bold(col.fieldname)))
if col.fieldtype in type_map and type_map[col.fieldtype][0]=="varchar":
# validate length range
new_length = cint(col.length) or cint(varchar_len)
if not (1 <= new_length <= 1000):
frappe.throw(_("Length of {0} should be between 1 and 1000").format(col.fieldname))
current_col = self.current_columns.get(col.fieldname, {})
if not current_col:
continue
current_type = self.current_columns[col.fieldname]["type"]
current_length = re.findall('varchar\(([\d]+)\)', current_type)
if not current_length:
# case when the field is no longer a varchar
continue
current_length = current_length[0]
if cint(current_length) != cint(new_length):
try:
# check for truncation
max_length = frappe.db.sql("""select max(char_length(`{fieldname}`)) from `tab{doctype}`"""\
.format(fieldname=col.fieldname, doctype=self.doctype))
except pymysql.InternalError as e:
if e.args[0] == ER.BAD_FIELD_ERROR:
# Unknown column 'column_name' in 'field list'
continue
else:
raise
if max_length and max_length[0][0] and max_length[0][0] > new_length:
if col.fieldname in self.columns:
self.columns[col.fieldname].length = current_length
frappe.msgprint(_("Reverting length to {0} for '{1}' in '{2}'; Setting the length as {3} will cause truncation of data.")\
.format(current_length, col.fieldname, self.doctype, new_length))
def sync(self):
if self.is_new():
self.create()
else:
self.alter()
def is_new(self):
return self.name not in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name)
def create(self):
add_text = ''
# columns
column_defs = self.get_column_definitions()
if column_defs: add_text += ',\n'.join(column_defs) + ',\n'
# index
index_defs = self.get_index_definitions()
if index_defs: add_text += ',\n'.join(index_defs) + ',\n'
# create table
frappe.db.sql("""create table `%s` (
name varchar({varchar_len}) not null primary key,
creation datetime(6),
modified datetime(6),
modified_by varchar({varchar_len}),
owner varchar({varchar_len}),
docstatus int(1) not null default '0',
parent varchar({varchar_len}),
parentfield varchar({varchar_len}),
parenttype varchar({varchar_len}),
idx int(8) not null default '0',
%sindex parent(parent),
index modified(modified))
ENGINE={engine}
ROW_FORMAT=COMPRESSED
CHARACTER SET=utf8mb4
COLLATE=utf8mb4_unicode_ci""".format(varchar_len=varchar_len,
engine=self.meta.get("engine") or 'InnoDB') % (self.name, add_text))
def get_column_definitions(self):
column_list = [] + default_columns
ret = []
for k in self.columns.keys():
if k not in column_list:
d = self.columns[k].get_definition()
if d:
ret.append('`'+ k + '` ' + d)
column_list.append(k)
return ret
def get_index_definitions(self):
ret = []
for key, col in self.columns.items():
if col.set_index and not col.unique and col.fieldtype in type_map and \
type_map.get(col.fieldtype)[0] not in ('text', 'longtext'):
ret.append('index `' + key + '`(`' + key + '`)')
return ret
def get_columns_from_docfields(self):
"""
get columns from docfields and custom fields
"""
fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1)
lengths = {}
precisions = {}
uniques = {}
# optional fields like _comments
if not self.meta.istable:
for fieldname in optional_columns:
fl.append({
"fieldname": fieldname,
"fieldtype": "Text"
})
# add _seen column if track_seen
if getattr(self.meta, 'track_seen', False):
fl.append({
'fieldname': '_seen',
'fieldtype': 'Text'
})
if not frappe.flags.in_install_db and frappe.flags.in_install != "frappe":
custom_fl = frappe.db.sql("""\
SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1)
if custom_fl: fl += custom_fl
# apply length, precision and unique from property setters
for ps in frappe.get_all("Property Setter", fields=["field_name", "property", "value"],
filters={
"doc_type": self.doctype,
"doctype_or_field": "DocField",
"property": ["in", ["precision", "length", "unique"]]
}):
if ps.property=="length":
lengths[ps.field_name] = cint(ps.value)
elif ps.property=="precision":
precisions[ps.field_name] = cint(ps.value)
elif ps.property=="unique":
uniques[ps.field_name] = cint(ps.value)
for f in fl:
self.columns[f['fieldname']] = DbColumn(self, f['fieldname'],
f['fieldtype'], lengths.get(f["fieldname"]) or f.get('length'), f.get('default'), f.get('search_index'),
f.get('options'), uniques.get(f["fieldname"], f.get('unique')), precisions.get(f['fieldname']) or f.get('precision'))
def get_columns_from_db(self):
self.show_columns = frappe.db.sql("desc `%s`" % self.name)
for c in self.show_columns:
self.current_columns[c[0].lower()] = {'name': c[0],
'type':c[1], 'index':c[3]=="MUL", 'default':c[4], "unique":c[3]=="UNI"}
# GET foreign keys
def get_foreign_keys(self):
fk_list = []
txt = frappe.db.sql("show create table `%s`" % self.name)[0][1]
for line in txt.split('\n'):
if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1:
try:
fk_list.append((line.split('`')[3], line.split('`')[1]))
except IndexError:
pass
return fk_list
# Drop foreign keys
def drop_foreign_keys(self):
if not self.drop_foreign_key:
return
fk_list = self.get_foreign_keys()
# make dictionary of constraint names
fk_dict = {}
for f in fk_list:
fk_dict[f[0]] = f[1]
# drop
for col in self.drop_foreign_key:
frappe.db.sql("set foreign_key_checks=0")
frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname]))
frappe.db.sql("set foreign_key_checks=1")
def alter(self):
for col in self.columns.values():
col.build_for_alter_table(self.current_columns.get(col.fieldname.lower(), None))
query = []
for col in self.add_column:
query.append("add column `{}` {}".format(col.fieldname, col.get_definition()))
for col in self.change_type:
current_def = self.current_columns.get(col.fieldname.lower(), None)
query.append("change `{}` `{}` {}".format(current_def["name"], col.fieldname, col.get_definition()))
for col in self.add_index:
# if index key not exists
if not frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname))
for col in self.drop_index:
if col.fieldname != 'name': # primary key
# if index key exists
if frappe.db.sql("""show index from `{0}`
where key_name=%s
and Non_unique=%s""".format(self.name), (col.fieldname, col.unique)):
query.append("drop index `{}`".format(col.fieldname))
for col in self.set_default:
if col.fieldname=="name":
continue
if col.fieldtype in ("Check", "Int"):
col_default = cint(col.default)
elif col.fieldtype in ("Currency", "Float", "Percent"):
col_default = flt(col.default)
elif not col.default:
col_default = "null"
else:
col_default = '"{}"'.format(col.default.replace('"', '\\"'))
query.append('alter column `{}` set default {}'.format(col.fieldname, col_default))
if query:
try:
frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query)))
except Exception as e:
# sanitize
if e.args[0]==1060:
frappe.throw(str(e))
elif e.args[0]==1062:
fieldname = str(e).split("'")[-2]
frappe.throw(_("{0} field cannot be set as unique in {1}, as there are non-unique existing values".format(fieldname, self.name)))
else:
raise e
class DbColumn:
def __init__(self, table, fieldname, fieldtype, length, default,
set_index, options, unique, precision):
self.table = table
self.fieldname = fieldname
self.fieldtype = fieldtype
self.length = length
self.set_index = set_index
self.default = default
self.options = options
self.unique = unique
self.precision = precision
def get_definition(self, with_default=1):
column_def = get_definition(self.fieldtype, precision=self.precision, length=self.length)
if not column_def:
return column_def
if self.fieldtype in ("Check", "Int"):
default_value = cint(self.default) or 0
column_def += ' not null default {0}'.format(default_value)
elif self.fieldtype in ("Currency", "Float", "Percent"):
default_value = flt(self.default) or 0
column_def += ' not null default {0}'.format(default_value)
elif self.default and (self.default not in default_shortcuts) \
and not self.default.startswith(":") and column_def not in ('text', 'longtext'):
column_def += ' default "' + self.default.replace('"', '\"') + '"'
if self.unique and (column_def not in ('text', 'longtext')):
column_def += ' unique'
return column_def
def build_for_alter_table(self, current_def):
column_def = get_definition(self.fieldtype, self.precision, self.length)
# no columns
if not column_def:
return
# to add?
if not current_def:
self.fieldname = validate_column_name(self.fieldname)
self.table.add_column.append(self)
return
# type
if (current_def['type'] != column_def) or\
self.fieldname != current_def['name'] or\
((self.unique and not current_def['unique']) and column_def not in ('text', 'longtext')):
self.table.change_type.append(self)
else:
# default
if (self.default_changed(current_def) \
and (self.default not in default_shortcuts) \
and not cstr(self.default).startswith(":") \
and not (column_def in ['text','longtext'])):
self.table.set_default.append(self)
# index should be applied or dropped irrespective of type change
if ( (current_def['index'] and not self.set_index and not self.unique)
or (current_def['unique'] and not self.unique) ):
# to drop unique you have to drop index
self.table.drop_index.append(self)
elif (not current_def['index'] and self.set_index) and not (column_def in ('text', 'longtext')):
self.table.add_index.append(self)
def default_changed(self, current_def):
if "decimal" in current_def['type']:
return self.default_changed_for_decimal(current_def)
else:
return current_def['default'] != self.default
def default_changed_for_decimal(self, current_def):
try:
if current_def['default'] in ("", None) and self.default in ("", None):
# both none, empty
return False
elif current_def['default'] in ("", None):
try:
# check if new default value is valid
float(self.default)
return True
except ValueError:
return False
elif self.default in ("", None):
# new default value is empty
return True
else:
# NOTE float() raise ValueError when "" or None is passed
return float(current_def['default'])!=float(self.default)
except TypeError:
return True
class DbManager:
"""
Basically, a wrapper for oft-used mysql commands. like show tables,databases, variables etc...
#TODO:
0. Simplify / create settings for the restore database source folder
0a. Merge restore database and extract_sql(from frappe_server_tools).
1. Setter and getter for different mysql variables.
2. Setter and getter for mysql variables at global level??
"""
def __init__(self,db):
"""
Pass root_conn here for access to all databases.
"""
if db:
self.db = db
def get_current_host(self):
return self.db.sql("select user()")[0][0].split('@')[1]
def get_variables(self,regex):
"""
Get variables that match the passed pattern regex
"""
return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex))
def get_table_schema(self,table):
"""
Just returns the output of Desc tables.
"""
return list(self.db.sql("DESC `%s`"%table))
def get_tables_list(self,target=None):
"""get list of tables"""
if target:
self.db.use(target)
return [t[0] for t in self.db.sql("SHOW TABLES")]
def create_user(self, user, password, host=None):
#Create user if it doesn't exist.
if not host:
host = self.get_current_host()
if password:
self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user[:16], host, password))
else:
self.db.sql("CREATE USER '%s'@'%s';" % (user[:16], host))
def delete_user(self, target, host=None):
if not host:
host = self.get_current_host()
try:
self.db.sql("DROP USER '%s'@'%s';" % (target, host))
except Exception as e:
if e.args[0]==1396:
pass
else:
raise
def create_database(self,target):
if target in self.get_database_list():
self.drop_database(target)
self.db.sql("CREATE DATABASE `%s` ;" % target)
def drop_database(self,target):
self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target)
def grant_all_privileges(self, target, user, host=None):
if not host:
host = self.get_current_host()
self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target,
user, host))
def grant_select_privilges(self, db, table, user, host=None):
if not host:
host = self.get_current_host()
if table:
self.db.sql("GRANT SELECT ON %s.%s to '%s'@'%s';" % (db, table, user, host))
else:
self.db.sql("GRANT SELECT ON %s.* to '%s'@'%s';" % (db, user, host))
def flush_privileges(self):
self.db.sql("FLUSH PRIVILEGES")
def get_database_list(self):
"""get list of databases"""
return [d[0] for d in self.db.sql("SHOW DATABASES")]
def restore_database(self,target,source,user,password):
from frappe.utils import make_esc
esc = make_esc('$ ')
os.system("mysql -u %s -p%s -h%s %s < %s" % \
(esc(user), esc(password), esc(frappe.db.host), esc(target), source))
def drop_table(self,table_name):
"""drop table if exists"""
if not table_name in self.get_tables_list():
return
self.db.sql("DROP TABLE IF EXISTS %s "%(table_name))
def validate_column_name(n):
special_characters = re.findall("[\W]", n, re.UNICODE)
if special_characters:
special_characters = ", ".join('"{0}"'.format(c) for c in special_characters)
frappe.throw(_("Fieldname {0} cannot have special characters like {1}").format(frappe.bold(cstr(n)), special_characters), InvalidColumnName)
return n
def validate_column_length(fieldname):
""" In MySQL maximum column length is 64 characters,
ref: https://dev.mysql.com/doc/refman/5.5/en/identifiers.html"""
if len(fieldname) > 64:
frappe.throw(_("Fieldname is limited to 64 characters ({0})").format(fieldname))
def remove_all_foreign_keys():
frappe.db.sql("set foreign_key_checks = 0")
frappe.db.commit()
for t in frappe.db.sql("select name from tabDocType where issingle=0"):
dbtab = DbTable(t[0])
try:
fklist = dbtab.get_foreign_keys()
except Exception as e:
if e.args[0]==1146:
fklist = []
else:
raise
for f in fklist:
frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1]))
def get_definition(fieldtype, precision=None, length=None):
d = type_map.get(fieldtype)
# convert int to long int if the length of the int is greater than 11
if fieldtype == "Int" and length and length>11:
d = type_map.get("Long Int")
if not d:
return
coltype = d[0]
size = None
if d[1]:
size = d[1]
if size:
if fieldtype in ["Float", "Currency", "Percent"] and cint(precision) > 6:
size = '21,9'
if coltype == "varchar" and length:
size = length
if size is not None:
coltype = "{coltype}({size})".format(coltype=coltype, size=size)
return coltype
def add_column(doctype, column_name, fieldtype, precision=None):
if column_name in frappe.db.get_table_columns(doctype):
# already exists
return
frappe.db.commit()
frappe.db.sql("alter table `tab%s` add column %s %s" % (doctype,
column_name, get_definition(fieldtype, precision)))
| 29.905363
| 142
| 0.664768
|
1a01be00c9592661de79b0c2130c3f998679576a
| 332
|
py
|
Python
|
2020/day1/part1.py
|
FranciscoAT/advent-of-code
|
69f20696e4c59ff6dfa010b22dd3593ea3d12208
|
[
"MIT"
] | null | null | null |
2020/day1/part1.py
|
FranciscoAT/advent-of-code
|
69f20696e4c59ff6dfa010b22dd3593ea3d12208
|
[
"MIT"
] | null | null | null |
2020/day1/part1.py
|
FranciscoAT/advent-of-code
|
69f20696e4c59ff6dfa010b22dd3593ea3d12208
|
[
"MIT"
] | null | null | null |
from pathlib import Path
def main() -> None:
values = [int(value.rstrip()) for value in Path("day-1.in").open().readlines()]
for value in values:
required_value = 2020 - value
if required_value in values:
print(value * required_value)
break
if __name__ == "__main__":
main()
| 20.75
| 83
| 0.599398
|
e8bb02426f3a563e1721c5c70de7ae8c6056bdb2
| 20,675
|
py
|
Python
|
autotest/test_gwt_dsp02.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | 3
|
2019-07-10T21:16:57.000Z
|
2021-10-08T00:56:20.000Z
|
autotest/test_gwt_dsp02.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | null | null | null |
autotest/test_gwt_dsp02.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | 3
|
2019-11-28T16:26:50.000Z
|
2020-02-05T11:08:37.000Z
|
"""
MODFLOW 6 Autotest
Test the dispersion schemes in the gwt dispersion package for a one-dimensional
model grid of triangular cells. The cells are created by starting with a
regular grid of squares and then cutting every cell into a triangle, except
the first and last.
"""
import os
import pytest
import sys
import numpy as np
try:
import flopy
import flopy.utils.cvfdutil
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["dsp02a", "dsp02b"]
xt3d = [True, False]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
def grid_triangulator(itri, delr, delc):
nrow, ncol = itri.shape
if np.isscalar(delr):
delr = delr * np.ones(ncol)
if np.isscalar(delc):
delc = delc * np.ones(nrow)
regular_grid = flopy.discretization.StructuredGrid(delc, delr)
vertdict = {}
icell = 0
for i in range(nrow):
for j in range(ncol):
vs = regular_grid.get_cell_vertices(i, j)
if itri[i, j] == 0:
vertdict[icell] = [vs[0], vs[1], vs[2], vs[3], vs[0]]
icell += 1
elif itri[i, j] == 1:
vertdict[icell] = [vs[0], vs[1], vs[3], vs[0]]
icell += 1
vertdict[icell] = [vs[3], vs[1], vs[2], vs[3]]
icell += 1
elif itri[i, j] == 2:
vertdict[icell] = [vs[0], vs[2], vs[3], vs[0]]
icell += 1
vertdict[icell] = [vs[0], vs[1], vs[2], vs[0]]
icell += 1
else:
raise Exception("Unknown itri value: {}".format(itri[i, j]))
verts, iverts = flopy.utils.cvfdutil.to_cvfd(vertdict)
return verts, iverts
def cvfd_to_cell2d(verts, iverts):
vertices = []
for i in range(verts.shape[0]):
x = verts[i, 0]
y = verts[i, 1]
vertices.append([i, x, y])
cell2d = []
for icell2d, vs in enumerate(iverts):
points = [tuple(verts[ip]) for ip in vs]
xc, yc = flopy.utils.cvfdutil.centroid_of_polygon(points)
cell2d.append([icell2d, xc, yc, len(vs), *vs])
return vertices, cell2d
def build_model(idx, dir):
nlay, nrow, ncol = 1, 1, 100
nper = 1
perlen = [5.0]
nstp = [200]
tsmult = [1.0]
steady = [True]
delr = 1.0
delc = 1.0
top = 1.0
botm = [0.0]
strt = 1.0
hk = 1.0
laytyp = 0
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=gwfname,
save_flows=True,
model_nam_file="{}.nam".format(gwfname),
)
# create iterative model solution and register the gwf model with it
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="CG",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
sim.register_ims_package(imsgwf, [gwf.name])
itri = np.zeros((nrow, ncol), dtype=int)
itri[:, 1 : ncol - 1] = 1
verts, iverts = grid_triangulator(itri, delr, delc)
vertices, cell2d = cvfd_to_cell2d(verts, iverts)
ncpl = len(cell2d)
nvert = len(verts)
# constant heads on left and right so there is no flow
c = {0: [((0, 0), 0.0000000), ((0, ncpl - 1), 0.0000000)]}
disv = flopy.mf6.ModflowGwfdisv(
gwf,
nlay=nlay,
ncpl=ncpl,
nvert=nvert,
top=top,
botm=botm,
vertices=vertices,
cell2d=cell2d,
filename="{}.disv".format(gwfname),
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(
gwf, strt=strt, filename="{}.ic".format(gwfname)
)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_flows=False,
icelltype=laytyp,
k=hk,
k33=hk,
save_specific_discharge=True,
)
# chd files
chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(
gwf,
maxbound=len(c),
stress_period_data=c,
save_flows=False,
pname="CHD-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# create gwt model
gwtname = "gwt_" + name
gwt = flopy.mf6.MFModel(
sim,
model_type="gwt6",
modelname=gwtname,
model_nam_file="{}.nam".format(gwtname),
)
gwt.name_file.save_flows = True
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwtname),
)
sim.register_ims_package(imsgwt, [gwt.name])
disv = flopy.mf6.ModflowGwtdisv(
gwt,
nlay=nlay,
ncpl=ncpl,
nvert=nvert,
top=top,
botm=botm,
vertices=vertices,
cell2d=cell2d,
filename="{}.disv".format(gwtname),
)
# initial conditions
ic = flopy.mf6.ModflowGwtic(
gwt, strt=0.0, filename="{}.ic".format(gwtname)
)
# advection
adv = flopy.mf6.ModflowGwtadv(
gwt, scheme="upstream", filename="{}.adv".format(gwtname)
)
# dispersion
xt3d_off = not xt3d[idx]
dsp = flopy.mf6.ModflowGwtdsp(
gwt,
xt3d_off=xt3d_off,
diffc=100.0,
alh=0.0,
alv=0.0,
ath1=0.0,
atv=0.0,
filename="{}.dsp".format(gwtname),
)
# mass storage and transfer
mst = flopy.mf6.ModflowGwtmst(gwt, porosity=0.1)
# constant concentration
cncs = {0: [[(0, 0), 1.0]]}
cnc = flopy.mf6.ModflowGwtcnc(
gwt, stress_period_data=cncs, save_flows=False, pname="CNC-1"
)
# sources
ssm = flopy.mf6.ModflowGwtssm(
gwt, sources=[[]], filename="{}.ssm".format(gwtname)
)
# output control
oc = flopy.mf6.ModflowGwtoc(
gwt,
budget_filerecord="{}.cbc".format(gwtname),
concentration_filerecord="{}.ucn".format(gwtname),
concentrationprintrecord=[
("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
],
saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
)
# GWF GWT exchange
gwfgwt = flopy.mf6.ModflowGwfgwt(
sim,
exgtype="GWF6-GWT6",
exgmnamea=gwfname,
exgmnameb=gwtname,
filename="{}.gwfgwt".format(name),
)
return sim, None
def eval_transport(sim):
print("evaluating transport...")
name = ex[sim.idxsim]
gwtname = "gwt_" + name
fpth = os.path.join(sim.simpath, "{}.ucn".format(gwtname))
try:
cobj = flopy.utils.HeadFile(
fpth, precision="double", text="CONCENTRATION"
)
conc = cobj.get_data()
except:
assert False, 'could not load data from "{}"'.format(fpth)
# This is the answer to this problem. These concentrations are for
# time step 200.
cres1 = [
[
[
1.0,
0.97843231,
0.97001134,
0.95317805,
0.94476996,
0.9279709,
0.91958413,
0.90283612,
0.89447905,
0.87779875,
0.86947965,
0.85288352,
0.84461056,
0.82811481,
0.81989601,
0.80351654,
0.79535977,
0.77911214,
0.77102507,
0.75492445,
0.74691457,
0.73097567,
0.72305022,
0.70728731,
0.69945329,
0.68388011,
0.67614426,
0.66077401,
0.65314277,
0.63798806,
0.63046759,
0.61554043,
0.60813657,
0.59344832,
0.5861666,
0.57172796,
0.56457354,
0.55039454,
0.54337227,
0.52946222,
0.52257658,
0.50894408,
0.50219919,
0.48885212,
0.48225173,
0.46919723,
0.46274472,
0.44998918,
0.44368756,
0.43123664,
0.42508856,
0.41294716,
0.40695489,
0.39512719,
0.38929262,
0.37778204,
0.37210672,
0.36091599,
0.35540108,
0.3445322,
0.33917854,
0.3286328,
0.32344087,
0.3132189,
0.30818885,
0.2982906,
0.29342225,
0.28384706,
0.2791399,
0.26988646,
0.26533971,
0.25640614,
0.25201871,
0.24340255,
0.23917307,
0.23087133,
0.22679818,
0.21880735,
0.21488866,
0.20720475,
0.20343841,
0.19605698,
0.19244068,
0.18535688,
0.18188808,
0.17509667,
0.17177266,
0.16526803,
0.16208593,
0.15586217,
0.15281894,
0.14686982,
0.14396229,
0.13828133,
0.13550621,
0.13008669,
0.12744057,
0.12227559,
0.11975497,
0.11483745,
0.11243875,
0.10776147,
0.10548106,
0.10103668,
0.09887086,
0.09465197,
0.09259703,
0.08859614,
0.08664832,
0.08285793,
0.08101347,
0.07742605,
0.07568121,
0.07228925,
0.07064027,
0.06743631,
0.06587947,
0.06285607,
0.06138769,
0.05853752,
0.05715393,
0.05446974,
0.05316736,
0.050642,
0.04941727,
0.04704373,
0.04589318,
0.04366456,
0.04258479,
0.04049435,
0.03948204,
0.03752321,
0.03657509,
0.03474147,
0.0338544,
0.03213974,
0.03131065,
0.02970893,
0.02893484,
0.02744019,
0.02671823,
0.02532501,
0.02465239,
0.02335515,
0.02272921,
0.02152269,
0.02094085,
0.01982002,
0.01927982,
0.01823984,
0.01773891,
0.01677517,
0.01631126,
0.01541935,
0.0149903,
0.01416602,
0.01376979,
0.01300915,
0.0126438,
0.01194301,
0.01160669,
0.01096219,
0.01065317,
0.01006157,
0.00977821,
0.00923636,
0.00897713,
0.00848205,
0.0082455,
0.00779441,
0.00757922,
0.00716954,
0.00697446,
0.0066038,
0.00642767,
0.00609383,
0.0059356,
0.00563654,
0.00549525,
0.00522914,
0.00510391,
0.00486909,
0.00475913,
0.0045541,
0.00445871,
0.00428216,
0.00420072,
0.00405151,
0.0039835,
0.00386064,
0.0038056,
0.00370829,
0.00366586,
0.00359344,
0.00356334,
0.00351533,
0.00349735,
0.00346792,
]
]
]
cres1 = np.array(cres1)
cres2 = [
[
[
1.0,
0.9789382,
0.97051702,
0.95368454,
0.94527607,
0.92847748,
0.92009017,
0.90334227,
0.89498447,
0.87830394,
0.86998394,
0.85338722,
0.84511319,
0.82861649,
0.82039646,
0.80401567,
0.79585751,
0.7796082,
0.77151959,
0.75541693,
0.74740534,
0.73146407,
0.72353676,
0.70777112,
0.69993509,
0.68435885,
0.67662084,
0.6612472,
0.65361367,
0.63845524,
0.63093235,
0.61600116,
0.60859476,
0.59390218,
0.58661778,
0.57217453,
0.56501733,
0.55083343,
0.54380828,
0.52989307,
0.52300445,
0.50936655,
0.50261858,
0.48926588,
0.48266233,
0.46960198,
0.46314624,
0.45038465,
0.44407974,
0.43162258,
0.42547115,
0.41332335,
0.40732768,
0.39549342,
0.38965542,
0.37813816,
0.37245936,
0.36126184,
0.35574344,
0.34486766,
0.3395105,
0.32895779,
0.32376235,
0.31353335,
0.30849978,
0.29859447,
0.29372261,
0.28414033,
0.27942969,
0.27016915,
0.26561893,
0.25667827,
0.25228741,
0.24366417,
0.23943131,
0.23112253,
0.22704603,
0.21904822,
0.21512624,
0.20743541,
0.20366584,
0.19627758,
0.19265809,
0.18556756,
0.18209565,
0.17529761,
0.17197055,
0.16545942,
0.16227435,
0.1560442,
0.15299808,
0.14704271,
0.14413238,
0.13844531,
0.13566746,
0.130242,
0.12759323,
0.12242247,
0.11989929,
0.11497616,
0.11257499,
0.10789226,
0.10560948,
0.10115983,
0.09899173,
0.09476774,
0.0927106,
0.0887048,
0.08675489,
0.08295976,
0.0811133,
0.07752134,
0.07577458,
0.07237826,
0.07072746,
0.06751932,
0.06596074,
0.06293336,
0.06146332,
0.05860935,
0.0572242,
0.05453638,
0.05323252,
0.05070371,
0.0494776,
0.04710077,
0.04594892,
0.04371719,
0.04263619,
0.04054281,
0.03952934,
0.03756773,
0.03661854,
0.03478229,
0.03389421,
0.0321771,
0.03134706,
0.02974302,
0.02896805,
0.02747124,
0.02674846,
0.02535321,
0.02467983,
0.02338069,
0.02275405,
0.02154576,
0.02096328,
0.0198408,
0.0193,
0.0182585,
0.01775702,
0.01679186,
0.01632745,
0.01543423,
0.01500472,
0.01417922,
0.01378257,
0.01302081,
0.01265508,
0.01195326,
0.0116166,
0.01097115,
0.01066181,
0.01006936,
0.00978571,
0.00924308,
0.00898359,
0.00848779,
0.00825101,
0.00779927,
0.00758388,
0.00717361,
0.00697834,
0.00660715,
0.00643086,
0.00609654,
0.00593817,
0.00563869,
0.00549728,
0.0052308,
0.00510545,
0.0048703,
0.00476025,
0.00455494,
0.00445946,
0.00428267,
0.00420117,
0.00405175,
0.00398368,
0.00386065,
0.00380557,
0.00370812,
0.00366566,
0.00359314,
0.00356302,
0.00351495,
0.00349695,
0.00346702,
]
]
]
cres2 = np.array(cres2)
creslist = [cres1, cres2]
assert np.allclose(
creslist[sim.idxsim], conc
), "simulated concentrations do not match with known solution."
return
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the models
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_transport, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# build the models
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_transport, idxsim=idx)
test.run_mf6(sim)
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 26.781088
| 79
| 0.438114
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.