blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0fbc5efe97124e98fc31e9a8ac8fb8c69185eb8f | 98420fdd66b8dce46ef88cd34fcace36777fa232 | /obsolete/py/logreg/nonlinear_cg.py | d638ea1131e453f55add6479b98188360c900d80 | [] | no_license | Daiver/jff | f972fe7464f78ba6008a036b697ea3f04b7010a4 | 33d6a781af8d7f6ae60c25e10051977af2fef1b9 | refs/heads/master | 2023-04-07T06:33:41.487938 | 2022-05-03T10:07:32 | 2022-05-03T10:07:32 | 12,180,634 | 1 | 1 | null | 2023-04-03T19:25:00 | 2013-08-17T15:03:14 | C++ | UTF-8 | Python | false | false | 2,049 | py | import numpy as np
import linesearch
def nonLinCG(func, grad, initialX, nIter):
    '''Minimize ``func`` by nonlinear conjugate gradient (Python 2 code).

    func     -- objective f(x) -> scalar (used here only for the stop test)
    grad     -- gradient function g(x) -> vector
    initialX -- starting point (numpy array)
    nIter    -- maximum number of iterations
    Returns the final iterate x.
    '''
    x = initialX
    r0 = -grad(x)          # initial residual = steepest-descent direction
    d0 = r0                # first search direction
    gradLen = r0.shape[0]  # problem dimension, used for periodic restarts
    curSteps = 0
    for iter in xrange(nIter):
        curSteps += 1
        #alpha = linesearch.quadLineSearchIter(
                #5, 0.00001, 0.0001, x, d0, grad)
        # Step length along d0 from an external quadratic line search.
        alpha = linesearch.quadLineSearch(0.000001, x, -r0, d0, grad)
        x = x + alpha * d0
        r1 = -grad(x)
        # Polak-Ribiere beta: r1.(r1 - r0) / (r0.r0).
        beta = r1.dot(r1 - r0)/(r0.dot(r0))
        if beta < 0:
            # PR+ safeguard: negative beta restarts with steepest descent.
            beta = 0
        elif curSteps > gradLen:
            # Restart every `gradLen` steps to discard stale conjugacy.
            curSteps = 0
            beta = 0
        d0 = r1 + beta * d0
        r0 = r1
        err = func(x)
        print iter, 'err', err
        # Absolute tolerance on the objective value, not on the gradient.
        if err < 0.00001:
            break
    return x
def nonLinCGSeq(func, grad, initialX, nIter):
    '''Same nonlinear CG as nonLinCG, but records the whole trajectory.

    Returns (xs, errs): arrays of every iterate (including the start
    point) and the corresponding objective values, e.g. for plotting.
    NOTE(review): uses a looser line-search tolerance (0.0001) than
    nonLinCG (0.000001) -- presumably intentional for speed; confirm.
    '''
    x = initialX
    xs = [x]
    errs = [func(x)]
    r0 = -grad(x)          # initial residual / steepest-descent direction
    d0 = r0
    gradLen = r0.shape[0]  # dimension, drives periodic restarts
    curSteps = 0
    for iter in xrange(nIter):
        curSteps += 1
        #alpha = linesearch.quadLineSearchIter(
                #5, 0.00001, 0.0001, x, d0, grad)
        alpha = linesearch.quadLineSearch(0.0001, x, -r0, d0, grad)
        x = x + alpha * d0
        r1 = -grad(x)
        # Polak-Ribiere beta with PR+ reset below.
        beta = r1.dot(r1 - r0)/(r0.dot(r0))
        if beta < 0:
            beta = 0
        elif curSteps > gradLen:
            curSteps = 0
            beta = 0
        d0 = r1 + beta * d0
        r0 = r1
        err = func(x)
        xs.append(x)
        errs.append(err)
        print iter, 'err', err
        if err < 0.00001:
            break
    return np.array(xs), np.array(errs)
if __name__ == '__main__':
    # Demo driver on the Rosenbrock function; the actual calls and the
    # plotting code below are all commented out, so running this module
    # currently only performs the scipy import.
    from scipy.optimize import rosen, rosen_der, minimize
    #import plot_rosen
    # print nonLinCG(
            #rosen,
            #rosen_der,
            #np.array([-0.1, -1.0]),
            #50)
    # xss, zs = nonLinCGSeq(
            #rosen,
            #rosen_der,
            #np.array([-2.1, 1.0]),
            #50)
    #xs, ys = xss[:, 0], xss[:, 1]
    #plot_rosen.plotRosenbrock([xs, ys, zs])
| [
"ra22341@ya.ru"
] | ra22341@ya.ru |
d66c873a607287eb0a30c82aefc37211defedc4d | ed54290846b5c7f9556aacca09675550f0af4c48 | /salt/salt/modules/ps.py | bc62b11abac02673a674029c114937e483096fb8 | [
"Apache-2.0"
] | permissive | smallyear/linuxLearn | 87226ccd8745cd36955c7e40cafd741d47a04a6f | 342e5020bf24b5fac732c4275a512087b47e578d | refs/heads/master | 2022-03-20T06:02:25.329126 | 2019-08-01T08:39:59 | 2019-08-01T08:39:59 | 103,765,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,306 | py | # -*- coding: utf-8 -*-
'''
A salt interface to psutil, a system and process library.
See http://code.google.com/p/psutil.
:depends: - psutil Python module, version 0.3.0 or later
- python-utmp package (optional)
'''
# Import python libs
from __future__ import absolute_import
import time
import datetime
# Import salt libs
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0)
except ImportError:
HAS_PSUTIL = False
# pylint: enable=import-error
def __virtual__():
    '''Salt loader hook: decide whether this execution module may load.

    Returns True when a usable psutil (>= 0.3.0) is importable, otherwise
    a (False, reason) tuple that Salt reports to the user.
    '''
    if not HAS_PSUTIL:
        return False, 'The ps module cannot be loaded: python module psutil not installed.'
    # Functions and attributes used in this execution module seem to have been
    # added as of psutil 0.3.0, from an inspection of the source code. Only
    # make this module available if the version of psutil is >= 0.3.0. Note
    # that this may need to be tweaked if we find post-0.3.0 versions which
    # also have problems running the functions in this execution module, but
    # most distributions have already moved to later versions (for example,
    # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.).
    if psutil.version_info >= (0, 3, 0):
        return True
    return False, 'The ps module cannot be loaded: psutil must be version 0.3.0 or greater.'
def _get_proc_cmdline(proc):
    '''
    Returns the cmdline of a Process instance.
    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # psutil >= 2.0 exposes cmdline as a method, older versions as an
        # attribute; PSUTIL2 is computed once at import time.
        return proc.cmdline() if PSUTIL2 else proc.cmdline
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Vanished or protected process: return an empty string rather
        # than propagate (callers treat '' / empty as "no cmdline").
        return ''
def _get_proc_create_time(proc):
    '''
    Returns the create_time of a Process instance.
    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # Method in psutil >= 2.0, plain attribute before that.
        return proc.create_time() if PSUTIL2 else proc.create_time
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Process gone or not readable: signal "unknown" with None.
        return None
def _get_proc_name(proc):
    '''
    Returns the name of a Process instance.
    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # Method in psutil >= 2.0, plain attribute before that.
        return proc.name() if PSUTIL2 else proc.name
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # NOTE(review): the failure fallback is an empty *list*, unlike
        # the '' / None used by the sibling helpers.  Callers only do
        # `pattern in result`, which works for both, but the asymmetry
        # looks accidental -- confirm before changing.
        return []
def _get_proc_status(proc):
    '''
    Returns the status of a Process instance.
    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # Method in psutil >= 2.0, plain attribute before that.
        return proc.status() if PSUTIL2 else proc.status
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Process gone or not readable: signal "unknown" with None.
        return None
def _get_proc_username(proc):
    '''
    Returns the username of a Process instance.
    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # Method in psutil >= 2.0, plain attribute before that.
        return proc.username() if PSUTIL2 else proc.username
    except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):
        # KeyError covers UIDs with no passwd entry (e.g. in containers).
        return None
def _get_proc_pid(proc):
'''
Returns the pid of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
return proc.pid
def top(num_processes=5, interval=3):
    '''
    Return a list of top CPU consuming processes during the interval.

    num_processes = return the top N CPU consuming processes
                    (falsy value, e.g. 0, means "no limit")
    interval = the number of seconds to sample CPU usage over

    Implementation: snapshot per-process CPU time, sleep `interval`
    seconds, snapshot again, then rank processes by the delta.
    CLI Examples:
    .. code-block:: bash
        salt '*' ps.top
        salt '*' ps.top 5 10
    '''
    result = []
    start_usage = {}
    # First pass: record cumulative (user + system) CPU time per process.
    for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
            # NOTE(review): unpacking assumes cpu_times() yields exactly
            # two fields (user, system); newer psutil returns more -- confirm.
            user, system = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        start_usage[process] = user + system
    time.sleep(interval)
    usage = set()
    # Second pass: compute CPU time consumed during the sleep window.
    for process, start in six.iteritems(start_usage):
        try:
            user, system = process.cpu_times()
        except psutil.NoSuchProcess:
            # Process exited during the interval; drop it.
            continue
        now = user + system
        diff = now - start
        usage.add((diff, process))
    # Iterate busiest-first; (diff, process) tuples sort by diff.
    for idx, (diff, process) in enumerate(reversed(sorted(usage))):
        if num_processes and idx >= num_processes:
            break
        # Prefer the full cmdline, fall back to the bare process name.
        if len(_get_proc_cmdline(process)) == 0:
            cmdline = _get_proc_name(process)
        else:
            cmdline = _get_proc_cmdline(process)
        info = {'cmd': cmdline,
                'user': _get_proc_username(process),
                'status': _get_proc_status(process),
                'pid': _get_proc_pid(process),
                'create_time': _get_proc_create_time(process),
                'cpu': {},
                'mem': {},
                }
        # Flatten the psutil namedtuples into plain dicts for YAML output.
        for key, value in six.iteritems(process.cpu_times()._asdict()):
            info['cpu'][key] = value
        for key, value in six.iteritems(process.memory_info()._asdict()):
            info['mem'][key] = value
        result.append(info)
    return result
def get_pid_list():
    '''
    Return a list of process ids (PIDs) for all running processes.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.get_pid_list
    '''
    # Thin wrapper kept for the public Salt module API.
    return psutil.pids()
def proc_info(pid, attrs=None):
    '''
    Return a dictionary of information for a process id (PID).
    CLI Example:
    .. code-block:: bash
        salt '*' ps.proc_info 2322
        salt '*' ps.proc_info 2322 attrs='["pid", "name"]'
    pid
        PID of process to query.
    attrs
        Optional list of desired process attributes. The list of possible
        attributes can be found here:
        http://pythonhosted.org/psutil/#psutil.Process
    '''
    try:
        proc = psutil.Process(pid)
        # None attrs => psutil returns every available attribute.
        return proc.as_dict(attrs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc:
        # AttributeError covers attrs names unknown to this psutil version;
        # re-raise everything as Salt's standard execution error.
        raise CommandExecutionError(exc)
def kill_pid(pid, signal=15):
    '''
    Kill a process by PID.
    .. code-block:: bash
        salt 'minion' ps.kill_pid pid [signal=signal_number]
    pid
        PID of process to kill.
    signal
        Signal to send to the process. See manpage entry for kill
        for possible values. Default: 15 (SIGTERM).
    **Example:**
    Send SIGKILL to process with PID 2000:
    .. code-block:: bash
        salt 'minion' ps.kill_pid 2000 signal=9

    Returns True if the signal was delivered, False if no such process.
    '''
    try:
        psutil.Process(pid).send_signal(signal)
        return True
    except psutil.NoSuchProcess:
        # Target already gone; report failure instead of raising.
        return False
def pkill(pattern, user=None, signal=15, full=False):
    '''
    Kill processes matching a pattern.
    .. code-block:: bash
        salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\
                [full=(true|false)]
    pattern
        Pattern to search for in the process list.
    user
        Limit matches to the given username. Default: All users.
    signal
        Signal to send to the process(es). See manpage entry for kill
        for possible values. Default: 15 (SIGTERM).
    full
        A boolean value indicating whether only the name of the command or
        the full command line should be matched against the pattern.
    **Examples:**
    Send SIGHUP to all httpd processes on all 'www' minions:
    .. code-block:: bash
        salt 'www.*' ps.pkill httpd signal=1
    Send SIGKILL to all bash processes owned by user 'tom':
    .. code-block:: bash
        salt '*' ps.pkill bash signal=9 user=tom

    Returns {'killed': [pids]} or None when nothing matched.
    '''
    killed = []
    for proc in psutil.process_iter():
        # Substring match (not regex) against either the joined cmdline
        # or just the executable name, mirroring pgrep() below.
        name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
            else pattern in _get_proc_name(proc)
        user_match = True if user is None else user == _get_proc_username(proc)
        if name_match and user_match:
            try:
                proc.send_signal(signal)
                killed.append(_get_proc_pid(proc))
            except psutil.NoSuchProcess:
                # Raced with process exit; nothing to do.
                pass
    if not killed:
        return None
    else:
        return {'killed': killed}
def pgrep(pattern, user=None, full=False):
    '''
    Return the pids for processes matching a pattern.
    If full is true, the full command line is searched for a match,
    otherwise only the name of the command is searched.
    .. code-block:: bash
        salt '*' ps.pgrep pattern [user=username] [full=(true|false)]
    pattern
        Pattern to search for in the process list.
    user
        Limit matches to the given username. Default: All users.
    full
        A boolean value indicating whether only the name of the command or
        the full command line should be matched against the pattern.
    **Examples:**
    Find all httpd processes on all 'www' minions:
    .. code-block:: bash
        salt 'www.*' ps.pgrep httpd
    Find all bash processes owned by user 'tom':
    .. code-block:: bash
        salt '*' ps.pgrep bash user=tom

    Returns a list of matching PIDs, or None when nothing matched.
    '''
    procs = []
    for proc in psutil.process_iter():
        # Same substring-matching rules as pkill() above.
        name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
            else pattern in _get_proc_name(proc)
        user_match = True if user is None else user == _get_proc_username(proc)
        if name_match and user_match:
            procs.append(_get_proc_pid(proc))
    return procs or None
def cpu_percent(interval=0.1, per_cpu=False):
    '''
    Return the percent of time the CPU is busy.
    interval
        the number of seconds to sample CPU usage over
    per_cpu
        if True return an array of CPU percent busy for each CPU, otherwise
        aggregate all percents into one number
    CLI Example:
    .. code-block:: bash
        salt '*' ps.cpu_percent
    '''
    # Per-CPU mode: one percentage per core, copied into a plain list so
    # callers never hold a psutil-internal sequence.  Aggregate mode: a
    # single overall busy percentage.
    if per_cpu:
        return list(psutil.cpu_percent(interval, True))
    return psutil.cpu_percent(interval)
def cpu_times(per_cpu=False):
    '''
    Return the percent of time the CPU spends in each state,
    e.g. user, system, idle, nice, iowait, irq, softirq.
    per_cpu
        if True return an array of percents for each CPU, otherwise aggregate
        all percents into one number
    CLI Example:
    .. code-block:: bash
        salt '*' ps.cpu_times
    '''
    # Convert psutil's namedtuple(s) into plain dict(s) for serialization.
    if per_cpu:
        result = [dict(times._asdict()) for times in psutil.cpu_times(True)]
    else:
        result = dict(psutil.cpu_times(per_cpu)._asdict())
    return result
def virtual_memory():
    '''
    .. versionadded:: 2014.7.0
    Return a dict that describes statistics about system memory usage.
    .. note::
        This function is only available in psutil version 0.6.0 and above.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.virtual_memory
    '''
    # virtual_memory() first appeared in psutil 0.6.0; fail loudly with a
    # Salt error instead of an AttributeError on older installs.
    if psutil.version_info < (0, 6, 0):
        msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
        raise CommandExecutionError(msg)
    return dict(psutil.virtual_memory()._asdict())
def swap_memory():
    '''
    .. versionadded:: 2014.7.0
    Return a dict that describes swap memory statistics.
    .. note::
        This function is only available in psutil version 0.6.0 and above.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.swap_memory
    '''
    # swap_memory() first appeared in psutil 0.6.0; fail loudly with a
    # Salt error instead of an AttributeError on older installs.
    if psutil.version_info < (0, 6, 0):
        msg = 'swap_memory is only available in psutil 0.6.0 or greater'
        raise CommandExecutionError(msg)
    return dict(psutil.swap_memory()._asdict())
def disk_partitions(all=False):
    '''
    Return a list of disk partitions and their device, mount point, and
    filesystem type.
    all
        if set to False, only return local, physical partitions (hard disk,
        USB, CD/DVD partitions). If True, return all filesystems.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.disk_partitions
    '''
    # NOTE: the parameter name `all` shadows the builtin, but it is part
    # of the public CLI interface and must stay.
    result = [dict(partition._asdict()) for partition in
              psutil.disk_partitions(all)]
    return result
def disk_usage(path):
    '''
    Given a path, return a dict listing the total available space as well as
    the free space, and used space.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.disk_usage /home
    '''
    # Namedtuple -> plain dict so the result serializes cleanly.
    return dict(psutil.disk_usage(path)._asdict())
def disk_partition_usage(all=False):
    '''
    Return a list of disk partitions plus the mount point, filesystem and usage
    statistics.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.disk_partition_usage
    '''
    # Merge per-mountpoint usage stats into each partition record from
    # disk_partitions() above.
    result = disk_partitions(all)
    for partition in result:
        partition.update(disk_usage(partition['mountpoint']))
    return result
def total_physical_memory():
    '''
    Return the total number of bytes of physical memory.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.total_physical_memory
    '''
    try:
        return psutil.virtual_memory().total
    except AttributeError:
        # TOTAL_PHYMEM is deprecated but with older psutil versions this is
        # needed as a fallback.
        return psutil.TOTAL_PHYMEM
def num_cpus():
    '''
    Return the number of CPUs.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.num_cpus
    '''
    try:
        return psutil.cpu_count()
    except AttributeError:
        # NUM_CPUS is deprecated but with older psutil versions this is needed
        # as a fallback.
        return psutil.NUM_CPUS
def boot_time(time_format=None):
    '''
    Return the boot time in number of seconds since the epoch began.

    time_format
        Optionally specify a `strftime`_ format string. Use
        ``time_format='%c'`` to get a nicely-formatted locale specific date and
        time (i.e. ``Fri May  2 19:08:32 2014``).
    .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
    .. versionadded:: 2014.1.4
    CLI Example:
    .. code-block:: bash
        salt '*' ps.boot_time
    '''
    try:
        b_time = int(psutil.boot_time())
    except AttributeError:
        # BUGFIX: the fallback previously called psutil.boot_time() again,
        # which is exactly the attribute that just failed, so old psutil
        # installs could never work.  Pre-2.0 psutil named this API
        # get_boot_time(); use it as the fallback.
        b_time = int(psutil.get_boot_time())
    if time_format:
        # Load epoch timestamp as a datetime.datetime object
        b_time = datetime.datetime.fromtimestamp(b_time)
        try:
            return b_time.strftime(time_format)
        except TypeError as exc:
            # Non-string time_format (e.g. an int from the CLI parser).
            raise SaltInvocationError('Invalid format string: {0}'.format(exc))
    return b_time
def network_io_counters(interface=None):
    '''
    Return network I/O statistics.

    Without `interface`, returns system-wide totals; with an interface
    name, returns that NIC's counters, or False if the name is unknown.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.network_io_counters
        salt '*' ps.network_io_counters interface=eth0
    '''
    if not interface:
        return dict(psutil.net_io_counters()._asdict())
    else:
        stats = psutil.net_io_counters(pernic=True)
        if interface in stats:
            return dict(stats[interface]._asdict())
        else:
            # Unknown interface name: False rather than an exception.
            return False
def disk_io_counters(device=None):
    '''
    Return disk I/O statistics.

    Without `device`, returns system-wide totals; with a device name,
    returns that disk's counters, or False if the name is unknown.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.disk_io_counters
        salt '*' ps.disk_io_counters device=sda1
    '''
    if not device:
        return dict(psutil.disk_io_counters()._asdict())
    else:
        stats = psutil.disk_io_counters(perdisk=True)
        if device in stats:
            return dict(stats[device]._asdict())
        else:
            # Unknown device name: False rather than an exception.
            return False
def get_users():
    '''
    Return logged-in users.
    CLI Example:
    .. code-block:: bash
        salt '*' ps.get_users

    Returns a list of dicts, or False when neither psutil.users() nor the
    optional python-utmp package is available.
    '''
    try:
        recs = psutil.users()
        return [dict(x._asdict()) for x in recs]
    except AttributeError:
        # get_users is only present in psutil > v0.5.0
        # try utmp
        try:
            import utmp  # pylint: disable=import-error
            result = []
            while True:
                # Walk utmp entries until getutent() is exhausted (None).
                rec = utmp.utmpaccess.getutent()
                if rec is None:
                    return result
                elif rec[0] == 7:
                    # Type 7 == USER_PROCESS, i.e. an actual login session.
                    started = rec[8]
                    if isinstance(started, tuple):
                        # Some utmp builds report the timestamp as a tuple;
                        # keep only the seconds component.
                        started = started[0]
                    result.append({'name': rec[4], 'terminal': rec[2],
                                   'started': started, 'host': rec[5]})
        except ImportError:
            return False
| [
"5931263123@163.com"
] | 5931263123@163.com |
d4a36982621b4e47fd2f469d8c9e3939a3c7ea5e | 03c48a78ce3bda31bbeb1d0e85af6d56d252bddc | /bin/logic/BO/__init__.py | 0343e2023c8c2148acea53be8bb0ff3c00a512b1 | [] | no_license | windyStreet/python_data_statistics | bf7e0a3b24157550ff8d5d346597d7ee932a0ac7 | 5623f9f5ad9062039233c0f2b8e29848fcc15bac | refs/heads/master | 2021-01-20T02:37:20.806518 | 2017-06-09T10:27:41 | 2017-06-09T10:27:41 | 89,428,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | BASE_statistic_res = "statistic_res"
BASE_statistical_item = "statistical_item"
| [
"yq904276384@foxmail.com"
] | yq904276384@foxmail.com |
c49a3366869f240a88591a16171f85912531ef19 | b99b32fb0b4597bee94809ebd3b2ddae43064bee | /landmark_detection/menpofit/clm/__init__.py | 4fec6db002ccf5d5441b4403563a053bda94ebe0 | [] | no_license | HongwenZhang/ECT-FaceAlignment | c0129dc2aa20bc2bdba03a9ed1cabebcd5e5d848 | e94b446db73fca5ba751d6d9a81d42633208f228 | refs/heads/master | 2023-01-29T14:25:19.502350 | 2020-12-13T09:18:55 | 2020-12-13T09:18:55 | 111,511,579 | 31 | 19 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from .base import CLM
from .fitter import GradientDescentCLMFitter
from .algorithm import ActiveShapeModel, RegularisedLandmarkMeanShift
from .expert import (CorrelationFilterExpertEnsemble, FcnFilterExpertEnsemble,
IncrementalCorrelationFilterThinWrapper)
| [
"hongwen.zhang@cripac.ia.ac.cn"
] | hongwen.zhang@cripac.ia.ac.cn |
60d9e0d451482ce4ec684636a7e97aae7388e9ee | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/bgp/learnedroute.py | dd34ddef0142c7ceacb986d045cee05335610b46 | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,841 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedRoute(Base):
    """The LearnedRoute class encapsulates a system managed learnedRoute node in the ixnetwork hierarchy.
    An instance of the class can be obtained by accessing the LearnedRoute property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.

    All properties below are read-only snapshots fetched from the IxNetwork
    server; this class is generated-style REST-API glue and mutates nothing.
    """
    # REST resource name of this node in the IxNetwork server data model.
    _SDM_NAME = 'learnedRoute'
    def __init__(self, parent):
        super(LearnedRoute, self).__init__(parent)
    @property
    def AsPath(self):
        """The AS_PATH attribute of the learned BGP route.
        Returns:
            str
        """
        return self._get_attribute('asPath')
    @property
    def BlockOffset(self):
        """The Label Block Offset (VBO) is the value used to help define this specific label block uniquely-as a subset of all of the possible labels.
        Returns:
            number
        """
        return self._get_attribute('blockOffset')
    @property
    def BlockSize(self):
        """The size of the label block, in bytes.
        Returns:
            number
        """
        return self._get_attribute('blockSize')
    @property
    def ControlWordEnabled(self):
        """Indicates if the label uses a control word.
        Returns:
            bool
        """
        return self._get_attribute('controlWordEnabled')
    @property
    def IpPrefix(self):
        """The route IP address prefix.
        Returns:
            str
        """
        return self._get_attribute('ipPrefix')
    @property
    def LabelBase(self):
        """The first label in the learned information.
        Returns:
            number
        """
        return self._get_attribute('labelBase')
    @property
    def LocalPreference(self):
        """Indicates the value of the local preference attribute.
        Returns:
            number
        """
        return self._get_attribute('localPreference')
    @property
    def MaxLabel(self):
        """The last label to use.
        Returns:
            number
        """
        return self._get_attribute('maxLabel')
    @property
    def MultiExitDiscriminator(self):
        """A metric field of the route file.
        Returns:
            number
        """
        return self._get_attribute('multiExitDiscriminator')
    @property
    def Neighbor(self):
        """The local IP address for this Ixia-emulated BGP neighbor/peer.
        Returns:
            str
        """
        return self._get_attribute('neighbor')
    @property
    def NextHop(self):
        """The next hop on the path to the destination network in the learned route.
        Returns:
            str
        """
        return self._get_attribute('nextHop')
    @property
    def OriginType(self):
        """An indication of where the route entry originated.
        Returns:
            str
        """
        return self._get_attribute('originType')
    @property
    def PrefixLength(self):
        """The prefix length of the route.
        Returns:
            number
        """
        return self._get_attribute('prefixLength')
    @property
    def RouteDistinguisher(self):
        """The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.
        Returns:
            str
        """
        return self._get_attribute('routeDistinguisher')
    @property
    def SeqDeliveryEnabled(self):
        """Indicates if sequencial delivery is enabled.
        Returns:
            bool
        """
        return self._get_attribute('seqDeliveryEnabled')
    @property
    def SiteId(self):
        """The site ID.
        Returns:
            number
        """
        return self._get_attribute('siteId')
    def find(self, AsPath=None, BlockOffset=None, BlockSize=None, ControlWordEnabled=None, IpPrefix=None, LabelBase=None, LocalPreference=None, MaxLabel=None, MultiExitDiscriminator=None, Neighbor=None, NextHop=None, OriginType=None, PrefixLength=None, RouteDistinguisher=None, SeqDeliveryEnabled=None, SiteId=None):
        """Finds and retrieves learnedRoute data from the server.
        All named parameters support regex and can be used to selectively retrieve learnedRoute data from the server.
        By default the find method takes no parameters and will retrieve all learnedRoute data from the server.
        Args:
            AsPath (str): The AS_PATH attribute of the learned BGP route.
            BlockOffset (number): The Label Block Offset (VBO) is the value used to help define this specific label block uniquely-as a subset of all of the possible labels.
            BlockSize (number): The size of the label block, in bytes.
            ControlWordEnabled (bool): Indicates if the label uses a control word.
            IpPrefix (str): The route IP address prefix.
            LabelBase (number): The first label in the learned information.
            LocalPreference (number): Indicates the value of the local preference attribute.
            MaxLabel (number): The last label to use.
            MultiExitDiscriminator (number): A metric field of the route file.
            Neighbor (str): The local IP address for this Ixia-emulated BGP neighbor/peer.
            NextHop (str): The next hop on the path to the destination network in the learned route.
            OriginType (str): An indication of where the route entry originated.
            PrefixLength (number): The prefix length of the route.
            RouteDistinguisher (str): The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.
            SeqDeliveryEnabled (bool): Indicates if sequencial delivery is enabled.
            SiteId (number): The site ID.
        Returns:
            self: This instance with matching learnedRoute data retrieved from the server available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() captures every keyword argument; the base class builds
        # the server-side select query from the non-None entries.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of learnedRoute data from the server.
        Args:
            href (str): An href to the instance to be retrieved
        Returns:
            self: This instance with the learnedRoute data from the server available through an iterator or index
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
f3fcf8f91f0fd949a8c11936eec2e76ad1cdc0d7 | 26605ec8a8bdd64a45af7d444d097d9e2f832dc9 | /electrum_xazab/plugins/scan_over_gap/qt.py | 89ab97ee24ca59217792e82b0f14916d89401274 | [
"MIT"
] | permissive | nunumichael/electrum-xazab | b67f821fd4a19e924d8ad902f076223df9b7511f | f128c765f451b418a418f9cd8b8e24fd8f66df74 | refs/heads/master | 2023-05-05T05:30:03.935745 | 2021-05-26T19:12:47 | 2021-05-26T19:12:47 | 370,091,240 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,408 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
from PyQt5.Qt import Qt
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QGridLayout, QLabel, QSpinBox, QPushButton,
QTreeWidget, QTreeWidgetItem, QHeaderView,
QProgressBar, QHBoxLayout)
from electrum_xazab.network import Network
from electrum_xazab.gui.qt.util import (EnterButton, WindowModalDialog, WWLabel,
CloseButton)
from .scan_over_gap import ScanOverGapPlugin
class ProgressSignal(QObject):
    """Thread-safe Qt carrier for scan progress: emits (wallet, percent)."""
    s = pyqtSignal(object, float)
class CompletedSignal(QObject):
    """Thread-safe Qt carrier for scan completion: emits (wallet,)."""
    s = pyqtSignal(object)
class ErrorSignal(QObject):
    """Thread-safe Qt carrier for scan errors: emits (wallet, exception)."""
    s = pyqtSignal(object, object)
class ScanListWidget(QTreeWidget):
    """Tree view listing the plugin's per-derivation scans for one wallet.

    Each row shows a scan's title, start index, scanned count and found
    balance; the (hidden) KEY column carries the scan's dict key, and the
    TITLE column's checkbox toggles whether the scan is active.
    """
    def __init__(self, parent):
        # parent is the ScanOverGapDialog, which exposes wallet and plugin.
        QTreeWidget.__init__(self)
        self.wallet = parent.wallet
        self.plugin = plugin = parent.plugin
        self.format_amount = plugin.format_amount
        self.setHeaderLabels(plugin.COLUMN_HEADERS)
        h = self.header()
        mode = QHeaderView.ResizeToContents
        h.setSectionResizeMode(plugin.Columns.KEY, mode)
        h.setSectionResizeMode(plugin.Columns.TITLE, mode)
        h.setSectionResizeMode(plugin.Columns.START_IDX, mode)
        h.setSectionResizeMode(plugin.Columns.SCANNED_CNT, mode)
        h.setSectionResizeMode(plugin.Columns.FOUND_BALANCE, mode)
        # KEY is an internal identifier, never shown to the user.
        self.setColumnHidden(plugin.Columns.KEY, True)
    def update(self, *, items_enabled=True):
        """Rebuild all rows from the plugin's scan state.

        items_enabled=False renders the rows greyed-out/uneditable,
        used while a scan is running.
        """
        ws = self.plugin.wallet_scans.get(self.wallet)
        if not ws or not ws.scans:
            return
        self.clear()
        scans = ws.scans
        for s in scans.values():
            scanned_cnt = s.next_idx - s.start_idx
            found_balance = self.format_amount(sum(s.balances.values()))
            scan_item = QTreeWidgetItem([s.key, s.title, str(s.start_idx),
                                         str(scanned_cnt), found_balance])
            check_state = Qt.Checked if s.active else Qt.Unchecked
            scan_item.setCheckState(self.plugin.Columns.TITLE, check_state)
            if not items_enabled:
                # XOR clears the ItemIsEnabled bit (it is set by default).
                scan_item.setFlags(scan_item.flags() ^ Qt.ItemIsEnabled)
            self.addTopLevelItem(scan_item)
        # Chain up to QWidget.update() for the repaint.
        super().update()
class ScanOverGapDialog(WindowModalDialog):
def __init__(self, window, plugin):
WindowModalDialog.__init__(self, window, plugin.MSG_TITLE)
self.setMinimumSize(800, 400)
self.wallet = w = window.parent().wallet
self.plugin = plugin
self.config = plugin.config
self.network = Network.get_instance()
coro = plugin.init_scans(w)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
fut.result()
g = QGridLayout(self)
g.addWidget(WWLabel(plugin.MSG_SCAN_TITLE), 0, 0, 1, -1)
self.scan_list = ScanListWidget(self)
self.scan_list.update()
self.scan_list.itemChanged.connect(self.scan_list_item_changed)
g.addWidget(self.scan_list, 1, 0, 1, -1)
g.addWidget(QLabel(plugin.MSG_SCAN_COUNT), 2, 0)
self.scan_cnt_sb = QSpinBox()
self.scan_cnt_sb.setRange(plugin.MIN_SCAN_CNT, plugin.MAX_SCAN_CNT)
self.scan_cnt_sb.setValue(plugin.DEF_SCAN_CNT)
self.scan_cnt_sb.setSingleStep(plugin.DEF_SCAN_CNT)
self.scan_cnt_sb.valueChanged.connect(self.on_scan_cnt_value_changed)
g.addWidget(self.scan_cnt_sb, 2, 1)
self.do_scan_btn = QPushButton()
self.set_do_scan_btn_txt(plugin.DEF_SCAN_CNT)
self.do_scan_btn.clicked.connect(partial(self.do_scan, w))
g.addWidget(self.do_scan_btn, 2, 3)
g.addWidget(QLabel(plugin.MSG_PROGRESS), 2, 4)
self.scan_progress_pb = QProgressBar()
self.scan_progress_pb.setValue(0)
g.addWidget(self.scan_progress_pb , 2, 5)
g.setRowStretch(10, 1)
hbox = QHBoxLayout()
self.reset_scans_btn = QPushButton(plugin.MSG_RESET)
self.reset_scans_btn.clicked.connect(partial(self.reset_scans, w))
hbox.addWidget(self.reset_scans_btn)
hbox.addStretch(1)
self.add_found_btn = QPushButton(plugin.MSG_ADD_FOUND)
self.add_found_btn.clicked.connect(partial(self.add_found_coins, w))
hbox.addWidget(self.add_found_btn)
hbox.addWidget(CloseButton(self))
g.addLayout(hbox, 11, 0, 1, -1)
self.plugin.progress_sig.s.connect(self.on_progress_qt)
self.plugin.completed_sig.s.connect(self.on_completed_qt)
self.plugin.error_sig.s.connect(self.on_error_qt)
self.cleaned_up = False
ws = self.plugin.wallet_scans.get(w)
self.scan_progress_pb.setValue(ws.progress)
if ws.running:
self.scan_cnt_sb.setEnabled(False)
self.do_scan_btn.setEnabled(False)
self.reset_scans_btn.setEnabled(False)
self.set_add_found_bnt_state()
def set_add_found_bnt_state(self):
enabled = False
ws = self.plugin.wallet_scans.get(self.wallet)
if ws and not ws.running:
for s in ws.scans.values():
if sum(s.balances.values()):
enabled = True
break
self.add_found_btn.setEnabled(enabled)
def set_do_scan_btn_txt(self, cnt):
self.do_scan_btn.setText(self.plugin.MSG_SCAN_NEXT.format(cnt))
def on_scan_cnt_value_changed(self, value):
self.set_do_scan_btn_txt(value)
def scan_list_item_changed(self, item, col):
ws = self.plugin.wallet_scans.get(self.wallet)
if not ws:
return
if col != self.plugin.Columns.TITLE:
return
key = item.data(self.plugin.Columns.KEY, Qt.DisplayRole)
scan = ws.scans.get(key)
if scan:
scan.active = (item.checkState(col) == Qt.Checked)
def closeEvent(self, event):
if self.cleaned_up:
return
self.plugin.progress_sig.s.disconnect(self.on_progress_qt)
self.plugin.completed_sig.s.disconnect(self.on_completed_qt)
self.plugin.error_sig.s.disconnect(self.on_error_qt)
self.cleaned_up = True
def do_scan(self, wallet):
self.scan_cnt_sb.setEnabled(False)
self.do_scan_btn.setEnabled(False)
self.reset_scans_btn.setEnabled(False)
self.add_found_btn.setEnabled(False)
self.scan_list.update(items_enabled=False)
self.scan_progress_pb.setValue(0)
coro = self.plugin.do_scan(wallet, self.scan_cnt_sb.value())
asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
def add_found_coins(self, wallet):
self.scan_cnt_sb.setEnabled(False)
self.do_scan_btn.setEnabled(False)
self.reset_scans_btn.setEnabled(False)
self.add_found_btn.setEnabled(False)
self.scan_list.update(items_enabled=False)
coro = self.plugin.add_found(wallet)
asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
def reset_scans(self, wallet):
    """Ask for confirmation, then reset all scan state for *wallet*."""
    if not self.question(self.plugin.MSG_Q_RESET):
        return
    self.scan_list.update(items_enabled=False)
    init_coro = self.plugin.init_scans(wallet, reset=True)
    future = asyncio.run_coroutine_threadsafe(init_coro, self.network.asyncio_loop)
    # Block the UI thread until the reset completes before refreshing the list.
    future.result()
    self.scan_list.update()
def on_progress_qt(self, wallet, progress):
    # Qt-thread slot: update the progress bar, but only for our own wallet.
    if self.wallet != wallet:
        return
    self.scan_progress_pb.setValue(progress)
def on_completed_qt(self, wallet):
    """Qt-thread slot: a scan finished; unlock the UI and refresh the list."""
    if self.wallet != wallet:
        return
    for widget in (self.scan_cnt_sb, self.do_scan_btn, self.reset_scans_btn):
        widget.setEnabled(True)
    self.set_add_found_bnt_state()
    self.scan_list.update()
def on_error_qt(self, wallet, e):
    """Qt-thread slot: a scan failed; unlock the UI and show the error."""
    if self.wallet != wallet:
        return
    for widget in (self.scan_cnt_sb, self.do_scan_btn, self.reset_scans_btn):
        widget.setEnabled(True)
    self.set_add_found_bnt_state()
    self.scan_list.update()
    self.show_error(str(e))
class Plugin(ScanOverGapPlugin):
    """Qt front-end for the scan-over-gap plugin.

    Wraps the base plugin's async callbacks in Qt signals so that GUI
    updates happen on the Qt thread.
    """

    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)
        self.progress_sig = ProgressSignal()
        self.completed_sig = CompletedSignal()
        self.error_sig = ErrorSignal()

    def on_close(self):
        super().on_close()
        # Drop the signal objects so they cannot outlive the plugin.
        for attr in ("progress_sig", "completed_sig", "error_sig"):
            setattr(self, attr, None)

    async def on_progress(self, wallet, progress):
        await super().on_progress(wallet, progress)
        self.progress_sig.s.emit(wallet, progress)

    async def on_completed(self, wallet):
        await super().on_completed(wallet)
        self.completed_sig.s.emit(wallet)

    async def on_error(self, wallet, e):
        await super().on_error(wallet, e)
        self.error_sig.s.emit(wallet, e)

    def requires_settings(self) -> bool:
        return True

    def settings_widget(self, window):
        return EnterButton(self.MSG_SCAN, partial(self.settings_dialog, window))

    def settings_dialog(self, window):
        d = ScanOverGapDialog(window, self)
        d.exec_()
| [
"71531505+xazab@users.noreply.github.com"
] | 71531505+xazab@users.noreply.github.com |
9775bb5f936d4af199cf7fc5aa3c8750d2ef5547 | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190421180237.py | 392c16248e60017f46b8d9db2c0cf22366f97902 | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,455 | py | # Jiaxi Zhang
# George McAlear
# If you run into an "[NSApplication _setup] unrecognized selector" problem on macOS,
# try uncommenting the following snippet
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import cozmo
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
from cozmo.util import distance_mm, degrees, speed_mmps
from rrt import *
#particle filter functionality
class ParticleFilter:
    """Monte-Carlo localization: holds the particle set and the arena map."""

    def __init__(self, grid):
        self.grid = grid
        self.particles = Particle.create_random(PARTICLE_COUNT, grid)

    def update(self, odom, r_marker_list):
        """Run one motion + measurement step.

        Returns the best pose estimate as ``(x, y, heading, confident)``.
        """
        moved = motion_update(self.particles, odom)
        self.particles = measurement_update(moved, r_marker_list, self.grid)
        x, y, h, confident = compute_mean_pose(self.particles)
        return (x, y, h, confident)
# tmp cache
flag_odom_init = False
# map
# NOTE(review): a ``global`` statement at module level is a no-op; it only has
# an effect inside a function body. Left as-is from the original snapshot.
global camera_settings
# pick up location for the robot to drive to, (x, y, theta)
async def run(robot: cozmo.robot.Robot):
    """Localize, drive to the pickup zone, then loop: pick up a cube and
    deliver it to the drop-off zone.

    Relies on module-level helpers (look_around_until_converge,
    execute_directions, convertPoseToInches, convertInchesToPose) and the
    globals set by localization (last_pose, goal_pose).
    """
    global grid, gui, pf
    ###################
    # pickup point
    #pickup_node = Node((153, 240))
    # dropoff_node = Node((544, 344))
    # localize the robot
    await look_around_until_converge(robot)
    # intialize an explorer after localized
    #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
    # move robot to pickup zone once localized
    print("LAST POSE IS:", last_pose)
    #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
    directions = goal_pose - last_pose
    current_pose = last_pose
    last_robot_pose = robot.pose
    print("SETTING LAST ROBOT POSE TO: ", last_robot_pose)
    print("SO WE GOING TO FOLLOW THIS TO PICKUP ZONE:", directions)
    await execute_directions(robot, directions)
    await robot.turn_in_place(angle=cozmo.util.Angle(degrees=45)).wait_for_completed()
    print("LAST ROBOT POSE IS: ", last_robot_pose)
    print("CURRENT POSE IS:", robot.pose)
    print("WE THINK WE MOVED THIS MUCH TO GO TO PICKUP ZONE: ", convertPoseToInches(robot.pose - last_robot_pose))
    # BUG FIX: the original line was missing a closing parenthesis and wrapped
    # the pose delta in rotate_point(...). Every sibling update in this function
    # uses convertPoseToInches(robot.pose - last_robot_pose), so the same form
    # is used here. TODO(review): confirm rotate_point was not intended.
    current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
    last_robot_pose = robot.pose
    print("COZMO THINKS IT IS AT AFTER DRIVING TO PICKUPZONE: ", current_pose)
    # await robot.say_text('Ready for pick up!').wait_for_completed()
    drop_off_directions = [(3, 4.5, 0), (21.75, 4.5, 90), (21.75, 13.75, 90)]
    pick_up_directions = [(21.75, 4.5, 90), (3, 4.5, 0), (4.5, 20)]
    while True:
        cube = await robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        await robot.pickup_object(cube, num_retries=5).wait_for_completed()
        current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
        print("WE THINK WE MOVED THIS MUCH TO PICK UP CUBE: ", convertPoseToInches(robot.pose - last_robot_pose))
        last_robot_pose = robot.pose
        #cosimo.update_pose()
        print("COZMO THINKS IT IS AT AFTER PICKING UP CUBE: ", current_pose)
        #await look_around_until_converge(robot)
        # intialize an explorer after localized
        #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
        # move robot to pickup zone once localized
        #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
        #current_pose = last_pose
        # rrt to drop zone and drop off cube
        for destination in drop_off_directions:
            directions = convertInchesToPose(destination) - current_pose
            await execute_directions(robot, directions)
            current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
            print("WE THINK WE MOVED THIS MUCH TO FOLLOW DIRECTIONS: ", convertPoseToInches(robot.pose - last_robot_pose))
            last_robot_pose = robot.pose
            print("COZMO THINKS IT IS AT AFTER FOLLOWING DIRECTIONS: ", current_pose)
        #await cosimo.go_to_goal(goal_node=dropoff_node)
        await robot.set_lift_height(0.0).wait_for_completed()
        # rrt to just in front of pick up zone
        # await cosimo.go_to_goal(goal_node=pickup_node)
class CozmoWarehouseWorker:
    """Bundles the robot handle, particle filter, arena map, and GUI state.

    FIXES vs. the original snapshot:
      * declared with ``def`` instead of ``class`` (syntax error);
      * ``await`` inside ``__init__`` (syntax error) -- the async head move now
        lives in :meth:`setup`, which callers must await once after construction;
      * ``execute_directions`` was missing ``self``;
      * unresolved names (``grid``, ``pf``, ``gui``, ``robot``, bare
        ``compute_odometry``/``marker_processing``) now go through ``self``;
      * ``dtype=np.float`` replaced with ``float`` (alias removed in NumPy 1.24).
    """

    def __init__(self, robot, current_arena_pose):
        self.current_arena_pose = current_arena_pose
        self.last_robot_pose = robot.pose
        self.robot = robot
        # start streaming (synchronous camera configuration only)
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()
        # Obtain the camera intrinsics matrix
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        self.camera_settings = np.array([
            [fx, 0, cx],
            [0, fy, cy],
            [0, 0, 1]
        ], dtype=float)
        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        self.gui = GUIWindow(self.grid, show_camera=True)

    async def setup(self):
        """One-time async initialization; await once after construction."""
        await self.robot.set_head_angle(cozmo.util.degrees(3)).wait_for_completed()

    async def execute_directions(self, directions):
        """Drive the relative pose *directions* as an axis-aligned X-then-Y move."""
        print("Robot is at: ", self.robot.pose)
        await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
        await self.robot.turn_in_place(angle=cozmo.util.Angle(degrees=90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)

    async def localize(self):
        """Spin in place, feeding odometry + markers to the PF until converged."""
        # reset our location estimates
        conf = False
        self.current_arena_pose = cozmo.util.Pose(0, 0, 0, angle_z=cozmo.util.Angle(degrees=0))
        self.pf = ParticleFilter(self.grid)
        # reset lift and head
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(cozmo.util.degrees(3)).wait_for_completed()
        while not conf:
            # move a little
            self.last_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=cozmo.util.Angle(degrees=20)).wait_for_completed()
            odometry = self.compute_odometry()
            detected_markers, camera_image = await self.marker_processing()
            # update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
            # update gui
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()
        self.current_arena_pose = cozmo.util.Pose(curr_x, curr_y, 0, angle_z=cozmo.util.Angle(degrees=curr_h))

    def compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)

        Input:
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
            self.last_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees
        # Rotate the world-frame delta into the robot's previous heading frame.
        dx, dy = rotate_point(curr_x - last_x, curr_y - last_y, -last_h)
        if cvt_inch:
            dx, dy = dx / self.grid.scale, dy / self.grid.scale
        return (dx, dy, diff_heading_deg(curr_h, last_h))

    async def marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.

        Input:
            - show_diagnostic_image: if True, shows what the marker detector
              sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
              (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
        # Convert the image to grayscale
        image = np.array(image_event.image)
        image = color.rgb2gray(image)
        # Detect the markers
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
        # Measured marker list for the particle filter, scaled by the grid scale
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x / self.grid.scale, y / self.grid.scale, h) for x, y, h in marker_list]
        # Annotate the camera image with the markers
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image
        return marker_list, annotated_image
class CozmoThread(threading.Thread):
    """Runs the cozmo program in a dedicated, non-daemon thread."""

    def __init__(self):
        super().__init__(daemon=False)

    def run(self):
        # Cozmo can stay on his charger
        cozmo.robot.Robot.drive_off_charger_on_connect = False
        cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
    # cozmo thread
    cozmo_thread = CozmoThread()
    cozmo_thread.start()
    # init
    # NOTE(review): in this snapshot ``gui`` and ``pf`` are not defined at
    # module level (they are created inside CozmoWarehouseWorker.__init__),
    # so these calls raise NameError as written -- confirm intended globals.
    gui.show_particles(pf.particles)
    gui.show_mean(0, 0, 0)
    gui.start()
"josh@lawn-143-215-110-217.lawn.gatech.edu"
] | josh@lawn-143-215-110-217.lawn.gatech.edu |
10c49e36a3733040248f87d8388adf44086acf6d | 1e508a8d3a491acfb7a58d7917bad6a1cd08aab1 | /ocdskingfisher/cli/commands/run.py | 2780206131c9aa7b6f200f2225379beaf5bbc8d9 | [
"BSD-3-Clause"
] | permissive | spendnetwork/kingfisher-scrape | fcbbd8fa5278bbc40b5517b70632de73b6831634 | 053910b278eed9b3e79ca2f05a04086300b8bc21 | refs/heads/master | 2020-05-20T17:13:35.846004 | 2019-11-19T06:51:19 | 2019-11-19T06:51:19 | 185,683,174 | 0 | 0 | BSD-3-Clause | 2019-05-08T21:42:05 | 2019-05-08T21:42:05 | null | UTF-8 | Python | false | false | 3,855 | py | import ocdskingfisher.cli.commands.base
import ocdskingfisher.sources_util
class RunCLICommand(ocdskingfisher.cli.commands.base.CLICommand):
command = 'run'
def __init__(self, config=None):
self.config = config
self.sources = ocdskingfisher.sources_util.gather_sources()
def configure_subparser(self, subparser):
subparser.add_argument("source", help="run one or more sources", nargs="*")
subparser.add_argument("--all", help="run all sources", action="store_true")
subparser.add_argument("--sample", help="Run sample only", action="store_true")
subparser.add_argument("--dataversion", help="Specify a data version to resume")
subparser.add_argument("--newversion",
help="Forces the creation of a new data version (If you don't specify this or " +
"--dataversion, the latest version will be used. If there are no versions, a new one will be created.)",
action="store_true")
subparser.add_argument("--note", help="Specify a note to save")
for source_id, source_class in self.sources.items():
for argument_definition in source_class.argument_definitions:
subparser.add_argument('--' + argument_definition['name'], help=argument_definition['help'])
def run_command(self, args):
run = []
if args.all and args.source:
print("You need to either specify a source or use --all flag, not both.")
quit(-1)
if args.all:
for source_id, source_class in self.sources.items():
run.append({'id': source_id, 'source_class': source_class})
elif args.source:
for selected_source in args.source:
if selected_source in self.sources:
run.append({'id': selected_source, 'source_class': self.sources[selected_source]})
else:
print("We can not find a source that you requested! You requested: %s" % selected_source)
quit(-1)
if not run:
print("You have not specified anything to run! Try listing your sources names or flag --all")
print("You can run:")
for source_id, source_info in sorted(self.sources.items()):
print(" - %s" % source_id)
quit(-1)
remove_dir = False
sample_mode = args.sample
data_version = args.dataversion
new_version = args.newversion
if args.verbose:
print("We will run: ")
for sourceInfo in run:
print(" - %s" % sourceInfo['id'])
if sample_mode:
print("Sample mode is on!")
else:
print("Sample mode is off.")
for source_info in run:
instance = source_info['source_class'](self.config.data_dir,
remove_dir=remove_dir,
sample=sample_mode,
data_version=data_version,
new_version=new_version,
config=self.config,
note=args.note,
)
instance.set_arguments(args)
if args.verbose:
print("Now running: %s (Output Dir: %s, Data Version: %s)" % (source_info['id'], instance.output_directory, instance.data_version))
if args.verbose:
print(" - gathering ...")
instance.run_gather()
if args.verbose:
print(" - fetching ...")
instance.run_fetch()
| [
"james.baster@opendataservices.coop"
] | james.baster@opendataservices.coop |
a881e9229b13b365c7d06bed9b0336d131436f71 | 8b1aa2e61f1b41b8f75a0e94aa5f53fc801dc357 | /Python/maximum-distance-in-arrays.py | 4a24c82f43ddecd8279b927fc09b57c4b8d9a723 | [
"MIT"
] | permissive | aditya-AI/LeetCode | 539cae41f8b494ed39bec0a10c561f17c40dad38 | 0fe4a3c3a1d31230c9b5c931ff1e33584f1ccd4e | refs/heads/master | 2021-01-02T08:24:36.970321 | 2017-07-31T13:56:23 | 2017-07-31T13:56:23 | 99,005,326 | 5 | 1 | null | 2017-08-01T13:38:32 | 2017-08-01T13:38:31 | null | UTF-8 | Python | false | false | 1,328 | py | # Time: O(n)
# Space: O(1)
# Given m arrays, and each array is sorted in ascending order.
# Now you can pick up two integers from two different arrays (each array picks one)
# and calculate the distance.
# We define the distance between two integers a and b to be their absolute difference |a-b|.
# Your task is to find the maximum distance.
#
# Example 1:
# Input:
# [[1,2,3],
# [4,5],
# [1,2,3]]
# Output: 4
# Explanation:
# One way to reach the maximum distance 4 is to pick 1 in the first or third array
# and pick 5 in the second array.
# Note:
# Each given array will have at least 1 number. There will be at least two non-empty arrays.
# The total number of the integers in all the m arrays will be in the range of [2, 10000].
# The integers in the m arrays will be in the range of [-10000, 10000].
class Solution(object):
    def maxDistance(self, arrays):
        """
        :type arrays: List[List[int]]
        :rtype: int

        Single pass: for each new array, pair its extremes against the running
        min/max of all *previous* arrays, which guarantees the two picks come
        from different arrays. O(n) time, O(1) space.
        """
        result, min_val, max_val = 0, arrays[0][0], arrays[0][-1]
        # FIX: use `range` instead of the Python-2-only `xrange`; on Python 2
        # this is behaviorally identical for iteration.
        for i in range(1, len(arrays)):
            result = max(result,
                         max(max_val - arrays[i][0],
                             arrays[i][-1] - min_val))
            min_val = min(min_val, arrays[i][0])
            max_val = max(max_val, arrays[i][-1])
        return result
"kamyu104@gmail.com"
] | kamyu104@gmail.com |
c335767482abc80f60cd1001ca1f8ca1c2cc765f | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/gen/view_models/views/lobby/premacc/piggybank_base_model.py | 6c977d428f53ef168370fd9f52f075809bda33f8 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,633 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/premacc/piggybank_base_model.py
from frameworks.wulf import ViewModel
class PiggybankBaseModel(ViewModel):
    """View model exposing the premium-account piggy bank state to the UI.

    Each property lives in a fixed slot (0-5); the getter/setter indices below
    correspond to the registration order in ``_initialize`` -- presumably the
    base ViewModel assigns indices in registration order (TODO confirm).
    """
    __slots__ = ()

    def __init__(self, properties=6, commands=0):
        # 6 properties, 0 commands -- must match the registrations in _initialize.
        super(PiggybankBaseModel, self).__init__(properties=properties, commands=commands)

    def getMaxAmount(self):
        # Slot 0: 'maxAmount'.
        return self._getNumber(0)

    def setMaxAmount(self, value):
        self._setNumber(0, value)

    def getMaxAmountStr(self):
        # Slot 1: 'maxAmountStr' (display-formatted counterpart of slot 0).
        return self._getString(1)

    def setMaxAmountStr(self, value):
        self._setString(1, value)

    def getCurrentAmount(self):
        # Slot 2: 'currentAmount'.
        return self._getNumber(2)

    def setCurrentAmount(self, value):
        self._setNumber(2, value)

    def getCurrentAmountStr(self):
        # Slot 3: 'currentAmountStr' (display-formatted counterpart of slot 2).
        return self._getString(3)

    def setCurrentAmountStr(self, value):
        self._setString(3, value)

    def getIsTankPremiumActive(self):
        # Slot 4: 'isTankPremiumActive'.
        return self._getBool(4)

    def setIsTankPremiumActive(self, value):
        self._setBool(4, value)

    def getTimeleft(self):
        # Slot 5: 'timeleft'.
        return self._getNumber(5)

    def setTimeleft(self, value):
        self._setNumber(5, value)

    def _initialize(self):
        # Registration order here defines the slot indices used by the
        # accessors above -- keep both in sync when adding properties.
        super(PiggybankBaseModel, self)._initialize()
        self._addNumberProperty('maxAmount', 1)
        self._addStringProperty('maxAmountStr', '0')
        self._addNumberProperty('currentAmount', 0)
        self._addStringProperty('currentAmountStr', '0')
        self._addBoolProperty('isTankPremiumActive', False)
        self._addNumberProperty('timeleft', 0)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
8558428dc9866e41b404653a6c5542655b4bfcfc | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/prompt_toolkit/key_binding/vi_state.py | 10593a82e6288ad9a157bba04e72661e880d3e25 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 3,360 | py | from enum import Enum
from typing import TYPE_CHECKING, Callable, Dict, Optional
from prompt_toolkit.clipboard import ClipboardData
if TYPE_CHECKING:
from .key_bindings.vi import TextObject
from .key_processor import KeyPressEvent
__all__ = [
"InputMode",
"CharacterFind",
"ViState",
]
class InputMode(str, Enum):
    """The possible Vi input modes.

    Inheriting from ``str`` lets mode values compare and serialize as plain
    strings.
    """
    value: str

    INSERT = "vi-insert"
    INSERT_MULTIPLE = "vi-insert-multiple"
    NAVIGATION = "vi-navigation"  # Normal mode.
    REPLACE = "vi-replace"
    REPLACE_SINGLE = "vi-replace-single"
class CharacterFind:
    """Remember the target of the last character-find motion so it can be
    repeated later.

    :param character: the character that was searched for.
    :param backwards: True when the search ran toward the start of the line.
    """

    def __init__(self, character: str, backwards: bool = False) -> None:
        self.character, self.backwards = character, backwards
class ViState:
    """
    Mutable class to hold the state of the Vi navigation.
    """

    def __init__(self) -> None:
        #: None or CharacterFind instance. (This is used to repeat the last
        #: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
        self.last_character_find: Optional[CharacterFind] = None

        # When an operator is given and we are waiting for text object,
        # -- e.g. in the case of 'dw', after the 'd' --, an operator callback
        # is set here.
        self.operator_func: Optional[
            Callable[["KeyPressEvent", "TextObject"], None]
        ] = None
        self.operator_arg: Optional[int] = None

        #: Named registers. Maps register name (e.g. 'a') to
        #: :class:`ClipboardData` instances.
        self.named_registers: Dict[str, ClipboardData] = {}

        #: The Vi mode we're currently in to.
        self.__input_mode = InputMode.INSERT

        #: Waiting for digraph.
        self.waiting_for_digraph = False
        self.digraph_symbol1: Optional[str] = None  # (None or a symbol.)

        #: When true, make ~ act as an operator.
        self.tilde_operator = False

        #: Register in which we are recording a macro.
        #: `None` when not recording anything.
        # Note that the recording is only stored in the register after the
        # recording is stopped. So we record in a separate `current_recording`
        # variable.
        self.recording_register: Optional[str] = None
        self.current_recording: str = ""

        # Temporary navigation (normal) mode.
        # This happens when control-o has been pressed in insert or replace
        # mode. The user can now do one navigation action and we'll return back
        # to insert/replace.
        self.temporary_navigation_mode = False

    @property
    def input_mode(self) -> InputMode:
        "Get `InputMode`."
        return self.__input_mode

    @input_mode.setter
    def input_mode(self, value: InputMode) -> None:
        "Set `InputMode`."
        if value == InputMode.NAVIGATION:
            # Entering navigation mode aborts any half-finished operator or
            # digraph input.
            self.waiting_for_digraph = False
            self.operator_func = None
            self.operator_arg = None

        self.__input_mode = value

    def reset(self) -> None:
        """
        Reset state, go back to the given mode. INSERT by default.
        """
        # Go back to insert mode.
        self.input_mode = InputMode.INSERT

        self.waiting_for_digraph = False
        self.operator_func = None
        self.operator_arg = None

        # Reset recording state.
        self.recording_register = None
        self.current_recording = ""
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
7000e234acfa2ecca481572aa5c2302dd89b9646 | b0f0473f10df2fdb0018165785cc23c34b0c99e7 | /Tools/Python/selenium-2.37.2/py/selenium/webdriver/remote/webelement.py | 95eb21084afb5a54e95d77960ba9362dada62c05 | [] | no_license | wimton/Meter-peach | d9294a56ec0c1fb2d1a2a4acec1c2bf47b0932df | af0302d1789a852746a3c900c6129ed9c15fb0f4 | refs/heads/master | 2023-04-25T22:54:31.696184 | 2021-05-19T13:14:55 | 2021-05-19T13:14:55 | 355,202,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,265 | py | # Copyright 2008-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebElement implementation."""
import os
import zipfile
try:
from StringIO import StringIO
except ImportError: # 3+
from io import StringIO
import base64
from .command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# Python 2/3 compatibility: on Python 2, rebind ``str`` to ``basestring`` so
# the isinstance checks below accept both str and unicode.
try:
    str = basestring
except NameError:  # Python 3: ``basestring`` is gone; the builtin str is fine.
    pass
class WebElement(object):
    """Represents an HTML element.

    Generally, all interesting operations to do with interacting with a page
    will be performed through this interface."""

    def __init__(self, parent, id_):
        # parent: the WebDriver that located this element (used to send
        # wire-protocol commands); id_: the server-side element reference.
        self._parent = parent
        self._id = id_

    @property
    def tag_name(self):
        """Gets this element's tagName property."""
        return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']

    @property
    def text(self):
        """Gets the text of the element."""
        return self._execute(Command.GET_ELEMENT_TEXT)['value']

    def click(self):
        """Clicks the element."""
        self._execute(Command.CLICK_ELEMENT)

    def submit(self):
        """Submits a form."""
        self._execute(Command.SUBMIT_ELEMENT)

    def clear(self):
        """Clears the text if it's a text entry element."""
        self._execute(Command.CLEAR_ELEMENT)

    def get_attribute(self, name):
        """Gets the attribute value.

        Returns None when the attribute is absent. Boolean-like values are
        normalized to lowercase 'true'/'false' -- except for the 'value'
        attribute, which is returned verbatim.
        """
        resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
        attributeValue = ''
        if resp['value'] is None:
            attributeValue = None
        else:
            attributeValue = resp['value']
            if name != 'value' and attributeValue.lower() in ('true', 'false'):
                attributeValue = attributeValue.lower()
        return attributeValue

    def is_selected(self):
        """Whether the element is selected."""
        return self._execute(Command.IS_ELEMENT_SELECTED)['value']

    def is_enabled(self):
        """Whether the element is enabled."""
        return self._execute(Command.IS_ELEMENT_ENABLED)['value']

    def find_element_by_id(self, id_):
        """Finds element by id."""
        return self.find_element(by=By.ID, value=id_)

    def find_elements_by_id(self, id_):
        """Finds all child elements with the given id."""
        return self.find_elements(by=By.ID, value=id_)

    def find_element_by_name(self, name):
        """Find element by name."""
        return self.find_element(by=By.NAME, value=name)

    def find_elements_by_name(self, name):
        """Finds all child elements with the given name."""
        return self.find_elements(by=By.NAME, value=name)

    def find_element_by_link_text(self, link_text):
        """Finds element by link text."""
        return self.find_element(by=By.LINK_TEXT, value=link_text)

    def find_elements_by_link_text(self, link_text):
        """Finds all child links with exactly the given text."""
        return self.find_elements(by=By.LINK_TEXT, value=link_text)

    def find_element_by_partial_link_text(self, link_text):
        """Finds the first child link whose text contains the given substring."""
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_elements_by_partial_link_text(self, link_text):
        """Finds all child links whose text contains the given substring."""
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_element_by_tag_name(self, name):
        """Finds the first child element with the given tag name."""
        return self.find_element(by=By.TAG_NAME, value=name)

    def find_elements_by_tag_name(self, name):
        """Finds all child elements with the given tag name."""
        return self.find_elements(by=By.TAG_NAME, value=name)

    def find_element_by_xpath(self, xpath):
        """Finds element by xpath."""
        return self.find_element(by=By.XPATH, value=xpath)

    def find_elements_by_xpath(self, xpath):
        """Finds elements within the elements by xpath."""
        return self.find_elements(by=By.XPATH, value=xpath)

    def find_element_by_class_name(self, name):
        """Finds an element by their class name."""
        return self.find_element(by=By.CLASS_NAME, value=name)

    def find_elements_by_class_name(self, name):
        """Finds elements by their class name."""
        return self.find_elements(by=By.CLASS_NAME, value=name)

    def find_element_by_css_selector(self, css_selector):
        """Find and return an element by CSS selector."""
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)

    def find_elements_by_css_selector(self, css_selector):
        """Find and return list of multiple elements by CSS selector."""
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)

    def send_keys(self, *value):
        """Simulates typing into the element.

        When connected to a remote server and one of the values is a path to
        an existing local file, the file is first uploaded to the server and
        the remote path is typed instead (same behaviour as the Java binding).
        """
        # transfer file to another machine only if remote driver is used
        # the same behaviour as for java binding
        if self.parent._is_remote:
            local_file = LocalFileDetector.is_local_file(*value)
            if local_file is not None:
                value = self._upload(local_file)

        # Flatten the arguments into a list of single keystrokes; Keys
        # constants pass through whole, everything else is split per character.
        typing = []
        for val in value:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                val = val.__str__()
                for i in range(len(val)):
                    typing.append(val[i])
            else:
                for i in range(len(val)):
                    typing.append(val[i])
        self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})

    # RenderedWebElement Items
    def is_displayed(self):
        """Whether the element would be visible to a user"""
        return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']

    @property
    def location_once_scrolled_into_view(self):
        """CONSIDERED LIABLE TO CHANGE WITHOUT WARNING. Use this to discover where on the screen an
        element is so that we can click it. This method should cause the element to be scrolled
        into view.

        Returns the top lefthand corner location on the screen, or None if the element is not visible"""
        return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']

    @property
    def size(self):
        """ Returns the size of the element """
        size = self._execute(Command.GET_ELEMENT_SIZE)['value']
        # Copy only the documented keys into a fresh dict.
        new_size = {}
        new_size["height"] = size["height"]
        new_size["width"] = size["width"]
        return new_size

    def value_of_css_property(self, property_name):
        """ Returns the value of a CSS property """
        return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
                             {'propertyName': property_name})['value']

    @property
    def location(self):
        """ Returns the location of the element in the renderable canvas"""
        old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
        new_loc = {"x": old_loc['x'],
                   "y": old_loc['y']}
        return new_loc

    @property
    def parent(self):
        # The WebDriver instance this element belongs to.
        return self._parent

    @property
    def id(self):
        # The opaque server-side element reference.
        return self._id

    def __eq__(self, element):
        # Fast path: identical references are equal; otherwise ask the server.
        if self._id == element.id:
            return True
        else:
            return self._execute(Command.ELEMENT_EQUALS, {'other': element.id})['value']

    # Private Methods
    def _execute(self, command, params=None):
        """Executes a command against the underlying HTML element.

        Args:
          command: The name of the command to _execute as a string.
          params: A dictionary of named parameters to send with the command.

        Returns:
          The command's JSON response loaded into a dictionary object.
        """
        if not params:
            params = {}
        params['id'] = self._id
        return self._parent.execute(command, params)

    def find_element(self, by=By.ID, value=None):
        # `str` may be rebound to basestring on Python 2 (see module shim).
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self._execute(Command.FIND_CHILD_ELEMENT,
                             {"using": by, "value": value})['value']

    def find_elements(self, by=By.ID, value=None):
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self._execute(Command.FIND_CHILD_ELEMENTS,
                             {"using": by, "value": value})['value']

    def _upload(self, filename):
        # Zip the file in memory and send it base64-encoded; servers that do
        # not support the upload command make us fall back to the local path.
        # NOTE(review): base64.encodestring was removed in Python 3.9 --
        # encodebytes is the modern name; confirm the supported Python range.
        fp = StringIO()
        zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
        zipped.write(filename, os.path.split(filename)[1])
        zipped.close()
        try:
            return self._execute(Command.UPLOAD_FILE,
                                 {'file': base64.encodestring(fp.getvalue())})['value']
        except WebDriverException as e:
            if "Unrecognized command: POST" in e.__str__():
                return filename
            elif "Command not found: POST " in e.__str__():
                return filename
            elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
                return filename
            else:
                raise e
class LocalFileDetector(object):
    """Detects whether a typed key sequence refers to an existing local file."""

    @classmethod
    def is_local_file(cls, *keys):
        """Return the joined key sequence if it names an existing file, else None.

        Flattens *keys* exactly the way ``WebElement.send_keys`` does, so the
        two agree on what string would actually be typed.
        """
        typing = []
        for val in keys:
            if isinstance(val, Keys):
                typing.append(val)
            else:
                if isinstance(val, int):
                    val = str(val)
                # Split per character, matching send_keys' flattening.
                typing.extend(val)
        file_path = ''.join(typing)
        # FIX: the original compared `file_path is ''`, an identity check that
        # only worked because CPython interns the empty string literal.
        if not file_path:
            return None
        try:
            if os.path.isfile(file_path):
                return file_path
        except Exception:
            # Invalid path characters etc. -- treat as "not a local file"
            # (was a bare `except:`, which also swallowed KeyboardInterrupt).
            pass
        return None
| [
"wimton@yahoo.com"
] | wimton@yahoo.com |
f045587943372d8a259207ccd82edaa468953613 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Box/Files/DownloadFile.py | 76b407e839ac439a4c20ae70900db1f6240c8bd1 | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DownloadFile
# Retrieves the contents of a specified file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DownloadFile(Choreography):
    """Choreo wrapper for the generated '/Library/Box/Files/DownloadFile'
    operation, which retrieves the contents of a specified Box file."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the DownloadFile Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(DownloadFile, self).__init__(temboo_session, '/Library/Box/Files/DownloadFile')

    def new_input_set(self):
        # Factory for the input container this Choreo consumes.
        return DownloadFileInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return DownloadFileResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Track a single (possibly asynchronous) execution of this Choreo.
        return DownloadFileChoreographyExecution(session, exec_id, path)
class DownloadFileInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DownloadFile
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated setters: each forwards its value to InputSet._set_input
    # under the matching input name.

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
        """
        super(DownloadFileInputSet, self)._set_input('AccessToken', value)

    def set_AsUser(self, value):
        """
        Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
        """
        super(DownloadFileInputSet, self)._set_input('AsUser', value)

    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The id of the file to download.)
        """
        super(DownloadFileInputSet, self)._set_input('FileID', value)
class DownloadFileResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DownloadFile Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; kept
        # because this signature is part of the generated interface.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((string) The Base64 encoded contents of the downloaded file.)
        """
        return self._output.get('Response', None)
class DownloadFileChoreographyExecution(ChoreographyExecution):
    """Tracks one execution of the DownloadFile Choreo."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result set.
        return DownloadFileResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
790e7eb171f917eca8b75daf5bb8c255e365a672 | d8c3bd4bac8e19d71e87f721c2a4702e384eb685 | /tests/handlers/test_emailaddr.py | a2ae6e8d9d62b91df932b1fabd54cd86e0802efe | [
"MIT"
] | permissive | remcoboerma/Authl | 8f2b2cba0e5159ec0b469415d9d4a6f7150555b7 | 1e1f4e0500be25b61426c50c3192367cf0effc7a | refs/heads/main | 2023-08-28T02:14:00.063297 | 2021-10-14T20:20:08 | 2021-10-14T20:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,400 | py | """ Tests for email login """
# pylint:disable=missing-docstring
import logging
from authl import disposition, tokens
from authl.handlers import email_addr
from . import parse_args
LOGGER = logging.getLogger(__name__)
def test_basics():
    """Exercise EmailAddress metadata and its URL accept/normalize rules."""
    handler = email_addr.EmailAddress(None, None, tokens.DictStore())
    assert handler.service_name == 'Email'
    assert handler.url_schemes
    assert 'email' in handler.description
    assert handler.cb_id == 'e'
    assert handler.logo_html[0][1] == 'Email'

    # (input, expected) pairs in the original assertion order; an expected
    # value of None means the handler must reject the input.
    cases = [
        # plain and mailto: addresses are normalized
        ('foo@bar.baz', 'mailto:foo@bar.baz'),
        ('mailto:foo@bar.baz', 'mailto:foo@bar.baz'),
        # email addresses must be well-formed
        ('mailto:foobar.baz', None),
        # don't support other schemas
        ('email:foo@bar.baz', None),
        ('@foo@bar.baz', None),
        ('https://example.com/', None),
        # handle leading/trailing spaces correctly
        (' foo@bar.baz', 'mailto:foo@bar.baz'),
        ('mailto: foo@bar.baz', 'mailto:foo@bar.baz'),
        ('mailto:foo@bar.baz ', 'mailto:foo@bar.baz'),
        # but don't allow embedded spaces
        (' foo @bar.baz', None),
        # email address must be valid
        (' asdf[]@poiu_foo.baz!', None),
        # don't allow bang-paths
        ('bang!path!is!fun!bob', None),
        ('bang.com!path!is!fun!bob', None),
        ('bang!path!is!fun!bob@example.com', None),
        # strip out non-email-address components
        ('mailto:foo@example.com?subject=pwned', 'mailto:foo@example.com'),
        # handle case correctly
        ('MailtO:Foo@Example.Com', 'mailto:foo@example.com'),
    ]
    for url, expected in cases:
        result = handler.handles_url(url)
        if expected is None:
            assert not result
        else:
            assert result == expected
def test_success():
    """Happy path: initiating auth "sends" an email whose embedded link,
    when visited, yields a Verified disposition for the address."""
    store = {}

    def do_callback(message):
        # Stand-in for the mail transport: pull the callback URL out of
        # the message body and immediately "click" it.
        assert message['To'] == 'user@example.com'
        url = message.get_payload().strip()
        args = parse_args(url)
        assert url.startswith('http://example/cb/')
        result = handler.check_callback(url, parse_args(url), {})
        LOGGER.info('check_callback(%s,%s): %s', url, args, result)
        assert isinstance(result, disposition.Verified)
        store['result'] = result
        store['is_done'] = result.identity

    # email_template_text='{url}' makes the message body exactly the link.
    handler = email_addr.EmailAddress(do_callback, 'some data', tokens.DictStore(store),
                                      email_template_text='{url}')

    result = handler.initiate_auth('mailto:user@example.com', 'http://example/cb/', '/redir')
    LOGGER.info('initiate_auth: %s', result)
    assert isinstance(result, disposition.Notify)
    assert result.cdata == 'some data'

    # do_callback ran synchronously and stashed the Verified result.
    assert store['result'].identity == 'mailto:user@example.com'
    assert store['result'].redir == '/redir'
def test_failures(mocker):
    """Failure modes: malformed addresses, bad tokens, expiry, and replay."""
    store = {}
    pending = {}

    def accept(message):
        # Capture the callback URL per recipient instead of sending mail.
        url = message.get_payload().strip()
        pending[message['To']] = url

    # 10 is the token lifetime (seconds) used for the timeout checks below.
    handler = email_addr.EmailAddress(accept,
                                      'some data', tokens.DictStore(store),
                                      10,
                                      email_template_text='{url}')

    # must be well-formed mailto: URL
    for malformed in ('foo@bar.baz', 'http://foo.bar/', 'mailto:blahblahblah'):
        assert 'Malformed' in str(handler.initiate_auth(malformed,
                                                        'http://example.cb/',
                                                        '/malformed'))

    # check for missing or invalid tokens
    assert 'Missing token' in str(handler.check_callback('foo', {}, {}))
    assert 'Invalid token' in str(handler.check_callback('foo', {'t': 'bogus'}, {}))

    def initiate(addr, redir):
        # Start an auth flow and verify it produced a Notify disposition.
        result = handler.initiate_auth('mailto:' + addr, 'http://example/', redir)
        assert isinstance(result, disposition.Notify)
        assert result.cdata == 'some data'

    def check_pending(addr):
        # "Click" the captured link for this address.
        url = pending[addr]
        return handler.check_callback(url, parse_args(url), {})

    # check for timeout failure: jump the clock far past the 10s lifetime
    mock_time = mocker.patch('time.time')
    mock_time.return_value = 30
    assert len(store) == 0
    initiate('timeout@example.com', '/timeout')
    assert len(store) == 1
    mock_time.return_value = 20000
    result = check_pending('timeout@example.com')
    assert isinstance(result, disposition.Error)
    assert 'timed out' in result.message
    assert result.redir == '/timeout'
    assert len(store) == 0

    # check for replay attacks: the second use of a link must fail
    assert len(store) == 0
    initiate('replay@example.com', '/replay')
    assert len(store) == 1
    result1 = check_pending('replay@example.com')
    result2 = check_pending('replay@example.com')
    assert len(store) == 0
    assert isinstance(result1, disposition.Verified)
    assert result1.identity == 'mailto:replay@example.com'
    assert result1.redir == '/replay'
    assert isinstance(result2, disposition.Error)
    assert 'Invalid token' in str(result2)
def test_connector(mocker):
    """smtplib_connector with use_ssl=True must open SMTP_SSL, build a
    client-side TLS context, start TLS, and log in with the credentials."""
    import ssl
    mock_smtp_ssl = mocker.patch('smtplib.SMTP_SSL')
    mock_ssl = mocker.patch('ssl.SSLContext')
    conn = mocker.MagicMock()
    mock_smtp_ssl.return_value = conn

    connector = email_addr.smtplib_connector('localhost', 25,
                                             'test', 'poiufojar',
                                             use_ssl=True)
    connector()

    mock_smtp_ssl.assert_called_with('localhost', 25)
    mock_ssl.assert_called_with(ssl.PROTOCOL_TLS_CLIENT)
    conn.ehlo.assert_called()
    conn.starttls.assert_called()
    conn.login.assert_called_with('test', 'poiufojar')
def test_simple_sendmail(mocker):
    """simple_sendmail must fill in From/Subject and hand the message to the
    connector's sendmail within a context manager."""
    connector = mocker.MagicMock(name='connector')

    import email
    message = email.message.EmailMessage()
    message['To'] = 'recipient@bob.example'
    message.set_payload('test body')

    sender = email_addr.simple_sendmail(connector, 'sender@bob.example', 'test subject')

    sender(message)

    connector.assert_called_once()
    # The connector is used as a context manager; the same mock records
    # the sendmail call made inside the `with` block.
    with connector() as conn:
        conn.sendmail.assert_called_with('sender@bob.example',
                                         'recipient@bob.example',
                                         str(message))

    # The sender mutates the message headers in place.
    assert message['From'] == 'sender@bob.example'
    assert message['Subject'] == 'test subject'
def test_from_config(mocker):
    """from_config must read the template file, wire up SMTP from the config
    keys, and produce a handler whose Notify carries the check message."""
    store = {}

    # Patch file access so the template file never needs to exist.
    mock_open = mocker.patch('builtins.open', mocker.mock_open(read_data='template'))
    mock_smtp = mocker.patch('smtplib.SMTP')
    conn = mocker.MagicMock()
    mock_smtp.return_value = conn

    handler = email_addr.from_config({
        'EMAIL_FROM': 'sender@example.com',
        'EMAIL_SUBJECT': 'test subject',
        'EMAIL_CHECK_MESSAGE': 'check yr email',
        'EMAIL_TEMPLATE_FILE': 'template.txt',
        'EMAIL_EXPIRE_TIME': 37,
        'SMTP_HOST': 'smtp.example.com',
        'SMTP_PORT': 587,
        'SMTP_USE_SSL': False,
    }, tokens.DictStore(store))

    mock_open.assert_called_with('template.txt', encoding='utf-8')

    res = handler.initiate_auth('mailto:alice@bob.example', 'http://cb/', '/redir')
    assert res.cdata['message'] == 'check yr email'
    # Exactly one pending token should have been issued.
    assert len(store) == 1

    mock_smtp.assert_called_with('smtp.example.com', 587)
def test_please_wait(mocker):
    """Re-initiating auth for an address with a live pending token must NOT
    resend mail; consuming, expiring, or removing the token re-enables it."""
    token_store = tokens.DictStore()
    pending = {}
    mock_send = mocker.MagicMock()
    handler = email_addr.EmailAddress(mock_send, "this is data", token_store,
                                      expires_time=60,
                                      pending_storage=pending)

    # Control the clock so expiry is deterministic.
    mock_time = mocker.patch('time.time')

    assert mock_send.call_count == 0
    mock_time.return_value = 10

    # First auth should call mock_send
    handler.initiate_auth('mailto:foo@bar.com', 'http://example/', 'blop')
    assert mock_send.call_count == 1
    assert 'foo@bar.com' in pending
    token_value = pending['foo@bar.com']

    # Second auth should not (token is still pending and unexpired)
    handler.initiate_auth('mailto:foo@bar.com', 'http://example/', 'blop')
    assert mock_send.call_count == 1
    assert 'foo@bar.com' in pending
    assert token_value == pending['foo@bar.com']

    # Using the link should remove the pending item
    handler.check_callback('http://example/', {'t': pending['foo@bar.com']}, {})
    assert 'foo@bar.com' not in pending

    # Next auth should call mock_send again, with a fresh token
    handler.initiate_auth('mailto:foo@bar.com', 'http://example/', 'blop')
    assert mock_send.call_count == 2
    assert 'foo@bar.com' in pending
    assert token_value != pending['foo@bar.com']
    token_value = pending['foo@bar.com']

    # Timing out the token (past expires_time=60) should cause a resend
    mock_time.return_value = 1000
    handler.initiate_auth('mailto:foo@bar.com', 'http://example/', 'blop')
    assert mock_send.call_count == 3
    assert 'foo@bar.com' in pending
    assert token_value != pending['foo@bar.com']
    token_value = pending['foo@bar.com']

    # And anything else that removes the token from the token_store should as well
    token_store.remove(pending['foo@bar.com'])
    handler.initiate_auth('mailto:foo@bar.com', 'http://example/', 'blop')
    assert mock_send.call_count == 4
    assert token_value != pending['foo@bar.com']
    token_value = pending['foo@bar.com']
| [
"fluffy@beesbuzz.biz"
] | fluffy@beesbuzz.biz |
84a54d88f7d38d8c85b43397ef42d1dd097a06d5 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/15 Threading and Concurrency/MultiProcessing/05-sharing-state-using-pipes.py | b1e3550193886be92e3125e26cf78b7aef3064e4 | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | ############################################################
#
# sharing state between processes
#
############################################################
import multiprocessing as mp
N = 20
def fn(connection, results, N):
    """Append the squares 0..(N-1)**2 to *results* in place, then send the
    accumulated list back over *connection*."""
    results.extend(i * i for i in range(N))
    connection.send(results)
if __name__ == '__main__':
    # Parent keeps one end of the pipe; the child process gets the other.
    pipe_parent, pipe_child = mp.Pipe()
    results = []
    p = mp.Process(target=fn, args=(pipe_child, results, N))
    p.start()
    # Receive before join: recv() blocks until the child sends. The child's
    # copy of `results` comes back through the pipe — process memory is not
    # shared, so the parent's `results` list stays empty.
    reply = pipe_parent.recv()
    p.join()
    print(reply[:])
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
893fab1cf48b44a1a717f23ed3e257b60fdaec80 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /McZF4JRhPus5DtRA4_8.py | 5cf5c4df707900be969ed7763295e7061f6a9041 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | """
Transcribe the given DNA strand into corresponding mRNA - a type of RNA, that
will be formed from it after transcription. DNA has the bases A, T, G and C,
while RNA converts to U, A, C and G respectively.
### Examples
dna_to_rna("ATTAGCGCGATATACGCGTAC") ➞ "UAAUCGCGCUAUAUGCGCAUG"
dna_to_rna("CGATATA") ➞ "GCUAUAU"
dna_to_rna("GTCATACGACGTA") ➞ "CAGUAUGCUGCAU"
### Notes
* Transcription is the process of making complementary strand.
* A, T, G and C in DNA converts to U, A, C and G respectively, when in mRNA.
"""
def dna_to_rna(dna):
    """Transcribe a DNA strand into the complementary mRNA sequence.

    Bases pair as A->U, T->A, G->C, C->G. Characters other than the four
    DNA bases are silently dropped, matching the original behavior.

    >>> dna_to_rna("CGATATA")
    'GCUAUAU'
    """
    pairing = {"A": "U", "T": "A", "G": "C", "C": "G"}
    return "".join(pairing[base] for base in dna if base in pairing)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b10689338c8fe9bae2a8bab3464158b39483a4e6 | f0181afd2eea9b086ce9487fb8d7fd949282140a | /ncbi/product_protein_seq.py | 5202a7a52dbb2c185fac59cd4b6857e3623f29ca | [
"MIT"
] | permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 1,140 | py | """
Extract the locus information, gene product, and translation from a genbank file
"""
import os
import sys
import argparse
from Bio import SeqIO
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Extract the locus information, gene product, and translation from a genbank file")
    parser.add_argument('-f', help='genbank file', required=True)
    args = parser.parse_args()

    for seq in SeqIO.parse(args.f, 'genbank'):
        for feature in seq.features:
            # Each qualifier may be absent; default every field to the
            # string 'None' so the tab-separated output keeps its shape.
            pi = 'None'
            if 'protein_id' in feature.qualifiers:
                pi = feature.qualifiers['protein_id'][0]
            gs = "None"
            if 'gene' in feature.qualifiers:
                gs = feature.qualifiers['gene'][0]
            pd = 'None'
            if 'product' in feature.qualifiers:
                pd = feature.qualifiers['product'][0]
            tl = "None"
            if 'translation' in feature.qualifiers:
                tl = feature.qualifiers['translation'][0]
            # Only report features whose gene symbol or product mentions gpA.
            if 'gpA' in gs or 'gpA' in pd:
                print("\t".join([seq.id, seq.annotations['organism'], pi, gs, pd, tl]))
| [
"raedwards@gmail.com"
] | raedwards@gmail.com |
7cfb52ce8dde90c5c4dfd516b5e220b99066792d | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /zuora_client/models/credit_memo_unapply_invoice_item_request_type.py | 9f2fe81059b5e3942742a9926bab5a9211ac28b2 | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 47,165 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | 
Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | 
`RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreditMemoUnapplyInvoiceItemRequestType(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared Swagger type for each Python attribute (used by to_dict and
    # the generated (de)serialization machinery).
    swagger_types = {
        'amount': 'float',
        'credit_memo_item_id': 'str',
        'credit_tax_item_id': 'str',
        'invoice_item_id': 'str',
        'tax_item_id': 'str'
    }
    # Wire (JSON) key for each Python attribute.
    attribute_map = {
        'amount': 'amount',
        'credit_memo_item_id': 'creditMemoItemId',
        'credit_tax_item_id': 'creditTaxItemId',
        'invoice_item_id': 'invoiceItemId',
        'tax_item_id': 'taxItemId'
    }
    def __init__(self, amount=None, credit_memo_item_id=None, credit_tax_item_id=None, invoice_item_id=None, tax_item_id=None):  # noqa: E501
        """CreditMemoUnapplyInvoiceItemRequestType - a model defined in Swagger"""  # noqa: E501
        self._amount = None
        self._credit_memo_item_id = None
        self._credit_tax_item_id = None
        self._invoice_item_id = None
        self._tax_item_id = None
        self.discriminator = None
        # ``amount`` is effectively required: its setter raises ValueError on
        # None. The remaining fields are optional and only set when supplied.
        self.amount = amount
        if credit_memo_item_id is not None:
            self.credit_memo_item_id = credit_memo_item_id
        if credit_tax_item_id is not None:
            self.credit_tax_item_id = credit_tax_item_id
        if invoice_item_id is not None:
            self.invoice_item_id = invoice_item_id
        if tax_item_id is not None:
            self.tax_item_id = tax_item_id
    @property
    def amount(self):
        """Gets the amount of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        The amount that is unapplied from the specific item.  # noqa: E501
        :return: The amount of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :rtype: float
        """
        return self._amount
    @amount.setter
    def amount(self, amount):
        """Sets the amount of this CreditMemoUnapplyInvoiceItemRequestType.
        The amount that is unapplied from the specific item.  # noqa: E501
        :param amount: The amount of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :type: float
        """
        # Required field: reject None explicitly instead of storing it.
        if amount is None:
            raise ValueError("Invalid value for `amount`, must not be `None`")  # noqa: E501
        self._amount = amount
    @property
    def credit_memo_item_id(self):
        """Gets the credit_memo_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        The ID of the credit memo item.  # noqa: E501
        :return: The credit_memo_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :rtype: str
        """
        return self._credit_memo_item_id
    @credit_memo_item_id.setter
    def credit_memo_item_id(self, credit_memo_item_id):
        """Sets the credit_memo_item_id of this CreditMemoUnapplyInvoiceItemRequestType.
        The ID of the credit memo item.  # noqa: E501
        :param credit_memo_item_id: The credit_memo_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :type: str
        """
        self._credit_memo_item_id = credit_memo_item_id
    @property
    def credit_tax_item_id(self):
        """Gets the credit_tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        The ID of the credit memo taxation item.  # noqa: E501
        :return: The credit_tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :rtype: str
        """
        return self._credit_tax_item_id
    @credit_tax_item_id.setter
    def credit_tax_item_id(self, credit_tax_item_id):
        """Sets the credit_tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.
        The ID of the credit memo taxation item.  # noqa: E501
        :param credit_tax_item_id: The credit_tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :type: str
        """
        self._credit_tax_item_id = credit_tax_item_id
    @property
    def invoice_item_id(self):
        """Gets the invoice_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        The ID of the invoice item that the credit memo item is unapplied from.  # noqa: E501
        :return: The invoice_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :rtype: str
        """
        return self._invoice_item_id
    @invoice_item_id.setter
    def invoice_item_id(self, invoice_item_id):
        """Sets the invoice_item_id of this CreditMemoUnapplyInvoiceItemRequestType.
        The ID of the invoice item that the credit memo item is unapplied from.  # noqa: E501
        :param invoice_item_id: The invoice_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :type: str
        """
        self._invoice_item_id = invoice_item_id
    @property
    def tax_item_id(self):
        """Gets the tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        The ID of the invoice taxation item that the credit memo taxation item is unapplied from.  # noqa: E501
        :return: The tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :rtype: str
        """
        return self._tax_item_id
    @tax_item_id.setter
    def tax_item_id(self, tax_item_id):
        """Sets the tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.
        The ID of the invoice taxation item that the credit memo taxation item is unapplied from.  # noqa: E501
        :param tax_item_id: The tax_item_id of this CreditMemoUnapplyInvoiceItemRequestType.  # noqa: E501
        :type: str
        """
        self._tax_item_id = tax_item_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively converting nested
        # swagger models (anything exposing to_dict) in lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: if the model were ever a dict subclass,
        # also copy its own key/value pairs (no-op for this class).
        if issubclass(CreditMemoUnapplyInvoiceItemRequestType, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreditMemoUnapplyInvoiceItemRequestType):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"brian.lucas@optimizely.com"
] | brian.lucas@optimizely.com |
ac0f42eac7171b440802678af175d2cde73c0016 | 328afd873e3e4fe213c0fb4ce6621cb1a450f33d | /W3School/SearchandSorting/4.py | 725a50b72b117040dbec1999d45747df6ffd7017 | [] | no_license | TorpidCoder/Python | 810371d1bf33c137c025344b8d736044bea0e9f5 | 9c46e1de1a2926e872eee570e6d49f07dd533956 | refs/heads/master | 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | def bubble(arr):
for i in range(0,len(arr)):
for j in range(0,len(arr)-i-1):
if(arr[j]>arr[j+1]):
arr[j],arr[j+1] = arr[j+1],arr[j]
return arr
arr = [14,46,43,27,57,41,45,21,70]
print(bubble(arr))
| [
"sahilexemplary@gmail.com"
] | sahilexemplary@gmail.com |
5dda6f74cdb6d17620d1222030e998748d74087e | 3b2ead608d71da663af69f8260d9b05312a10571 | /Lesson_3/lesson3_6.py | eb119d846411f5370e7efa10885444adf3aa5d1e | [] | no_license | ddobik/Introduction-Python | 938f5c7dff0d51d0fe33c4ee1484747b27203805 | 1025e2c320671908595f6fccc16990756af9b6c4 | refs/heads/main | 2023-07-20T17:34:09.910739 | 2021-08-19T14:38:37 | 2021-08-19T14:38:37 | 376,647,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | x1 = int(input('Enter x1:'))
y1 = int(input('Enter y1:'))
x2 = int(input('Enter x2:'))
y2 = int(input('Enter y2:'))
# A chess knight moves 1 square along one axis and 2 along the other, so
# the unordered pair of absolute deltas must be exactly {1, 2}.
dx = abs(x1 - x2)
dy = abs(y1 - y2)
print('Yes' if {dx, dy} == {1, 2} else 'No')
| [
"you@example.com"
] | you@example.com |
2047adcb0b6e70a58b42333cf77af87aa957ccad | ec4707af55a84914b9304d2a7ec2d89e79219b03 | /backend/home/migrations/0001_load_initial_data.py | c02063f882fbc83fcc8371b54075d30fa27d1502 | [] | no_license | crowdbotics-apps/billsengine-31836 | a8565f4ae4611d07d3254c48fc699b35ed2bb21c | f67d69303f16f3612b32f66f139e93d3a382effb | refs/heads/master | 2023-08-31T22:45:55.425225 | 2021-11-05T09:24:59 | 2021-11-05T09:24:59 | 424,891,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default Site row (pk=1) at this app's name and domain.

    Runs inside a data migration, so the Site model is fetched through the
    historical ``apps`` registry rather than imported directly.
    """
    site_model = apps.get_model("sites", "Site")
    domain = "billsengine-31836.botics.co"
    defaults = {"name": "billsengine"}
    # Only override the domain when one is configured.
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(id=1, defaults=defaults)
class Migration(migrations.Migration):
    """Data migration that seeds/updates the default django.contrib.sites row."""

    # Depends on the sites framework's last schema migration so the Site
    # table exists before create_site touches row id=1.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        # noop reverse makes the migration reversible (rolling back simply
        # leaves the Site row as-is); without it, migrating backwards past
        # this point raises IrreversibleError.
        migrations.RunPython(create_site, migrations.RunPython.noop),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b7de0ef299592d09e5807e075a0b09fca0ee41f7 | e342c4daf69dcacc34431e731e69b7171fc85bf1 | /core/views.py | 5c367a033eb5f645a23e3791a52ea8beaa34b238 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | nixoz/babybuddy | 5b2b4939af8ec4e8314ee297a1f653d121f1143d | 7b6c9fb3f342b1f5fbabd0b4360ee95925b67047 | refs/heads/master | 2021-07-24T05:33:54.038678 | 2017-11-02T10:39:32 | 2017-11-02T10:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,349 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse
from django.views.generic.base import RedirectView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django_filters.views import FilterView
from .models import Child, DiaperChange, Feeding, Note, Sleep, Timer, TummyTime
from .forms import (ChildForm, ChildDeleteForm, DiaperChangeForm, FeedingForm,
SleepForm, TimerForm, TummyTimeForm)
# --- Child CRUD views -------------------------------------------------------
class ChildList(PermissionRequiredMixin, FilterView):
    # django-filter FilterView: paginated, filterable list of children.
    model = Child
    template_name = 'core/child_list.html'
    permission_required = ('core.view_child',)
    paginate_by = 10
class ChildAdd(PermissionRequiredMixin, CreateView):
    model = Child
    permission_required = ('core.add_child',)
    form_class = ChildForm
    success_url = '/children'
class ChildDetail(PermissionRequiredMixin, DetailView):
    model = Child
    permission_required = ('core.view_child',)
class ChildUpdate(PermissionRequiredMixin, UpdateView):
    model = Child
    permission_required = ('core.change_child',)
    form_class = ChildForm
    success_url = '/children'
class ChildDelete(PermissionRequiredMixin, UpdateView):
    # NOTE(review): despite its name this is an UpdateView driven by
    # ChildDeleteForm with a confirm template -- presumably the form
    # performs the actual deletion on save. TODO confirm in ChildDeleteForm.
    model = Child
    form_class = ChildDeleteForm
    template_name = 'core/child_confirm_delete.html'
    permission_required = ('core.delete_child',)
    success_url = '/children'
# --- Diaper change CRUD views -----------------------------------------------
class DiaperChangeList(PermissionRequiredMixin, FilterView):
    model = DiaperChange
    template_name = 'core/diaperchange_list.html'
    permission_required = ('core.view_diaperchange',)
    paginate_by = 10
class DiaperChangeAdd(PermissionRequiredMixin, CreateView):
    model = DiaperChange
    permission_required = ('core.add_diaperchange',)
    form_class = DiaperChangeForm
    success_url = '/changes'
class DiaperChangeUpdate(PermissionRequiredMixin, UpdateView):
    model = DiaperChange
    permission_required = ('core.change_diaperchange',)
    form_class = DiaperChangeForm
    success_url = '/changes'
class DiaperChangeDelete(PermissionRequiredMixin, DeleteView):
    model = DiaperChange
    permission_required = ('core.delete_diaperchange',)
    success_url = '/changes'
# --- Feeding CRUD views -----------------------------------------------------
class FeedingList(PermissionRequiredMixin, FilterView):
    model = Feeding
    template_name = 'core/feeding_list.html'
    permission_required = ('core.view_feeding',)
    paginate_by = 10
class FeedingAdd(PermissionRequiredMixin, CreateView):
    model = Feeding
    permission_required = ('core.add_feeding',)
    form_class = FeedingForm
    success_url = '/feedings'
    def get_form_kwargs(self):
        """Forward the optional ``timer`` query parameter to FeedingForm.

        Allows a running Timer to pre-populate the feeding's start/end
        times (handled in FeedingForm.__init__).
        """
        kwargs = super(FeedingAdd, self).get_form_kwargs()
        # Add timer to be used by FeedingForm.__init__
        kwargs.update({'timer': self.request.GET.get('timer', None)})
        return kwargs
class FeedingUpdate(PermissionRequiredMixin, UpdateView):
    model = Feeding
    permission_required = ('core.change_feeding',)
    form_class = FeedingForm
    success_url = '/feedings'
class FeedingDelete(PermissionRequiredMixin, DeleteView):
    model = Feeding
    permission_required = ('core.delete_feeding',)
    success_url = '/feedings'
# --- Note CRUD views --------------------------------------------------------
# Notes use plain ``fields`` lists instead of a custom form class.
class NoteList(PermissionRequiredMixin, FilterView):
    model = Note
    template_name = 'core/note_list.html'
    permission_required = ('core.view_note',)
    paginate_by = 10
class NoteAdd(PermissionRequiredMixin, CreateView):
    model = Note
    permission_required = ('core.add_note',)
    fields = ['child', 'note']
    success_url = '/notes'
class NoteUpdate(PermissionRequiredMixin, UpdateView):
    model = Note
    permission_required = ('core.change_note',)
    fields = ['child', 'note']
    success_url = '/notes'
class NoteDelete(PermissionRequiredMixin, DeleteView):
    model = Note
    permission_required = ('core.delete_note',)
    success_url = '/notes'
class SleepList(PermissionRequiredMixin, FilterView):
model = Sleep
template_name = 'core/sleep_list.html'
permission_required = ('core.view_sleep',)
paginate_by = 10
class SleepAdd(PermissionRequiredMixin, CreateView):
model = Sleep
permission_required = ('core.add_sleep',)
form_class = SleepForm
success_url = '/sleep'
def get_form_kwargs(self):
kwargs = super(SleepAdd, self).get_form_kwargs()
# Add timer to be used by SleepForm.__init__
kwargs.update({'timer': self.request.GET.get('timer', None)})
return kwargs
class SleepUpdate(PermissionRequiredMixin, UpdateView):
model = Sleep
permission_required = ('core.change_sleep',)
form_class = SleepForm
success_url = '/sleep'
class SleepDelete(PermissionRequiredMixin, DeleteView):
model = Sleep
permission_required = ('core.delete_sleep',)
success_url = '/sleep'
class TimerList(PermissionRequiredMixin, FilterView):
model = Timer
template_name = 'core/timer_list.html'
permission_required = ('core.view_timer',)
paginate_by = 10
class TimerDetail(PermissionRequiredMixin, DetailView):
model = Timer
permission_required = ('core.view_timer',)
class TimerAdd(PermissionRequiredMixin, CreateView):
model = Timer
permission_required = ('core.add_timer',)
form_class = TimerForm
success_url = '/timers'
def get_form_kwargs(self):
kwargs = super(TimerAdd, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
class TimerUpdate(PermissionRequiredMixin, UpdateView):
model = Timer
permission_required = ('core.change_timer',)
form_class = TimerForm
success_url = '/timers'
def get_form_kwargs(self):
kwargs = super(TimerUpdate, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
def get_success_url(self):
instance = self.get_object()
return '/timer/{}/'.format(instance.id)
class TimerAddQuick(PermissionRequiredMixin, RedirectView):
permission_required = ('core.add_timer',)
def get(self, request, *args, **kwargs):
instance = Timer.objects.create(user=request.user)
instance.save()
self.url = request.GET.get(
'next', reverse('timer-detail', args={instance.id}))
return super(TimerAddQuick, self).get(request, *args, **kwargs)
class TimerRestart(PermissionRequiredMixin, RedirectView):
permission_required = ('core.change_timer',)
def get(self, request, *args, **kwargs):
instance = Timer.objects.get(id=kwargs['pk'])
instance.restart()
return super(TimerRestart, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
return '/timer/{}'.format(kwargs['pk'])
class TimerStop(PermissionRequiredMixin, RedirectView):
permission_required = ('core.change_timer',)
def get(self, request, *args, **kwargs):
instance = Timer.objects.get(id=kwargs['pk'])
instance.stop()
return super(TimerStop, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
return '/timer/{}'.format(kwargs['pk'])
class TimerDelete(PermissionRequiredMixin, DeleteView):
model = Timer
permission_required = ('core.delete_timer',)
success_url = '/'
class TummyTimeList(PermissionRequiredMixin, FilterView):
model = TummyTime
template_name = 'core/tummytime_list.html'
permission_required = ('core.view_tummytime',)
paginate_by = 10
class TummyTimeAdd(PermissionRequiredMixin, CreateView):
model = TummyTime
permission_required = ('core.add_tummytime',)
form_class = TummyTimeForm
success_url = '/tummy-time'
def get_form_kwargs(self):
kwargs = super(TummyTimeAdd, self).get_form_kwargs()
# Add timer to be used by TummyTimeForm.__init__
kwargs.update({'timer': self.request.GET.get('timer', None)})
return kwargs
class TummyTimeUpdate(PermissionRequiredMixin, UpdateView):
model = TummyTime
permission_required = ('core.change_tummytime',)
form_class = TummyTimeForm
success_url = '/tummy-time'
class TummyTimeDelete(PermissionRequiredMixin, DeleteView):
model = TummyTime
permission_required = ('core.delete_tummytime',)
success_url = '/tummy-time'
| [
"chris@chrxs.net"
] | chris@chrxs.net |
b5fec5082d173d47411898924befc9beb396fd8c | 49d8827695d1fa9076467fd1ec2e26afcf56e2dc | /comparser.py | ddf4df8146946d26150c96b895fa8c93ac27fd9b | [] | no_license | mhearne-usgs/comcatloader | 8eb8c2214915b13403b35f5b6a991ce9fb67090c | 35f00d33356f72484ffc5be1bdd7a9c2e7a3c090 | refs/heads/master | 2020-04-01T16:46:47.101082 | 2016-02-05T15:54:06 | 2016-02-05T15:54:06 | 9,404,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,113 | py | #!/usr/bin/env python
#stdlib imports
import sys
import os.path
import datetime
import optparse
import importlib
from xml.dom import minidom
#local imports
import comquakeml
TIMEFMT = '%Y-%m-%d %H:%M:%S'
DEFAULT_START = datetime.datetime(1000,1,1)
DEFAULT_END = datetime.datetime(3000,1,1)
def getEventTime(xmlfile):
root = minidom.parse(xmlfile)
origin = root.getElementsByTagName('origin')[0] #we don't care which origin
timestr = origin.getElementsByTagName('time')[0].getElementsByTagName('value')[0].firstChild.data
#2012-09-04T06:55:01Z
time = datetime.datetime.strptime(timestr[0:19],'%Y-%m-%dT%H:%M:%S')
root.unlink()
return time
def getSummary(event,origins,oidx):
mag = event['magnitude'][0]['mag']
fmt = '%s M%.1f (%.4f,%.4f) %.1f km'
tpl = (event['time'].strftime('%Y-%m-%d %H:%M:%S'),mag,event['lat'],event['lon'],event['depth'])
eventdesc = fmt % tpl
summary = ''
if not len(origins):
summary = 'No ComCat origins were associated with event %s' % eventdesc
return summary
if oidx > -1:
summary += 'Event %s was associated with event %i:\n' % (eventdesc,oidx+1)
i = 1
for o in origins:
if o['mag'] is None:
o['mag'] = 0.0
fmt = '\t%i) %s M%.1f (%.4f,%.4f) %.1f km (%.1f seconds, %.1f km distance)\n'
if o.has_key('triggerlat'):
tpl = (i,o['triggertime'].strftime('%Y-%m-%d %H:%M:%S'),o['mag'],o['triggerlat'],o['triggerlon'],
o['triggerdepth'],o['timedelta'],o['distance'])
else:
tpl = (i,o['time'].strftime('%Y-%m-%d %H:%M:%S'),o['mag'],o['lat'],o['lon'],o['depth'],
o['timedelta'],o['distance'])
summary += fmt % tpl
i += 1
return summary
if oidx == -1:
summary += 'Event %s was associated with NONE of the following events:\n' % (eventdesc)
i = 1
for o in origins:
fmt = '\t%i) %s M%.1f (%.4f,%.4f) %.1f km (%.1f seconds, %.1f km distance)'
if o.has_key('triggerlat'):
tpl = (i,o['triggertime'].strftime('%Y-%m-%d %H:%M:%S'),o['mag'],o['triggerlat'],o['triggerlon'],
o['triggerdepth'],o['timedelta'],o['distance'])
else:
tpl = (i,o['time'].strftime('%Y-%m-%d %H:%M:%S'),o['mag'],o['lat'],o['lon'],o['depth'],
o['timedelta'],o['distance'])
summary += fmt % tpl
i += 1
return summary
def processEvent(quake,event,origins,events,numevents,ievent):
filename = None
norg = len(origins)
nevents = len(events)
mag = event['magnitude'][0]['mag']
eventdesc = '%s: %s M%.1f (%.4f,%.4f)' % (event['id'],str(event['time']),mag,event['lat'],event['lon'])
ofmt = '\t%i) %s M%.1f (%.4f,%.4f) %.1f km - %.1f km distance, %i seconds - %s'
oidx = -1
if norg == 1:
filename = quake.renderXML(event,origins[0])
print 'Writing event %s to file (%i of %i).' % (eventdesc,ievent,numevents)
if norg == 0:
filename = quake.renderXML(event)
print 'Rendering origin %s to XML' % eventdesc
if norg > 1:
fmt = 'Event %s M%.1f (%.4f,%.4f) %.1f km has %i possible associations:'
tpl = (event['time'],mag,event['lat'],event['lon'],event['depth']/1000,norg)
print
print fmt % tpl
ic = 0
for origin in origins:
time = origin['time']
mag = origin['mag']
if mag is None:
mag = 0.0
lat = origin['lat']
lon = origin['lon']
depth = origin['depth']
timedelta = origin['timedelta']
distance = origin['distance']
tpl = (ic,time,mag,lat,lon,depth,distance,timedelta,origin['id'])
try:
print ofmt % tpl
except:
pass
ic += 1
print '\t%i) None of the above (do not associate)' % (ic)
nresp = 0
maxresp = 3
ischar = True
oidx = -1
while oidx < 0 and nresp <= maxresp:
resp = raw_input('Choose one of the options above: ')
try:
oidx = int(resp)
except:
pass
nresp += 1
if oidx >= 0:
if oidx < ic:
filename = quake.renderXML(event,origins[oidx])
output_origin = origins[oidx].copy()
else:
print 'Not associating event, as requested.'
else:
print "You obviously can't read. Moving on."
if (oidx < 0 and norg > 1) or filename is None:
x = 1
return (filename,oidx)
#this should be a generator
def getEvents():
#yield event
pass
def main(options,args):
if options.trumpWeight is not None:
try:
int(options.trumpWeight)
except ValueError:
print 'Trump weight must be an integer value.'
sys.exit(1)
modulefile = args[0]
if not os.path.isfile(modulefile):
print 'Module file %s does not exist!'
sys.exit(1)
module = None
mpath,mfile = os.path.split(modulefile)
modname,modext = os.path.splitext(mfile)
try:
module = importlib.import_module(modname)
except ImportError:
print '%s does not appear to be a valid Python module.' % modname
sys.exit(1)
if not module.__dict__.has_key('getEvents'):
print '%s does not appear to have the required function getEvents().'
sys.exit(1)
twindow = 16
dwindow = 100
if options.timewindow is not None:
twindow = int(options.timewindow)
if options.distance is not None:
dwindow = float(options.distance)
catalog = 'us'
agency = ''
if options.agency is not None:
agency = options.agency
contributor = comquakeml.DEFAULT_SOURCE
triggersource = None
method = None
ptype = 'origin'
startdate = DEFAULT_START
enddate = DEFAULT_END
folder = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
if options.catalog is not None:
catalog = options.catalog
if options.contributor is not None:
contributor = options.contributor
if options.method is not None:
method = options.method
if options.folder is not None:
folder = options.folder
if options.triggersource is not None:
triggersource = options.triggersource
if options.beginDate is not None:
try:
startdate = datetime.datetime.strptime(options.beginDate,'%Y%m%d')
except:
print 'Could not parse start date "%s"' % options.beginDate
sys.exit(1)
if options.endDate is not None:
try:
enddate = datetime.datetime.strptime(options.endDate,'%Y%m%d')
except:
print 'Could not parse end date "%s"' % options.endDate
sys.exit(1)
if options.producttype is not None:
types = [comquakeml.ORIGIN,comquakeml.FOCAL,comquakeml.TENSOR]
ptype = options.producttype
if ptype not in types:
print '%s not in %s. Exiting.' % (ptype,','.join(types))
sys.exit(1)
quake = comquakeml.QuakeML(ptype,folder,catalog=catalog,agency=agency,
triggersource=triggersource,contributor=contributor,
method=method,timewindow=twindow,distwindow=dwindow)
if options.clear:
resp = raw_input('You set the option to clear all existing QuakeML output. Are you sure? Y/[n]')
if resp.strip().lower() == 'y':
quake.clearOutput()
else:
print 'Not clearing QuakeML output.'
#parse the input data from file, database, webserver, whatever
earliest = datetime.datetime(3000,1,1)
latest = datetime.datetime(1,1,1)
xmlfiles = []
if options.delete:
numdeleted = 0
for event in module.getEvents(args[1:],startDate=startdate,endDate=enddate):
quake.delete(event)
numdeleted += 1
print '%i events were deleted. Exiting.' % numdeleted
sys.exit(0)
numevents = 0
#the module getEvents() function doesn't have to do anything with the startDate and endDate parameters
for event in module.getEvents(args[1:],startDate=startdate,endDate=enddate):
if event['time'] < earliest:
earliest = event['time']
if event['time'] > latest:
latest = event['time']
xmlfile = os.path.join(quake.xmlfolder,'%s.xml' % event['id'])
if os.path.isfile(xmlfile):
xmlfiles.append(xmlfile)
continue
if event['time'] > datetime.datetime(2007,9,30):
pass
quake.add(event)
sys.stderr.write('Parsing event %s\n' % event['time'])
numevents += 1
numnear = len(quake.NearEventIndices)
numprocessed = 0
summary = [] #list of events that were not associated, or were associated manually
for event,origins,events in quake.generateEvents():
xmlfile,oidx = processEvent(quake,event,origins,events,numevents,numprocessed)
if xmlfile is None:
x = 1
if len(origins) != 1 and options.producttype != 'origin':
summary.append(getSummary(event,origins,oidx))
xmlfiles.append(xmlfile)
numprocessed += 1
if options.load:
for xmlfile in xmlfiles:
if xmlfile is None:
continue
etime = getEventTime(xmlfile)
nelapsed = (datetime.datetime.utcnow() - etime).days
res,output,errors = quake.push(xmlfile,options.trumpWeight,nelapsed=nelapsed)
p,fname = os.path.split(xmlfile)
if not res:
print 'Failed to send quakeML file %s. Output: "%s" Error: "%s"' % (fname,output,errors)
else:
print 'Sent quakeML file %s, output %s.' % (fname,output)
if not len(summary):
sys.exit(0)
DAYFMT = '%Y-%m-%d'
print
print 'Summary for period %s to %s:' % (earliest.strftime(DAYFMT),latest.strftime(DAYFMT))
for eventinfo in summary:
print eventinfo
print
if __name__ == '__main__':
types = [comquakeml.ORIGIN,comquakeml.FOCAL,comquakeml.TENSOR]
usage = '''usage: %prog [options] modulefile arg2 ... argN'''
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--agency", dest="agency",
help="Set the agency ID", metavar="AGENCY")
parser.add_option("-t", "--timewindow", dest="timewindow",
help="change to TIME timewindow from 16 sec default", metavar="TIME")
parser.add_option("-d", "--distance", dest="distance",
help="change to DISTANCE search radius from 100 km default", metavar="DISTANCE")
parser.add_option("-g", "--catalog", dest="catalog",
help="Set the catalog name", metavar="CATALOG")
parser.add_option("-o", "--contributor", dest="contributor",
help="Set the contributor for this data", metavar="CONTRIBUTOR")
parser.add_option("-u", "--trumpweight", dest="trumpWeight",
help="Set the trump weight for this catalog", metavar="TRUMPWEIGHT")
parser.add_option("-r", "--triggersource", dest="triggersource",
help="Set the trigger source for this data (what catalog should this data associate with)", metavar="TRIGGERSOURCE")
parser.add_option("-m", "--method", dest="method",
help="Set the method used to determine catalog (Mww, Mwc, etc.)", metavar="METHOD")
parser.add_option("-l", "--load", dest="load",default=False,action="store_true",
help="Load catalog of created XML into ComCat")
parser.add_option("-f", "--folder", dest="folder",
help="""Set folder for output QuakeML, appended to config output folder.
Defaults to current date/time""", metavar="FOLDER")
parser.add_option("-b", "--beginDate", dest="beginDate",
help="""Specify starting date for loading from input catalog
(YYYYMMDD) (defaults to 18000101)""",metavar="BEGINDATE")
parser.add_option("-e", "--endDate", dest="endDate",
help="""Specify ending date for loading from input catalog
(YYYYMMDD) (defaults to 30000101)""",metavar="ENDDATE")
parser.add_option("-p", "--producttype", dest="producttype",
help="Define type of product (one of %s) (default to %s)" % (','.join(types),comquakeml.ORIGIN),
metavar="PRODUCTTYPE")
parser.add_option("-c", "--clear",
action="store_true", dest="clear", default=False,
help="Clear XML output")
parser.add_option("-x", "--delete",
action="store_true", dest="delete", default=False,
help="Delete specified products")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(0)
main(options,args)
| [
"mhearne@usgs.gov"
] | mhearne@usgs.gov |
c10eb3fcc7871f0054c20e215d8aadf633ad154e | 7a10bf8748c7ce9c24c5461c21b5ebf420f18109 | /ml_training/PythonCode/P7_Descriptive+Statistics+in+Python.py | 0b7d4e91d52fa214f3d26c9a09981be833cfffa8 | [] | no_license | VishalChak/machine_learning | aced4b4bf65bbbd08c966a2f028f217a918186d5 | c6e29abe0509a43713f35ebf53da29cd1f0314c1 | refs/heads/master | 2021-06-15T07:13:56.583097 | 2019-10-05T06:01:58 | 2019-10-05T06:01:58 | 133,164,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py |
# coding: utf-8
# View first 20 rows
import pandas
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = pandas.read_csv(url, names=names)
peek = data.head()
print(peek)
# Dimensions of your data
shape = data.shape
print(shape)
# Data Types for Each Attribute
types = data.dtypes
print(types)
# Statistical Summary
pandas.set_option('display.width', 100)
pandas.set_option('precision', 3)
description = data.describe()
print(description)
# Class Distribution
class_counts = data.groupby('class').size()
print(class_counts)
# Pairwise Pearson correlations
correlations = data.corr(method='pearson')
print(correlations)
# Skew for each attribute
skew = data.skew()
print(skew)
# The skew result show a positive (right) or negative (left) skew. Values closer to zero show less skew.
| [
"vishalbabu.in@gmail.com"
] | vishalbabu.in@gmail.com |
1a0250d644e2af667ae09c2e86f0e6739e15ac4e | a637b5db5cc2ead56d9da84ca4e2d2282b393a5c | /config/settings/common.py | 25ad754154d24f00b373fd82ed9b6cf72fd6abcf | [
"MIT"
] | permissive | noisy/SpisTresci | 7d136214877f1ac5592560ca85781a1acdfe0bd9 | 7f871f74a0576d91b62c151ea526af11b3acda2c | refs/heads/master | 2021-01-18T01:20:47.933982 | 2016-04-24T13:02:16 | 2016-04-24T13:02:16 | 56,930,392 | 1 | 0 | null | 2016-06-08T19:11:27 | 2016-04-23T16:55:42 | Python | UTF-8 | Python | false | false | 8,938 | py | # -*- coding: utf-8 -*-
"""
Django settings for SpisTresci project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('spistresci')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'spistresci.users', # custom users app
'spistresci.products',
'spistresci.stores',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'spistresci.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Krzysztof Szumny""", 'krzysztof.szumny@spistresci.pl'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///spistresci"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Warsaw'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
ACCOUNT_ADAPTER = 'spistresci.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'spistresci.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('spistresci.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env("CELERY_BROKER_URL", default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
ST_STORES_CONFIG = env("ST_STORES_CONFIG", default=ROOT_DIR('stores.yml'))
ST_STORES_DATA_DIR = env("ST_STORES_DATA_DIR", default=ROOT_DIR('data'))
| [
"noisy.pl@gmail.com"
] | noisy.pl@gmail.com |
42542463b033ad43ac9316b25e1bad450db6a910 | 8f70ad12af7eba07efa52eb29b8f99ed3900dbb9 | /AGTGA data/AGTGA/LifeHack/LifeHack 1/TestSuite/TestSuite/TestCase01.py | 19b97f56691b365278a3ad089a95877fcf12e31f | [] | no_license | Georgesarkis/AGTGARowData | 768952dc03dc342bcbe0902bf2fb1720853d0e14 | e1faa7dc820b051a73b0844eac545e597a97da16 | refs/heads/master | 2022-10-01T17:06:04.758751 | 2020-06-05T07:25:41 | 2020-06-05T07:25:41 | 267,772,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from TestSuite.TestSuiteHelper import ElementFinder
port = 'http://localhost:4723/wd/hub'
driver = webdriver.Remote(command_executor=port, desired_capabilities={'automationName' : 'UiAutomator2','deviceName': 'Moto G (5)','platformName': 'Android', 'app': 'C:/Users/ze0396/Desktop/AGTGA/APKS/LifeHack.apk' , 'autoGrantPermissions' : 'true', 'appWaitActivity' : '*.*','fullreset' : 'false','noReset' : 'true' } )
time.sleep(2)
time.sleep(2)
el = ElementFinder(driver, 385,208)
el.click()
time.sleep(2)
el = ElementFinder(driver, 822,144)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,1320)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,1046)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,497)
el.click()
driver.press_keycode(3)
driver.close_app()
driver.quit()
print('TestCase finished successfully') | [
"32592901+Georgesarkis@users.noreply.github.com"
] | 32592901+Georgesarkis@users.noreply.github.com |
de0e31075c2459156e28261e945074f0a71f18d5 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_39866.py | 6b0f02744bf0d3584e6abef5175582abde3a3540 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,846 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((650.12, 448.706, 425.976), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((607.214, 500.621, 408.832), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((548.512, 555.598, 392.465), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((524.414, 451.502, 481.851), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((447.42, 708.895, 332.232), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((619.262, 478.164, 407.77), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((619.992, 477.194, 407.434), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((609.798, 451.435, 408.562), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((592.669, 462.651, 388.695), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((572.3, 449.555, 373.812), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((568.678, 428.533, 355.152), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((590.579, 413.606, 345.388), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((645.863, 473.63, 416.975), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((544.566, 359.013, 268.472), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((475.568, 547.541, 245.038), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((475.568, 547.541, 245.038), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((493.447, 533.176, 261.827), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((511.738, 520.475, 279.578), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((531.096, 512.152, 298.631), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((550.592, 509.811, 319.092), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((569.316, 513.201, 340.093), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((590.398, 513.995, 359.473), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((406.402, 411.931, 209.628), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((776.695, 607.993, 513.921), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((587.075, 555.002, 363.635), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((587.075, 555.002, 363.635), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((582.615, 562.062, 391.058), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((570.569, 554.463, 415.918), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((544.818, 542.826, 423.698), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((613.779, 451.324, 471.614), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((465.789, 629.068, 377.48), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((601.983, 486.867, 433.358), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((601.948, 486.824, 433.449), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((574.977, 492.401, 439.763), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((572.968, 468.779, 423.873), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((577.51, 443.056, 411.977), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((592.073, 428.749, 390.979), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((606.836, 434.346, 365.973), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((615.579, 432.769, 337.387), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((650.554, 509.579, 347.944), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((579.756, 354.725, 316.665), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((647.169, 541.662, 380.983), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((626.077, 536.503, 394.449), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((579.305, 524.025, 425.455), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((530.798, 515.075, 455.316), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((582.932, 490.847, 511.889), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((425.753, 521.393, 455.007), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((529.076, 489.016, 503.218), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((546.208, 503.994, 486.015), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((563.4, 513.943, 465.074), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((567.459, 540.448, 453.668), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((566.607, 568.813, 446.166), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((568.419, 592.509, 428.651), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((615.295, 528.041, 417.595), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((524, 661.837, 432.536), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
8890d1e85684f3a78ab85eba661048c7c7206fa6 | 93bf4bbafe0524335ea1216f7f2941348c2cd1bd | /tensorflow/python/ops/math_ops_test.py | 9d126f7542dcf71295f7cf32c8eb18a10d536bde | [
"Apache-2.0"
] | permissive | sachinpro/sachinpro.github.io | c4951734b09588cad58711a76fe657f110163c11 | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | refs/heads/master | 2022-12-23T10:00:13.902459 | 2016-06-27T13:18:27 | 2016-06-27T13:25:58 | 25,289,839 | 1 | 1 | Apache-2.0 | 2022-12-15T00:45:03 | 2014-10-16T06:44:30 | C++ | UTF-8 | Python | false | false | 3,159 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with self.test_session():
y_tf = math_ops.reduce_sum(x).eval()
self.assertEqual(y_tf, 21)
class RoundTest(test_util.TensorFlowTestCase):
def testRounding(self):
x = [0.49, 0.7, -0.3, -0.8]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = y_tf.eval()
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
def testSquaredDifference(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
y = np.array([-3, -2, -1], dtype=np.int32)
z = (x - y)*(x - y)
with self.test_session():
z_tf = math_ops.squared_difference(x, y).eval()
self.assertAllClose(z, z_tf)
if __name__ == "__main__":
googletest.main()
| [
"x0234443@ti.com"
] | x0234443@ti.com |
83be1591ac8d48a88e57e32741e7a9ba266abfec | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Wyx_w_M_w_Sob_to_Wz_focus/Sob_Wxy/Sob_k05_s001_EroM_Mae_s001/pyr_Tcrop256_p20_j15/pyr_3s/L6/step10_b2_post.py | cd3a09eb1c8607d3719fbaee93dd56be09354fb6 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,742 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# 按F5執行時, 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~ 才可 import step10_a.py 喔!
code_exe_dir = os.path.dirname(code_exe_path) ### 目前執行 step10_b.py 的 dir
if(os.getcwd() != code_exe_dir): ### 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~
os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
### 所有 指令 統一寫這邊
from step10_c_exp_command import *
######################################################################################################################
import subprocess as sb
# sb.run(cmd_python_step10_a + [f"ch032_1side_1__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_3__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_3__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_3__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_4__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_4__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_4__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_4__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_5__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_5__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_5__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_5__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_5__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_6__2side_6__3side_6.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_1__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_2__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_2__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_3__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_3__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_3__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_4__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_4__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_4__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_4__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_5__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_5__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_5__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_5__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_5__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_6__3side_6.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_1.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_2.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_3.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_4.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_5.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_6.{train}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_7__2side_7__3side_7.{compress_all}"])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
6fa776e5d7257a1496f3e5754db3d168fdb937b0 | d8076e1d19882e4816bcbd7f2b71039007462624 | /Fe_plot_sub.py | 6f436b80946a9b901bce9a849e19272ec985c30d | [] | no_license | kfinn6561/Light_Echoes | e87186c257228f1d191db7dbe7b74e8ddab35e22 | 15f5ed00e26606a68f0544659745c421bb985abb | refs/heads/main | 2023-04-03T16:35:14.992398 | 2021-03-23T10:20:09 | 2021-03-23T10:20:09 | 350,667,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | import numpy as np
from scipy.io.idl import readsav
import matplotlib.pylab as pl
import pylabsetup
import sys
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import readcol
import time
import glob
from matplotlib.backends.backend_pdf import PdfPages
import pickle as pkl
from colour_tools import kelly_colors
sne=['sn1998fa','sn2000H','sn2004ff','sn2006T','sn2006el','sn2008bo','sn2009mg','sn2011fu',
'sn1996cb','sn2008ax','sn2011dh','sn2011ei','sn1993J','sn2003bg']
alphabetical_sne=['sn1998fa','sn2000H','sn2004ff','sn2006T','sn2006el','sn2008bo','sn2009mg','sn2011fu','sn1993J',
'sn1996cb','sn2003bg','sn2008ax','sn2011dh','sn2011ei']
sne_handles={}
data_fol='yuqian_plot_data/'
colours=iter(kelly_colors[6:])
sne_colours={}
def plot_indi(vel_list,vel_dir,xr,yr,save_plot, annot = ''):
# Kieran's color
sn_c = pkl.load(open("sn_colors.pkl",'rb'))
key = sn_c.keys()
fig,ax = pl.subplots(figsize=(7.5,7.5))
pl.xlim(xr[0], xr[1])
pl.ylim(yr[0], yr[1]) # in unit of 1000 km/s
filename=open(data_fol+vel_list,'r').read()
sn_name_list=filename.split('\n')
symbol=['-<','->','-^','-v','-*','-d','-s','-p', '-h']
j = 0
sne_files={sn.split('_')[0]:sn for sn in sn_name_list}
for i, sn in enumerate(sne):
sn_name=sne_files[sn]
if any(sn_name.split('_')[0] in s for s in key): # to be consistent with color in Kieran's paper
res = [x for x in key if sn_name.split('_')[0] in x]
spec, phase, vel,velerr=readcol.readcol(vel_dir+sn_name,twod=False)
MFC='none' if sn not in ['sn1993J', 'sn2003bg'] else sn_c[res[0]]
sne_handles[sn]=ax.errorbar(phase, vel/1000, yerr=[velerr/1000, velerr/1000],capthick=2,fmt='-o',ms=6.5,
label=sn_name.split('_')[0],color=sn_c[res[0]],mec=sn_c[res[0]],mfc=MFC,mew=1.5)
else:
spec, phase, vel,velerr=readcol.readcol(vel_dir+sn_name,twod=False)
try:
c=sne_colours[sn]
except KeyError:
c=next(colours)
sne_colours[sn]=c
sne_handles[sn]=ax.errorbar(phase, vel/1000, yerr=[velerr/1000, velerr/1000],capthick=2,fmt=symbol[j%9],mew=0,ms=8,
label=sn_name.split('_')[0],color='gray')
j = j+1
pl.text((xr[1]-xr[0])*0.1+xr[0],(yr[1]-yr[0])*0.9+yr[0],annot,fontsize=20)
pl.xlabel("Phase since V-band maximum (days)",fontsize=20)
pl.ylabel("Absorption velocity ($10^3$ km s$^{-1}$)",fontsize=20)
minorLocatory = MultipleLocator(1000)
minorLocatorx = MultipleLocator(10)
ax.xaxis.set_minor_locator(minorLocatorx)
ax.yaxis.set_minor_locator(minorLocatory)
pl.legend(handles=[sne_handles[sn] for sn in alphabetical_sne],fontsize=15,mode="expand",loc=3,ncol=2,bbox_to_anchor=(0.3, .6, 0.7, 1))
pl.subplots_adjust(left=0.15)
pl.savefig(save_plot)
#pl.close()
plot_indi("inputIIb_HeI5875",data_fol,[-25,130],[-3,-17],'plots/IIb_HeI5876_vabs.pdf',annot = 'He I')
plot_indi("inputIIb_Halpha",data_fol,[-25,70],[-8,-25],'plots/IIb_Halpha_vabs.pdf', annot =r'H$\alpha$')
pl.show() | [
"kieran.finn@hotmail.com"
] | kieran.finn@hotmail.com |
f6d33989a70a78864ba865f803548086bdc9e55a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04029/s916243192.py | 75b46b03d432155a39cdbb1c9c8a2ca16bbff062 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | x=[i for i in range(1,int(input())+1)]
print(sum(x)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e966f611456988fe1af4938564b6c442113b17dc | 13f148c314b638c7ca810b5d09bdfd79248283de | /manage.py | 329e9d1514df82a6f71165998d08b60e8faeca1e | [] | no_license | akx/drf_unique_together_testcase | bb6888ce2a718f186d3063daccf947e143de7e79 | 132ef026a6a547fce9723ea2cef4606a1172c721 | refs/heads/master | 2020-03-08T15:06:17.559107 | 2018-04-05T12:25:24 | 2018-04-05T12:25:24 | 128,202,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "drf_unique_together_testcase.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"akx@iki.fi"
] | akx@iki.fi |
30a212edab958dd3871d929e5cc8050d3814812b | c0cffe5f31070ac163e3963b8616da10d131e355 | /env/lib/python3.6/site-packages/virtualenv.py | fd7974de812848425d79847966ced39177ab7269 | [
"Apache-2.0"
] | permissive | SanaAwan5/smart_contracts7 | 1bc9a0bf2a700e527436d1f6cee8e0251c0e651b | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | refs/heads/master | 2021-08-31T21:50:35.086907 | 2019-06-25T05:22:19 | 2019-06-25T05:22:19 | 193,639,676 | 0 | 1 | Apache-2.0 | 2021-08-11T03:42:07 | 2019-06-25T05:22:07 | Python | UTF-8 | Python | false | false | 103,703 | py | #!/usr/bin/env python
"""Create a "virtual" Python installation"""
# fmt: off
import os # isort:skip
import sys # isort:skip
# If we are running in a new interpreter to create a virtualenv,
# we do NOT want paths from our existing location interfering with anything,
# So we remove this file's directory from sys.path - most likely to be
# the previous interpreter's site-packages. Solves #705, #763, #779
if os.environ.get("VIRTUALENV_INTERPRETER_RUNNING"):
for path in sys.path[:]:
if os.path.realpath(os.path.dirname(__file__)) == os.path.realpath(path):
sys.path.remove(path)
# fmt: on
import ast
import base64
import codecs
import contextlib
import distutils.spawn
import distutils.sysconfig
import errno
import glob
import logging
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import textwrap
import zipfile
import zlib
from distutils.util import strtobool
from os.path import join
try:
import ConfigParser
except ImportError:
# noinspection PyPep8Naming
import configparser as ConfigParser
__version__ = "16.4.1"
virtualenv_version = __version__ # legacy
DEBUG = os.environ.get("_VIRTUALENV_DEBUG", None) == "1"
if sys.version_info < (2, 7):
print("ERROR: {}".format(sys.exc_info()[1]))
print("ERROR: this script requires Python 2.7 or greater.")
sys.exit(101)
HERE = os.path.dirname(os.path.abspath(__file__))
IS_ZIPAPP = os.path.isfile(HERE)
try:
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
basestring
except NameError:
basestring = str
PY_VERSION = "python{}.{}".format(sys.version_info[0], sys.version_info[1])
IS_JYTHON = sys.platform.startswith("java")
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_WIN = sys.platform == "win32"
IS_CYGWIN = sys.platform == "cygwin"
IS_DARWIN = sys.platform == "darwin"
ABI_FLAGS = getattr(sys, "abiflags", "")
USER_DIR = os.path.expanduser("~")
if IS_WIN:
DEFAULT_STORAGE_DIR = os.path.join(USER_DIR, "virtualenv")
else:
DEFAULT_STORAGE_DIR = os.path.join(USER_DIR, ".virtualenv")
DEFAULT_CONFIG_FILE = os.path.join(DEFAULT_STORAGE_DIR, "virtualenv.ini")
if IS_PYPY:
EXPECTED_EXE = "pypy"
elif IS_JYTHON:
EXPECTED_EXE = "jython"
else:
EXPECTED_EXE = "python"
# Return a mapping of version -> Python executable
# Only provided for Windows, where the information in the registry is used
if not IS_WIN:
    def get_installed_pythons():
        # Non-Windows: no registry to consult, so no known interpreters.
        return {}
else:
    try:
        import winreg
    except ImportError:
        # Python 2 name for the registry module.
        # noinspection PyUnresolvedReferences
        import _winreg as winreg
    def get_installed_pythons():
        """Scan the Windows registry for CPython installs.

        Returns a dict mapping version tags (e.g. "3.7", "3.7-32",
        "3.7-64" and the bare major "3") to python.exe paths.
        """
        exes = dict()
        # If both system and current user installations are found for a
        # particular Python version, the current user one is used
        for key in (winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER):
            try:
                python_core = winreg.CreateKey(key, "Software\\Python\\PythonCore")
            except WindowsError:
                # No registered Python installations
                continue
            i = 0
            while True:
                try:
                    version = winreg.EnumKey(python_core, i)
                    i += 1
                    try:
                        at_path = winreg.QueryValue(python_core, "{}\\InstallPath".format(version))
                    except WindowsError:
                        # Version key without an InstallPath value: skip it.
                        continue
                    exes[version] = join(at_path, "python.exe")
                except WindowsError:
                    # EnumKey raises once we run past the last subkey.
                    break
            winreg.CloseKey(python_core)
        # For versions that track separate 32-bit (`X.Y-32`) & 64-bit (`X-Y`)
        # installation registrations, add a `X.Y-64` version tag and make the
        # extensionless `X.Y` version tag represent the 64-bit installation if
        # available or 32-bit if it is not
        updated = {}
        for ver in exes:
            if ver < "3.5":
                continue
            if ver.endswith("-32"):
                base_ver = ver[:-3]
                if base_ver not in exes:
                    updated[base_ver] = exes[ver]
            else:
                updated[ver + "-64"] = exes[ver]
        exes.update(updated)
        # Add the major versions
        # Sort the keys, then repeatedly update the major version entry
        # Last executable (i.e., highest version) wins with this approach,
        # 64-bit over 32-bit if both are found
        for ver in sorted(exes):
            exes[ver[0]] = exes[ver]
        return exes
# Stdlib modules that must exist inside the virtualenv for the interpreter
# to boot; they are copied/symlinked from the base install later
# (see copy_required_modules).
REQUIRED_MODULES = [
    "os",
    "posix",
    "posixpath",
    "nt",
    "ntpath",
    "genericpath",
    "fnmatch",
    "locale",
    "encodings",
    "codecs",
    "stat",
    "UserDict",
    "readline",
    "copy_reg",
    "types",
    "re",
    "sre",
    "sre_parse",
    "sre_constants",
    "sre_compile",
    "zlib",
]
# Non-module support dirs copied alongside the stdlib; the last entry is
# adjusted per Python version below.
REQUIRED_FILES = ["lib-dynload", "config"]
MAJOR, MINOR = sys.version_info[:2]
if MAJOR == 2:
    if MINOR >= 6:
        REQUIRED_MODULES.extend(["warnings", "linecache", "_abcoll", "abc"])
    if MINOR >= 7:
        REQUIRED_MODULES.extend(["_weakrefset"])
elif MAJOR == 3:
    # Some extra modules are needed for Python 3, but different ones
    # for different versions.
    REQUIRED_MODULES.extend(
        [
            "_abcoll",
            "warnings",
            "linecache",
            "abc",
            "io",
            "_weakrefset",
            "copyreg",
            "tempfile",
            "random",
            "__future__",
            "collections",
            "keyword",
            "tarfile",
            "shutil",
            "struct",
            "copy",
            "tokenize",
            "token",
            "functools",
            "heapq",
            "bisect",
            "weakref",
            "reprlib",
        ]
    )
    if MINOR >= 2:
        REQUIRED_FILES[-1] = "config-{}".format(MAJOR)
    if MINOR >= 3:
        import sysconfig
        # e.g. "plat-linux" style platform-specific stdlib directory.
        platform_dir = sysconfig.get_config_var("PLATDIR")
        REQUIRED_FILES.append(platform_dir)
        REQUIRED_MODULES.extend(["base64", "_dummy_thread", "hashlib", "hmac", "imp", "importlib", "rlcompleter"])
    if MINOR >= 4:
        REQUIRED_MODULES.extend(["operator", "_collections_abc", "_bootlocale"])
    if MINOR >= 6:
        REQUIRED_MODULES.extend(["enum"])
if IS_PYPY:
    # these are needed to correctly display the exceptions that may happen
    # during the bootstrap
    REQUIRED_MODULES.extend(["traceback", "linecache"])
    if MAJOR == 3:
        # _functools is needed to import locale during stdio initialization and
        # needs to be copied on PyPy because it's not built in
        REQUIRED_MODULES.append("_functools")
class Logger(object):
    """
    Logging object for use in command-line script. Allows ranges of
    levels, to avoid some redundancy of displayed information.
    """
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    # Custom level halfway between INFO and WARN (25; a float under Py3
    # true division -- harmless since only comparisons are done on it).
    NOTIFY = (logging.INFO + logging.WARN) / 2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL
    # Ordered least-to-most severe; indexed by level_for_integer().
    LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
    def __init__(self, consumers):
        # consumers: list of (level, consumer) pairs, where consumer is
        # either a writable stream or a callable taking the rendered line.
        self.consumers = consumers
        # indent: number of leading spaces prefixed to every message.
        self.indent = 0
        # in_progress / in_progress_hanging: state for start_progress()'s
        # "Installing ..." style trailing-dot output.
        self.in_progress = None
        self.in_progress_hanging = False
    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)
    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)
    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)
    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)
    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)
    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)
    def log(self, level, msg, *args, **kw):
        """Render msg %-formatted with args/kw and fan out to consumers
        whose registered level accepts *level*."""
        if args:
            if kw:
                raise TypeError("You may give positional or keyword arguments, not both")
        args = args or kw
        # Rendered lazily: only when at least one consumer accepts the level.
        rendered = None
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                if self.in_progress_hanging and consumer in (sys.stdout, sys.stderr):
                    # Terminate a hanging progress line before logging.
                    self.in_progress_hanging = False
                    print("")
                    sys.stdout.flush()
                if rendered is None:
                    if args:
                        rendered = msg % args
                    else:
                        rendered = msg
                    rendered = " " * self.indent + rendered
                if hasattr(consumer, "write"):
                    consumer.write(rendered + "\n")
                else:
                    consumer(rendered)
    def start_progress(self, msg):
        # Begin a progress scope; only one may be active at a time.
        assert not self.in_progress, "Tried to start_progress({!r}) while in_progress {!r}".format(
            msg, self.in_progress
        )
        if self.level_matches(self.NOTIFY, self._stdout_level()):
            print(msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg
    def end_progress(self, msg="done."):
        assert self.in_progress, "Tried to end_progress without start_progress"
        if self.stdout_level_matches(self.NOTIFY):
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                print("...{}{}".format(self.in_progress, msg))
                sys.stdout.flush()
            else:
                print(msg)
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False
    def show_progress(self):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            print(".")
            sys.stdout.flush()
    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())
    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        # No stdout consumer registered: treat stdout as silent (FATAL only).
        return self.FATAL
    @staticmethod
    def level_matches(level, consumer_level):
        """
        >>> l = Logger([])
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            # A slice expresses a half-open range [start, stop) of levels.
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level
    @classmethod
    def level_for_integer(cls, level):
        # Clamp an arbitrary verbosity integer into LEVELS' index range.
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]
# Create a silent logger just to prevent the name from being undefined;
# it is overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(at_path):
    """Create *at_path* (including missing parents) unless it already exists."""
    if os.path.exists(at_path):
        logger.info("Directory %s already exists", at_path)
    else:
        logger.info("Creating %s", at_path)
        os.makedirs(at_path)
def copy_file_or_folder(src, dest, symlink=True):
    """Copy *src* to *dest*: recursively for a directory, shutil.copy2 otherwise."""
    if not os.path.isdir(src):
        shutil.copy2(src, dest)
        return
    # For trees, *symlink* is forwarded as copytree's symlinks flag.
    shutil.copytree(src, dest, symlink)
def symlink_file_or_folder(src, dest):
    """Mirror *src* at *dest* with symlinks, recursing into directories.

    An existing *dest* is never clobbered.
    """
    if os.path.exists(dest):
        return
    if os.path.isdir(src):
        if not os.path.exists(dest):
            os.makedirs(dest)
        for entry in os.listdir(src):
            symlink_file_or_folder(os.path.join(src, entry), os.path.join(dest, entry))
        return
    if os.path.islink(src):
        target = os.readlink(src)
        if not target[0] == "/":
            # relative symlink, needs to be prefixed with absolute source path
            target = os.path.abspath(os.path.join(os.path.dirname(src), target))
    else:
        target = os.path.abspath(src)
    logger.info("Symlinking %s :: %s -> %s", src, target, dest)
    os.symlink(target, dest)
def copyfile(src, dest, symlink=True):
    """Place *src* at *dest*, preferring a symlink when the platform allows it."""
    prefer_symlink = symlink and hasattr(os, "symlink") and not IS_WIN
    if prefer_symlink:
        return symlink_file_or_folder(src, dest)
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn("Cannot find file %s (bad symlink)", src)
        return
    if os.path.exists(dest):
        logger.debug("File %s already exists", dest)
        return
    parent = os.path.dirname(dest)
    if not os.path.exists(parent):
        logger.info("Creating parent directories for %s", parent)
        os.makedirs(parent)
    logger.info("Copying to %s", dest)
    copy_file_or_folder(src, dest, symlink)
def writefile(dest, content, overwrite=True):
    """Write *content* to *dest* as UTF-8, replacing differing content only
    when *overwrite* is true."""
    encoded = content.encode("utf-8")
    if not os.path.exists(dest):
        logger.info("Writing %s", dest)
        with open(dest, "wb") as f:
            f.write(encoded)
        return
    with open(dest, "rb") as f:
        existing = f.read()
    if existing == encoded:
        logger.info("Content %s already in place", dest)
        return
    if not overwrite:
        logger.notify("File %s exists with different content; not overwriting", dest)
        return
    logger.notify("Overwriting %s with new content", dest)
    with open(dest, "wb") as f:
        f.write(encoded)
def rm_tree(folder):
    """Recursively delete *folder* if it exists."""
    if not os.path.exists(folder):
        logger.info("Do not need to delete %s; already gone", folder)
        return
    logger.notify("Deleting tree %s", folder)
    shutil.rmtree(folder)
def make_exe(fn):
    """Add read/execute permission bits (0o555) to *fn* where chmod exists."""
    if not hasattr(os, "chmod"):
        return
    old_mode = os.stat(fn).st_mode & 0xFFF  # 0o7777
    new_mode = (old_mode | 0x16D) & 0xFFF  # 0o555, 0o7777
    os.chmod(fn, new_mode)
    logger.info("Changed mode of %s to %s", fn, oct(new_mode))
def _find_file(filename, folders):
for folder in reversed(folders):
files = glob.glob(os.path.join(folder, filename))
if files and os.path.isfile(files[0]):
return True, files[0]
return False, filename
@contextlib.contextmanager
def virtualenv_support_dirs():
    """Context manager yielding either [virtualenv_support_dir] or []"""
    # normal filesystem installation
    if os.path.isdir(join(HERE, "virtualenv_support")):
        yield [join(HERE, "virtualenv_support")]
    elif IS_ZIPAPP:
        # Running from a zipapp: extract the bundled wheels to a temp dir
        # for the duration of the with-block, then clean up.
        tmpdir = tempfile.mkdtemp()
        try:
            with zipfile.ZipFile(HERE) as zipf:
                for member in zipf.namelist():
                    if os.path.dirname(member) == "virtualenv_support":
                        zipf.extract(member, tmpdir)
            yield [join(tmpdir, "virtualenv_support")]
        finally:
            shutil.rmtree(tmpdir)
    # probably a bootstrap script
    # NOTE(review): splitext(dirname(__file__)) looks odd -- basename seems
    # intended for a "named virtualenv.py?" check; confirm against upstream.
    elif os.path.splitext(os.path.dirname(__file__))[0] != "virtualenv":
        try:
            # Fall back to an installed virtualenv package's support dir.
            # noinspection PyUnresolvedReferences
            import virtualenv
        except ImportError:
            yield []
        else:
            yield [join(os.path.dirname(virtualenv.__file__), "virtualenv_support")]
    # we tried!
    else:
        yield []
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
    """
    Help formatter for ConfigOptionParser that refreshes the parser's
    defaults (from config files / environment variables) just before
    %default expansion, so help output shows the effective values.
    """

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            parser.update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
    """
    Custom option parser which updates its defaults by checking the
    configuration files and environmental variables
    """
    def __init__(self, *args, **kwargs):
        # Parse the ini file(s) up front; their values are merged into the
        # option defaults lazily via update_defaults().
        self.config = ConfigParser.RawConfigParser()
        self.files = self.get_config_files()
        self.config.read(self.files)
        optparse.OptionParser.__init__(self, *args, **kwargs)
    @staticmethod
    def get_config_files():
        # VIRTUALENV_CONFIG_FILE overrides the default location, but only
        # if the file actually exists.
        config_file = os.environ.get("VIRTUALENV_CONFIG_FILE", False)
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [DEFAULT_CONFIG_FILE]
    def update_defaults(self, defaults):
        """
        Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists).
        """
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        config.update(dict(self.get_config_section("virtualenv")))
        # 2. environmental variables
        config.update(dict(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            # Config keys use underscores; option strings use dashes.
            key = key.replace("_", "-")
            if not key.startswith("--"):
                key = "--{}".format(key)  # only prefer long opts
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                # handle multiline configs
                if option.action == "append":
                    val = val.split()
                else:
                    option.nargs = 1
                if option.action == "store_false":
                    val = not strtobool(val)
                elif option.action in ("store_true", "count"):
                    val = strtobool(val)
                try:
                    val = option.convert_value(key, val)
                except optparse.OptionValueError:
                    e = sys.exc_info()[1]
                    print("An error occurred during configuration: {!r}".format(e))
                    sys.exit(3)
                defaults[option.dest] = val
        return defaults
    def get_config_section(self, name):
        """
        Get a section of a configuration
        """
        if self.config.has_section(name):
            return self.config.items(name)
        return []
    def get_environ_vars(self, prefix="VIRTUALENV_"):
        """
        Returns a generator with all environmental vars with prefix VIRTUALENV
        """
        for key, val in os.environ.items():
            if key.startswith(prefix):
                yield (key.replace(prefix, "").lower(), val)
    def get_default_values(self):
        """
        Overriding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work.
        """
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)
        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, basestring):
                # String defaults from config/env still need type conversion.
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)
def main():
    """Command-line entry point: parse options and create the virtualenv
    at DEST_DIR (re-executing under another interpreter if -p was given)."""
    parser = ConfigOptionParser(
        version=virtualenv_version, usage="%prog [OPTIONS] DEST_DIR", formatter=UpdatingDefaultsHelpFormatter()
    )
    parser.add_option(
        "-v", "--verbose", action="count", dest="verbose", default=5 if DEBUG else 0, help="Increase verbosity."
    )
    parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0, help="Decrease verbosity.")
    parser.add_option(
        "-p",
        "--python",
        dest="python",
        metavar="PYTHON_EXE",
        help="The Python interpreter to use, e.g., --python=python3.5 will use the python3.5 "
        "interpreter to create the new environment. The default is the interpreter that "
        "virtualenv was installed with ({})".format(sys.executable),
    )
    parser.add_option(
        "--clear", dest="clear", action="store_true", help="Clear out the non-root install and start from scratch."
    )
    parser.set_defaults(system_site_packages=False)
    parser.add_option(
        "--no-site-packages",
        dest="system_site_packages",
        action="store_false",
        help="DEPRECATED. Retained only for backward compatibility. "
        "Not having access to global site-packages is now the default behavior.",
    )
    parser.add_option(
        "--system-site-packages",
        dest="system_site_packages",
        action="store_true",
        help="Give the virtual environment access to the global site-packages.",
    )
    parser.add_option(
        "--always-copy",
        dest="symlink",
        action="store_false",
        default=True,
        help="Always copy files rather than symlinking.",
    )
    parser.add_option(
        "--relocatable",
        dest="relocatable",
        action="store_true",
        help="Make an EXISTING virtualenv environment relocatable. "
        "This fixes up scripts and makes all .pth files relative.",
    )
    parser.add_option(
        "--no-setuptools",
        dest="no_setuptools",
        action="store_true",
        help="Do not install setuptools in the new virtualenv.",
    )
    parser.add_option("--no-pip", dest="no_pip", action="store_true", help="Do not install pip in the new virtualenv.")
    parser.add_option(
        "--no-wheel", dest="no_wheel", action="store_true", help="Do not install wheel in the new virtualenv."
    )
    parser.add_option(
        "--extra-search-dir",
        dest="search_dirs",
        action="append",
        metavar="DIR",
        default=[],
        help="Directory to look for setuptools/pip distributions in. " "This option can be used multiple times.",
    )
    parser.add_option(
        "--download",
        dest="download",
        default=True,
        action="store_true",
        help="Download pre-installed packages from PyPI.",
    )
    parser.add_option(
        "--no-download",
        "--never-download",
        dest="download",
        action="store_false",
        help="Do not download pre-installed packages from PyPI.",
    )
    parser.add_option("--prompt", dest="prompt", help="Provides an alternative prompt prefix for this environment.")
    parser.add_option(
        "--setuptools",
        dest="setuptools",
        action="store_true",
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.",
    )
    parser.add_option(
        "--distribute",
        dest="distribute",
        action="store_true",
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.",
    )
    parser.add_option(
        "--unzip-setuptools",
        action="store_true",
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.",
    )
    # Bootstrap scripts that embed this file may define extend_parser /
    # adjust_options / after_install hooks at module level.
    if "extend_parser" in globals():
        # noinspection PyUnresolvedReferences
        extend_parser(parser)  # noqa: F821
    options, args = parser.parse_args()
    global logger
    if "adjust_options" in globals():
        # noinspection PyUnresolvedReferences
        adjust_options(options, args)  # noqa: F821
    # Replace the module-level silent logger with one at the requested level.
    verbosity = options.verbose - options.quiet
    logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])
    # -p/--python: re-exec this script under the requested interpreter
    # (guarded by an env var so the child does not recurse).
    if options.python and not os.environ.get("VIRTUALENV_INTERPRETER_RUNNING"):
        env = os.environ.copy()
        interpreter = resolve_interpreter(options.python)
        if interpreter == sys.executable:
            logger.warn("Already using interpreter {}".format(interpreter))
        else:
            logger.notify("Running virtualenv with interpreter {}".format(interpreter))
            env["VIRTUALENV_INTERPRETER_RUNNING"] = "true"
            file = __file__
            if file.endswith(".pyc"):
                file = file[:-1]
            elif IS_ZIPAPP:
                file = HERE
            sub_process_call = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
            raise SystemExit(sub_process_call.wait())
    # Validate the single positional DEST_DIR argument.
    if not args:
        print("You must provide a DEST_DIR")
        parser.print_help()
        sys.exit(2)
    if len(args) > 1:
        print("There must be only one argument: DEST_DIR (you gave {})".format(" ".join(args)))
        parser.print_help()
        sys.exit(2)
    home_dir = args[0]
    if os.path.exists(home_dir) and os.path.isfile(home_dir):
        logger.fatal("ERROR: File already exists and is not a directory.")
        logger.fatal("Please provide a different path or delete the file.")
        sys.exit(3)
    if os.environ.get("WORKING_ENV"):
        logger.fatal("ERROR: you cannot run virtualenv while in a working env")
        logger.fatal("Please deactivate your working env, then re-run this script")
        sys.exit(3)
    if "PYTHONHOME" in os.environ:
        logger.warn("PYTHONHOME is set. You *must* activate the virtualenv before using it")
        del os.environ["PYTHONHOME"]
    if options.relocatable:
        make_environment_relocatable(home_dir)
        return
    with virtualenv_support_dirs() as search_dirs:
        create_environment(
            home_dir,
            site_packages=options.system_site_packages,
            clear=options.clear,
            prompt=options.prompt,
            search_dirs=search_dirs + options.search_dirs,
            download=options.download,
            no_setuptools=options.no_setuptools,
            no_pip=options.no_pip,
            no_wheel=options.no_wheel,
            symlink=options.symlink,
        )
    if "after_install" in globals():
        # noinspection PyUnresolvedReferences
        after_install(options, home_dir)  # noqa: F821
def call_subprocess(
    cmd,
    show_stdout=True,
    filter_stdout=None,
    cwd=None,
    raise_on_return_code=True,
    extra_env=None,
    remove_from_env=None,
    stdin=None,
):
    """Run *cmd*, streaming/capturing its output through the logger.

    When show_stdout is false the output is captured line-by-line, passed
    through *filter_stdout* (which may return a level or (level, line)),
    and returned as a list of lines. A non-zero exit raises OSError unless
    raise_on_return_code is false (then it is only warned about).
    """
    # Build an abbreviated, quoted command description for log messages.
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20] + "..." + part[-20:]
        if " " in part or "\n" in part or '"' in part or "'" in part:
            part = '"{}"'.format(part.replace('"', '\\"'))
        if hasattr(part, "decode"):
            try:
                part = part.decode(sys.getdefaultencoding())
            except UnicodeDecodeError:
                part = part.decode(sys.getfilesystemencoding())
        cmd_parts.append(part)
    cmd_desc = " ".join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command {}".format(cmd_desc))
    # Only build a custom environment when we actually need to change it.
    if extra_env or remove_from_env:
        env = os.environ.copy()
        if extra_env:
            env.update(extra_env)
        if remove_from_env:
            for var_name in remove_from_env:
                env.pop(var_name, None)
    else:
        env = None
    try:
        proc = subprocess.Popen(
            cmd,
            stderr=subprocess.STDOUT,
            stdin=None if stdin is None else subprocess.PIPE,
            stdout=stdout,
            cwd=cwd,
            env=env,
        )
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal("Error {} while executing command {}".format(e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        # Capturing mode: feed stdin (if any), then read lines until EOF.
        if stdin is not None:
            with proc.stdin:
                proc.stdin.write(stdin)
        encoding = sys.getdefaultencoding()
        fs_encoding = sys.getfilesystemencoding()
        with proc.stdout as stdout:
            while 1:
                line = stdout.readline()
                try:
                    line = line.decode(encoding)
                except UnicodeDecodeError:
                    line = line.decode(fs_encoding)
                if not line:
                    break
                line = line.rstrip()
                all_output.append(line)
                if filter_stdout:
                    level = filter_stdout(line)
                    if isinstance(level, tuple):
                        level, line = level
                    logger.log(level, line)
                    if not logger.stdout_level_matches(level):
                        # Line was suppressed; keep the progress dots alive.
                        logger.show_progress()
                else:
                    logger.info(line)
    else:
        proc.communicate(stdin)
    proc.wait()
    if proc.returncode:
        if raise_on_return_code:
            if all_output:
                logger.notify("Complete output from command {}:".format(cmd_desc))
                logger.notify("\n".join(all_output) + "\n----------------------------------------")
            raise OSError("Command {} failed with error code {}".format(cmd_desc, proc.returncode))
        else:
            logger.warn("Command {} had error code {}".format(cmd_desc, proc.returncode))
    return all_output
def filter_install_output(line):
    """Route pip's "running ..." lines to INFO; everything else to DEBUG."""
    return Logger.INFO if line.strip().startswith("running") else Logger.DEBUG
def find_wheels(projects, search_dirs):
    """Find wheels from which we can import PROJECTS.

    Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
    a list of the first wheel found for each PROJECT
    """
    wheels = []
    for project in projects:
        # First match wins; no version checking here -- this wheel is only
        # used to bootstrap installing the correct version.
        # This relies on only having "universal" wheels available.
        # The pattern could be tightened to require -py2.py3-none-any.whl.
        for dirname in search_dirs:
            candidates = glob.glob(os.path.join(dirname, project + "-*.whl"))
            if candidates:
                wheels.append(os.path.abspath(candidates[0]))
                break
        else:
            # No wheel anywhere: log fatally. NOTE: execution continues and
            # the returned list will simply be missing this project.
            logger.fatal("Cannot find a wheel for {}".format(project))
    return wheels
def install_wheel(project_names, py_executable, search_dirs=None, download=False):
    """Install *project_names* into the venv, resolving wheel search dirs.

    With no explicit *search_dirs*, the bundled virtualenv_support dirs
    are used (extracted on the fly when running as a zipapp).
    """
    if search_dirs is not None:
        @contextlib.contextmanager
        def search_dirs_context():
            yield search_dirs
    else:
        search_dirs_context = virtualenv_support_dirs
    with search_dirs_context() as search_dirs:
        _install_wheel_with_search_dir(download, project_names, py_executable, search_dirs)
def _install_wheel_with_search_dir(download, project_names, py_executable, search_dirs):
    """Run pip (straight out of its wheel) inside the new environment to
    install *project_names*, feeding it a small driver script on stdin."""
    # Only setuptools + pip need to be importable to run pip itself.
    wheels = find_wheels(["setuptools", "pip"], search_dirs)
    python_path = os.pathsep.join(wheels)
    # PIP_FIND_LINKS uses space as the path separator and thus cannot have paths
    # with spaces in them. Convert any of those to local file:// URL form.
    try:
        from urlparse import urljoin
        from urllib import pathname2url
    except ImportError:
        from urllib.parse import urljoin
        from urllib.request import pathname2url
    def space_path2url(p):
        if " " not in p:
            return p
        return urljoin("file:", pathname2url(os.path.abspath(p)))
    find_links = " ".join(space_path2url(d) for d in search_dirs)
    extra_args = ["--ignore-installed"]
    if DEBUG:
        extra_args.append("-v")
    if IS_JYTHON:
        extra_args.append("--no-cache")
    # Respect any cert already configured in pip's own config.
    config = _pip_config(py_executable, python_path)
    defined_cert = bool(config.get("install.cert") or config.get(":env:.cert") or config.get("global.cert"))
    # Driver script executed by the venv's python with pip on PYTHONPATH:
    # extracts pip's bundled CA file (unless a cert is already configured)
    # and invokes pip's main() with our install arguments.
    script = textwrap.dedent(
        """
        import sys
        import pkgutil
        import tempfile
        import os
        defined_cert = {defined_cert}
        try:
            from pip._internal import main as _main
            cert_data = pkgutil.get_data("pip._vendor.certifi", "cacert.pem")
        except ImportError:
            from pip import main as _main
            cert_data = pkgutil.get_data("pip._vendor.requests", "cacert.pem")
        except IOError:
            cert_data = None
        if not defined_cert and cert_data is not None:
            cert_file = tempfile.NamedTemporaryFile(delete=False)
            cert_file.write(cert_data)
            cert_file.close()
        else:
            cert_file = None
        try:
            args = ["install"] + [{extra_args}]
            if cert_file is not None:
                args += ["--cert", cert_file.name]
            args += sys.argv[1:]
            sys.exit(_main(args))
        finally:
            if cert_file is not None:
                os.remove(cert_file.name)
        """.format(
            defined_cert=defined_cert, extra_args=", ".join(repr(i) for i in extra_args)
        )
    ).encode("utf8")
    # "-" makes python read the program from stdin; project names become argv.
    cmd = [py_executable, "-"] + project_names
    logger.start_progress("Installing {}...".format(", ".join(project_names)))
    logger.indent += 2
    env = {
        "PYTHONPATH": python_path,
        "JYTHONPATH": python_path,  # for Jython < 3.x
        "PIP_FIND_LINKS": find_links,
        "PIP_USE_WHEEL": "1",
        "PIP_ONLY_BINARY": ":all:",
        "PIP_USER": "0",
        "PIP_NO_INPUT": "1",
    }
    if not download:
        # Offline mode: install only from the local wheel dirs.
        env["PIP_NO_INDEX"] = "1"
    try:
        call_subprocess(cmd, show_stdout=False, extra_env=env, stdin=script)
    finally:
        logger.indent -= 2
        logger.end_progress()
def _pip_config(py_executable, python_path):
    """Return pip's effective configuration ("pip config list") as a dict."""
    cmd = [py_executable, "-m", "pip", "config", "list"]
    output = call_subprocess(
        cmd,
        show_stdout=False,
        extra_env={"PYTHONPATH": python_path, "JYTHONPATH": python_path},
        remove_from_env=["PIP_VERBOSE", "PIP_QUIET"],
        raise_on_return_code=False,
    )
    config = {}
    for line in output:
        key, _, value = line.partition("=")
        if value:
            # values are printed as Python literals, e.g. install.cert='...'
            config[key] = ast.literal_eval(value)
    return config
def create_environment(
    home_dir,
    site_packages=False,
    clear=False,
    prompt=None,
    search_dirs=None,
    download=False,
    no_setuptools=False,
    no_pip=False,
    no_wheel=False,
    symlink=True,
):
    """
    Creates a new environment in ``home_dir``.
    If ``site_packages`` is true, then the global ``site-packages/``
    directory will be on the path.
    If ``clear`` is true (default False) then the environment will
    first be cleared.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    py_executable = os.path.abspath(
        install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages=site_packages, clear=clear, symlink=symlink)
    )
    install_distutils(home_dir)
    # Seed packages unless explicitly opted out of.
    to_install = [
        name
        for name, skipped in (("setuptools", no_setuptools), ("pip", no_pip), ("wheel", no_wheel))
        if not skipped
    ]
    if to_install:
        install_wheel(to_install, py_executable, search_dirs, download=download)
    install_activate(home_dir, bin_dir, prompt)
    install_python_config(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
    """True when *fpath* names a regular file that is executable."""
    if not os.path.isfile(fpath):
        return False
    return is_executable(fpath)
def path_locations(home_dir, dry_run=False):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc)"""
    home_dir = os.path.abspath(home_dir)
    lib_dir, inc_dir, bin_dir = None, None, None
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if IS_WIN:
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        if not dry_run:
            mkdir(home_dir)
        if " " in home_dir:
            import ctypes
            # Resolve the DOS 8.3 short name for the (now-existing) dir.
            get_short_path_name = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir) + 1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                # noinspection PyUnresolvedReferences
                u = unicode
            except NameError:
                # Python 3: str is already unicode.
                u = str
            ret = get_short_path_name(u(home_dir), buf, size)
            if not ret:
                print('Error: the path "{}" has a space in it'.format(home_dir))
                print("We could not determine the short pathname for it.")
                print("Exiting.")
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, "Lib")
        inc_dir = join(home_dir, "Include")
        bin_dir = join(home_dir, "Scripts")
    if IS_JYTHON:
        lib_dir = join(home_dir, "Lib")
        inc_dir = join(home_dir, "Include")
        bin_dir = join(home_dir, "bin")
    elif IS_PYPY:
        # PyPy keeps its library layout directly under the env root.
        lib_dir = home_dir
        inc_dir = join(home_dir, "include")
        bin_dir = join(home_dir, "bin")
    elif not IS_WIN:
        lib_dir = join(home_dir, "lib", PY_VERSION)
        inc_dir = join(home_dir, "include", PY_VERSION + ABI_FLAGS)
        bin_dir = join(home_dir, "bin")
    return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
    """Rebase *filename* from the interpreter's install prefix onto
    *dst_prefix* (the new environment root). Asserts if *filename* is not
    under any known prefix."""
    prefixes = [sys.prefix]
    if IS_DARWIN:
        # macOS framework/user-site layouts that can host the stdlib.
        prefixes.extend(
            (
                os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                os.path.join(sys.prefix, "Extras", "lib", "python"),
                os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
                # Python 2.6 no-frameworks
                os.path.join("~", ".local", "lib", "python", sys.version[:3], "site-packages"),
                # System Python 2.7 on OSX Mountain Lion
                os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages"),
            )
        )
    # When already inside a virtualenv/venv, the real interpreter prefix.
    if hasattr(sys, "real_prefix"):
        prefixes.append(sys.real_prefix)
    if hasattr(sys, "base_prefix"):
        prefixes.append(sys.base_prefix)
    prefixes = list(map(os.path.expanduser, prefixes))
    prefixes = list(map(os.path.abspath, prefixes))
    # Check longer prefixes first so we don't split in the middle of a filename
    prefixes = sorted(prefixes, key=len, reverse=True)
    filename = os.path.abspath(filename)
    # On Windows, make sure drive letter is uppercase
    if IS_WIN and filename[0] in "abcdefghijklmnopqrstuvwxyz":
        filename = filename[0].upper() + filename[1:]
    for i, prefix in enumerate(prefixes):
        if IS_WIN and prefix[0] in "abcdefghijklmnopqrstuvwxyz":
            prefixes[i] = prefix[0].upper() + prefix[1:]
    for src_prefix in prefixes:
        if filename.startswith(src_prefix):
            _, relative_path = filename.split(src_prefix, 1)
            if src_prefix != os.sep:  # sys.prefix == "/"
                assert relative_path[0] == os.sep
                relative_path = relative_path[1:]
            return join(dst_prefix, relative_path)
    assert False, "Filename {} does not start with any of these prefixes: {}".format(filename, prefixes)
def find_module_filename(modname):
    """Locate the file (or, for packages, the directory) providing
    *modname*; return None when it cannot be resolved."""
    if sys.version_info >= (3, 4):
        import importlib.util
        if sys.version_info >= (3, 5):
            find_spec = importlib.util.find_spec
        else:
            def find_spec(name):
                # noinspection PyDeprecation
                loader = importlib.find_loader(name)
                if loader is None:
                    return None
                return importlib.util.spec_from_loader(name, loader)
        spec = find_spec(modname)
        if spec is None:
            return None
        if not os.path.exists(spec.origin):
            # https://bitbucket.org/pypy/pypy/issues/2944/origin-for-several-builtin-modules
            # on pypy3, some builtin modules have a bogus build-time file path, ignore them
            return None
        filepath = spec.origin
        # https://www.python.org/dev/peps/pep-3147/#file guarantee to be non-cached
        if os.path.basename(filepath) == "__init__.py":
            # Packages: report the package directory, not __init__.py.
            filepath = os.path.dirname(filepath)
        return filepath
    # Legacy path (< 3.4) via the deprecated imp module.
    # noinspection PyDeprecation
    import imp
    try:
        file_handler, filepath, _ = imp.find_module(modname)
    except ImportError:
        return None
    if file_handler is not None:
        file_handler.close()
    return filepath
def copy_required_modules(dst_prefix, symlink):
    """Copy (or symlink) every REQUIRED_MODULES entry from the base install
    into the new environment rooted at *dst_prefix*."""
    for modname in REQUIRED_MODULES:
        if modname in sys.builtin_module_names:
            logger.info("Ignoring built-in bootstrap module: %s" % modname)
            continue
        filename = find_module_filename(modname)
        if filename is None:
            logger.info("Cannot import bootstrap module: %s" % modname)
        else:
            # special-case custom readline.so on OS X, but not for pypy:
            if (
                modname == "readline"
                and sys.platform == "darwin"
                and not (IS_PYPY or filename.endswith(join("lib-dynload", "readline.so")))
            ):
                dst_filename = join(dst_prefix, "lib", "python{}".format(sys.version[:3]), "readline.so")
            elif modname == "readline" and sys.platform == "win32":
                # special-case for Windows, where readline is not a standard module, though it may have been installed
                # in site-packages by a third-party package
                dst_filename = None
            else:
                # Normal case: mirror the module's path under dst_prefix.
                dst_filename = change_prefix(filename, dst_prefix)
            if dst_filename is not None:
                copyfile(filename, dst_filename, symlink)
            if filename.endswith(".pyc"):
                # Also bring over the matching .py source when present.
                py_file = filename[:-1]
                if os.path.exists(py_file):
                    copyfile(py_file, dst_filename[:-1], symlink)
def copy_required_files(src_dir, lib_dir, symlink):
    """Copy the REQUIRED_FILES entries found in *src_dir* into *lib_dir*."""
    if not os.path.isdir(src_dir):
        return
    for entry in os.listdir(src_dir):
        stem = os.path.splitext(entry)[0]
        # Never copy site-packages itself; only whitelisted support entries.
        if entry != "site-packages" and stem in REQUIRED_FILES:
            copyfile(join(src_dir, entry), join(lib_dir, entry), symlink)
def copy_license(prefix, dst_prefix, lib_dir, symlink):
    """Copy the license file so `license()` builtin works"""
    candidates = (
        # posix cpython
        os.path.join(prefix, os.path.relpath(lib_dir, dst_prefix), "LICENSE.txt"),
        # windows cpython
        os.path.join(prefix, "LICENSE.txt"),
        # pypy
        os.path.join(prefix, "LICENSE"),
    )
    for license_path in candidates:
        if not os.path.exists(license_path):
            continue
        copyfile(license_path, subst_path(license_path, prefix, dst_prefix), symlink)
        return
    logger.warn("No LICENSE.txt / LICENSE found in source")
def copy_include_dir(include_src, include_dest, symlink):
    """Copy headers from *include_src* to *include_dest* symlinking if required"""
    if not os.path.isdir(include_src):
        return
    if not IS_PYPY:
        copyfile(include_src, include_dest, symlink)
        return
    # PyPy headers are located in ``pypy-dir/include`` and the per-file copy
    # below avoids making ``venv-dir/include`` a symlink to that directory.
    for header in os.listdir(include_src):
        copyfile(join(include_src, header), join(include_dest, header), symlink)
def copy_tcltk(src, dest, symlink):
    """ copy tcl/tk libraries on Windows (issue #93) """
    versions = ("8.5", "8.6")
    libraries = ("tcl", "tk")
    for version in versions:
        for library in libraries:
            source_dir = join(src, "tcl", library + version)
            target_dir = join(dest, "tcl", library + version)
            # Only copy the version/library combinations that actually exist
            # and are not already present in the destination.
            if os.path.exists(source_dir) and not os.path.exists(target_dir):
                copy_file_or_folder(source_dir, target_dir, symlink)
def subst_path(prefix_path, prefix, home_dir):
    """Rebase *prefix_path* from *prefix* onto *home_dir*.

    Returns the rebased path, or None (after a warning) when *prefix_path*
    does not live under *prefix*.
    """
    prefix_path, prefix, home_dir = (
        os.path.normpath(p) for p in (prefix_path, prefix, home_dir)
    )
    if not prefix_path.startswith(prefix):
        logger.warn("Path not in prefix %r %r", prefix_path, prefix)
        return
    return prefix_path.replace(prefix, home_dir, 1)
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True):
    """Install just the base environment, no distutils patches etc

    Lays out the directory structure, copies/symlinks the interpreter,
    bootstrap stdlib modules, headers and the patched ``site.py``, then
    sanity-checks that the new executable reports the environment as its
    ``sys.prefix``.  Returns the path of the environment's python executable.
    """
    if sys.executable.startswith(bin_dir):
        print("Please use the *system* python to run this script")
        return
    if clear:
        rm_tree(lib_dir)
        # FIXME: why not delete it?
        # Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify("Not deleting %s", bin_dir)
    # Resolve the real interpreter prefix: sys.real_prefix is set when running
    # inside an old virtualenv, sys.base_prefix inside a PEP 405 venv.
    if hasattr(sys, "real_prefix"):
        logger.notify("Using real prefix %r", sys.real_prefix)
        prefix = sys.real_prefix
    elif hasattr(sys, "base_prefix"):
        logger.notify("Using base prefix %r", sys.base_prefix)
        prefix = sys.base_prefix
    else:
        prefix = sys.prefix
    prefix = os.path.abspath(prefix)
    mkdir(lib_dir)
    fix_lib64(lib_dir, symlink)
    # Directories holding the stdlib files we must replicate.
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if IS_WIN:
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), "DLLs"))
    elif IS_DARWIN:
        stdlib_dirs.append(join(stdlib_dirs[0], "site-packages"))
    if hasattr(os, "symlink"):
        logger.info("Symlinking Python bootstrap modules")
    else:
        logger.info("Copying Python bootstrap modules")
    logger.indent += 2
    try:
        # copy required files...
        for stdlib_dir in stdlib_dirs:
            copy_required_files(stdlib_dir, lib_dir, symlink)
        # ...and modules
        copy_required_modules(home_dir, symlink)
        copy_license(prefix, home_dir, lib_dir, symlink)
    finally:
        logger.indent -= 2
    # ...copy tcl/tk
    if IS_WIN:
        copy_tcltk(prefix, home_dir, symlink)
    mkdir(join(lib_dir, "site-packages"))
    import site
    # Locate the source of site.py; normalize away compiled-file suffixes
    # (.pyc/.pyo for CPython, $py.class for Jython).
    site_filename = site.__file__
    if site_filename.endswith(".pyc") or site_filename.endswith(".pyo"):
        site_filename = site_filename[:-1]
    elif site_filename.endswith("$py.class"):
        site_filename = site_filename.replace("$py.class", ".py")
    site_filename_dst = change_prefix(site_filename, home_dir)
    site_dir = os.path.dirname(site_filename_dst)
    # Write virtualenv's patched site.py plus the marker files it consults.
    writefile(site_filename_dst, SITE_PY)
    writefile(join(site_dir, "orig-prefix.txt"), prefix)
    site_packages_filename = join(site_dir, "no-global-site-packages.txt")
    if not site_packages:
        writefile(site_packages_filename, "")
    if IS_PYPY or IS_WIN:
        standard_lib_include_dir = join(prefix, "include")
    else:
        standard_lib_include_dir = join(prefix, "include", PY_VERSION + ABI_FLAGS)
    if os.path.exists(standard_lib_include_dir):
        copy_include_dir(standard_lib_include_dir, inc_dir, symlink)
    else:
        logger.debug("No include dir %s", standard_lib_include_dir)
    platform_include_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
    if platform_include_dir != standard_lib_include_dir:
        platform_include_dest = distutils.sysconfig.get_python_inc(plat_specific=1, prefix=home_dir)
        if platform_include_dir == platform_include_dest:
            # Do platinc_dest manually due to a CPython bug;
            # not http://bugs.python.org/issue3386 but a close cousin
            platform_include_dest = subst_path(platform_include_dir, prefix, home_dir)
        if platform_include_dest:
            # PyPy's stdinc_dir and prefix are relative to the original binary
            # (traversing virtualenvs), whereas the platinc_dir is relative to
            # the inner virtualenv and ignores the prefix argument.
            # This seems more evolved than designed.
            copy_include_dir(platform_include_dir, platform_include_dest, symlink)
    # pypy never uses exec_prefix, just ignore it
    if os.path.realpath(sys.exec_prefix) != os.path.realpath(prefix) and not IS_PYPY:
        if IS_WIN:
            exec_dir = join(sys.exec_prefix, "lib")
        elif IS_JYTHON:
            exec_dir = join(sys.exec_prefix, "Lib")
        else:
            exec_dir = join(sys.exec_prefix, "lib", PY_VERSION)
        copy_required_files(exec_dir, lib_dir, symlink)
    if IS_JYTHON:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in "jython-dev.jar", "javalib", "jython.jar":
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name), symlink)
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, "registry")
        if os.path.exists(src):
            copyfile(src, join(home_dir, "registry"), symlink=False)
        copyfile(join(prefix, "cachedir"), join(home_dir, "cachedir"), symlink=False)
    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if "Python.framework" in prefix:
        # OS X framework builds cause validation to break
        # https://github.com/pypa/virtualenv/issues/322
        if os.environ.get("__PYVENV_LAUNCHER__"):
            del os.environ["__PYVENV_LAUNCHER__"]
        if re.search(r"/Python(?:-32|-64)*$", py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(os.path.dirname(py_executable), "python")
    logger.notify("New %s executable in %s", EXPECTED_EXE, py_executable)
    # When running from a Windows build tree, leave a .pth pointing back at
    # the build dir so the freshly built *.pyd extensions are importable.
    pc_build_dir = os.path.dirname(sys.executable)
    pyd_pth = os.path.join(lib_dir, "site-packages", "virtualenv_builddir_pyd.pth")
    if IS_WIN and os.path.exists(os.path.join(pc_build_dir, "build.bat")):
        logger.notify("Detected python running from build directory %s", pc_build_dir)
        logger.notify("Writing .pth file linking to build directory for *.pyd files")
        writefile(pyd_pth, pc_build_dir)
    else:
        if os.path.exists(pyd_pth):
            logger.info("Deleting %s (not Windows env or not build directory python)", pyd_pth)
            os.unlink(pyd_pth)
    if sys.executable != py_executable:
        # FIXME: could I just hard link?
        executable = sys.executable
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if IS_WIN or IS_CYGWIN:
            # Also carry over pythonw.exe, debug builds and the python DLLs so
            # the copied interpreter loads the matching runtime.
            python_w = os.path.join(os.path.dirname(sys.executable), "pythonw.exe")
            if os.path.exists(python_w):
                logger.info("Also created pythonw.exe")
                shutil.copyfile(python_w, os.path.join(os.path.dirname(py_executable), "pythonw.exe"))
            python_d = os.path.join(os.path.dirname(sys.executable), "python_d.exe")
            python_d_dest = os.path.join(os.path.dirname(py_executable), "python_d.exe")
            if os.path.exists(python_d):
                logger.info("Also created python_d.exe")
                shutil.copyfile(python_d, python_d_dest)
            elif os.path.exists(python_d_dest):
                logger.info("Removed python_d.exe as it is no longer at the source")
                os.unlink(python_d_dest)
            # we need to copy the DLL to enforce that windows will load the correct one.
            # may not exist if we are cygwin.
            if IS_PYPY:
                py_executable_dll_s = [("libpypy-c.dll", "libpypy_d-c.dll")]
            else:
                py_executable_dll_s = [
                    ("python{}.dll".format(sys.version_info[0]), "python{}_d.dll".format(sys.version_info[0])),
                    (
                        "python{}{}.dll".format(sys.version_info[0], sys.version_info[1]),
                        "python{}{}_d.dll".format(sys.version_info[0], sys.version_info[1]),
                    ),
                ]
            for py_executable_dll, py_executable_dll_d in py_executable_dll_s:
                python_dll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
                python_dll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
                python_dll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
                if os.path.exists(python_dll):
                    logger.info("Also created %s", py_executable_dll)
                    shutil.copyfile(python_dll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
                if os.path.exists(python_dll_d):
                    logger.info("Also created %s", py_executable_dll_d)
                    shutil.copyfile(python_dll_d, python_dll_d_dest)
                elif os.path.exists(python_dll_d_dest):
                    logger.info("Removed %s as the source does not exist", python_dll_d_dest)
                    os.unlink(python_dll_d_dest)
        if IS_PYPY:
            # make a symlink python --> pypy-c
            python_executable = os.path.join(os.path.dirname(py_executable), "python")
            if sys.platform in ("win32", "cygwin"):
                python_executable += ".exe"
            logger.info("Also created executable %s", python_executable)
            copyfile(py_executable, python_executable, symlink)
            if IS_WIN:
                for name in ["libexpat.dll", "libeay32.dll", "ssleay32.dll", "sqlite3.dll", "tcl85.dll", "tk85.dll"]:
                    src = join(prefix, name)
                    if os.path.exists(src):
                        copyfile(src, join(bin_dir, name), symlink)
            # Locate and replicate PyPy's lib_pypy directory; it is required
            # for the interpreter to start at all.
            for d in sys.path:
                if d.endswith("lib_pypy"):
                    break
            else:
                logger.fatal("Could not find lib_pypy in sys.path")
                raise SystemExit(3)
            logger.info("Copying lib_pypy")
            copyfile(d, os.path.join(home_dir, "lib_pypy"), symlink)
    if os.path.splitext(os.path.basename(py_executable))[0] != EXPECTED_EXE:
        # Provide the conventionally named executable (e.g. "python") next to
        # the oddly named one we just installed.
        secondary_exe = os.path.join(os.path.dirname(py_executable), EXPECTED_EXE)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext.lower() == ".exe":
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn(
                "Not overwriting existing {} script {} (you must use {})".format(
                    EXPECTED_EXE, secondary_exe, py_executable
                )
            )
        else:
            logger.notify("Also creating executable in %s", secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)
    if ".framework" in prefix:
        original_python = None
        if "Python.framework" in prefix:
            logger.debug("MacOSX Python framework detected")
            # Make sure we use the embedded interpreter inside
            # the framework, even if sys.executable points to
            # the stub executable in ${sys.prefix}/bin
            # See http://groups.google.com/group/python-virtualenv/
            # browse_thread/thread/17cab2f85da75951
            original_python = os.path.join(prefix, "Resources/Python.app/Contents/MacOS/Python")
        if "EPD" in prefix:
            logger.debug("EPD framework detected")
            original_python = os.path.join(prefix, "bin/python")
        shutil.copy(original_python, py_executable)
        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, ".Python")
        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(os.path.join(prefix, "Python"), virtual_lib, symlink)
        # And then change the install_name of the copied python executable
        # noinspection PyBroadException
        try:
            mach_o_change(py_executable, os.path.join(prefix, "Python"), "@executable_path/../.Python")
        except Exception:
            e = sys.exc_info()[1]
            logger.warn("Could not call mach_o_change: %s. " "Trying to call install_name_tool instead.", e)
            try:
                call_subprocess(
                    [
                        "install_name_tool",
                        "-change",
                        os.path.join(prefix, "Python"),
                        "@executable_path/../.Python",
                        py_executable,
                    ]
                )
            except Exception:
                logger.fatal("Could not call install_name_tool -- you must " "have Apple's development tools installed")
                raise
    if not IS_WIN:
        # Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
        py_exe_version_major = "python{}".format(sys.version_info[0])
        py_exe_version_major_minor = "python{}.{}".format(sys.version_info[0], sys.version_info[1])
        py_exe_no_version = "python"
        required_symlinks = [py_exe_no_version, py_exe_version_major, py_exe_version_major_minor]
        py_executable_base = os.path.basename(py_executable)
        if py_executable_base in required_symlinks:
            # Don't try to symlink to yourself.
            required_symlinks.remove(py_executable_base)
        for pth in required_symlinks:
            full_pth = join(bin_dir, pth)
            if os.path.exists(full_pth):
                os.unlink(full_pth)
            if symlink:
                os.symlink(py_executable_base, full_pth)
            else:
                copyfile(py_executable, full_pth, symlink)
    # Smoke-test the new interpreter: it must report the environment
    # as its sys.prefix.
    cmd = [
        py_executable,
        "-c",
        "import sys;out=sys.stdout;" 'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))',
    ]
    logger.info('Testing executable with %s %s "%s"', *cmd)
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        proc_stdout, proc_stderr = proc.communicate()
    except OSError:
        e = sys.exc_info()[1]
        if e.errno == errno.EACCES:
            logger.fatal("ERROR: The executable {} could not be run: {}".format(py_executable, e))
            sys.exit(100)
        else:
            raise e
    proc_stdout = proc_stdout.strip().decode("utf-8")
    # normalize paths using realpath to ensure that a virtualenv correctly identifies itself even
    # when addressed over a symlink
    proc_stdout = os.path.normcase(os.path.realpath(proc_stdout))
    norm_home_dir = os.path.normcase(os.path.realpath(home_dir))
    if hasattr(norm_home_dir, "decode"):
        norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
    if proc_stdout != norm_home_dir:
        logger.fatal("ERROR: The executable %s is not functioning", py_executable)
        logger.fatal("ERROR: It thinks sys.prefix is {!r} (should be {!r})".format(proc_stdout, norm_home_dir))
        logger.fatal("ERROR: virtualenv is not compatible with this system or executable")
        if IS_WIN:
            logger.fatal(
                "Note: some Windows users have reported this error when they "
                'installed Python for "Only this user" or have multiple '
                "versions of Python installed. Copying the appropriate "
                "PythonXX.dll to the virtualenv Scripts/ directory may fix "
                "this problem."
            )
        sys.exit(100)
    else:
        logger.info("Got sys.prefix result: %r", proc_stdout)
    pydistutils = os.path.expanduser("~/.pydistutils.cfg")
    if os.path.exists(pydistutils):
        logger.notify("Please make sure you remove any previous custom paths from " "your %s file.", pydistutils)
    # FIXME: really this should be calculated earlier
    fix_local_scheme(home_dir, symlink)
    if site_packages:
        if os.path.exists(site_packages_filename):
            logger.info("Deleting %s", site_packages_filename)
            os.unlink(site_packages_filename)
    return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
    """Write the shell activation scripts into *bin_dir*.

    Picks Windows batch/PowerShell scripts or POSIX shell scripts depending
    on the platform, then delegates placeholder substitution and writing to
    install_files().
    """
    if IS_WIN or IS_JYTHON and getattr(os, "_name", None) == "nt":
        files = {"activate.bat": ACTIVATE_BAT, "deactivate.bat": DEACTIVATE_BAT, "activate.ps1": ACTIVATE_PS}
        # MSYS needs paths of the form /c/path/to/file
        drive, tail = os.path.splitdrive(home_dir.replace(os.sep, "/"))
        # BUG FIX: the previous `(drive and "/{}{}" or "{}{}")` relied on the
        # fragile `cond and a or b` idiom, which silently picks the wrong
        # branch whenever `a` is falsy; use a real conditional expression.
        home_dir_msys = ("/{}{}" if drive else "{}{}").format(drive[:1], tail)
        # Run-time conditional enables (basic) Cygwin compatibility
        home_dir_sh = """$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '{}'; else echo '{}'; fi;)""".format(
            home_dir, home_dir_msys
        )
        files["activate"] = ACTIVATE_SH.replace("__VIRTUAL_ENV__", home_dir_sh)
    else:
        files = {
            "activate": ACTIVATE_SH,
            "activate.fish": ACTIVATE_FISH,
            "activate.csh": ACTIVATE_CSH,
            "activate.ps1": ACTIVATE_PS,
        }
    files["activate_this.py"] = ACTIVATE_THIS
    if sys.version_info >= (3, 4):
        # Add xonsh support
        files["activate.xsh"] = ACTIVATE_XSH
    install_files(home_dir, bin_dir, prompt, files)
def install_files(home_dir, bin_dir, prompt, files):
    """Substitute the __MARKER__ placeholders in each template of *files*
    and write the results into *bin_dir*."""
    if hasattr(home_dir, "decode"):
        home_dir = home_dir.decode(sys.getfilesystemencoding())
    virtualenv_name = os.path.basename(home_dir)
    substitutions = (
        ("__VIRTUAL_PROMPT__", prompt or ""),
        ("__VIRTUAL_WINPROMPT__", prompt or "({})".format(virtualenv_name)),
        ("__VIRTUAL_ENV__", home_dir),
        ("__VIRTUAL_NAME__", virtualenv_name),
        ("__BIN_NAME__", os.path.basename(bin_dir)),
        ("__PATH_SEP__", os.pathsep),
    )
    for name, content in files.items():
        for marker, value in substitutions:
            content = content.replace(marker, value)
        writefile(os.path.join(bin_dir, name), content)
def install_python_config(home_dir, bin_dir, prompt=None):
    """Install the `python-config` helper script (POSIX only) and mark it
    executable; on Windows/Jython-on-NT nothing is installed."""
    windows_like = sys.platform == "win32" or IS_JYTHON and getattr(os, "_name", None) == "nt"
    files = {} if windows_like else {"python-config": PYTHON_CONFIG}
    install_files(home_dir, bin_dir, prompt, files)
    for name in files:
        make_exe(os.path.join(bin_dir, name))
def install_distutils(home_dir):
    """Install virtualenv's patched distutils package into the environment."""
    dst_path = change_prefix(distutils.__path__[0], home_dir)
    mkdir(dst_path)
    # FIXME: maybe this prefix setting should only be put in place if
    # there's a local distutils.cfg with a prefix setting?
    # FIXME: this is breaking things, removing for now:
    # distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" home_dir
    writefile(os.path.join(dst_path, "__init__.py"), DISTUTILS_INIT)
    writefile(os.path.join(dst_path, "distutils.cfg"), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir, symlink=True):
    """
    Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh.

    Mirrors every top-level entry of *home_dir* into ``home_dir/local`` so
    tools resolving the "local" scheme still find the environment.
    """
    try:
        import sysconfig
    except ImportError:
        return
    # BUG FIX: sysconfig._get_default_scheme() is a private API that was
    # renamed to the public get_default_scheme() in Python 3.10 (the private
    # name no longer exists there).  Prefer the public name and fall back to
    # the private one on older interpreters.
    get_scheme = getattr(sysconfig, "get_default_scheme", None) or getattr(sysconfig, "_get_default_scheme", None)
    if get_scheme is None or get_scheme() != "posix_local":
        return
    local_path = os.path.join(home_dir, "local")
    if not os.path.exists(local_path):
        os.mkdir(local_path)
    for subdir_name in os.listdir(home_dir):
        if subdir_name == "local":
            continue
        copyfile(
            os.path.abspath(os.path.join(home_dir, subdir_name)),
            os.path.join(local_path, subdir_name),
            symlink,
        )
def fix_lib64(lib_dir, symlink=True):
    """
    Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
    instead of lib/pythonX.Y. If this is such a platform we'll just create a
    symlink so lib64 points to lib
    """
    # PyPy's library path scheme is not affected by this.
    # Return early or we will die on the following assert.
    if IS_PYPY:
        logger.debug("PyPy detected, skipping lib64 symlinking")
        return
    # Check we have a lib64 library path
    if not [p for p in distutils.sysconfig.get_config_vars().values() if isinstance(p, basestring) and "lib64" in p]:
        return
    logger.debug("This system uses lib64; symlinking lib64 to lib")
    # lib_dir is expected to be <env>/lib/pythonX.Y; derive <env> from it.
    assert os.path.basename(lib_dir) == "python{}".format(sys.version[:3]), "Unexpected python lib dir: {!r}".format(
        lib_dir
    )
    lib_parent = os.path.dirname(lib_dir)
    top_level = os.path.dirname(lib_parent)
    lib_dir = os.path.join(top_level, "lib")
    lib64_link = os.path.join(top_level, "lib64")
    assert os.path.basename(lib_parent) == "lib", "Unexpected parent dir: {!r}".format(lib_parent)
    # lexists (not exists) so an existing-but-dangling symlink also counts.
    if os.path.lexists(lib64_link):
        return
    if symlink:
        os.symlink("lib", lib64_link)
    else:
        copyfile(lib_dir, lib64_link, symlink=False)
def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the interpreter
    """
    # If the "executable" is a version number, get the installed executable for
    # that version
    requested = exe
    installed = get_installed_pythons()
    if exe in installed:
        exe = installed[exe]
    if os.path.abspath(exe) != exe:
        exe = distutils.spawn.find_executable(exe) or exe
    if not os.path.exists(exe):
        logger.fatal("The path {} (from --python={}) does not exist".format(exe, requested))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal("The path {} (from --python={}) is not an executable file".format(exe, requested))
        raise SystemExit(3)
    return exe
def is_executable(exe):
"""Checks a file is executable"""
return os.path.isfile(exe) and os.access(exe, os.X_OK)
# Relocating the environment:
def make_environment_relocatable(home_dir):
    """
    Convert an already-existing environment to use relative paths and strip
    the absolute #!-interpreter selection from its scripts.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    activate_this = os.path.join(bin_dir, "activate_this.py")
    if not os.path.exists(activate_this):
        logger.fatal(
            "The environment doesn't have a file %s -- please re-run virtualenv on this environment to update it",
            activate_this,
        )
    # Deliberately keeps going after the fatal log and fixes up what it can.
    fixup_scripts(home_dir, bin_dir)
    fixup_pth_and_egg_link(home_dir)
    # FIXME: need to fix up distutils.cfg
# Scripts that are allowed to keep an absolute interpreter path;
# fixup_scripts() leaves these untouched when making an environment relocatable.
OK_ABS_SCRIPTS = [
    "python",
    "python{}".format(sys.version[:3]),
    "activate",
    "activate.bat",
    "activate_this.py",
    "activate.fish",
    "activate.csh",
    "activate.xsh",
]
def fixup_scripts(_, bin_dir):
    """Rewrite the shebang of every script in *bin_dir* that points at this
    environment's absolute python path, replacing it with a relocatable
    ``/usr/bin/env`` (or COMSPEC on Windows) form and inserting the
    activate_this.py bootstrap via relative_script().  Binary files and the
    scripts listed in OK_ABS_SCRIPTS are left alone.
    """
    if IS_WIN:
        # (interpreter, version-suffix, script-extension) triple
        new_shebang_args = ("{} /c".format(os.path.normcase(os.environ.get("COMSPEC", "cmd.exe"))), "", ".exe")
    else:
        new_shebang_args = ("/usr/bin/env", sys.version[:3], "")
    # This is what we expect at the top of scripts:
    shebang = "#!{}".format(
        os.path.normcase(os.path.join(os.path.abspath(bin_dir), "python{}".format(new_shebang_args[2])))
    )
    # This is what we'll put:
    new_shebang = "#!{} python{}{}".format(*new_shebang_args)
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore child directories, e.g. .svn ones.
            continue
        with open(filename, "rb") as f:
            try:
                lines = f.read().decode("utf-8").splitlines()
            except UnicodeDecodeError:
                # This is probably a binary program instead
                # of a script, so just ignore it.
                continue
        if not lines:
            logger.warn("Script %s is an empty file", filename)
            continue
        old_shebang = lines[0].strip()
        # normcase everything after "#!" so the comparison tolerates
        # case-insensitive filesystems (Windows)
        old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:])
        if not old_shebang.startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug("Cannot make script %s relative", filename)
            elif lines[0].strip() == new_shebang:
                logger.info("Script %s has already been made relative", filename)
            else:
                logger.warn(
                    "Script %s cannot be made relative (it's not a normal script that starts with %s)",
                    filename,
                    shebang,
                )
            continue
        logger.notify("Making script %s relative", filename)
        script = relative_script([new_shebang] + lines[1:])
        with open(filename, "wb") as f:
            f.write("\n".join(script).encode("utf-8"))
def relative_script(lines):
    """Return a script that'll work in a relocatable environment."""
    activate = (
        "import os; "
        "activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); "
        "exec(compile(open(activate_this).read(), activate_this, 'exec'), { '__file__': activate_this}); "
        "del os, activate_this"
    )
    # The activation line must not precede any `from __future__ import ...`
    # statement, or Python would raise a SyntaxError; default to inserting
    # right after the shebang and push past the last future import if any.
    insert_at = 1
    for idx in range(len(lines) - 1, -1, -1):
        if lines[idx].split()[:3] == ["from", "__future__", "import"]:
            insert_at = idx + 1
            break
    return lines[:insert_at] + ["", activate, ""] + lines[insert_at:]
def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files use relative paths"""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for a_path in sys_path:
        a_path = a_path or "."
        if not os.path.isdir(a_path):
            continue
        a_path = os.path.normcase(os.path.abspath(a_path))
        if not a_path.startswith(home_dir):
            logger.debug("Skipping system (non-environment) directory %s", a_path)
            continue
        for entry in os.listdir(a_path):
            full_name = os.path.join(a_path, entry)
            if full_name.endswith(".pth"):
                if os.access(full_name, os.W_OK):
                    fixup_pth_file(full_name)
                else:
                    logger.warn("Cannot write .pth file %s, skipping", full_name)
            elif full_name.endswith(".egg-link"):
                if os.access(full_name, os.W_OK):
                    fixup_egg_link(full_name)
                else:
                    logger.warn("Cannot write .egg-link file %s, skipping", full_name)
def fixup_pth_file(filename):
    """Rewrite the absolute paths in a .pth file as paths relative to the
    file itself; comment, import and already-relative lines pass through."""
    lines = []
    with open(filename) as f:
        prev_lines = f.readlines()
    for line in prev_lines:
        line = line.strip()
        if not line or line.startswith("#") or line.startswith("import ") or os.path.abspath(line) != line:
            lines.append(line)
        else:
            new_value = make_relative_path(filename, line)
            if line != new_value:
                logger.debug("Rewriting path {} as {} (in {})".format(line, new_value, filename))
            lines.append(new_value)
    # BUG FIX: prev_lines keep their trailing newlines while `lines` are
    # stripped, so the old `lines == prev_lines` check could never match and
    # the file was rewritten on every run.  Compare stripped content instead
    # so unchanged files are left untouched.
    if lines == [prev.strip() for prev in prev_lines]:
        logger.info("No changes to .pth file %s", filename)
        return
    logger.notify("Making paths in .pth file %s relative", filename)
    with open(filename, "w") as f:
        f.write("\n".join(lines) + "\n")
def fixup_egg_link(filename):
    """Rewrite the absolute path on the first line of an .egg-link file as a
    path relative to the file itself."""
    with open(filename) as fp:
        target = fp.readline().strip()
    if os.path.abspath(target) != target:
        logger.debug("Link in %s already relative", filename)
        return
    relative = make_relative_path(filename, target)
    logger.notify("Rewriting link {} in {} as {}".format(target, filename, relative))
    with open(filename, "w") as fp:
        fp.write(relative)
def make_relative_path(source, dest, dest_is_directory=True):
    """
    Make a filename relative, where the filename is dest, and it is
    being referred to from the filename source.
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../another-place/src/Directory'
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../home/user/src/Directory'
    >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
    './'
    """
    base_dir = os.path.dirname(source)
    leaf = None
    if not dest_is_directory:
        leaf = os.path.basename(dest)
        dest = os.path.dirname(dest)
    dest = os.path.normpath(os.path.abspath(dest))
    base_dir = os.path.normpath(os.path.abspath(base_dir))
    dest_parts = dest.strip(os.path.sep).split(os.path.sep)
    base_parts = base_dir.strip(os.path.sep).split(os.path.sep)
    # Count shared leading components, then climb out of what remains of the
    # source directory and descend into the remainder of dest.
    shared = 0
    limit = min(len(dest_parts), len(base_parts))
    while shared < limit and dest_parts[shared] == base_parts[shared]:
        shared += 1
    pieces = [".."] * (len(base_parts) - shared) + dest_parts[shared:]
    if not dest_is_directory and leaf is not None:
        pieces.append(leaf)
    if not pieces:
        # Special case for the current directory (otherwise it'd be '')
        return "./"
    return os.path.sep.join(pieces)
# Absolute path of this script; __file__ may be relative when run directly.
FILE_PATH = __file__ if os.path.isabs(__file__) else os.path.join(os.getcwd(), __file__)
# Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=""):
    """
    Creates a bootstrap script, which is like this script but with
    extend_parser, adjust_options, and after_install hooks.
    This returns a string that (written to disk of course) can be used
    as a bootstrap script with your own customizations. The script
    will be the standard virtualenv.py script, with your extra text
    added (your extra text should be Python code).
    If you include these functions, they will be called:
    ``extend_parser(optparse_parser)``:
        You can add or remove options from the parser here.
    ``adjust_options(options, args)``:
        You can change options here, or change the args (if you accept
        different kinds of arguments, be sure you modify ``args`` so it is
        only ``[DEST_DIR]``).
    ``after_install(options, home_dir)``:
        After everything is installed, this function is called. This
        is probably the function you are most likely to use. An
        example would be::
            def after_install(options, home_dir):
                subprocess.call([join(home_dir, 'bin', 'easy_install'),
                                 'MyPackage'])
                subprocess.call([join(home_dir, 'bin', 'my-package-script'),
                                 'setup', home_dir])
        This example immediately installs a package, and runs a setup
        script from that package.
    If you provide something like ``python_version='2.5'`` then the
    script will start with ``#!/usr/bin/env python2.5`` instead of
    ``#!/usr/bin/env python``. You can use this when the script must
    be run with a particular Python version.
    """
    filename = FILE_PATH
    # If this module was loaded from a .pyc, read the matching .py source.
    if filename.endswith(".pyc"):
        filename = filename[:-1]
    with codecs.open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    py_exe = "python{}".format(python_version)
    # Prepend the shebang and a "generated file" warning to this script's own
    # source, then splice the caller's code in at the EXTEND marker.
    content = "#!/usr/bin/env {}\n# WARNING: This file is generated\n{}".format(py_exe, content)
    # we build the string as two, to avoid replacing here, but yes further done
    return content.replace("# EXTEND - " "bootstrap here", extra_text)
# EXTEND - bootstrap here
def convert(s):
    """Inflate one of the embedded file payloads.

    ``s`` is ASCII base64 text wrapping zlib-compressed bytes; the result
    is the decompressed content decoded as UTF-8 text.
    """
    compressed = base64.b64decode(s.encode("ascii"))
    return zlib.decompress(compressed).decode("utf-8")
# file site.py
SITE_PY = convert(
"""
eJy1Pf1z2zaWv+uvQOnJiEplOnHaXNepc+MkTus7N/HG6WxuXZ+OkiCJNUWqBGlZ2+n+7fc+ABAg
Kdm+7Wk6qUQCDw8P7xsPcBAEJ6uVzKZimU+rVAol42KyEKu4XCgxywtRLpJiur+Ki3IDTyc38Vwq
UeZCbVSEraJe7+m/+Ok9FZ8XiTIowLe4KvNlXCaTOE03Ilmu8qKUUzGtiiSbiyRLyiROk39AizyL
xNN/HYPeWSZg5mkiC3ErCwVwlchn4mJTLvJMhNUK5/w8+jZ+MRgKNSmSVQkNCo0zUGQRl71Myimg
CS0rBaRMSrmvVnKSzJKJbbjOq3QqVmk8keJ//oenRk37/Z7Kl3K9kIUUGSADMCXAWiEe8DUpxCSf
ykiIN3IS4wD8vCZWj6ENcc0UkjHLRZpnc5hTJidSqbjYiHBclQSIUBbTHHBKAIMySdPeOi9u1ACW
lNZjDY9EzOzhT4bZA+aJ47c5B3D8mPV+zpK7IcMG7kFw5YLZppCz5E7ECBZ+yjs5GelnYTIT02Q2
Axpk5QCb9BgBJdJkfLCi5fher9DrA8LKcmUMY0hEmRvzS+oR9c5KEacK2LZaIY0UYf5OjpM4A2pk
tzAcQASS9rrGmSaqtOPQ7EQOAApcxxKkZKlEuIyTDJj1p3hCaP8tyab5Wg2IArBaSvxaqdKdf9hB
AGjtEGDYw8Uyq1llaXIj080AEPgM2BdSVWmJAjFNCjkp8yKRigAAahsh7wDpoYgLqUnInGnkdkj0
J5okGS4sChgKPL5EksySeVWQhIlZApwLXPH+4yfx7vTN2ckHzWMGGMvsfAk4AxRaaAcnGEAcVKo4
SHMQ6Kh3jv8T8XSKQjbH8QGvusHBvSvdC2Huq6jZx1lwILteXD0MzLEEZUJj9ajf79BlqBZAnz/u
We9e72QbVWji/G29yEEms3gpxSJm/kLO6H2v4byOVuXiFXCDQjglkErh4iCCCcIDkrg0C/NMihWw
WJpkctADCo2prb+KwAof8myf1rrBCQCh6GXw0nk2oBEzCRNtw3qF+sI03tDMdJOeXedlXpDiAP7P
JqSL0ji7IRwVMRR/G8t5kmWIEPJCr7/Xp4HVTQKcOI3EObUivWAaiT5rL26JIlEBLyHTAU/Ku3i5
SuWQxRd16241QoPJUpi1TpnjoGVJ6pVWrZ5qJ+8dRl8aXEdolotCAvBq7AndLM+HYgw6m7BZxUsW
r3KdE+f0OuSJOiFPUEvoi9+BoidKVUtpXyKvgGYhhurN8jTN10Cyo15PiD1sZIyyz5zwFt7BvwAX
/01lOVn0es5IFrAGhchvA4VAwCTITHO1RsLjNs3KTSWTZKwp8mIqCxrqYcQ+YMQf2Bjn2vuQl9qo
8XRxlfNlUqJKGmuTmbDFy/ol68dXPG+YBlhuRTQzTWs6LXF66WoRj6VxScZyhpKgF+mVXXYYs9cx
JtniUizZysA7IItM2IJ0KxZUOrNSkhMAMFj44ixZVSk1UshgIoaBliuCv4zRpOfaWQL2ZrPcQ4XE
5nsC9gdw+weI0XqRAH0mAAE0DGopWL5xUhboINT6qOcbfdOfxwdOPZtp28RDzuIk1VY+znpn9PC0
KEh8J3KFvYaaGApmmJXo2s0zoCOKeRAEPe3AiFyZb8BIvV5ZbI6AF4QZaDQaVwlavtEIbb3+oXo8
jHBG9rrZdhYeqAjT6QOQyekyK/IlvraTuwRtAmNhj96euCA1I9lD9ljxFdLP1ekr0xSNt8PPAhVR
7+LT6fuzL6eX4lhc1Tpt2FRo1zDmaRYDX5NJAK5qDFsrM2iJmi9B5Sfeg30nrgCDRr1JfmVcVsC8
gPrnoqLXMI2J97J3+uHkzfnp6OfL00+jy7PPp4AgGBrZ26Mpo32swGNUEUgHsOVURdrA9lo96MGb
k0v7oDdK1OjlN8Bv8CScyxK4uAhhxkMRLOM7BRwaDKnpAHFsNQATHgwG4rU4FE+fiheHBG+1WW0A
HFhepzE+HGmjPkqyWR4MqPGv7NUfs57SLtzV0TfX4vhYBL/Gt3HQAzeqbspM8RNJ1OfNSkLXEv4X
5mrQ6/WmcgbidyNRSMOn5BsPuAesCjTNta3+NU8y854Z0x2DdEhIPQCJ0WiSxkph49EoEETzImJP
GkUshBarjdtmoMfETyFhETPsMsR/OnCJx9QPx2Nc3C6mEcjmchIrya14otBxNEINNRqFekQQXRIP
cK9Y6fSFaYIaqkjAiyU2Q401VnmKP3EAlHmSNgykUCfieuhAKbqN00qq0JkVkCts0Au1aqKI98Af
CcHs1os0IPYxHAGvgGRpDmqyQGrVYPGzB/4MqCMTpGEgx7EVkwlx+w+OJGhYcCn7qM6VakBh7Scu
Ti/Ei2eH++iuQEw5tfTwmqMFTrJK2oczWCDD7owv9zLi4FJihnoUnx7thrmM7Fq0V39mVrWQy/xW
TgFd5E5nYcUnegOhN0xkEsPCgeImg88q0jiGMQaaPH0QBXQOgHxLgmKW3Kz2Hkf7MlOgaDhyJlrr
sJ5t3arIbxP0JsYb/RKMIShDNInGc+k5K+fxFdo8kH9wczMk1Vr2Qd0VFXukhDeCRHswrRVnRODO
URNf09ebLF9nIw51j1GphgPLrShMml+xQb0Ge+I9WBlAMofIrSYaQwGfXqA87QPyMH2YLlCWggcA
BJZcUTzmwDKxHE2Rw0IcFmEMXgkS2EKiQ3BrhqBYyhDDgURvI/vAaAaEBJOz2suqA81lKF+mGQzs
kMRnu/OIY0wfQIOKEZjFUEPjRoZ+V0egesW5q4icfj00uV++fGG+UQvKoCBmY5w1+gszsobRagOW
MwFFYHwuzscQH6zBfQUwldK8KfYvRb5ifwsW9EJLNxh6iN/KcnV0cLBeryOdP8iL+YGaHXz73cuX
3z1jPTidEgPBfBxx0cm06IDeoc8afW8MzGuzdA2GTDKfHQlWKMnvImcT8fuhSqa5ONofWJ2JbFzb
YfzX+CqgQkZm0B7TGagb1Cj9/sf+70fRiz+CCJvEZej2CAfsfmi7aS2Vb1qhR5mDwQf3ZJJXaJFr
hlDiaxgOIvGpHFfzwGLg2UHzAyaM4hpaVth/fo0Y+Axi2Mva2xFqC2IPtOvOCnxi9onJC9KaAqmM
1qkVJm+6tZmh8fThcm99Skd4zBQThUyCkuE3ebAg2sZtCcKPdmHR5jkurPl4FsFYeWPMp1PtQYYo
QIQMrsbQFT+Hu9G1Balas44GTePqSNDU4A2OmdwaHFnLPoLsCwoEKKznpdUtgFCNHKHJBJhlgBbu
OC271+QFxw1CJjgWz+mJBF/4qPXuGa9tlaaUumlwqUcVBuytNFrsHDgzNABANIqAG5pw5GNjUXgN
OmDlnOxBBpu1mAnfeP7fXtOHaS32rt5M4i4QuEYhJ50eBJ1QPuYRCgXStAr9nttY3NK2PdhOy0M8
ZVaJRFptEa6m9ugUqp0GapZkqIKdRYomaQ4esVWLxEf1e99poHgHH3caNS2Bmg41PZxWx+T3eQLY
1+0wUTavMKXgZlwQpWWiyMohnRbwD/gXlMmgtBMQk6BZMA+VMn9mf4LM2fnqL1v4w1IaOcJtus0f
adkMDQfdICSgzriAAGRAvLYckvfIiiAF5nKWxpNreC23CjbBiBSImGM8SMEA1vTSC2vwSYS7HCSc
CPmuVHIlvoYYFsxRg7UfprX/XAY1iYbQaUF+gs5QHLvZCydzcdzIZPi87OcwaENklQPzjsHrcbP5
LocbfrWJFXDYfZ1tkQJlTLmWYOCiem0o42aMvzp2WtTUMoMYXvIG8vZtzEiDnl1vDRpW3IDxFr05
ln7s5iSwbxjk6lAu7wI0LYma5CqgwLaZnXA/mi3atLHYnidjBOgtQDC49iDJVCcoMI/yfxnkfuje
ZDHvMY2LdZIFpLs0/Y79pWnhYUnpGbiDSwqpDmCimNY8eF+AhND+5gHIE+kCiM1lX2nXvw23OcfO
BvjxJh/YERlu4HnTV0cvrtuEGT4MtKXr6V1ZxArXL+VlZDHZBui6PTfSy0iEONvorUy91Y0hRZEr
CDnFx8svAinGWd51vLmXRN3o+kiCUruXIA2MiVUANqpEZJKDYDc3duL5Z+HWTeOd0B1V9rj+u9AC
2PvTTYYZrS6g160n0OHlN6OOlKSL7stvHkmOLnFsOGd24IHnuhUyTsnQO+8pn5fds4K242rAepZC
Mr3+bUK0eUWjafPQ3S3wY6BieluCMX82rMnYnjp+fI9nKzxtT3YAa0WU5rNHiwIBeD7+FaJYpRNg
t3GSUo4fiLG/j0rQROCcWugWXg/STvl2SNAVIqmrZ8gdHPIP2tPRXtOJyQd3RKrms4pVG5U9vaFf
70d5m/TuXuR25dBpxn0LfvAAjeCN3DVZbdlMr2e8sXDYobYegd2fgNd25vwTcHj2IBQeNRBD04pz
0LAJXVNxrYAB9gDVvMVF2eGgtIfmnbGZcTNY6JR4itL5VKxpd5wSfbhXAVCm7IB0wEE+1Husb6ui
4J1SEvKVLPZx828osDDIeBpUb9QGc/BBloiJbTahxKlTRpJ3MW6gU6F2JkHtwXbzziI3iROZ3SYF
9AWVEgY/fvzptMM+6GGw08P1pLeG2JVdWJwWruLDXS0fH7N9a9J6etQ2Xq1snokIu6dwT1Tv7bjx
nijFWpOFnNyMJG304jJjXye1+RZfIyp2/9cv11HxjGqOYCqTtEIasBuFxWKzKptQsruUYJJ1ZSdW
etD2LSdxZmk8FyF1nmL+QK8mpRhu40I7Gasix1pCUSXTg3kyFfK3Kk4xQJOzGeCCWxH6VcTDUxpB
vOMdaK4xU3JSFUm5ARrEKtc7ObRZ7TQcb3iioYck5+yZgrh9fSQucdr4ngk3NeQyYZ6fuMZJYgSF
HZyt5Yiew/ssH+GoIyQvMBQh1d5Zpce95gi5zozD/PUIzTeSXrnbRbTmLlFRy3ikdGPGnMw1QgkH
GLPyb/rpc6LLXFuwnG/Hcr4by3kTy3knlnMfy/luLF2ZwIWt8w9GFLpyEM3kdGd5hJs+4HFO48mC
22GtHtbkAUSxMiGQESouWfWSFLxbQ0BI7zn7h/SwLndIuAiwyDmTqUEi++NehA63TLGx05mKJ3Rn
norxeLaVfPh9D6KISnvG1J0FbhqXceQJxjzNxyC3Ft1hDWAomtUfnPfKbkdjztE1Vf3Ff33+8eMH
bI6g7HY1dcNFRLWNUwmfxsVctcWpDhhWwI/U0i+aoG4a4F5nlqTPWZL+UPQ5S9LXw+zxP+9oyx45
R6xpxzkXK7ChVIVjm7m1Kv1+47kuatHPmc15zwDcgqx0XIItVDq5uHh38vmE0zfBPwNXZAxxfflw
8TEtbIO2B+Q2tyTHPrWJrO2fOyeP2A5LmF4tT6/x4HBbYOjj+P86TyAoIBHpNNzjpvn4LIDV/a1C
KTfEZV/BvnMY3/EQHLnryt82B/A3jNBDHgH62oMJzVTqWKahcq1x20EPB+jjIo6m9+zTwQ7d4TU5
9LDNtvtM7TS1kuWb0x/OPpyfvbk4+fyj4zWh9/Px8uBQnP70RdD+OKp8diNi3BMusRQDVLF7lENM
c/ivwnB6WpWc+IJe787PdZp6icX8WN2JWjqC51zHYaFxZoIza/ahLsBAjFLtkzunJqhcgU5VoIu+
5Ip9lesKUDqMMUb/rtLevj4NY07N0IZeBAwPjV1SMAiusYFXVKdbmkCk4P0PfZKkAylt1eyOeEqZ
l9Y2qZP8N6llLz9FneFJ3VlrxqvAxTW4jtQqTUBXvgqsAOhuWBhQ841+aHf2GK8upeN0h5F1Q571
ViywXOJVwHPT/Qc1o/1WAYY1g72DeWeStsWpxBSre0QfG/V5+1vewVe79HoNFCwY7nKUuIiG6RKY
fQzxnFgk4HMDTy7AXKFrDRAaK+FnRI8c6yNz3JUO3i6n+38NNEH81r/80tG8LNL9v4sVBA6CayiC
DmK6jd9BrBDJSJx+fD8IGDmqRxR/rbAAGkw4ZZUcYafCDd45HIVKpjO9r+6rA3yhDSu97jX6F3JV
6P7d7mSAIvD7HyHZ2d//MAS01S52gCHOZ9CEjxXeFj88KeVuwprPnrhcyDTVZb1n785PwefConOU
I956OIUxOVDHbURdgsQnuRqgcJMRXhfIzAW6frTHPI28Zp35QBQ86u1tS9vVopxbu1crwVbEiXLR
DnHamjCmbDpCjoYlMasb4G8tpbYN0tltQ3RH0WHOGF0UVMjnswawND2NOZiAKANr2k12k3fSkqw0
hVtpMgF1CpoX9OoQZAWpiwe3iAHzjPOLeaHMeQ94uNoUyXxRYnoZOkdUa47Nfzr5cn72gcqvD1/U
7moHjw7JhR7yHvox1khhogC+uHVPyFejkcu6jVcIA5UQ/K/5ijfnj3mAVj9OaXn5bf2KD9wcOzEU
zwD0VLVqCgk6zk63LumphYFxtREkftwSqBozHwxlwbC+X+92u/Nrs6Jt2bAo5HqYl49Ijs9WtiJH
d3YqcpofPcXZCrcUpmF3I3jbJVvmM4auN60322p/3E9LCvHoH2DUbu2PYYpjWk31dBxe28YsFA5q
S/uLGxQ47SYZSjLQMHQ7D1we69bEujkzoFdN2gImvtfoGkHs1ueAovYzPEwssVsBgulItfpPVMgF
BFIfcajAk3qiSH8E4okIQ0dwhwPxVBx6s3Tswf2z1MoLbOSPoAl1MSLVt+cFsCJ8+Y0dSH5FiKEi
PRJ1OIjTy3JbJ4Of9QK9y+f+LDuFgPJ/KHdFnM1lyLCGBubXPrm3pC9J3XrEvkquu0yLOAMn9W4L
j7cloztnb1BrsEKr3Y3cNPWRTx5s0FkUv5NgPvgiXoPyX1VlyCu5ZZOv89DQdqj3Q8TqK2iq9z/D
AD3o37rK83zAndTQsNDT+23LlhOvjfVorYVsHOno2xfalZ0UEKKVqo9IOsewjFNq/YDa5B7XRj+w
T3Wm3v7uOA7jZFMsUB7cB8nPkFx8QIPPwiVU91sfH9DvpvJWpmAdwJqGWN39q63uHkQ2Y9FZ9PJQ
JC42F5tuFOhNjcCRqTLHcai+vDNlsmtcb2VrJPzHQfCLjjzi7Iaiy7d/OxuKtx8+wb9v5EcwlXiI
ayj+DmiJt3kBYSQfQ6Tz4ViwXnJ8mFcKT3oRNEri81F6dMsuPPLihoGupPdL6K0iFFg0iL443t8A
KNY4Mw3okG7tEZgacfhtDrM0/E7jE3ZRJ9AvnVECU+Wv2mX+KzU70D0OnC5XwfnZ29MPl6dReYf8
a34G104bNw72t6wkaX98VOC20VDYJ5MKn2gojqv8o0xXHZ6yjjbNiQGMNkUfQpGVjTD5toDYxhNx
gSkDsdpM80mELYH3+ZhSuQbXeeAElvfads+wIqxwoHecav8dHwN9RICnr7CJngI1pPHjMZ6O4cdR
sMXWDgVlfeF/T2/WUzdprI850IR6TdTqaYZ+/1rbLZiyGqLlIULt2BLeHnhLk1gtxxP3VNTHTOj7
HUCR0S6AnMVVWgqZQeREAT0dtAdt7h5kYjHhlWaTRad7KCWTruONcqo4YiUCHJV2XyVuV1BKD+Lt
n+Ib1vl4wkpUfLAToBOiFCTlTldVTRYsxhz3aEXb2tdeJ9kLt0BBE5kH5Th4ompqw0TRO2SU5rLU
BOAH4eDqeb2TTlnfiVecN1npGjz49vTp00D8+/2ODWMQpXl+Ax4XQOx0SM7p9RajrOdkF6mjLs68
ioAVJwt5BQ+uKSltn1cZZR93dKWFkPb/BgavSc2GpkPD5nJmruANWG7B+zjajvycJXR9C+aPJKpa
fQsO5paMJBEvghLox2qSJH3OQsA6bPIKTx5hLlEzirwDXk8QzBDf4sYRB9ILdB+p8s+yjUUH5kKA
Ay7L4eHovCadugFERxcbjefoLEvKulz+mburqI80l/ZaFs1QIl6jTJiJNKjhHGPzmLReTfi9izm9
kCSfXD1vlIg58+TX9yEPTA1Cls9mBlV4aJZpkstiYswprlkySUoHjGmHcLgzXUdDSfao14FSAMqc
bMDUCrN9+5VdGRfTj7TDum9G0oUppb3ih7M/cdYozoqienzKM1lCWs41X6i2/gOlr7Ur4A0mvtJc
jxXT3lUE7sGtKtNXDHBpQ33vAMCha22scrQc6SkK5y4lC5/5dsu9APihIJyRu02KsorTkT7JPkIX
bmT3nDWi9sDPzsNs1ksZohVM5vu6QBldh0F9sg0LGU0B+rGY6SxE5B6i8c+brHJ0AA/ZRwJPAkkz
HZkjoCZNdGUZ1h6q0xVoDlsgqK/NMY0tnrc5JdCqr3cx1xl/XTq1xX32C+LoLL54LcIXQ4i1G8fV
Vhu8nQaQfTLF6LzZDabiBP5mjs2D/uL1sQifD8W3O6BHWwY4OnRHaEasDQhbgby4fjQZ02RMZMPj
Cvc3NXVxQ43S4LpJ7TZVviei/KVBFD77pMs7nz9u7GAJCmCWyOn+E4WE0LjUOnY7m9KwePJXl86R
4eNDr6LPZXZ9Pi87hqhiXSs0R7UZb9eOYk7TajXEBw5RZ7ANtKelTUVIo6zNXoCFJW8avwaxAMCo
aw8Mv+FuKbzXtHB9rK1bo7pG2oLtCPn12TTe6aob1vJ2rzP3KEG+lwFwF7SWd1dAHsHpO/fbHV7+
8/mHa663bWO2EL2nIt7zJjoL3ztWdXfxuSePtuK8XnPz6U6nedzS1fVfkhoHSi0/4Hyaq/TIbTzC
cle+QyO3Za9LvKyO9pXRvKXSdYI4GOZbidArofifiqdgNcAvqma4DXxLd++dKCrywis2vjl8PjTn
GRmQnsCL6MXX+oo66mbObPs+81AHYfBS9zuM/s0BlpR+b/uqFZ/QtI4bV9X4g3HKaxCNLB1qI3N/
aXy9ZrHAO91qYrKrbK8veA+eyxh8FnM5Yw7eF0SWURe6rtqoRXibfnu8FPvaEAfdJSzbNOAu7fdo
Vta9us0AxvLIuYuY36BbVq2ObMLLGAbccAzd6kPiiZvOozVsFNBkljfBrulz/11z1y16duamlKA1
d/9km0sD3Xe7KeyiAecZNCEeVBNfK21rEbx5eTRy9fpQl7O0y7TdVjjPLXah1c+Ux2BSnbhRI2uY
cxlP3O9YMrXP97t2lDU559JchmhMt6Xrm5z+EDPPJSf43J5npjqiqeY8HbTQzV0jLuYcyflcjWK8
Nm5EsS5VUbVCGBM8vadLv2SsNibkwbtSAIRhI10i6paBA0eAkuPra7kKybkNQ9DQVN3qlMuqZMq6
XMdXAC7iNCX1N4lyrncNUon5ZlUVqwKC0kBfcsrlMV01uDVQk0JbxurGoG56DPWdl6QRqSbLnGbk
5GijtgaIwNa3pdNHI/sOuOZZfbY7GVpukFm1lEVc1nea+DvwCUQ+9Qh00BVX18mP1eLYYBMXscQy
h4MUpqrs968huvND2x2HuQdWzz2cn6iHzrs+5py4Y0YsHyd39jIz9z6dKd2a67p2tlnNd/YSF1gI
c00XqUedtuK3+uIgvr6SE+joHDocBLLvniPzC6atJmPf1KLhPHN1QvuKGDynTHa183q3Jg+073dr
xKF2fF9lWFZpOYdbGlsqao+6U+fUow2cVA5Wat6TyvEL2B+ZyvHgPyaVwydBihRveUjxzjAHOZ23
M8lFzLnQwRL/Dk7cSGzVJAEnjzea6edY00X5ulgLodN+kec3oxHzz9nMeLI8jjZIukLf3G0+pEbY
j3NySmeCP8dj3MxlXaWEnhBiiFu8qDwMKuKfB3obawS6lc4aQAfzle8URSjkMenRc1CKRTKd4j1p
rPO97BnypbcGGnl2I2jbBy+7o+L/y88nnz7/fOEM5G0oGSxHhg5he0MHVOZdUtYmtn2nErczIDrf
1Wve9K87mYYXkwqfHM//E4xgys382+bxNBLyyYDrfZfxxtxEJrO8mi+40ATm4kDzF814WNgK73ie
gddSmlq1jCbHZSqWkIwjz3k0pdy2sUrmMZkmeEV3BQa+o+l11TfRYt0AjYmuKe4b016R27JJIn6D
+5dKjqDnaJyAXgjwX/HfZ0D3kSF8sEsJbQUE/umRcEBs5wILAr9wpR7Sysnr6wX/eNkZSZ1k5g3v
M/OxSHJ4l3gdAt3aqzjy5fvtVSn46nXRLLfY44M6XFBdoM+d5SKico9iwiwSwjgRktg80JJGmQld
tliDw+vjP16KL2C89coMTAfcqqFb2s35ULyCN+PRJ3TIY91Ebh2zz8bX7Goo+i5ePyRk5dnBNbh/
MprwMVSjWEZgLedop9GWPWvS9wzr06ziWeNt/HjrJqg4o3MBo4aqihowsER+XuEhWbrGz/zxAkwn
3ObJlGvzUULNMHhrLCiQBhjaN3WvjqT61LWRM9LHiCDeYxdPN9YGyGkDULkoSLqb2g4V9hAry5pE
0MUd42qu3OKCRKlKfvvdN9/uLdX8+V++e/nipdfRTGfLZXnO+ajgn5ipCxp0bBQ6dm6k+vJjlpRE
SP/o3FntlKY26+AHFf66QF9RA2ye+9uKm4efB2IHgg6S4Xto9SEv36NXT/gOxYUs6EasPKMHW2qq
9hrH+fgvY6CupKiNrB3/RrTQhdgGRpeVdC49LPlfnnd2bFORzWFkLGfokcOJDTs8DyxkaxpccI28
rRlrmtly2mIuyra5CwZ2nGqXjBd3xCUPtvrEnJvR/jVeylnIfftHW/Q16BYcdQZ/al7ES6WVGqcY
+OSLfw0wOmLkFVeryCZ7GHIdiuPdjYhiX5l7QCMX+6af0bH/j+ferWbUhROAhKEJIzNqXNzlgrR9
fZj/SFb6pr1Hg7Rd7Q3ToAj0sujwtvOY5j3bmCaJ07FH18kH7Hm6lz7X0dAIm20UcZt/f7AZxT0i
ucpVchfYv9fAGQPnsBxGY+2rQKkrX+qsnAyTd/2qO6fmHqYfWNLjH84/vjk5J4qNLk7e/ufJD3QC
AGW7kal58CZvlu/zmux75wTdDV9dtt01eI1tx7XrfNRcQ2i9b9W7dkDovlagaxmbQbz7eluH1uns
didAfPe0G1C3pSs6Ibeykfq6NLdUu3E2sKef8kEu88spgjWPTKWYO1ZjMxqHfOEckGpHnixBdRWZ
AV7X/+jQtlUbsW3VneHaUTi92zP3dm8p3BjYU4G0fBgPInPaqh9zJtpWlnBqr/mXt8hPwxslzH13
IMET6Vy9TLcuM6jS/xNfRZVhfBrrP6Y1tH8ug9pxhZOyfwcGSxUnOhxqXenQnp+bLJnKdAsVQLGy
UjV3CjMqRsnqekBbQftEiat9un5lH1XVtf2Fq6Yzw39LsE62tFdzKq6lZ6MHjWdV6ta+2j6tDpQw
pYqqfOacdwZ9eQCUrqVbgXRgrpGt4Xgj+k9UX6cz8SQXUVLfq+sgj+6Mg72h1jOxv+1yEfdyDSGe
b284bdzfoXsccg91Tw9VmRscHJuIJ4q23RoiXhNkLsoTdCOnl7bDGlC91wVfb6+eH9nqGOR4fO3o
ITr4EDgpsSvHo955AbTTHb/+/lXxx9CeOMR9m0FzlOugcfRw+/ZA6yj1li0EU57LkALvffdusenh
/VWcoNfE1XLgEc1MhL//MbCzc06O6ynYJ4P2tGsttg0Un4V3QdHh+Taoll4UTwrcamw+d843bfiM
YvjMnm6kS/XdoqnAkRNy23we4R4mj1rPtNmdLpB5UHdG0fTnBk2e1MsP4s7OCLcaeIme7WaiOfWa
J7b4FHwXT3f/5w/o3z6DY7sf7koP2VYvOu+PYM8YD25hcXuTS83zCCwPqNKQFPgTEVrZxxvKakK6
DFVPDhmDAiVyzTGUonQ9edEj7VRYQ9H7X+4V6Ls=
"""
)
# file activate.sh
ACTIVATE_SH = convert(
"""
eJytVV1v2kAQfPevWAxKk7QU0cdWRCUKEkgJRJhStU3lHPYSn2rO6O5MQj7+e/dsY2wc6EPDA2Dv
3N3szuxeHSYBVzDnIcIiVhpmCLFCH+65DsBWUSw9hBkXLeZpvmIabTidy2gBM6aCU6sO6ygGjwkR
aZCxAK7B5xI9Ha4ty8fNKjg+gScL6BMLhRqac1iu/ciDs5aPq5aIwxA+nR21rQRTB4kGFYU+oFhx
GYkFCg0rJjmbhagyVA1+QfMRGk/T7vi9+wK/aZ2OpVCgZYzA50ABoPx89EImKS2mgYVhspyi2Xq7
8eSOLi/c6WA8+da9dK+7kz5tZ9N+X0AHKBK8+ZhIx25U0HaOwIdlJHUCzN+lKVcWJfE5/xeZH5P+
aNgfXfX2UMrjFWJ5pEovDx0kWUYR1azuiWdUEMWkj4+a1E7sAEz48KiCD3AfcC+AgK0QGP1QyIsW
CxPWAUlgnJZtRX7zSBGSRkdwRwzIQPRvHknzsGRkyWyp+gjwnVwZxToLay7usm1KQFMgaJgSgxcw
cYcK7snezDdfazBWpWPJYktijv5GACq/MOU/7zr9ZlLq5+f85U+n7057Y2cwGjZfkyFJsinJxLmh
S0U7ILDT3qOs065I6rSrWjrtgyJm4Q2RFLKJ9obTbfo1w61t0uuALSLho6I+Mh2MO/Tq4GA4hw2g
tkOgaUKb1t+c/mLRtEjjXEoMccVKLV0YFuWzLavAtmO7buHRdW0rq0MxJavSbFTJtFGzhwK65brn
g6E77F71XPdzBiv2cc572xCmYPTGKsl6qFX3NJahtdOmu0dZRrnUnskpxewvBk73/LLnXo9HV9eT
ijF3jdAxJB2j8FZ0+2Fb0HQbqinUOvCwx5FVeGlTDBWWFxzf0nBAwRYIN6XC39i3J1BanE3DgrNN
8nW4Yn8QVCzRzIZYsJAzlV0glATX7xSNdYnMXxvCEq0iotCSxevm6GhnJ+p2c21YVvqY31jLNQ0d
Ac1FhrMbX+3UzW8yB99gBv7n/Puf2ffa3CPN/gKu/HeT
"""
)
# file activate.fish
ACTIVATE_FISH = convert(
"""
eJytVm1v2zYQ/q5fcZUdyClqGVuHfQgwDGnjIQYSO3DcAMM6yLREWxxo0iMpty7243ekLImy5RQY
lg+RJT73fvfwerDImYY14xS2hTawolBomuE/Jjaw1LJQKYUVEyOSGrYnhsZrpvMlvP3CTM4EEFCF
EBZsv8MAcmN2N6ORfdM55TxO5RauQVOtmRRv46AHdxKENFYQmIGMKZoafoiDYF0ItCIFJCuic7Y+
JDtichhmEH6UYk+V0WjQGXIHRoKWW2od2YAVgZQIQHVyI9g3GgaAf5oaJ3JU1idqs68PrFB10ID+
+OFPh1hL5QzhR2UAo/UxP8bx8Ijr0Bb2m5ebfq2kdImKrHymuQQPGNgDLwvW2qUsuHDPs+CS05GF
0pSNHf4BoyC6iSD6LKITkxmt6mztReOvWKA9U6YgnIo9bGVGgYgMtZtCCWva5BSrqbaEY1gIlWJL
hYkjZ7WHQJsYyTP/FPZEMbLiVDsUW4Oh2HxDgWlLZg93yctkvvh0+5A83S7uwzrFPddcGrtrg81X
rGxruUYbuk7zfzKtC6pHP73/GQg3VGFLW12Qo/Mc81TrrGwPygT9Nnm+T17G8+fJbFomKoxDCD+L
8BqbAobcwPtatir7cPO11D5oV+w8lutalnJNLys6l2wEj71Ty1DoBrvCfie9vy/uZ9P72eM4DM78
qM9OvakPXvejDXvFG5fzp/ns8WmRzDD388nd2C/6M2rHhqbbnTkAlyl22tINYlK1rUv30nYj4Vx+
cT2p6FbuESrXsHTgnZKoYVlRWyWr0fNl3A6Fw7n6wPNorIim3lxE+sRGOSLaSEWdM1KxDROEN3Z8
8+DJdgFSSHCAEg/1PQl6JtFZq67Mt6t1RFdFHU9f2lUMHaXgaITw5heIhBQZflaFJREatYrI18Pq
7E23z7tDJtPuo4aXLoTrXxgXIP5s1lG6SHvwSdhImVKU0z3xGSoOPE5sxxcE1bB4+YEwSbzXJAmt
/v+PuP4jYVWennEFklbrsu2XPFXz02VBh3QJbHFX2PfCHyXJh8k0md4+jjETR5E638t+wxJL21P4
MQ7KJwz/hhMO6XbF46kuPPW1tC+7pt92B5Pjh+G2/HZcEhy65qtv7ciSu8nz7YeH8XF+wuN991Hu
Dm7k0wKbCRupTQy1bYKUcTqjRxpqTb4/9Gcz3YJ3cgIOHtnTlkN9bYgp9Du33XgyGlHErmN6x8kB
N7MzUrTmS+FKiU+KT6WTEhcUxXBNQK17fGa/epjJ2m5+7+Avu2vuFN1hip1z/nIgyJY2tv37opms
I2klzT3hyqiYMGuIrvSVjjrhMMBYklRyjL3cWl65kht1gyt9DVGHMAxweKj1uN0doae24tIyBfOO
a6FOZy1jZzukdvvqN1kPccDLjbwGdtJ8m72rgeki+xOnXcf/CzFcuJM=
"""
)
# file activate.csh
ACTIVATE_CSH = convert(
"""
eJx9k9tq20AQhu/3Kf7IJm5N4vRarts6caCBxAnBCZSmLCtpXC1IK2e1svFNn72zklzkA9WFkOb0
z34708Mi1SWWOiPkVekQEaqSEmy0SxGURWVjQqTNlYqdXitHo7hMAwyXtsjBn8OR6OFHUSFWxhQO
tjLQDom2FLts6703ljgvQbTFTK11QphpXGeq1Pic1IYk+vY7VzobxUX+ZSRESQ6GNpk2NBm8iYEQ
KtOqREK7LjBwxN32v8rH+5l8vXtevEzv5dN08R1nE3zC+Tm4CJk1alvQP4oL3wMfVRkvduQdw1Kq
ynSMkzrPjw9Pi64SVsxj5SaHQnXgf6Rq/7hx+W53jtv5aysdvJ2Fw8BrBaYwCZts5SFQW/OITMe6
2iZFzPR6eKm1tbWU0VoZh7WyWkUZlSPRyd1XqC/ioCsEUnZ+pQya6zoiyChazGL/JjrZ4fuVlNd3
czmfPtxKGf7L4Ecv8aGj1ZBiuZpE8BEuJSPAj1fn8tKonDDBqRxBWUkng/e6cV6aTKKXHtlNUWWJ
3wdtoDyZS20c2ZoV+SLaFiYn4y44mGM2qY5TXoOSLtBvxgG8WhUTXfIgJ1CG14qw8XXNwHFWrCxB
RUXl/HHaGeK47Ubx5ngCPHmt9eDEJ8aIiTex/hh1cseAyR8Mg367VWwYdiuG+4RaSebzs7+jFb7/
Qqd+g6mF1Uz2LnK3rfX08dulhcFl3vwL0SyW+At+C2qe
"""
)
# file activate.xsh
ACTIVATE_XSH = convert(
"""
eJyFU11rwjAUfc+vuIt9sMz1Bwg+OCYoaB2bkw2RkNlbLdSkJLFsjP33JdV+aN2Wh5L7eW7PuaGU
vkqhd8A3Jsm5QdAblWQGYqkgT5Q58BRFTiklsZJ7+HDJgZEy1ZDsM6kMbNEwjRlwDex0JyTCGFiE
ZdcuV1vt9wnYk8RAs89IbigkAniacI36GHInwrR0rk55a1IWel9BEHwHFqZL2Xz6wJaTp8XLcMoe
h4sx7QGlft3Jc04YgNfKPAO7Ev4f7m0xnofj+WzUBq1Cbegq9NcAdVJFVxkbhcuCtONc55x5jaS6
UkgRoTbq4IRACkKagnUrR13egWdMYygTb65rUavpBCEdOAiNtptSmGLOhYGcq4S/p6hJU/rV5RBr
n1xtavlq1BHS/CMbU5SxhocxalNa2jnSCw29prXqr4+OgEdR96zxbbW1Xd8aFuR+ErJwOBtZhB7Y
rRdmsFAH7IHCLOUbLCyfkIsFub4TJU2NtbB11lNEf5O+mPt8WwqNm8tx+UhsjbubnRRugLu9+5YP
6AcvDiI9
"""
)
# file activate.bat
ACTIVATE_BAT = convert(
"""
eJx9Ul9LhEAQfxf8DoOclI/dYyFkaCmcq4gZQTBUrincuZFbff12T133TM+nnd35/Zvxlr7XDFhV
mUZHOVhFlOWP3g4DUriIWoVomYZpNBWUtGpaWgImO191pFkSpzlcmgaI70jVX7n2Qp8tuByg+46O
CMHbMq64T+nmlJt082D1T44muCDk2prgEHF4mdI9RaS/QwSt3zSyIAaftRccvqVTBziD1x/WlPD5
xd729NDBb8Nr4DU9QNMKsJeH9pkhPedhQsIkDuCDCa6A+NF9IevVFAohkqizdHetg/tkWvPoftWJ
MCqnOxv7/x7Np6yv9P2Ker5dmX8yNyCkkWnbZy3N5LarczlqL8htx2EM9rQ/2H5BvIsIEi8OEG8U
+g8CsNTr
"""
)
# file deactivate.bat
DEACTIVATE_BAT = convert(
"""
eJyFkN0KgkAUhO8X9h0GQapXCIQEDQX/EBO6kso1F9KN3Or1201Si6JzN+fMGT5mxQ61gKgqSijp
mETup9nGDgo3yi29S90QjmhnEteOYb6AFNjdBC9xvoj9iTUd7lzWkDVrwFuYiZ15JiW8QiskSlbx
lpUo4sApXtlJGodJhqNQWW7k+Ou831ACNZrC6BeW+eXPNEbfl7OiXr6H/oHZZl4ceXHoToG0nuIM
pk+k4fAba/wd0Pr4P2CqyLeOlJ4iKfkJo6v/iaH9YzfPMEoeMG2RUA==
"""
)
# file activate.ps1
ACTIVATE_PS = convert(
"""
eJyNVMtu2zAQvOsrNrLQ2miloFcXPdiwgRhwHCN2c2kLgqZWMQGKFEhKqVHky3LoJ/UXSlqWX3LS
8ibtzHJ2d5Z/Xn53YLnmBjIuEPLSWFghpMqCUaVmmEKmVQ5ztVh/ho0qgVEpXVSXEriFlGtkVmwS
GCmwLk8fEkiuKbO8ohaTwnwKgsgwzQvbX95MFmQ+WN7AF4jyDZeVYtRyJZN8w1SeU5kmBbXrPWE4
WIzJaHLv8KYQ3MY+Cl2NRokK668w2qe9TpKwB/GcapQ2CLJSMp8dHoVaUdFPsZHV/WaeuGXrHxDN
lByhsbr0IewFvwJwh2fQte53fUVFNacrgX1yNx2Rh8n98utgur2xt0XXHH8ilFW/qfB12h6vMVeu
kAYJYQsaQmyYKnBXxJb5HFwQ2VTbJ0qkpOLallSQwg2vsC2Ze3Ad92rf4p/r5Rbzw4XfX2Mc6dw2
pqlrPHtoKfIpHOZ00ucsiAXS7KKaFhK1VprWBjDO29K5lClpuSzxXN1Vywan6jqwQJFBukNcvd2P
g8/exhWbVLGdlOe2XetwLaLY2LWLxDls/0JE9aPxpA6U0qAFrjUKrKi0e7ea4CAEYqlkeijQ7eRx
s9z4m1ULWj13waNPx9zpa1nVIxv/B8ebEJ7nvCZkOJmR2eB2TNzxMLIYzwkJ4cNRjno0Z1wncjEY
Tsdkfn93O182G3vevdc8eRhqGO56f7oRF4gn63GUqzWxS9d0YJCmQKHQmPGfYP0zicBK7R8pqCkf
YVW6t1TJ9/5FNYzq1D2uyT7Hk3bOidfKPc5hN+r+e0Wg14NwO3R8ElwejPjuPxbdu/EvkRDrCw==
"""
)
# file distutils-init.py
DISTUTILS_INIT = convert(
"""
eJytV21v5DQQ/p5fMaRCJLANcAcSqlghuBdUcRzo6BdUnSI3cXZNs3bO9m679+uZsbOJnWR7fKBS
u65nPC/PvK7YdUpbUCYR/mSOw/GBaSnkxiTJBaiuUjUHYUAqCwwOQts9a7k8wE7V+5avwCh44FAx
CXuDnBasgkbIGuyWg7F1K+5Q0LWTzaT9DG7wgdL3oCR0x+64QkaUv9sbC3ccdXjBeMssaG5EzQ0I
SeJQDkq77I52q+TXyCcawevLx+JYfIRaaF5ZpY8nP7ztSYIEyXYc1uhu0TG7LfobIhm7t6I1Jd0H
HP8oIbMJe+YFFmXZiJaXZb6CdBCQ5olohudS6R0dslhBDuuZEdnszSA/v0oAf07xKOiQpTcIaxCG
QQN0rLpnG0TQwucGWNdxpg1FA1H1+IEhHFpVMSsQfWb85dFYvhsF/YS+8NZwr710lpdlIaTh2mbf
rGDqFFxgdnxgV/D6h2ffukcIBUotDlwbVFQK2Sj4EbLnK/iud8px+TjhRzLcac7acvRpTdSiVawu
fVpkaTk6PzKmK3irJJ/atoIsRRL9kpw/f/u1fHn97tWLmz/e/Z3nTunoaWwSfmCuFTtWbYXkmFUD
z9NJMzUgLdF9YRHA7pjmgxByiWvv31RV8Zfa64q/xix449jOOz0JxejH2QB8HwQg8NgeO26SiDIL
heMpfndxuMFz5p0oKI1H1TGgi6CSwFiX6XgVgUEsBd2WjVa70msKFa56CPOnbZ5I9EnkZZL0jP5M
o1LwR9Tb51ssMfdmX8AL1R1d9Wje8gP2NSw7q8Xd3iKMxGL1cUShLDU/CBeKEo2KZRYh1efkY8U7
Cz+fJL7SWulRWseM6WvzFOBFqQMxScjhoFX0EaGLFSVKpWQjNuSXMEi4MvcCa3Jw4Y4ZbtAWuUl6
095iBAKrRga0Aw80OjAhqy3c7UVbl/zRwlgZUCtu5BcW7qV6gC3+YpPacOvwxFCZoJc7OVuaFQ84
U9SDgUuaMVuma2rGvoMRC3Y8rfb92HG6ee1qoNO8EY8YuL4mupbZBnst9eIUhT5/lnonYoyKSu12
TNbF6EGP2niBDVThcbjwyVG1GJ+RK4tYguqreUODkrXiIy9VRy3ZZIa3zbRC0W68LRAZzfQRQ4xt
HScmNbyY01XSjHUNt+8jNt6iSMw3aXAgVzybPVkFAc3/m4rZHRZvK+xpuhne5ZOKnz0YB0zUUClm
LrV9ILGjvsEUSfO48COQi2VYkyfCvBjc4Z++GXgB09sgQ9YQ5MJFoIVOfVaaqyQha2lHKn3huYFP
KBJb8VIYX/doeTHjSnBr8YkT34eZ07hCWMOimh6LPrMQar8cYTF0yojHdIw37nPavenXpxRHWABc
s0kXJujs0eKbKdcs4qdgR4yh1Y5dGCJlMdNoC5Y5NgvcbXD9adGIzAEzLy/iKbiszYPA/Wtm8UIJ
OEGYljt14Bk9z5OYROuXrLMF8zW3ey09W+JX0E+EHPFZSIMwvcYWHucYNtXSb8u4AtCAHRiLmNRn
1UCevMyoabqBiRt3tcYS9fFZUw/q4UEc/eW8N/X3Tn1YyyEec3NjpSeVWMXJOTNx5tWqcsNwLu5E
TM5hEMJTTuGZyMPGdQ5N+r7zBJpInqNJjbjGkUbUs+iGTEAt63+Ee2ZVbNMnwacF6yz4AXEZ/Ama
5RTNk7yefGB+5ESiAtoi/AE9+5LpjemBdfj0Ehf09Lzht5qzCwT9oL00zZZaWjzEWjfEwoU9mMiD
UbThVzZ34U7fXP+C315S91UcO9rAFLen4fr29OA9WnOyC1c8Zu5xNaLeyNo2WNvPmkCtc2ICqidc
zmg+LaPu/BXc9srfx9pJbJiSw5NZkgXxWMiyBWpyNjdmeRbmzb+31cHS
"""
)
# file distutils.cfg
DISTUTILS_CFG = convert(
"""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
"""
)
# file activate_this.py
ACTIVATE_THIS = convert(
"""
eJylVE1v2zAMvetXENqhNpZ5wHoL0EMOBdqh64Kt3RAEhaE4TMzOlgxJ+ULR/z7Sdpr041BsOUSS
9fj0SD5Jaz0qIq1NRFiTjytToV3DwnkoVt6jjUA2om888v9QqduAgFssEtegTWJJIV9QhWnm0cyT
dAAPJ3n7Jc9PhvC0/5hmSt3wCgpjYYawCjiHTYkWdm4F9SpE+QS8iVsKkewSxrtYOnt8/gCsi0z6
TOuM7OemhWZKa62obpyP4MJ+Fiji03wXlIp+N1TAv71ShdsCmwjXpsZz753vtr0hljQKAX0kZ9ud
RE+O9f5TKVKdKvUBOCcOnEsCEB2MRzcX0NKAwIBHsoHm2CYsoDl5LKLzu1TxMuclnHGeWWNimfHK
svxkvzazIGOyl5Cmire4YOSdnWo5Td8d4gM22b0jm0x76jv4CIeAbIkx6YIGoHWahaaimByCmV7N
DFfktaKesM257xtI4zhBT8sygpm59YsMn2n9dfnj5nZ0lZ9f/2olyzlCZubzYzNAH1Cza0Pb9U+N
Kf6YJUp5BVg6blvT26ozRI1FaSyFWl3+zMeT8YT5SxNMjD5hs3Cyza7Z5Wv0gS2Qk1047h5jv05u
Lr5fM5pRWVOZyHemzkI0PoYNceH1vVkbxtICnuCdr0Ra3ksLRwVr6y/J8alXNJNKH2cRmAyrjk6U
vp/sNUvALpqpfl++zALOzkBvyJ5+0S2oO5JxXcx/piDhBwHvJas6sq55D486E6EmSo+yvjnT4eld
+saBii/aWlLEDi7cqRJUxg6SkW2XPBPB2wzke1zlHLyg7b5C7UIdpkdu/CYmFpcxKb9tTFeHvfEW
bEt+khbtQs4f8N0GrneByuKGWSp+9I7V9bPpUAw/pfZFJgkSODeE2qdQSDg5uatvYvb76i9zKfxE
"""
)
# file python-config
PYTHON_CONFIG = convert(
"""
eJyNVV1P2zAUfc+v8ODBiSABxlulTipbO6p1LWqBgVhlhcZpPYUkctzSivHfd6+dpGloGH2Ja/ue
e+65Hz78xNhtf3x90xmw7vCWsRPGLvpDNuz87MKfdKMWSWxZ4ilNpCLZJiuWc66SVFUOZkkcirll
rfxIBAzOMtImDzSVPBRrekwoX/OZu/0r4lm0DHiG60g86u8sjPw5rCyy86NRkB8QuuBRSqfAKESn
3orLTCQxE3GYkC9tYp8fk89OSwNsmXgizrhUtnumeSgeo5GbLUMk49Rv+2nK48Cm/qMwfp333J2/
dVcAGE0CIQHBsgIeEr4Wij0LtWDLzJ9ze5YEvH2WI6CHTAVcSu9ZCsXtgxu81CIvp6/k4eXsdfo7
PvDCRD75yi41QitfzlcPp1OI7i/1/iQitqnr0iMgQ+A6wa+IKwwdxyk9IiXNAzgquTFU8NIxAVjM
osm1Zz526e+shQ4hKRVci69nPC3Kw4NQEmkQ65E7OodxorSvxjvpBjQHDmWFIQ1mlmzlS5vedseT
/mgIEsMJ7Lxz2bLAF9M5xeLEhdbHxpWOw0GdkJApMVBRF1y+a0z3c9WZPAXGFcFrJgCIB+024uad
0CrzmEoRa3Ub4swNIHPGf7QDV+2uj2OiFWsChgCwjKqN6rp5izpbH6Wc1O1TclQTP/XVwi6anTr1
1sbubjZLI1+VptPSdCfwnFBrB1jvebrTA9uUhU2/9gad7xPqeFkaQcnnLbCViZK8d7R1kxzFrIJV
8EaLYmKYpvGVkig+3C5HCXbM1jGCGekiM2pRCVPyRyXYdPf6kcbWEQ36F5V4Gq9N7icNNw+JHwRE
LTgxRXACpvnQv/PuT0xCCAywY/K4hE6Now2qDwaSE5FB+1agsoUveYDepS83qFcF1NufvULD3fTl
g6Hgf7WBt6lzMeiyyWVn3P1WVbwaczHmTzE9A5SyItTVgFYyvs/L/fXlaNgbw8v3azT+0eikVlWD
/vBHbzQumP23uBCjsYdrL9OWARwxs/nuLOzeXbPJTa/Xv6sUmQir5pC1YRLz3eA+CD8Z0XpcW8v9
MZWF36ryyXXf3yBIz6nzqz8Muyz0m5Qj7OexfYo/Ph3LqvkHUg7AuA==
"""
)
# Mach-O magic numbers for thin binaries, both word sizes and byte orders.
MH_MAGIC = 0xFEEDFACE  # 32-bit, big-endian
MH_CIGAM = 0xCEFAEDFE  # 32-bit, little-endian (byte-swapped magic)
MH_MAGIC_64 = 0xFEEDFACF  # 64-bit, big-endian
MH_CIGAM_64 = 0xCFFAEDFE  # 64-bit, little-endian
FAT_MAGIC = 0xCAFEBABE  # multi-architecture ("fat") binary
# struct byte-order prefixes used when unpacking header fields.
BIG_ENDIAN = ">"
LITTLE_ENDIAN = "<"
LC_LOAD_DYLIB = 0xC  # load-command id for a dynamically linked shared library
# Largest representable int: sys.maxsize on Python 3, sys.maxint on Python 2.
maxint = MAJOR == 3 and getattr(sys, "maxsize") or getattr(sys, "maxint")
class FileView(object):
    """
    Restrict a file-like object to the byte window [start, start + size).

    Offsets exposed through tell/seek are relative to the window start;
    every read, write and seek is validated against the window bounds.
    Modified from macholib.
    """

    def __init__(self, file_obj, start=0, size=maxint):
        # Unwrap nested views so we always operate on the real file object.
        if isinstance(file_obj, FileView):
            self._file_obj = file_obj._file_obj
        else:
            self._file_obj = file_obj
        self._start = start
        self._end = start + size
        self._pos = 0

    def __repr__(self):
        return "<fileview [{:d}, {:d}] {!r}>".format(self._start, self._end, self._file_obj)

    def tell(self):
        # Position relative to the start of the window.
        return self._pos

    def _checkwindow(self, seek_to, op):
        # Absolute offsets must stay within [start, end]; `op` names the
        # operation for the error message.
        if self._start <= seek_to <= self._end:
            return
        raise IOError(
            "{} to offset {:d} is outside window [{:d}, {:d}]".format(op, seek_to, self._start, self._end)
        )

    def seek(self, offset, whence=0):
        if whence == os.SEEK_SET:
            target = self._start + offset
        elif whence == os.SEEK_CUR:
            target = self._start + self._pos + offset
        elif whence == os.SEEK_END:
            target = self._end + offset
        else:
            raise IOError("Invalid whence argument to seek: {!r}".format(whence))
        self._checkwindow(target, "seek")
        self._file_obj.seek(target)
        self._pos = target - self._start

    def write(self, content):
        absolute = self._start + self._pos
        # Both ends of the written span must lie inside the window.
        self._checkwindow(absolute, "write")
        self._checkwindow(absolute + len(content), "write")
        self._file_obj.seek(absolute, os.SEEK_SET)
        self._file_obj.write(content)
        self._pos += len(content)

    def read(self, size=maxint):
        assert size >= 0
        absolute = self._start + self._pos
        self._checkwindow(absolute, "read")
        # Clamp the request so it never reads past the window end.
        self._file_obj.seek(absolute, os.SEEK_SET)
        chunk = self._file_obj.read(min(size, self._end - absolute))
        self._pos += len(chunk)
        return chunk
def read_data(file, endian, num=1):
    """Read ``num`` 32-bit unsigned integers from ``file``.

    ``endian`` is a struct byte-order prefix ('>' or '<'). A single value
    is returned unwrapped; otherwise a tuple of integers is returned.
    """
    fmt = endian + "L" * num
    values = struct.unpack(fmt, file.read(num * 4))
    return values[0] if len(values) == 1 else values
def mach_o_change(at_path, what, value):
    """
    Replace a given name (what) in any LC_LOAD_DYLIB command found in
    the given binary with a new name (value), provided it's shorter.

    Handles thin 32/64-bit Mach-O files in either byte order as well as
    fat (multi-architecture) binaries; the file at ``at_path`` is patched
    in place. The replacement is written NUL-terminated over the old name,
    so trailing bytes of a longer original name remain after the NUL.
    """
    # Patch the load commands of one (thin) Mach-O image; the magic number
    # has already been consumed by the caller.
    def do_macho(file, bits, endian):
        # Read Mach-O header (the magic number is assumed read by the caller)
        cpu_type, cpu_sub_type, file_type, n_commands, size_of_commands, flags = read_data(file, endian, 6)
        # 64-bits header has one more field.
        if bits == 64:
            read_data(file, endian)
        # The header is followed by n commands
        for _ in range(n_commands):
            where = file.tell()
            # Read command header
            cmd, cmd_size = read_data(file, endian, 2)
            if cmd == LC_LOAD_DYLIB:
                # The first data field in LC_LOAD_DYLIB commands is the
                # offset of the name, starting from the beginning of the
                # command.
                name_offset = read_data(file, endian)
                file.seek(where + name_offset, os.SEEK_SET)
                # Read the NUL terminated string
                load = file.read(cmd_size - name_offset).decode()
                load = load[: load.index("\0")]
                # If the string is what is being replaced, overwrite it.
                if load == what:
                    file.seek(where + name_offset, os.SEEK_SET)
                    file.write(value.encode() + "\0".encode())
            # Seek to the next command
            file.seek(where + cmd_size, os.SEEK_SET)
    # Dispatch on the magic number; recurses into each slice of a fat binary
    # via a FileView restricted to that slice.
    def do_file(file, offset=0, size=maxint):
        file = FileView(file, offset, size)
        # Read magic number
        magic = read_data(file, BIG_ENDIAN)
        if magic == FAT_MAGIC:
            # Fat binaries contain nfat_arch Mach-O binaries
            n_fat_arch = read_data(file, BIG_ENDIAN)
            for _ in range(n_fat_arch):
                # Read arch header
                cpu_type, cpu_sub_type, offset, size, align = read_data(file, BIG_ENDIAN, 5)
                do_file(file, offset, size)
        elif magic == MH_MAGIC:
            do_macho(file, 32, BIG_ENDIAN)
        elif magic == MH_CIGAM:
            do_macho(file, 32, LITTLE_ENDIAN)
        elif magic == MH_MAGIC_64:
            do_macho(file, 64, BIG_ENDIAN)
        elif magic == MH_CIGAM_64:
            do_macho(file, 64, LITTLE_ENDIAN)
    # In-place overwrite: the replacement must fit inside the old name.
    assert len(what) >= len(value)
    with open(at_path, "r+b") as f:
        do_file(f)
if __name__ == "__main__":
main()
| [
"sanaawan@ittc.ku.edu"
] | sanaawan@ittc.ku.edu |
a9c0c46edad6ba923dfb0354bc47459129d856c1 | 6a7563ad479e2c3d497d62e91f418d245ec658df | /scratch/indexoper.py | 0650e8b9aa3b7800c20f1bb25cf51fcdc44417d2 | [] | no_license | rosoba/rosoba | 979901ab4858c1559e7ae9c214fb60ca71eec9b5 | b26ae5b6b0f9b7027f306af7da9d1aff1c3e2a46 | refs/heads/master | 2021-01-19T18:36:04.107879 | 2016-01-20T09:48:48 | 2016-01-20T09:48:48 | 4,391,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | '''
Created on Nov 25, 2013
@author: rch
'''
import numpy as np
# Edge list: each row holds the (start, end) node indices of one segment.
L = np.array([[0, 1],
              [1, 2],
              [2, 3]], dtype='i')
# Node coordinates, one 3D point per row (corners of a unit square, z = 0).
X = np.array([[0, 0, 0],
              [1, 0, 0],
              [1, 1, 0],
              [0, 1, 0]], dtype='f')
# Fancy indexing with L.T (shape 2x3) yields an array of shape (2, 3, 3);
# unpacking its first axis gives the start and end points of every edge.
u_i, u_j = X[L.T]
print u_j  # Python 2 print statement — this scratch file targets Py2
| [
"rostislav.chudoba@rwth-aachen.de"
] | rostislav.chudoba@rwth-aachen.de |
053ad40b16939d38bd80d77c8b759c34b030b3ee | 4ab16447a03a85c3fdc4a016f6fa481756eeeb70 | /src/python/test/yolov3debugloss.py | 279d4b8c3f09e71aedfc600ee8acaf5578b620e9 | [] | no_license | phildue/cnn_gate_detection | 3cd4ae9efde53dbef1aa41b9f7ba5e2875dc80a7 | 9f872b18595e8cd8389d0d1733ee745c017deb3b | refs/heads/master | 2021-03-27T19:34:04.169369 | 2018-12-19T09:32:43 | 2018-12-19T09:32:43 | 112,591,501 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,141 | py | import pprint as pp
from pathlib import Path
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN, ReduceLROnPlateau, CSVLogger, \
History
from keras.optimizers import Adam
from modelzoo.GateNetDecoder import GateNetDecoder
from modelzoo.GateNetEncoder import Encoder
from modelzoo.Preprocessor import Preprocessor
from modelzoo.build_model import build_detector
from modelzoo.metrics.AveragePrecisionGateNet import AveragePrecisionGateNet
from modelzoo.metrics.GateDetectionLoss import GateDetectionLoss
from utils.fileaccess.GateGenerator import GateGenerator
from utils.fileaccess.utils import create_dirs, save_file
from utils.imageprocessing.transform.RandomEnsemble import RandomEnsemble
from utils.imageprocessing.transform.TransformResize import TransformResize
from utils.labels.ImgLabel import ImgLabel
from utils.workdir import cd_work
cd_work()  # project helper: switch into the repository working directory
# Network input resolution (height, width); also used as the label
# normalization range below.
img_res = 416, 416
# Anchor-box prior dimensions in pixels, three anchors per prediction grid
# (first row for the first output grid, second row for the second) —
# mirrors the two-scale YOLOv3 anchor layout this debug script reproduces.
anchors = np.array([
    [[81, 82],
     [135, 169],
     [344, 319]],
    [[10, 14],
     [23, 27],
     [37, 58]],
])
# Layer-by-layer network specification consumed by build_detector. Each dict
# names a layer type; 'predict' marks a detection head (two heads -> two
# output grids, matching the two anchor rows above) and 'route' re-injects
# the feature map of the layer at the given index. Commented 'max_pool'
# entries were replaced by strided convolutions.
architecture = [
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 4, 'strides': (2, 2), 'alpha': 0.1},
    # {'name': 'max_pool', 'size': (2, 2)},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 8, 'strides': (2, 2), 'alpha': 0.1},
    # {'name': 'max_pool', 'size': (2, 2)},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (2, 2), 'alpha': 0.1},
    # {'name': 'max_pool', 'size': (2, 2)},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 24, 'strides': (2, 2), 'alpha': 0.1},
    # {'name': 'max_pool', 'size': (2, 2)},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 32, 'strides': (2, 2), 'alpha': 0.1},
    # {'name': 'max_pool', 'size': (2, 2)},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 64, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'predict'},
    {'name': 'route', 'index': [3]},
    {'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 64, 'strides': (1, 1), 'alpha': 0.1},
    # {'name': 'upsample', 'size': 2},
    # {'name': 'route', 'index': [-1, 8]},
    {'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
    {'name': 'predict'}
]
"""
Model
"""
model, output_grids = build_detector(img_shape=(img_res[0], img_res[1], 3), architecture=architecture, anchors=anchors,
n_polygon=4)
encoder = Encoder(anchor_dims=anchors, img_norm=img_res, grids=output_grids, n_polygon=4, iou_min=0.4)
decoder = GateNetDecoder(anchor_dims=anchors, norm=img_res, grid=output_grids, n_polygon=4)
preprocessor = Preprocessor(preprocessing=[TransformResize(img_res)], encoder=encoder, n_classes=1, img_shape=img_res, color_format='bgr')
loss = GateDetectionLoss()
"""
Datasets
"""
image_source = ['resource/ext/samples/daylight_course1',
'resource/ext/samples/daylight_course5',
'resource/ext/samples/daylight_course3',
'resource/ext/samples/iros2018_course1',
'resource/ext/samples/iros2018_course5',
'resource/ext/samples/iros2018_flights',
'resource/ext/samples/basement_course3',
'resource/ext/samples/basement_course1',
'resource/ext/samples/iros2018_course3_test',
'resource/ext/samples/various_environments20k',
# 'resource/ext/samples/realbg20k'
]
batch_size = 16
n_samples = None
subsets = None
min_obj_size = 0.001
max_obj_size = 2
min_aspect_ratio = 0.3
max_aspect_ratio = 3.0
# NOTE(review): shadows the builtin `filter`; the name is kept because it is
# passed as `filter=filter` to GateGenerator below.
def filter(label):
    """Keep only ground-truth objects that are plausibly detectable.

    An object survives when (a) its bounding-polygon area relative to the
    image area lies in (min_obj_size, max_obj_size), (b) its height/width
    aspect ratio lies in (min_aspect_ratio, max_aspect_ratio), and (c) at
    most two of its polygon corners fall outside the image frame. Reads
    the module-level threshold constants and img_res.

    :param label: ImgLabel whose .objects carry polygon annotations
    :return: a new ImgLabel containing only the surviving objects
    """
    frame_area = img_res[0] * img_res[1]
    kept = []
    for obj in label.objects:
        area_frac = (obj.poly.height * obj.poly.width) / frame_area
        if not (min_obj_size < area_frac < max_obj_size):
            continue
        aspect = obj.poly.height / obj.poly.width
        if not (min_aspect_ratio < aspect < max_aspect_ratio):
            continue
        pts = obj.poly.points
        # Count corners outside the horizontal and vertical image bounds;
        # more than two out-of-frame corners means the gate is barely visible.
        n_outside = (len(pts[(pts[:, 0] < 0) | (pts[:, 0] > img_res[1])])
                     + len(pts[(pts[:, 1] < 0) | (pts[:, 1] > img_res[0])]))
        if n_outside > 2:
            continue
        kept.append(obj)
    return ImgLabel(kept)
valid_frac = 0.005  # fraction of samples held out for validation
# Streams (image, label) batches from disk; receives the `filter` callable
# defined above.
train_gen = GateGenerator(image_source, batch_size=batch_size, valid_frac=valid_frac,
                          color_format='bgr', label_format='xml', n_samples=n_samples,
                          remove_filtered=False, max_empty=0, filter=filter, subsets=subsets)
"""
Paths
"""
work_dir = 'testv3'
model_dir = 'out/' + work_dir + '/'
create_dirs([model_dir])
"""
Training Config
"""
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.005)
def average_precision06(y_true, y_pred):
return AveragePrecisionGateNet(batch_size=batch_size, n_boxes=encoder.n_boxes, grid=output_grids,
norm=img_res, iou_thresh=0.6).compute(y_true, y_pred)
model.compile(optimizer=optimizer,
loss=loss.compute,
metrics=[average_precision06, loss.localization_loss, loss.confidence_loss])
initial_epoch = 0
epochs = 100
log_file_name = model_dir + '/log.csv'
append = Path(log_file_name).is_file() and initial_epoch > 0
callbacks = [
EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3, mode='min',
verbose=1),
ModelCheckpoint(model_dir, monitor='val_loss', verbose=1,
save_best_only=True,
mode='min', save_weights_only=False,
period=1),
TensorBoard(batch_size=batch_size, log_dir=model_dir, write_images=True,
histogram_freq=0),
TerminateOnNaN(),
ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, min_lr=0.00001),
CSVLogger(log_file_name, append=append),
History()
]
if isinstance(preprocessor.augmenter, RandomEnsemble):
augmentation = ''
augmenters = preprocessor.augmenter.augmenters
probs = preprocessor.augmenter.probs
for i in range(len(augmenters)):
augmentation += '\n{0:.2f} -> {1:s}'.format(probs[i], augmenters[i].__class__.__name__)
else:
augmentation = preprocessor.augmenter.__class__.__name__
summary = {'resolution': img_res,
'image_source': train_gen.source_dir,
'color_format': train_gen.color_format,
'batch_size': train_gen.batch_size,
'n_samples': train_gen.n_samples,
'transform': augmentation,
'initial_epoch': initial_epoch,
'epochs': epochs,
'weights': model.count_params(),
'architecture': architecture,
'anchors': anchors,
'img_res': img_res,
'grid': output_grids,
# 'valid_set': validation_set,
'min_obj_size': min_obj_size,
'max_obj_size': max_obj_size,
'max_aspect_ratio': max_aspect_ratio,
'min_aspect_ratio': min_aspect_ratio}
pp.pprint(summary)
save_file(summary, 'summary.txt', model_dir, verbose=False)
save_file(summary, 'summary.pkl', model_dir, verbose=False)
model.summary()
model.fit_generator(
generator=preprocessor.preprocess_train_generator(train_gen.generate()),
steps_per_epoch=(train_gen.n_samples / batch_size),
epochs=epochs,
initial_epoch=initial_epoch,
verbose=1,
validation_data=preprocessor.preprocess_train_generator(train_gen.generate_valid()),
validation_steps=int(train_gen.n_samples * train_gen.valid_frac) / batch_size,
callbacks=callbacks)
| [
"phild@protonmail.com"
] | phild@protonmail.com |
4a17b85bb8379fc9ce9ca619292237c86f7c8a04 | 75e518cb1c38cbf8bc55b5e5bb186bcf4412f240 | /migrations/versions/852cea8a2a22_initial_migration.py | 181aa2c80e51866462ceae6d38c2dafa9ee2b0a6 | [
"MIT"
] | permissive | adriankiprono/pitches_project | 984e8381b78c711bf20f84380fa83c26a1392f1b | a3102a7b1d618ad35b981414049c3ead0b5ecc3c | refs/heads/master | 2022-10-07T17:11:54.302322 | 2019-12-04T11:12:18 | 2019-12-04T11:12:18 | 223,401,174 | 0 | 0 | MIT | 2022-09-16T18:13:55 | 2019-11-22T12:44:29 | Python | UTF-8 | Python | false | false | 1,141 | py | """Initial Migration
Revision ID: 852cea8a2a22
Revises: a7cab0911f2f
Create Date: 2019-11-28 11:12:51.171433
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '852cea8a2a22'
down_revision = 'a7cab0911f2f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('comment', sa.VARCHAR(length=1000), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('pitch', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['pitch'], ['pitches.id'], name='comments_pitch_fkey'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='comments_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='comments_pkey')
)
# ### end Alembic commands ###
| [
"tuimuradrian6@gmail.com"
] | tuimuradrian6@gmail.com |
6ec735f143588e6adf5da9dd01457a246fb174ae | ea42ec421c74c273ef1e614dff447076ddd4f69a | /Week_04/126.Word-ladder-II.py | 4565ffe65c268efe85726068a328c969c18c9e36 | [] | no_license | youwithouto/algorithm021 | 5278262f0909914c7e6d6eb58d709fc173b29991 | 81e526385eb3464cbe173ea145badd4be20879af | refs/heads/main | 2023-03-01T08:07:07.526371 | 2021-02-06T04:31:30 | 2021-02-06T04:31:30 | 316,673,717 | 0 | 0 | null | 2020-11-28T06:40:55 | 2020-11-28T06:40:54 | null | UTF-8 | Python | false | false | 939 | py | import collections
class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
wordSet = set(wordList)
result = []
layer = {}
layer[beginWord] = [[beginWord]]
while layer:
newLayer = collections.defaultdict(list)
for word in layer:
if word == endWord:
result.extend(k for k in layer[word])
else:
for i in range(len(word)):
for c in 'abcdefghijklmnopqrstuvwxyz':
nextWord = word[:i] + c + word[i + 1:]
if nextWord in wordSet:
newLayer[nextWord] += [j + [nextWord]
for j in layer[word]]
wordSet -= set(newLayer.keys())
layer = newLayer
return result
| [
"youwithouto.z@gmail.com"
] | youwithouto.z@gmail.com |
d279b1982813b325d1db6a5a7844f00fbfd6d590 | e5202e0f36c15b8898920a461a866168fa059947 | /lblnew/n2o/conc_3.2e-07/band07_wn_1215_1380/nv_1000/dv_0.001/ng_4/g_ascending_k_descending/refPTs_P_1_T_250__P_500_T_250/ng_refs_2__2/ng_adju_0__0/getabsth_auto__auto/absth_dlogN_uniform__dlogN_uniform/klin_2.22e-20/atmpro_saw/wgt_k_1/wgt_0.6_0.5__0.6_0.9/wgt_flux_1/w_diffuse_1.8_1.66__1.6_1.8/option_compute_ktable_0/option_compute_btable_0/crd_5014a19/param.py | e0566588dd0043ba9876e68e86cdd4000b75a9d9 | [] | no_license | qAp/analysis_-_new_kdist_param | 653c9873751646f6fa9481544e98ed6065a16155 | 272dc3667030cdb18664108d0bd78fee03736144 | refs/heads/master | 2021-06-11T04:21:35.105924 | 2019-08-04T13:13:07 | 2019-08-04T13:13:07 | 136,108,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/n2o/conc_3.2e-07/band07_wn_1215_1380/nv_1000/dv_0.001/ng_4/g_ascending_k_descending/refPTs_P_1_T_250__P_500_T_250/ng_refs_2__2/ng_adju_0__0/getabsth_auto__auto/absth_dlogN_uniform__dlogN_uniform/klin_2.22e-20/atmpro_saw/wgt_k_1/wgt_0.6_0.5__0.6_0.9/wgt_flux_1/w_diffuse_1.8_1.66__1.6_1.8/option_compute_ktable_0/option_compute_btable_0/crd_5014a19'
PARAM = {'atmpro': 'saw', 'band': '7', 'commitnumber': '5014a19', 'conc': 3.2e-07, 'dv': 0.001, 'klin': 2.22e-20, 'molecule': 'n2o', 'ng_adju': [0, 0], 'ng_refs': [2, 2], 'nv': 1000, 'option_compute_btable': 0, 'option_compute_ktable': 0, 'option_wgt_flux': 1, 'option_wgt_k': 1, 'ref_pts': [[1, 250], [500, 250]], 'tsfc': 257, 'vmax': 1380, 'vmin': 1215, 'w_diffuse': [[1.8, 1.66], [1.6, 1.8]], 'wgt': [[0.6, 0.5], [0.6, 0.9]]} | [
"llacque@gmail.com"
] | llacque@gmail.com |
40fc6c47643f9ed3130fe6e82adeaa39f9a4b23e | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v7_2_0/aaa_config/aaa/authentication/__init__.py | d5aa7c18701ef49ed13f411a6214d847e3494fbd | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import login
class authentication(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-aaa - based on the path /aaa-config/aaa/authentication. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__login',)
_yang_name = 'authentication'
_rest_name = 'authentication'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__login = YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'aaa-config', u'aaa', u'authentication']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'aaa', u'authentication']
def _get_login(self):
"""
Getter method for login, mapped from YANG variable /aaa_config/aaa/authentication/login (container)
"""
return self.__login
def _set_login(self, v, load=False):
"""
Setter method for login, mapped from YANG variable /aaa_config/aaa/authentication/login (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_login is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_login() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """login must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
})
self.__login = t
if hasattr(self, '_set'):
self._set()
def _unset_login(self):
self.__login = YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
login = __builtin__.property(_get_login, _set_login)
_pyangbind_elements = {'login': login, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
857884fcc3acbb7df2a7c6f6a680064ffe58729c | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/tutorial/control/summary.py | 92927577d2393d79f61a34f87903c70e13ad1416 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,249 | py | # 2015.11.10 21:30:56 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/tutorial/control/summary.py
from tutorial.control.functional import FunctionalVarSet
from tutorial.logger import LOG_ERROR, LOG_DEBUG
class _Flag(object):
def __init__(self, name, active, store = True):
super(_Flag, self).__init__()
self.name = name
self.active = active
self.store = store
def __repr__(self):
return '{0:>s}: {1!r:s}'.format(self.name, self.active)
def isActive(self):
return self.active
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class FlagSummary(object):
def __init__(self, flagNames, initial = None):
super(FlagSummary, self).__init__()
if flagNames is None:
flagNames = []
if initial is None:
initial = {}
self.__flags = {}
initialGetter = initial.get
for name in flagNames:
self.__flags[name] = _Flag(name, initialGetter(name, False))
return
def __repr__(self):
return 'FlagSummary({0:s}): {1!r:s}'.format(hex(id(self)), self.__flags.values())
def deactivateFlag(self, flagName):
LOG_DEBUG('Deactivate flag', flagName)
if flagName in self.__flags:
self.__flags[flagName].deactivate()
else:
self.__flags[flagName] = _Flag(flagName, False, store=False)
def activateFlag(self, flagName):
LOG_DEBUG('Activate flag: ', flagName)
if flagName in self.__flags:
self.__flags[flagName].activate()
else:
self.__flags[flagName] = _Flag(flagName, True, store=False)
def isActiveFlag(self, flagName):
activeFlag = False
if flagName in self.__flags:
activeFlag = self.__flags[flagName].isActive()
return activeFlag
def addFlag(self, flagName):
if flagName not in self.__flags:
self.__flags[flagName] = _Flag(flagName, False)
def getDict(self):
filtered = filter(lambda flag: flag.store, self.__flags.itervalues())
return dict(map(lambda flag: (flag.name, flag.active), filtered))
class VarSummary(object):
def __init__(self, varSets, runtime = None):
super(VarSummary, self).__init__()
if varSets:
self.__varSets = dict(map(lambda varSet: (varSet.getID(), FunctionalVarSet(varSet)), varSets))
else:
self.__varSets = {}
self.__runtime = runtime or {}
def get(self, varID, default = None):
if varID in self.__varSets:
result = self.__varSets[varID].getFirstActual()
else:
result = self.__runtime.get(varID, default)
return result
def set(self, varID, value):
if varID in self.__varSets:
LOG_ERROR('Var {0:>s} in not mutable.'.format(varID))
else:
LOG_DEBUG('Set var {0:>s}'.format(varID), value)
self.__runtime[varID] = value
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tutorial\control\summary.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:30:56 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
abec78e2a72aa4027585259773b61dd1fb5a4f12 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_148/ch88_2020_05_18_19_59_52_666753.py | e0172651bedd8c38edc188d258b10ee5ae48067b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | class Retangulo:
def __init__(self, x_coord, y_coord):
self.x = x_coord
self.y = y_coord
def calcula_perimetro(self):
a = self.x
b = self.y
return 2*(a+b)
def calcula_area(self):
c = self.x
d = self.y
return c*d
| [
"you@example.com"
] | you@example.com |
184f4705dd6a5a6cdcc98bd061041cf0dd48e763 | 6390cb7020af3efadfc55bf374316b39164c72e3 | /part2/2_4_7_exptced_conditions.py | 5044b10932ff08bc199440db7ea7177d8000278d | [] | no_license | andrewnnov/stepik_qa | 37fb0808eb30eb807fd951efa0716589ce8a42fa | 689dd383793aeb20e88ce89ff56ff6db263615bd | refs/heads/main | 2023-06-19T00:22:09.292291 | 2021-07-18T19:34:46 | 2021-07-18T19:34:46 | 383,043,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
try:
link = "http://suninjuly.github.io/wait2.html"
browser = webdriver.Chrome("C:\Projects\stepik_qa\driver\chromedriver.exe")
browser.get(link)
# говорим Selenium проверять в течение 5 секунд, пока кнопка не станет кликабельной
button = WebDriverWait(browser, 5).until(
EC.element_to_be_clickable((By.ID, "verify"))
)
button.click()
message = browser.find_element_by_id("verify_message")
assert "successful" in message.text
finally:
# ожидание чтобы визуально оценить результаты прохождения скрипта
time.sleep(10)
# закрываем браузер после всех манипуляций
browser.quit()
| [
"andrewnnov@yandex.ru"
] | andrewnnov@yandex.ru |
ad2368024d7faf371220f45fba13ace22d01cb63 | c1847b5eced044ee1c03c9cd32bf336f38d6b17c | /apptools/apptools-ios-tests/apptools/target_platforms.py | beb6bb6c94a3e7beb4a333dfa7443c75af97151d | [
"BSD-3-Clause"
] | permissive | JianfengXu/crosswalk-test-suite | 60810f342adc009bbe249bc38e2153b1f44b5d68 | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | refs/heads/master | 2021-01-17T04:51:47.366368 | 2015-08-31T02:07:47 | 2015-08-31T02:07:47 | 17,897,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import commands
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_list_target_platforms(self):
comm.setUp()
os.chdir(comm.XwalkPath)
cmd = comm.PackTools + "crosswalk-app platforms"
status = os.popen(cmd).readlines()
self.assertEquals("ios", status[0].strip(" *\n"))
self.assertEquals("android", status[1].strip(" *\n"))
self.assertEquals("windows", status[2].strip(" *\n"))
if __name__ == '__main__':
unittest.main()
| [
"yunx.liu@intel.com"
] | yunx.liu@intel.com |
8920296ee5cb27a4b0e22e713224727923678238 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/disassociate_subnet_firewall_request_body.py | 06e2bb56a2ca9a526a1f1210ffb3bc9cc8974413 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,355 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DisassociateSubnetFirewallRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'subnets': 'list[FirewallAssociation]'
}
attribute_map = {
'subnets': 'subnets'
}
def __init__(self, subnets=None):
"""DisassociateSubnetFirewallRequestBody
The model defined in huaweicloud sdk
:param subnets: 解绑ACL的子网列表
:type subnets: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
"""
self._subnets = None
self.discriminator = None
self.subnets = subnets
@property
def subnets(self):
"""Gets the subnets of this DisassociateSubnetFirewallRequestBody.
解绑ACL的子网列表
:return: The subnets of this DisassociateSubnetFirewallRequestBody.
:rtype: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
"""
return self._subnets
@subnets.setter
def subnets(self, subnets):
"""Sets the subnets of this DisassociateSubnetFirewallRequestBody.
解绑ACL的子网列表
:param subnets: The subnets of this DisassociateSubnetFirewallRequestBody.
:type subnets: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
"""
self._subnets = subnets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DisassociateSubnetFirewallRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
b6c0a97616dabb5cfc7b0ef8dc9ffc6d4a2d0bf3 | 7b971c4465797ef3561c69d7dd819fcd5d2a3736 | /sbmgraphs.py | bd07584ba679e7c73b667477b4ac368082fdc166 | [] | no_license | rkdarst/pcd | 978d898a3ab1a1fd8fbb48e644234d66fe7ab95f | 54dda49ce8a248446b236a42b9d313ce410cf58b | refs/heads/master | 2021-01-23T12:38:09.042817 | 2016-11-01T18:02:25 | 2016-11-01T18:02:25 | 22,289,583 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 17,660 | py | # Richard Darst, July 2012
import collections
import math
import random
from scipy.stats import binom
import networkx
from pcd import nxutil
from pcd import cmty
def product(l):
    """Return the product of the elements of iterable `l` (1 if empty).

    Replaces the original ``lambda`` that used the bare ``reduce``
    builtin, which only exists in Python 2.
    """
    result = 1
    for x in l:
        result = result * x
    return result
# Detectability-related functions.
from math import sqrt
def k_out_limit(kin, q):
    """Return kin - (sqrt((q-1)**2 + 4*q*kin) - (q-1))/2.

    Per the function name this is the between-community degree threshold
    below which q equal communities with mean in-degree `kin` are
    detectable (formula not re-derived here).
    """
    return kin - .5*(sqrt((q - 1)**2 + 4*q*kin) - (q - 1))


# NOTE: the original file defined 2-argument k_in/k_out here and then
# immediately shadowed them with the q-parameterized versions below; the
# dead duplicates have been removed (q=2 reproduces the old behavior).

def k_in(epsilon, ktot, q=2):
    """Within-community degree for total degree `ktot`, q communities,
    and mixing ratio epsilon = k_out/k_in:  ktot / ((q-1)*epsilon + 1)."""
    return ktot/float((q - 1)*epsilon + 1)


def k_out(epsilon, ktot, q=2):
    """Per-community between-community degree for total degree `ktot`,
    q communities, and mixing ratio epsilon = k_out/k_in:
    ktot*epsilon / ((q-1)*epsilon + 1).

    Identity: k_in + (q-1)*k_out == ktot.
    """
    return ktot*epsilon/float((q - 1)*epsilon + 1)
from scipy.special import erf
def frac_detectable(pin, pout, n):
if pin-pout > (1/sqrt(n))*sqrt(pin+(2-1)*pout):
x = .5*( 1+erf(sqrt(_alpha2(pin,pout,n) / float(2*(1-_alpha2(pin,pout,n))))) )
return x
return .5
def _alpha2(pin, pout, n):
return ((2*n*pin - 2*n*pout)**2 - 2*(2*n*pin + 2*n*pout)) / float((2*n*pin-2*n*pout)**2)
def frac_detectable_q(pin,pout,q,n):
    # Intended generalization of frac_detectable to q communities; the
    # expression below does not hold for general q (per the message), so
    # calling this always raises.
    raise NotImplementedError("does not work for general q")
    # Unreachable: kept only as a sketch of the intended formula.
    return .5*( 1+erf(sqrt(_alpha22(pin,pout,q,n) / float(2*(1-_alpha22(pin,pout,q,n))))))
def _alpha22(pin,pout, q, n):
    # q-community analogue of _alpha2, disabled for the same reason; the
    # return statement below is unreachable.
    raise NotImplementedError("does not work for general q")
    return ( (q*n*pin - q*n*pout)**2 - 2*(q*n*pin + q*n*pout) ) / ( (q*n*pin-q*n*pout)**2 )
#def partition_nodes(U, q, n):
# """Partition a set of nodes
#
# Given a set of nodes, return min(N//Nsets, nodesPerSet) equal
# partitions of the nodes. This function does allow overlaps.
#
# Algorithm: Divide U into U//q (approximately) equal size
# communities first, spanning the system. Then, choose
# """
# U = list(U)
# random.shuffle(U)
# N = len(U)//q
# N = min(N, n)
# sets = [ U[N*i:N*(i+1)] for i in range(q) ]
# print " ", [len(s) for s in sets]
# for s in sets:
# while len(s) < n:
# node = random.choice(U)
# if node in s:
# continue
# s.append(node)
# print " ", [len(s) for s in sets]
# return sets
def makeCommunities(U, q, n):
    """Partition the node universe U into q communities of size n each,
    allowing overlaps.

    U is first covered by q disjoint, approximately equal slices of the
    sorted node list (so every node is in at least one community); each
    slice is then topped off to exactly n members with nodes sampled
    uniformly from the rest of U, so a node may end up in several
    communities.

    Raises NotImplementedError if n is not a single number.

    Bug fixes vs. the original: the old code ran ``min(n)`` and
    ``sum(n) > U`` on the scalar n (TypeError whenever asserts were
    enabled), and sampled directly from a set (unsupported since
    Python 3.11).
    """
    U = set(U)
    if not isinstance(n, (int, float)):
        # Per-community size lists are not supported.
        raise NotImplementedError("n must be a single community size")
    # Average size of the initial disjoint slices that cover U.
    n_initial = len(U) / float(q)
    assert n_initial <= n, "len(U)//q must not exceed n"
    Ulist = sorted(U)
    communities = []
    for i in range(q):
        lo = int(round(n_initial * i))
        hi = int(round(n_initial * (i + 1)))
        communities.append(set(Ulist[lo:hi]))
    # Sanity: the initial slices cover U exactly once.
    assert 0 == len(U - set.union(*communities))
    assert len(U) == sum(len(c) for c in communities)
    # Top off each community to exactly n members with random other
    # nodes.  sorted() gives random.sample a sequence (sets rejected in
    # Python 3.11+) and keeps results reproducible under a fixed seed.
    for nodes in communities:
        nodes |= set(random.sample(sorted(U - nodes), n - len(nodes)))
    return communities
def random_sample(nodes, n):
    """Return a set of n nodes drawn uniformly at random from `nodes`."""
    pool = list(nodes)
    chosen = random.sample(pool, n)
    return set(chosen)
def random_sample_q(nodes, q, n):
    """Return a list of q independent random samples of `nodes`, each a
    set of size n (samples may overlap with one another)."""
    pool = list(nodes)
    samples = []
    for _ in range(q):
        samples.append(set(random.sample(pool, n)))
    return samples
def add_edges(g, nodes, p, weighted=None):
    """Add edges among all pairs of `nodes`, each pair independently with
    probability p.

    If `weighted` is true, no random draw is made: every pair gets an
    edge with weight=None and p is appended to the edge's 'weights' list
    (mirroring add_edges_exact's weighted='exact' behavior; uses the
    networkx 1.x ``g.edge`` API).

    BUG FIX: the weighted branch originally referenced the undefined
    names ``a``/``b`` and raised NameError; it now uses the loop
    variables ``x``/``y``.
    """
    nodes = list(nodes)
    for i, x in enumerate(nodes):
        for y in nodes[i+1:]:
            if weighted:
                g.add_edge(x, y, weight=None)
                g.edge[x][y].setdefault('weights', []).append(p)
            elif random.uniform(0, 1) < p:
                g.add_edge(x, y)
def add_edges_exact(g, nodes, p, weighted=None,
                    links=None):
    """Add edges among `nodes` with a binomially distributed edge count:
    Binomial(n_pairs, p) edges are chosen uniformly without replacement
    from all candidate pairs and added via g.add_edges_from.

    If `links` is given (a collection of 2-element frozensets), those are
    the candidate pairs and `nodes` must be empty/None.

    weighted='exact': instead of sampling, add *every* candidate pair
    with weight=None and append p to the edge's 'weights' list (networkx
    1.x ``g.edge`` API).

    (Cleanup: dead ``e`` counters and commented-out debug prints from the
    original were removed; behavior is unchanged.)
    """
    if not links:
        links = set(frozenset((a, b))
                    for a in nodes for b in nodes
                    if a != b)
        assert len(links) == len(nodes) * (len(nodes)-1) / 2
    else:
        assert not nodes
    if weighted == 'exact':
        for a, b in links:
            g.add_edge(a, b, weight=None)
            g.edge[a][b].setdefault('weights', []).append(p)
            # networkx stores one shared dict per edge, so both
            # directions must agree.
            assert g.edge[b][a]['weights'] == g.edge[a][b]['weights']
        return
    links = list(links)
    nEdges = binom(len(links), p).rvs()
    edges = random.sample(links, nEdges)
    g.add_edges_from(edges)
def add_edges_exact_sparse(g, nodes, p, weighted=None, links=None):
    """Sparse equivalent of add_edges_exact: draw the edge count from
    Binomial(n_pairs, p), then rejection-sample random node pairs until
    that many distinct edges have been added.  Never enumerates all
    O(N^2) candidate pairs, so it is efficient for small p.

    `weighted` and explicit `links` are not supported (the original used
    bare ``raise`` statements here, which produce an unhelpful
    RuntimeError; they now raise NotImplementedError).  Only pairs drawn
    within this call are de-duplicated; edges already present in g are
    not checked and may simply be re-added.
    """
    if weighted:
        raise NotImplementedError("weighted is not supported")
    if links:
        raise NotImplementedError("explicit links are not supported")
    assert p <= 1
    # Materialize first so generator inputs work and len() is valid.
    nodes = list(nodes)
    # // keeps the pair count an int under Python 3 as well.
    nEdges = binom(len(nodes)*(len(nodes)-1)//2, p).rvs()
    added_edges = set()
    while len(added_edges) < nEdges:
        n1 = random.choice(nodes)
        n2 = random.choice(nodes)
        if n1 == n2:
            continue
        edge = frozenset((n1, n2))
        if edge in added_edges:
            continue
        g.add_edge(n1, n2)
        added_edges.add(edge)
def add_edges_cm(g, nodes, p, weighted=None):
    """Add edges among `nodes` in configuration-model style, so every
    node receives (approximately) the same number of edge stubs.

    The target edge count is drawn from Binomial(n_pairs, p), as in
    add_edges_exact.  Self-loops and multiple edges are avoided by
    re-inserting rejected stub pairs and giving up after repeated
    failures, so the final edge count may fall slightly short of the
    target.

    `weighted` is not supported.
    """
    if weighted: raise NotImplementedError("add_edges_cm + weighted")
    nodes = list(nodes)
    nNodes = len(nodes)
    # Number of edges: binomial over all unordered pairs of nodes.
    nEdges = binom(.5*len(nodes)*(len(nodes)-1), p).rvs()
    # Stub list: one entry per half-edge, 2*nEdges stubs split evenly
    # among the nodes (per-node rounding means the total may not be
    # exactly 2*nEdges).
    vertices = [ ]
    for n in nodes:
        vertices.extend([n] * int(round(2*nEdges/float(nNodes))))
    random.shuffle(vertices)
    tries = 0
    while True:
        # Stop when fewer than two stubs remain to pair up.
        if len(vertices) < 2:
            break
        # Unreachable: already covered by the < 2 test above.
        if len(vertices) == 0:
            break
        a = vertices.pop(0)
        b = vertices.pop(0)
        if g.has_edge(a, b) or a == b:
            # Rejected pair (duplicate edge or self-loop): give up after
            # more than 10 consecutive failures, otherwise put both
            # stubs back at random positions and retry.
            if tries > 10:
                break
            vertices.insert(random.randint(0, len(vertices)), a)
            vertices.insert(random.randint(0, len(vertices)), b)
            tries += 1
            continue
        g.add_edge(a, b)
        tries = 0
def add_edges_fixed(g, nodes, p, weighted=None, links=None):
    """Add exactly int(round(n_candidates * p)) edges, sampled uniformly
    without replacement from the candidate pairs.  Unlike
    add_edges_exact, the edge count is deterministic rather than
    binomially distributed; degrees are not controlled (no configuration
    model).  `links`, if given, supplies the candidate pairs and `nodes`
    must then be empty.  Weighted graphs are not supported."""
    if links:
        assert not nodes
        candidates = list(links)
    else:
        candidates = list(set(frozenset((u, v))
                              for u in nodes for v in nodes
                              if u != v))
        assert len(candidates) == len(nodes) * (len(nodes) - 1) / 2
    assert not weighted
    wanted = int(round(len(candidates) * p))
    for u, v in random.sample(candidates, wanted):
        g.add_edge(u, v)
def add_edges_out(g, p, g_layers, weighted=False,
                  edges_constructor=add_edges_exact,
                  non_overlapping=False):
    """Add edges between node pairs that share *no* community.

    Exhaustively enumerates every unordered pair of nodes in g; a pair
    is a candidate link iff the two nodes' 'cmtys' attribute sets are
    disjoint in every graph of g_layers (networkx 1.x node-attribute
    API).  The candidate links are then handed to `edges_constructor`,
    which adds each with probability p.

    `weighted` and `non_overlapping` are not implemented.

    (Cleanup: removed the unused ``nodes`` and ``_iterCmtys`` locals and
    replaced a bare ``raise`` with an explicit NotImplementedError.)
    """
    if weighted:
        raise NotImplementedError("weighted+add_pout")
    if non_overlapping:
        raise NotImplementedError("non_overlapping is not implemented")
    links = set()
    for n1, n1data in g.nodes_iter(data=True):
        for n2, n2data in g.nodes_iter(data=True):
            # Visit each unordered pair once (requires orderable nodes).
            if n2 <= n1:
                continue
            # Skip the pair if the nodes share a community in any layer.
            if any(g_.node[n1]['cmtys'] & g_.node[n2]['cmtys']
                   for g_ in g_layers):
                continue
            links.add((n1, n2))
    edges_constructor(g, nodes=None, p=p, weighted=weighted,
                      links=links)
def add_edges_out_sparse(g, p, g_layers, weighted=False,
                         edges_constructor=add_edges_exact,
                         non_overlapping=False):
    """Add inter-community edges by rejection sampling.

    Draws a binomial number of wanted external links, then repeatedly
    picks random node pairs, rejecting pairs that share a community or
    are already linked.  Efficient only when ``p`` is small (sparse).

    Limitations: exactly one layer in ``g_layers`` and a non-overlapping
    community structure; otherwise NotImplementedError is raised.
    ``edges_constructor`` and ``non_overlapping`` are accepted for
    signature compatibility but unused here.
    """
    if len(g_layers) != 1:
        raise NotImplementedError("len(g_layers) > 1 in this function")
    cmtys = cmty.Communities.from_networkx(g_layers[0])
    if not cmtys.is_non_overlapping():
        raise NotImplementedError("overlapping in this function")
    # Total number of pairs of nodes
    nodes = g.nodes()
    n_links = len(g)*(len(g)-1) / 2
    # subtract total number of links in communites (intra-community
    # pairs are never candidates).
    n_links -= sum(s*(s-1)/2 for c, s in cmtys.cmtysizes().iteritems())
    # NOTE(review): ``binom`` is presumably scipy.stats.binom — .rvs()
    # draws the number of external links to create; confirm the import.
    n_links_wanted = binom(n_links, p).rvs()
    n_links_present = 0
    node_dict = g_layers[0].node
    # Rejection-sample pairs until the drawn quota is met.
    while n_links_present < n_links_wanted:
        a = random.choice(nodes)
        b = random.choice(nodes)
        # Reject pairs sharing a community (also rejects a == b, since a
        # node trivially shares its own community set with itself).
        if node_dict[a]['cmtys']&node_dict[b]['cmtys']:
            continue
        if g.has_edge(a, b):
            continue
        g.add_edge(a, b)
        n_links_present += 1
def sbm_incomplete(U, q, n, p, weighted=None,
                   edges_constructor=None):
    """Create an SBM graph whose communities are sampled from a universe
    of nodes; nodes that end up in no community are excluded."""
    U = set(U)
    g = networkx.Graph()
    # Draw q communities from the universe.  ``n`` is either one size
    # for all communities or an iterable of per-community sizes.
    if isinstance(n, (int, float)):
        communities = [ random.sample(U, n) for _ in range(q) ]
    else:
        communities = [ random.sample(U, int(round(size))) for size in n ]
    # Shrink the universe to the nodes actually placed in a community.
    U = set().union(*communities)
    g.add_nodes_from(U)
    # ``p`` is likewise either one density for all or one per community.
    if isinstance(p, (int, float)):
        densities = [p] * len(communities)
    else:
        densities = p
    for members, density in zip(communities, densities):
        edges_constructor(g, members, density, weighted=weighted)
    if weighted:
        # Collapse the per-layer 'weights' list of each edge into a
        # single combined weight.
        for a, b in g.edges_iter():
            combined = 1. - product(1.-w for w in g.edge[a][b]['weights'])
            g.edge[a][b]['weight'] = combined
    # Record the planted communities on the graph.
    nxutil.cmtyInit(g)
    for cid, members in enumerate(communities):
        nxutil.cmtyAddFromList(g, cid, members)
    return g
def sbm(U, q, n, p, weighted=None, edges_constructor=add_edges_exact_sparse, pOut=None):
    """Build a stochastic-block-model graph over universe ``U`` with
    ``q`` communities of size ``n`` and intra-community density ``p``;
    if ``pOut`` is given, external edges are added at that density."""
    U = set(U)
    g = networkx.Graph()
    # Partition/sample communities from the universe.
    communities = makeCommunities(U, q, n)
    g.add_nodes_from(U)
    # Wire up each community internally.
    for members in communities:
        edges_constructor(g, members, p, weighted=weighted)
    if weighted:
        # Collapse per-layer 'weights' into a single combined weight.
        for a, b in g.edges_iter():
            combined = 1. - product(1.-w for w in g.edge[a][b]['weights'])
            g.edge[a][b]['weight'] = combined
    # Record the planted communities on the graph.
    nxutil.cmtyInit(g)
    for cid, members in enumerate(communities):
        nxutil.cmtyAddFromList(g, cid, members)
    # Optional inter-community background edges.
    if pOut:
        add_edges_out_sparse(g=g, p=pOut, g_layers=(g,), weighted=weighted,
                             edges_constructor=edges_constructor)
    return g
def sbm2(pin, pout, n, q):
    """Convenience wrapper around :func:`sbm`: ``q`` communities of
    ``n`` nodes each, returning both the graph and its planted
    community structure."""
    total_nodes = n * q
    g = sbm(range(total_nodes), q, n, pin, pOut=pout)
    import pcd.cmty
    cmtys = pcd.cmty.Communities.from_networkx(g)
    return g, cmtys
def compose_graphs(gs, weighted=None):
    """Union of the graphs in ``gs``: all nodes, then all edges.

    When ``weighted``, each edge's weight in the result combines the
    per-layer 'weights' lists of every input graph containing that edge
    via 1 - prod(1 - w).
    """
    g = networkx.Graph()
    for layer in gs:
        g.add_nodes_from(layer.nodes_iter())
    for layer in gs:
        g.add_edges_from(layer.edges_iter())
    if weighted:
        for a, b in g.edges_iter():
            # Gather the weight contributions from every layer that has
            # this edge (edge attributes are read from the *inputs*,
            # since add_edges_from above copied only the endpoints).
            contributions = [ ]
            for layer in gs:
                if layer.has_edge(a, b):
                    contributions.extend(layer.edge[a][b]['weights'])
            g[a][b]['weight'] = 1. - product(1.-w for w in contributions)
    return g
def multiLayerSBM(N_U, config, weighted=None, incomplete0=False,
                  edges_constructor=add_edges_exact,
                  pOut=None):
    """Build a multi-layer SBM and return (composed_graph, subgraphs).

    Config is a list of (q, n, p) tuples, one per layer.  Each layer is
    an independent SBM over the same universe; the layers are then
    composed into one graph.  If ``incomplete0``, layer 0 may drop some
    universe nodes.  ``pOut`` adds background inter-community edges to
    the composed graph.
    """
    U = range(N_U)
    subgraphs = [ ]
    for level, (q, n, p) in enumerate(config):
        if level==0 and incomplete0:
            _g = sbm_incomplete(U, q=q, n=n, p=p, weighted=weighted,
                                edges_constructor=edges_constructor)
            # NOTE(review): this replaces the *node* universe with the
            # layer-0 *edge* tuples for all subsequent layers — possibly
            # intended to be _g.nodes(); confirm against callers.
            U = tuple(_g.edges())
        else:
            _g = sbm(U, q=q, n=n, p=p, weighted=weighted,
                     edges_constructor=edges_constructor)
        subgraphs.append(_g)
    # Merge the per-layer graphs into one (weights combined if asked).
    g = compose_graphs(subgraphs, weighted=weighted)
    if pOut:
        add_edges_out_sparse(g, p=pOut, g_layers=subgraphs, weighted=weighted,
                             edges_constructor=edges_constructor)
    return g, subgraphs
def overlap_communities(g, n_min=1):
"""Return graph with communities set to all pairwise overlap of
given graph communities.
n_min = minimum size of overlap to be returned."""
g_new = g.copy()
nxutil.cmtyInit(g_new)
cmtys = nxutil.communities(g)
cmtys_list = list(cmtys)
overlap_cmtys = { }
overlap_cmty_i = 0
print cmtys_list
for i, c1 in enumerate(cmtys_list):
c1nodes = cmtys[c1]
for j, c2 in enumerate(cmtys_list[i+1:]):
c2nodes = cmtys[c2]
newCmtyNodes = c1nodes & c2nodes
print c1, c2, len(c1nodes), len(c2nodes), len(newCmtyNodes)
if len(newCmtyNodes) > n_min:
overlap_cmtys[overlap_cmty_i] = newCmtyNodes
overlap_cmty_i += 1
nxutil.cmtyAddFromList(g_new, overlap_cmty_i, newCmtyNodes)
return g_new
def make_overlap_test(N_U, q1, n1, p1, q2, n2, p2, pU):
"""
N_U: number of nodes in the universe.
q1, q2: sizes of level1 and level2 communities.
n1, n2: number of nodes in level1 and level2 communities.
p1, p2: edge densities
pU: background connection probability.
"""
U = set(range(N_U))
g = networkx.Graph()
#n1 = 25
#q1 = 40
#p1 = .95
level1 = [ random.sample(U, n1) for _ in range(q1) ]
U = set().union(*level1)
print sum(len(x) for x in level1), [ len(x) for x in level1 ]
print len(U)
g.add_nodes_from(U)
for points in level1:
add_edges(g, points, p1)
#level2 = None
#n2 = 100
#q2 = 10
#p2 = .65
#level2 = [ random.sample(U, size2) for _ in range(q2) ]
level2 = partition_nodes(U, q2, n2)
for points in level2:
add_edges(g, points, p2)
add_edges(g, list(g.nodes()), pU)
g1 = g.copy()
nxutil.cmtyInit(g1)
for c, nodes in enumerate(level1):
nxutil.cmtyAddFromList(g1, c, nodes)
g2 = g.copy()
nxutil.cmtyInit(g2)
for c, nodes in enumerate(level2):
nxutil.cmtyAddFromList(g2, c, nodes)
return g, g1, g2
if __name__ == "__main__":
    # Smoke-test driver: build one sparse SBM from command-line sizes
    # and report node/edge counts.  Usage: script.py <q> <n>
    import sys
    q = int(sys.argv[1])
    n = int(sys.argv[2])
    avg = [ ]
    # Mean intra- and inter-community degree parameter of 10./n.
    g = sbm(U=range(n*q), q=q, n=n, p=10./n, pOut=10./n, edges_constructor=add_edges_exact_sparse)
    print len(g)
    print g.number_of_edges()
    avg.append(g.number_of_edges())
    import numpy
    # With a single sample this is just the edge count again.
    print numpy.mean(avg)
| [
"rkd@zgib.net"
] | rkd@zgib.net |
ce287987914d60a0d28cb2e7d614601a01354d20 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/infra/clusterstats.py | 01d0a31abeaa6466ae3fe00309e81bc56ba09230 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,692 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ClusterStats(Mo):
    """Auto-generated Cisco ACI (cobra) model meta for the abstract
    ``infraClusterStats`` stats MO: current cluster statistics with two
    counters, ``uTime`` (gauge: time in unavailable state) and
    ``a2uCount`` (counter: active-to-unavailable transitions), plus the
    implicit derived properties (last/min/max/avg/suspect/threshold/
    trend/rate/total) for each.  Generated code — do not hand-edit
    behavior.
    """
    meta = StatsClassMeta("cobra.model.infra.ClusterStats", "cluster")

    # Counter 1: uTime gauge and the property names of its derived stats.
    counter = CounterMeta("uTime", CounterCategory.GAUGE, "milliseconds", "time in unavailable state")
    counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "uTimeLast"
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "uTimeMin"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "uTimeMax"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "uTimeAvg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "uTimeSpct"
    counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "uTimeTtl"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "uTimeThr"
    counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "uTimeTrBase"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "uTimeTr"
    meta._counters.append(counter)

    # Counter 2: a2uCount counter and its derived stats.
    counter = CounterMeta("a2uCount", CounterCategory.COUNTER, "count", "active to unavailable transitions")
    counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "a2uCountLast"
    counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "a2uCountCum"
    counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "a2uCountPer"
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "a2uCountMin"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "a2uCountMax"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "a2uCountAvg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "a2uCountSpct"
    counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "a2uCountBase"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "a2uCountThr"
    counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "a2uCountTrBase"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "a2uCountTr"
    counter._propRefs[PropCategory.IMPLICIT_RATE] = "a2uCountRate"
    meta._counters.append(counter)

    # Class-level metadata flags.
    meta.isAbstract = True
    meta.moClassName = "infraClusterStats"
    # NOTE(review): duplicated assignment emitted by the generator;
    # harmless but redundant.
    meta.moClassName = "infraClusterStats"
    meta.rnFormat = ""
    meta.category = MoCategory.STATS_CURRENT
    meta.label = "current cluster stats"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.superClasses.add("cobra.model.stats.Item")
    meta.superClasses.add("cobra.model.stats.Curr")

    # Concrete per-interval subclasses of this abstract stats class.
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1w")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats15min")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats5min")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1d")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1qtr")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1year")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1h")
    meta.concreteSubClasses.add("cobra.model.infra.ClusterStats1mo")

    meta.rnPrefixes = [
    ]

    # --- a2uCount derived properties ---
    prop = PropMeta("str", "a2uCountAvg", "a2uCountAvg", 9320, PropCategory.IMPLICIT_AVG)
    prop.label = "active to unavailable transitions average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountAvg", prop)

    prop = PropMeta("str", "a2uCountBase", "a2uCountBase", 9315, PropCategory.IMPLICIT_BASELINE)
    prop.label = "active to unavailable transitions baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountBase", prop)

    prop = PropMeta("str", "a2uCountCum", "a2uCountCum", 9316, PropCategory.IMPLICIT_CUMULATIVE)
    prop.label = "active to unavailable transitions cumulative"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountCum", prop)

    prop = PropMeta("str", "a2uCountLast", "a2uCountLast", 9314, PropCategory.IMPLICIT_LASTREADING)
    prop.label = "active to unavailable transitions current value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountLast", prop)

    prop = PropMeta("str", "a2uCountMax", "a2uCountMax", 9319, PropCategory.IMPLICIT_MAX)
    prop.label = "active to unavailable transitions maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountMax", prop)

    prop = PropMeta("str", "a2uCountMin", "a2uCountMin", 9318, PropCategory.IMPLICIT_MIN)
    prop.label = "active to unavailable transitions minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountMin", prop)

    prop = PropMeta("str", "a2uCountPer", "a2uCountPer", 9317, PropCategory.IMPLICIT_PERIODIC)
    prop.label = "active to unavailable transitions periodic"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountPer", prop)

    prop = PropMeta("str", "a2uCountRate", "a2uCountRate", 9325, PropCategory.IMPLICIT_RATE)
    prop.label = "active to unavailable transitions rate"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountRate", prop)

    prop = PropMeta("str", "a2uCountSpct", "a2uCountSpct", 9321, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "active to unavailable transitions suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountSpct", prop)

    # Thresholded-flags bitmask property: each constant is one bit of a
    # (reading-kind x severity) flag matrix.
    prop = PropMeta("str", "a2uCountThr", "a2uCountThr", 9322, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "active to unavailable transitions thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("a2uCountThr", prop)

    prop = PropMeta("str", "a2uCountTr", "a2uCountTr", 9324, PropCategory.IMPLICIT_TREND)
    prop.label = "active to unavailable transitions trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountTr", prop)

    prop = PropMeta("str", "a2uCountTrBase", "a2uCountTrBase", 9323, PropCategory.IMPLICIT_TREND_BASE)
    prop.label = "active to unavailable transitions trend baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("a2uCountTrBase", prop)

    # --- generic MO bookkeeping properties ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
    prop.label = "Number of Collections During this Interval"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("cnt", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
    prop.label = "Collection Length"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("lastCollOffset", prop)

    prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
    prop.label = "Reporting End Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvEnd", prop)

    prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
    prop.label = "Reporting Start Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvStart", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # --- uTime derived properties ---
    prop = PropMeta("str", "uTimeAvg", "uTimeAvg", 9347, PropCategory.IMPLICIT_AVG)
    prop.label = "time in unavailable state average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeAvg", prop)

    prop = PropMeta("str", "uTimeLast", "uTimeLast", 9341, PropCategory.IMPLICIT_LASTREADING)
    prop.label = "time in unavailable state current value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeLast", prop)

    prop = PropMeta("str", "uTimeMax", "uTimeMax", 9346, PropCategory.IMPLICIT_MAX)
    prop.label = "time in unavailable state maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeMax", prop)

    prop = PropMeta("str", "uTimeMin", "uTimeMin", 9345, PropCategory.IMPLICIT_MIN)
    prop.label = "time in unavailable state minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeMin", prop)

    prop = PropMeta("str", "uTimeSpct", "uTimeSpct", 9348, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "time in unavailable state suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeSpct", prop)

    # Same (reading-kind x severity) bitmask layout as a2uCountThr.
    prop = PropMeta("str", "uTimeThr", "uTimeThr", 9349, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "time in unavailable state thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("uTimeThr", prop)

    prop = PropMeta("str", "uTimeTr", "uTimeTr", 9351, PropCategory.IMPLICIT_TREND)
    prop.label = "time in unavailable state trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeTr", prop)

    prop = PropMeta("str", "uTimeTrBase", "uTimeTrBase", 9350, PropCategory.IMPLICIT_TREND_BASE)
    prop.label = "time in unavailable state trend baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeTrBase", prop)

    prop = PropMeta("str", "uTimeTtl", "uTimeTtl", 15705, PropCategory.IMPLICIT_TOTAL)
    prop.label = "time in unavailable state total sum"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("uTimeTtl", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Construct the MO under ``parentMoOrDn``; this class has no
        naming properties, so no naming values are passed."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
84e79321c4786a570167323987d2d13e7654deb4 | 2e6f4690a2a9448a1eb027c14a637ab449b94c4f | /qa/rpc-tests/bipdersig.py | 1bb4ac97183bc298a20535bae3f3bb1c86222804 | [
"MIT"
] | permissive | mirzaei-ce/core-mashhadbit | 11d60f09f80c8056f5e063eb65783f8699f5ede8 | 1d9d45336cbbda7ffd700d3f1c3dd9e8b4ce2745 | refs/heads/master | 2021-07-18T11:43:26.440889 | 2017-10-26T14:31:07 | 2017-10-26T14:31:07 | 108,422,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the BIP66 changeover logic
#
from test_framework.test_framework import MashhadbitTestFramework
from test_framework.util import *
class BIP66Test(MashhadbitTestFramework):
    """Regression test for the BIP66 (strict DER signatures) version-3
    block switchover: after 750 of the last 1000 blocks are version>=3,
    mining continues; after 950/1000, version-2 blocks must be rejected.
    """

    def setup_network(self):
        # Node 0: default version; node 1 mines version=2 (old) blocks;
        # node 2 mines version=3 (new) blocks.  Nodes 1 and 2 are both
        # connected to node 0.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # All height checks below are relative to the starting height.
        cnt = self.nodes[0].getblockcount()

        # Mine some old-version blocks
        self.nodes[1].generate(100)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 version=2 blocks")

        # Mine 750 new-version blocks (in batches of 50)
        for i in xrange(15):
            self.nodes[2].generate(50)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 850):
            raise AssertionError("Failed to mine 750 version=3 blocks")

        # TODO: check that new DERSIG rules are not enforced

        # Mine 1 new-version block (crosses the 750/1000 threshold)
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 851):
            raise AssertionError("Failed to mine a version=3 blocks")

        # TODO: check that new DERSIG rules are enforced

        # Mine 198 new-version blocks (in batches of 99)
        for i in xrange(2):
            self.nodes[2].generate(99)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1049):
            raise AssertionError("Failed to mine 198 version=3 blocks")

        # Mine 1 old-version block (still accepted: only 949 of the
        # last 1000 are version=3)
        self.nodes[1].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1050):
            raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Failed to mine a version=3 block")

        # Mine 1 old-version blocks — must now be rejected (950/1000
        # threshold reached), so generate should raise over RPC.
        try:
            self.nodes[1].generate(1)
            raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
        except JSONRPCException:
            pass
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1052):
            raise AssertionError("Failed to mine a version=3 block")
if __name__ == '__main__':
    # Run the test directly when invoked as a script.
    BIP66Test().main()
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
4c367a95ab9afb62865daaf75a0e5314c4705ec7 | 374dea7d7d1a424d91f369cc75b11b16e1a489cd | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/bPN_2o1RXRZaK7Vxgp3oTysbcxQmJr9XStOWBh0VWNo=/binascii.cpython-37m-x86_64-linux-gnu.pyi | 352cce23207d526d4b293041c52677dec98d99b8 | [] | no_license | tkoon107/text-generation-LSTM-neural-net | ed0e6a0fb906f4b4fd649eadfe36c254144be016 | 6b98ee355a30da128462bfac531509539d6533ae | refs/heads/master | 2020-05-27T16:46:44.128875 | 2019-06-10T18:26:54 | 2019-06-10T18:26:54 | 188,708,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | pyi | import builtins as _mod_builtins
class Error(_mod_builtins.ValueError):
    """Stub for ``binascii.Error``, a ValueError subclass raised on
    malformed input (auto-generated language-server stub)."""
    __class__ = Error
    __dict__ = {}
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def __init_subclass__(cls):
        'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
        return None

    __module__ = 'binascii'
    @classmethod
    def __subclasshook__(cls, subclass):
        'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
        return False

    @property
    def __weakref__(self):
        'list of weak references to the object (if defined)'
        pass
class Incomplete(_mod_builtins.Exception):
    """Stub for ``binascii.Incomplete``, raised on incomplete input
    (auto-generated language-server stub)."""
    __class__ = Incomplete
    __dict__ = {}
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def __init_subclass__(cls):
        'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
        return None

    __module__ = 'binascii'
    @classmethod
    def __subclasshook__(cls, subclass):
        'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
        return False

    @property
    def __weakref__(self):
        'list of weak references to the object (if defined)'
        pass
# Module-level metadata emitted by the stub generator for binascii.
__doc__ = 'Conversion between binary data and ASCII'
__file__ = '/home/trevor/anaconda3/lib/python3.7/lib-dynload/binascii.cpython-37m-x86_64-linux-gnu.so'
__name__ = 'binascii'
__package__ = ''
def a2b_base64(data) -> bytes:
    'Decode a line of base64 data.'
    pass
def a2b_hex(hexstr) -> bytes:
    'Binary data of hexadecimal representation.\n\nhexstr must contain an even number of hex digits (upper or lower case).\nThis function is also available as "unhexlify()".'
    pass
def a2b_hqx(data) -> bytes:
    'Decode .hqx coding.'
    pass
def a2b_qp(data, header) -> bytes:
    'Decode a string of qp-encoded data.'
    pass
def a2b_uu(data):
'Decode a line of uuencoded data.'
pass
def b2a_base64(data):
'Base64-code line of data.'
pass
def b2a_hex(data):
'Hexadecimal representation of binary data.\n\nThe return value is a bytes object. This function is also\navailable as "hexlify()".'
pass
def b2a_hqx(data):
'Encode .hqx data.'
pass
def b2a_qp(data, quotetabs, istext, header):
'Encode a string using quoted-printable encoding.\n\nOn encoding, when istext is set, newlines are not encoded, and white\nspace at end of lines is. When istext is not set, \\r and \\n (CR/LF)\nare both encoded. When quotetabs is set, space and tabs are encoded.'
pass
def b2a_uu(data):
'Uuencode line of data.'
pass
def crc32(data, crc):
'Compute CRC-32 incrementally.'
pass
def crc_hqx(data, crc):
'Compute CRC-CCITT incrementally.'
pass
def hexlify(data):
'Hexadecimal representation of binary data.\n\nThe return value is a bytes object.'
pass
def rlecode_hqx(data):
'Binhex RLE-code binary data.'
pass
def rledecode_hqx(data):
'Decode hexbin RLE-coded string.'
pass
def unhexlify(hexstr):
'Binary data of hexadecimal representation.\n\nhexstr must contain an even number of hex digits (upper or lower case).'
pass
| [
"trevorlang@langdatascience.org"
] | trevorlang@langdatascience.org |
23f64bddd7650d60c54a5d74d312571372533641 | d7620b35a248cf1cabc98f721026a781164c89f5 | /OpenCV-basic/ch03/arithmetic.py | 2f220e3aab44a851cb6efc0940d6ac10c11964da | [] | no_license | Seonghyeony/Project-OpenCV | e53ed1e72b113a29cc5890a89a4e7f4078dfa198 | a07fdc4bfa4cbf5b851a6ec20f0873d09cecbd54 | refs/heads/main | 2023-02-17T22:38:35.537226 | 2021-01-19T13:09:29 | 2021-01-19T13:09:29 | 326,711,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | import sys
import numpy as np
import cv2
from matplotlib import pyplot as plt
src1 = cv2.imread('lenna256.bmp', cv2.IMREAD_GRAYSCALE)
src2 = cv2.imread('square.bmp', cv2.IMREAD_GRAYSCALE)
if src1 is None or src2 is None:
print('Image load failed!')
sys.exit()
dst1 = cv2.add(src1, src2, dtype=cv2.CV_8U) # 덧셈 연산
dst2 = cv2.addWeighted(src1, 0.5, src2, 0.5, 0.0) # 가중치 연산
dst3 = cv2.subtract(src1, src2) # 뺄셈 연산
dst4 = cv2.absdiff(src1, src2) # 차 연산
# 2행 3열 1번 째, 2행 3열 2번 째....
plt.subplot(231), plt.axis('off'), plt.imshow(src1, 'gray'), plt.title('src1')
plt.subplot(232), plt.axis('off'), plt.imshow(src2, 'gray'), plt.title('src2')
plt.subplot(233), plt.axis('off'), plt.imshow(dst1, 'gray'), plt.title('add')
plt.subplot(234), plt.axis('off'), plt.imshow(dst2, 'gray'), plt.title('addWeighted')
plt.subplot(235), plt.axis('off'), plt.imshow(dst3, 'gray'), plt.title('subtract')
plt.subplot(236), plt.axis('off'), plt.imshow(dst4, 'gray'), plt.title('absdiff')
plt.show()
| [
"sunghyun7949@naver.com"
] | sunghyun7949@naver.com |
6d9d0475e47cb4ffbe3bffda932aeaad4e9568e9 | 4419d7b3479ea5ae7a76d6153fd21f8cec78a406 | /virtual/lib/python3.6/site-packages/werkzeug/local.py | 6ef633222fb20be7463b4808df68d34256b1adfa | [
"MIT"
] | permissive | monicaoyugi/News-Updates | 276f2e5b2e7ec1d1bc21786160f6df32d5f0d8c2 | 2652b28153f36284447952fdef4c63496af90418 | refs/heads/master | 2020-12-26T08:36:37.365993 | 2020-02-07T13:03:51 | 2020-02-07T13:03:51 | 237,449,423 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,458 | py | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: 2007 Pallets
:README.md: BSD-3-Clause
"""
import copy
from functools import update_wrapper
from ._compat import implements_bool
from ._compat import PY2
from .wsgi import ClosingIterator
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ("__storage__", "__ident_func__")
def __init__(self):
object.__setattr__(self, "__storage__", {})
object.__setattr__(self, "__ident_func__", get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, "__ident_func__", value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError("object unbound")
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, "stack", None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, "stack", None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
by appending them to `manager.locals`. Every time the manager cleans up,
it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, "__ident_func__", ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals))
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
The class can be instantiated with a callable as well now.
"""
__slots__ = ("__local", "__dict__", "__name__", "__wrapped__")
def __init__(self, local, name=None):
object.__setattr__(self, "_LocalProxy__local", local)
object.__setattr__(self, "__name__", name)
if callable(local) and not hasattr(local, "__release_local__"):
# "local" is a callable that is not an instance of Local or
# LocalManager: mark it as a wrapped function.
object.__setattr__(self, "__wrapped__", local)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, "__release_local__"):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError("no object bound to %s" % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError("__dict__")
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return "<%s unbound>" % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object()) # noqa
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == "__members__":
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
| [
"monicaoyugi@gmail.com"
] | monicaoyugi@gmail.com |
dadd0fa88fd200adad18bac6e84b65a9d05615d6 | 802105debf55010216717f1a2536057cec2f147c | /convo/migrations/0005_convo_user.py | b262f16574328cd460c80a81fdf8dda56e3acfc7 | [] | no_license | Ibrokola/speakout | ba4bbff627fcd0a82cf206b45d4a2938931ede0e | a800e17edbd69be2c45fec7fe75a83ed1b92d8c5 | refs/heads/master | 2021-01-19T16:59:42.743235 | 2017-05-06T04:37:26 | 2017-05-06T04:37:26 | 88,296,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 10:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('convo', '0004_auto_20170415_1044'),
]
operations = [
migrations.AddField(
model_name='convo',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"babskolawole@gmail.com"
] | babskolawole@gmail.com |
5607094dc8545aa042719562e0b5822af083ab9f | ac33111a551d13c13f96abd64898ce216959ada0 | /study/sorting/6-02 위에서아래로.py | fa7f9e4debd4637dbca63642ba35a3245ee2076f | [] | no_license | apple2062/algorithm | b48833e2ebcfe08623f328309780ab5e59749c79 | 9bdd08f513bc2f7600b7b263738e3eb09e86f77c | refs/heads/master | 2023-04-15T05:09:06.401237 | 2021-04-29T03:30:55 | 2021-04-29T03:30:55 | 289,252,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # 6-2 위에서 아래로
# 크기에 상관없이 나열되어 있다.
# 큰 숫 부터 작은 수 순서로 정렬 . 내림차순 정렬 프로그램 만들기
n = int(input())
matrix = []
for i in range(n):
matrix.append(int(input()))
matrix.sort(reverse=True)
for i in matrix:
print(i,end = ' ')
| [
"apple2062@naver.com"
] | apple2062@naver.com |
cec893095fc572f735a747b84cbe0d4f91bce9c7 | 9275454ce938751179ef08ecc21b5dd22a1a0ef0 | /src/brasil/gov/barra/tests/test_helper.py | 1dcf8e1fb446a3a1350cc5cde01b87c10d6d0263 | [] | no_license | Uelson/brasil.gov.barra | a949e49f3c7fd6e52dd657946946ef5d574bb849 | 649fbb6a36a541fb129bb234a307bff6a7e9c0f0 | refs/heads/master | 2020-05-20T19:31:16.899824 | 2015-02-26T21:44:03 | 2015-02-26T21:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | # -*- coding: utf-8 -*-
from brasil.gov.barra.interfaces import IBarraInstalada
from brasil.gov.barra.testing import INTEGRATION_TESTING
from plone import api
from zope.interface import alsoProvides
import unittest2 as unittest
class HelperViewTest(unittest.TestCase):
""" Caso de teste da Browser View BarraHelper"""
layer = INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
pp = api.portal.get_tool('portal_properties')
self.sheet = getattr(pp, 'brasil_gov', None)
# Como nao eh um teste funcional, este objeto
# REQUEST precisa ser anotado com o browser layer
alsoProvides(self.portal.REQUEST, IBarraInstalada)
def test_helper_view_registration(self):
""" Validamos se BarraHelper esta registrada"""
view = api.content.get_view(
name='barra_helper',
context=self.portal,
request=self.portal.REQUEST,
)
view = view.__of__(self.portal)
self.failUnless(view)
def test_helper_view_local(self):
"""Uso do metodo local"""
# Obtemos a Browser view
view = api.content.get_view(
name='barra_helper',
context=self.portal,
request=self.portal.REQUEST,
)
# Validamos que ela retorne o valor padrao para
# o metodo remoto(configurado em profiles/default/propertiestool.xml)
self.assertFalse(view.local())
# Alteramos o valor para hospedagem para local
self.sheet.local = True
# O resultado da consulta a Browser View deve se adequar
self.assertTrue(view.local())
| [
"erico@simplesconsultoria.com.br"
] | erico@simplesconsultoria.com.br |
e0491c010038ee0b638174e74c8ad306a26cd3a4 | 3f84ff1f506287bf0bb3b0840947e3ef23f22c87 | /10day/8-工厂模式5.py | dbbcbe331d30c8a777a92ad609bc957cecc5b3e6 | [] | no_license | 2099454967/wbx | 34b61c0fc98a227562ea7822f2fa56c5d01d3654 | 316e7ac7351b532cb134aec0740e045261015920 | refs/heads/master | 2020-03-18T06:09:58.544919 | 2018-05-28T13:01:19 | 2018-05-28T13:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | #商店的基类(抽象类) 规定好商店要干的事情
#预订
#创建
class Store(object):
def createFactory(self,type):
pass
def order(self,type):
return self.createFactory(type)
class BmwStore(Store):
def createFactory(self,type):
return BmwFactory().selectCar(type)
class BCStore(Store):
def createFactory(self,type):
return BCFactory().selectCar(type)
class Factory(object):
def __init__(self,name):
self.name = name
def selectCar(self,type):
pass
class BCFactory(Factory):
def selectCar(self,type):
if type == 0:
return Bmw730()
elif type == 1:
return Bmwx5()
class BmwFactory(Factory):
def selectCar(self,type):
if type == 0:
return DaG()
elif type == 1:
return XiaoG()
class Car(object):
def move(self):
print("在移动")
def music(self):
print("播放音乐")
class Bmw730(Car):
pass
class Bmwx5(Car):
pass
class DaG(Car):
pass
class XiaoG(Car):
pass
if __name__ == '__main__':
store = BmwStore()
bmwx5 = store.order(1)
bmwx5.move()
bmwx5.music()
| [
"2099454967@qq.com"
] | 2099454967@qq.com |
dfebf83a65eca890a8e83df26a06b48a5168ba05 | 45a148dc8e4f2c469d7cca660fb2aca179473f2e | /package_control/clients/bitbucket_client.py | 40965da955056d79ec27e171e623ab2c85cd055e | [
"MIT"
] | permissive | tools-alexuser01/package_control | cbaa8d9c1e9d83f82f4195bcf7fd9c79f71e7de7 | 338ed4077b720c3904a8823984fa694097d8c8a8 | refs/heads/master | 2021-05-01T13:42:29.002863 | 2015-06-08T17:21:39 | 2015-06-08T17:21:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,069 | py | import re
from ..versions import version_sort, version_process
from .json_api_client import JSONApiClient
try:
from urllib import quote
except (ImportError):
from urllib.parse import quote
# A predefined list of readme filenames to look for
_readme_filenames = [
'readme',
'readme.txt',
'readme.md',
'readme.mkd',
'readme.mdown',
'readme.markdown',
'readme.textile',
'readme.creole',
'readme.rst'
]
class BitBucketClient(JSONApiClient):
def make_tags_url(self, repo):
"""
Generate the tags URL for a BitBucket repo if the value passed is a BitBucket
repository URL
:param repo:
The repository URL
:return:
The tags URL if repo was a BitBucket repo, otherwise False
"""
match = re.match('https?://bitbucket.org/([^/]+/[^/]+)/?$', repo)
if not match:
return False
return 'https://bitbucket.org/%s#tags' % match.group(1)
def make_branch_url(self, repo, branch):
"""
Generate the branch URL for a BitBucket repo if the value passed is a BitBucket
repository URL
:param repo:
The repository URL
:param branch:
The branch name
:return:
The branch URL if repo was a BitBucket repo, otherwise False
"""
match = re.match('https?://bitbucket.org/([^/]+/[^/]+)/?$', repo)
if not match:
return False
return 'https://bitbucket.org/%s/src/%s' % (match.group(1), quote(branch))
def download_info(self, url, tag_prefix=None):
"""
Retrieve information about downloading a package
:param url:
The URL of the repository, in one of the forms:
https://bitbucket.org/{user}/{repo}
https://bitbucket.org/{user}/{repo}/src/{branch}
https://bitbucket.org/{user}/{repo}/#tags
If the last option, grabs the info from the newest
tag that is a valid semver version.
:param tag_prefix:
If the URL is a tags URL, only match tags that have this prefix
:raises:
DownloaderException: when there is an error downloading
ClientException: when there is an error parsing the response
:return:
None if no match, False if no commit, or a list of dicts with the
following keys:
`version` - the version number of the download
`url` - the download URL of a zip file of the package
`date` - the ISO-8601 timestamp string when the version was published
"""
tags_match = re.match('https?://bitbucket.org/([^/]+/[^#/]+)/?#tags$', url)
version = None
url_pattern = 'https://bitbucket.org/%s/get/%s.zip'
output = []
if tags_match:
user_repo = tags_match.group(1)
tags_url = self._make_api_url(user_repo, '/tags')
tags_list = self.fetch_json(tags_url)
tag_info = version_process(tags_list.keys(), tag_prefix)
tag_info = version_sort(tag_info, reverse=True)
if not tag_info:
return False
used_versions = {}
for info in tag_info:
version = info['version']
if version in used_versions:
continue
tag = info['prefix'] + version
output.append({
'url': url_pattern % (user_repo, tag),
'commit': tag,
'version': version
})
used_versions[version] = True
else:
user_repo, commit = self._user_repo_branch(url)
if not user_repo:
return user_repo
output.append({
'url': url_pattern % (user_repo, commit),
'commit': commit
})
for release in output:
changeset_url = self._make_api_url(user_repo, '/changesets/%s' % release['commit'])
commit_info = self.fetch_json(changeset_url)
timestamp = commit_info['utctimestamp'][0:19]
if 'version' not in release:
release['version'] = re.sub('[\-: ]', '.', timestamp)
release['date'] = timestamp
del release['commit']
return output
def repo_info(self, url):
"""
Retrieve general information about a repository
:param url:
The URL to the repository, in one of the forms:
https://bitbucket.org/{user}/{repo}
https://bitbucket.org/{user}/{repo}/src/{branch}
:raises:
DownloaderException: when there is an error downloading
ClientException: when there is an error parsing the response
:return:
None if no match, or a dict with the following keys:
`name`
`description`
`homepage` - URL of the homepage
`author`
`readme` - URL of the readme
`issues` - URL of bug tracker
`donate` - URL of a donate page
"""
user_repo, branch = self._user_repo_branch(url)
if not user_repo:
return user_repo
api_url = self._make_api_url(user_repo)
info = self.fetch_json(api_url)
issues_url = u'https://bitbucket.org/%s/issues' % user_repo
return {
'name': info['name'],
'description': info['description'] or 'No description provided',
'homepage': info['website'] or url,
'author': info['owner'],
'donate': u'https://gratipay.com/on/bitbucket/%s/' % info['owner'],
'readme': self._readme_url(user_repo, branch),
'issues': issues_url if info['has_issues'] else None
}
def _main_branch_name(self, user_repo):
"""
Fetch the name of the default branch
:param user_repo:
The user/repo name to get the main branch for
:raises:
DownloaderException: when there is an error downloading
ClientException: when there is an error parsing the response
:return:
The name of the main branch - `master` or `default`
"""
main_branch_url = self._make_api_url(user_repo, '/main-branch')
main_branch_info = self.fetch_json(main_branch_url, True)
return main_branch_info['name']
def _make_api_url(self, user_repo, suffix=''):
"""
Generate a URL for the BitBucket API
:param user_repo:
The user/repo of the repository
:param suffix:
The extra API path info to add to the URL
:return:
The API URL
"""
return 'https://api.bitbucket.org/1.0/repositories/%s%s' % (user_repo, suffix)
def _readme_url(self, user_repo, branch, prefer_cached=False):
"""
Parse the root directory listing for the repo and return the URL
to any file that looks like a readme
:param user_repo:
The user/repo string
:param branch:
The branch to fetch the readme from
:param prefer_cached:
If a cached directory listing should be used instead of a new HTTP request
:raises:
DownloaderException: when there is an error downloading
ClientException: when there is an error parsing the response
:return:
The URL to the readme file, or None
"""
listing_url = self._make_api_url(user_repo, '/src/%s/' % branch)
root_dir_info = self.fetch_json(listing_url, prefer_cached)
for entry in root_dir_info['files']:
if entry['path'].lower() in _readme_filenames:
return 'https://bitbucket.org/%s/raw/%s/%s' % (user_repo,
branch, entry['path'])
return None
def _user_repo_branch(self, url):
"""
Extract the username/repo and branch name from the URL
:param url:
The URL to extract the info from, in one of the forms:
https://bitbucket.org/{user}/{repo}
https://bitbucket.org/{user}/{repo}/src/{branch}
:raises:
DownloaderException: when there is an error downloading
ClientException: when there is an error parsing the response
:return:
A tuple of (user/repo, branch name) or (None, None) if not matching
"""
repo_match = re.match('https?://bitbucket.org/([^/]+/[^/]+)/?$', url)
branch_match = re.match('https?://bitbucket.org/([^/]+/[^/]+)/src/([^/]+)/?$', url)
if repo_match:
user_repo = repo_match.group(1)
branch = self._main_branch_name(user_repo)
elif branch_match:
user_repo = branch_match.group(1)
branch = branch_match.group(2)
else:
return (None, None)
return (user_repo, branch)
| [
"will@wbond.net"
] | will@wbond.net |
98bd0ae106c9273c71d3380733ab51da4788afe1 | 8ff8ce3486db6cc9554aef203e7b25c09232c1db | /src/dsr_shelx/networkx/linalg/algebraicconnectivity.py | 9a7fe8cd1d040a91b9829b56a2ba07d7fc61bfeb | [] | no_license | dkratzert/DSR | 4eff1abec06ab9636d17168ee6273e4b3a27ccf6 | 87e5be534ab5e74b3bdf70904a13577b595a8f80 | refs/heads/master | 2023-07-08T13:41:10.404051 | 2023-06-23T19:06:31 | 2023-06-23T19:06:31 | 16,441,015 | 7 | 1 | null | 2022-11-13T12:16:37 | 2014-02-01T19:38:00 | Python | UTF-8 | Python | false | false | 18,281 | py | """
Algebraic connectivity and Fiedler vectors of undirected graphs.
"""
from functools import partial
import networkx as nx
from networkx.utils import (
not_implemented_for,
np_random_state,
reverse_cuthill_mckee_ordering,
)
__all__ = ["algebraic_connectivity", "fiedler_vector", "spectral_ordering"]
class _PCGSolver:
"""Preconditioned conjugate gradient method.
To solve Ax = b:
M = A.diagonal() # or some other preconditioner
solver = _PCGSolver(lambda x: A * x, lambda x: M * x)
x = solver.solve(b)
The inputs A and M are functions which compute
matrix multiplication on the argument.
A - multiply by the matrix A in Ax=b
M - multiply by M, the preconditioner surrogate for A
Warning: There is no limit on number of iterations.
"""
def __init__(self, A, M):
self._A = A
self._M = M
def solve(self, B, tol):
import numpy as np
# Densifying step - can this be kept sparse?
B = np.asarray(B)
X = np.ndarray(B.shape, order="F")
for j in range(B.shape[1]):
X[:, j] = self._solve(B[:, j], tol)
return X
def _solve(self, b, tol):
import numpy as np
import scipy as sp
import scipy.linalg.blas # call as sp.linalg.blas
A = self._A
M = self._M
tol *= sp.linalg.blas.dasum(b)
# Initialize.
x = np.zeros(b.shape)
r = b.copy()
z = M(r)
rz = sp.linalg.blas.ddot(r, z)
p = z.copy()
# Iterate.
while True:
Ap = A(p)
alpha = rz / sp.linalg.blas.ddot(p, Ap)
x = sp.linalg.blas.daxpy(p, x, a=alpha)
r = sp.linalg.blas.daxpy(Ap, r, a=-alpha)
if sp.linalg.blas.dasum(r) < tol:
return x
z = M(r)
beta = sp.linalg.blas.ddot(r, z)
beta, rz = beta / rz, beta
p = sp.linalg.blas.daxpy(p, z, a=beta)
class _LUSolver:
"""LU factorization.
To solve Ax = b:
solver = _LUSolver(A)
x = solver.solve(b)
optional argument `tol` on solve method is ignored but included
to match _PCGsolver API.
"""
def __init__(self, A):
import scipy as sp
import scipy.sparse.linalg # call as sp.sparse.linalg
self._LU = sp.sparse.linalg.splu(
A,
permc_spec="MMD_AT_PLUS_A",
diag_pivot_thresh=0.0,
options={"Equil": True, "SymmetricMode": True},
)
def solve(self, B, tol=None):
import numpy as np
B = np.asarray(B)
X = np.ndarray(B.shape, order="F")
for j in range(B.shape[1]):
X[:, j] = self._LU.solve(B[:, j])
return X
def _preprocess_graph(G, weight):
"""Compute edge weights and eliminate zero-weight edges."""
if G.is_directed():
H = nx.MultiGraph()
H.add_nodes_from(G)
H.add_weighted_edges_from(
((u, v, e.get(weight, 1.0)) for u, v, e in G.edges(data=True) if u != v),
weight=weight,
)
G = H
if not G.is_multigraph():
edges = (
(u, v, abs(e.get(weight, 1.0))) for u, v, e in G.edges(data=True) if u != v
)
else:
edges = (
(u, v, sum(abs(e.get(weight, 1.0)) for e in G[u][v].values()))
for u, v in G.edges()
if u != v
)
H = nx.Graph()
H.add_nodes_from(G)
H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)
return H
def _rcm_estimate(G, nodelist):
"""Estimate the Fiedler vector using the reverse Cuthill-McKee ordering."""
import numpy as np
G = G.subgraph(nodelist)
order = reverse_cuthill_mckee_ordering(G)
n = len(nodelist)
index = dict(zip(nodelist, range(n)))
x = np.ndarray(n, dtype=float)
for i, u in enumerate(order):
x[index[u]] = i
x -= (n - 1) / 2.0
return x
def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    The Fiedler vector of a connected undirected graph is the eigenvector
    corresponding to the second smallest eigenvalue of the Laplacian matrix
    of the graph. This function starts with the Laplacian L, not the Graph.
    Parameters
    ----------
    L : Laplacian of a possibly weighted or normalized, but undirected graph
    X : Initial guess for a solution. Usually a matrix of random numbers.
        This function allows more than one column in X to identify more than
        one eigenvector if desired.
    normalized : bool
        Whether the normalized Laplacian matrix is used.
    tol : float
        Tolerance of relative residual in eigenvalue computation.
        Warning: There is no limit on number of iterations.
    method : string
        Should be 'tracemin_pcg' or 'tracemin_lu'.
        Otherwise exception is raised.
    Returns
    -------
    sigma, X : Two NumPy arrays of floats.
        The lowest eigenvalues and corresponding eigenvectors of L.
        The size of input X determines the size of these outputs.
        As this is for Fiedler vectors, the zero eigenvalue (and
        constant eigenvector) are avoided.
    """
    import numpy as np
    import scipy as sp
    import scipy.linalg  # call as sp.linalg
    import scipy.linalg.blas  # call as sp.linalg.blas
    import scipy.sparse  # call as sp.sparse
    # Number of rows of X equals the dimension of L (number of graph nodes).
    n = X.shape[0]
    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = np.sqrt(L.diagonal())
        # TODO: rm csr_array wrapper when spdiags array creation becomes available
        D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
        L = D @ L @ D
        # Normalize e to unit length: it spans the nullspace of the
        # normalized Laplacian.
        e *= 1.0 / np.linalg.norm(e, 2)
    if normalized:
        def project(X):
            """Make X orthogonal to the nullspace of L."""
            X = np.asarray(X)
            for j in range(X.shape[1]):
                # Remove the component of each column along e.
                X[:, j] -= (X[:, j] @ e) * e
    else:
        def project(X):
            """Make X orthogonal to the nullspace of L."""
            X = np.asarray(X)
            for j in range(X.shape[1]):
                # The unnormalized Laplacian's nullspace is the constant
                # vector; subtracting the column mean removes it.
                X[:, j] -= X[:, j].sum() / n
    if method == "tracemin_pcg":
        # Jacobi (diagonal) preconditioner for the conjugate gradient solver.
        D = L.diagonal().astype(float)
        solver = _PCGSolver(lambda x: L @ x, lambda x: D * x)
    elif method == "tracemin_lu":
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = sp.sparse.csc_array(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to modified. Changing to infinity forces a zero in the
        # corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float("inf")
        solver = _LUSolver(A)
    else:
        raise nx.NetworkXError(f"Unknown linear system solver: {method}")
    # Initialize. Lnorm scales the residual so the convergence test is
    # relative to the magnitude of L.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    # W is a reusable Fortran-ordered work buffer the same shape as X.
    W = np.ndarray(X.shape, order="F")
    # NOTE: there is no iteration cap — the loop runs until res < tol.
    while True:
        # Orthonormalize X.
        X = np.linalg.qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L @ X
        H = X.T @ W
        sigma, Y = sp.linalg.eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X = X @ Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = sp.linalg.blas.dasum(W @ Y[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Compute X = L \ X / (X' * (L \ X)).
        # L \ X can have an arbitrary projection on the nullspace of L,
        # which will be eliminated.
        W[:, :] = solver.solve(X, tol)
        X = (sp.linalg.inv(W.T @ X) @ W.T).T  # Preserves Fortran storage order.
        project(X)
    return sigma, np.asarray(X)
def _get_fiedler_func(method):
    """Returns a function that solves the Fiedler eigenvalue problem."""
    import numpy as np

    if method == "tracemin":  # old style keyword <v2.1
        method = "tracemin_pcg"
    if method in ("tracemin_pcg", "tracemin_lu"):

        def find_fiedler(L, x, normalized, tol, seed):
            # One random start vector suffices for PCG; the LU variant uses a
            # small block of them (at most 4, bounded by the matrix size).
            n_vectors = 1 if method == "tracemin_pcg" else min(4, L.shape[0] - 1)
            X = np.asarray(seed.normal(size=(n_vectors, L.shape[0]))).T
            sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
            return sigma[0], X[:, 0]

        return find_fiedler
    if method in ("lanczos", "lobpcg"):

        def find_fiedler(L, x, normalized, tol, seed):
            import scipy as sp
            import scipy.sparse  # call as sp.sparse
            import scipy.sparse.linalg  # call as sp.sparse.linalg

            L = sp.sparse.csc_array(L, dtype=float)
            n = L.shape[0]
            if normalized:
                # TODO: rm csc_array wrapping when spdiags array becomes available
                D = sp.sparse.csc_array(
                    sp.sparse.spdiags(
                        1.0 / np.sqrt(L.diagonal()), [0], n, n, format="csc"
                    )
                )
                L = D @ L @ D
            if method == "lanczos" or n < 10:
                # Avoid LOBPCG when n < 10 due to
                # https://github.com/scipy/scipy/issues/3592
                # https://github.com/scipy/scipy/pull/3594
                sigma, X = sp.sparse.linalg.eigsh(
                    L, 2, which="SM", tol=tol, return_eigenvectors=True
                )
                return sigma[1], X[:, 1]
            X = np.asarray(np.atleast_2d(x).T)
            # Jacobi preconditioner; Y deflates the (possibly scaled)
            # constant nullspace vector.
            M = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / L.diagonal(), 0, n, n))
            Y = np.ones(n)
            if normalized:
                Y /= D.diagonal()
            sigma, X = sp.sparse.linalg.lobpcg(
                L, X, M=M, Y=np.atleast_2d(Y).T, tol=tol, maxiter=n, largest=False
            )
            return sigma[0], X[:, 0]

        return find_fiedler
    raise nx.NetworkXError(f"unknown method {method!r}.")
@np_random_state(5)
@not_implemented_for("directed")
def algebraic_connectivity(
    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
):
    """Returns the algebraic connectivity of an undirected graph.
    The algebraic connectivity of a connected undirected graph is the second
    smallest eigenvalue of its Laplacian matrix.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    weight : object, optional (default: 'weight')
        The data key used to determine the weight of each edge. If None, then
        each edge has unit weight.
    normalized : bool, optional (default: False)
        Whether the normalized Laplacian matrix is used.
    tol : float, optional (default: 1e-8)
        Tolerance of relative residual in eigenvalue computation.
    method : string, optional (default: 'tracemin_pcg')
        Method of eigenvalue computation. It must be one of the tracemin
        options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
        or 'lobpcg' (LOBPCG).
        The TraceMIN algorithm uses a linear system solver. The following
        values allow specifying the solver to be used.
        =============== ========================================
        Value           Solver
        =============== ========================================
        'tracemin_pcg'  Preconditioned conjugate gradient method
        'tracemin_lu'   LU factorization
        =============== ========================================
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    Returns
    -------
    algebraic_connectivity : float
        Algebraic connectivity.
    Raises
    ------
    NetworkXNotImplemented
        If G is directed.
    NetworkXError
        If G has less than two nodes.
    Notes
    -----
    Edge weights are interpreted by their absolute values. For MultiGraph's,
    weights of parallel edges are summed. Zero-weighted edges are ignored.
    See Also
    --------
    laplacian_matrix
    """
    if len(G) < 2:
        raise nx.NetworkXError("graph has less than two nodes.")
    G = _preprocess_graph(G, weight)
    if not nx.is_connected(G):
        # A disconnected graph has algebraic connectivity zero by definition.
        return 0.0
    L = nx.laplacian_matrix(G)
    if L.shape[0] == 2:
        # Two-node case has a closed-form answer; skip the eigensolver.
        return 2.0 * L[0, 0] if not normalized else 2.0
    find_fiedler = _get_fiedler_func(method)
    # LOBPCG benefits from an RCM-based initial guess; the other methods
    # generate their own starting vectors.
    x = None if method != "lobpcg" else _rcm_estimate(G, G)
    sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
    return sigma
@np_random_state(5)
@not_implemented_for("directed")
def fiedler_vector(
    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
):
    """Returns the Fiedler vector of a connected undirected graph.
    The Fiedler vector of a connected undirected graph is the eigenvector
    corresponding to the second smallest eigenvalue of the Laplacian matrix
    of the graph.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    weight : object, optional (default: 'weight')
        The data key used to determine the weight of each edge. If None, then
        each edge has unit weight.
    normalized : bool, optional (default: False)
        Whether the normalized Laplacian matrix is used.
    tol : float, optional (default: 1e-8)
        Tolerance of relative residual in eigenvalue computation.
    method : string, optional (default: 'tracemin_pcg')
        Method of eigenvalue computation. It must be one of the tracemin
        options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
        or 'lobpcg' (LOBPCG).
        The TraceMIN algorithm uses a linear system solver. The following
        values allow specifying the solver to be used.
        =============== ========================================
        Value           Solver
        =============== ========================================
        'tracemin_pcg'  Preconditioned conjugate gradient method
        'tracemin_lu'   LU factorization
        =============== ========================================
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    Returns
    -------
    fiedler_vector : NumPy array of floats.
        Fiedler vector.
    Raises
    ------
    NetworkXNotImplemented
        If G is directed.
    NetworkXError
        If G has less than two nodes or is not connected.
    Notes
    -----
    Edge weights are interpreted by their absolute values. For MultiGraph's,
    weights of parallel edges are summed. Zero-weighted edges are ignored.
    See Also
    --------
    laplacian_matrix
    """
    import numpy as np
    if len(G) < 2:
        raise nx.NetworkXError("graph has less than two nodes.")
    G = _preprocess_graph(G, weight)
    if not nx.is_connected(G):
        raise nx.NetworkXError("graph is not connected.")
    if len(G) == 2:
        # Two-node case: the Fiedler vector is [1, -1] up to scaling.
        return np.array([1.0, -1.0])
    find_fiedler = _get_fiedler_func(method)
    L = nx.laplacian_matrix(G)
    # LOBPCG benefits from an RCM-based initial guess; the other methods
    # generate their own starting vectors.
    x = None if method != "lobpcg" else _rcm_estimate(G, G)
    sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
    return fiedler
@np_random_state(5)
def spectral_ordering(
    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
):
    """Compute the spectral_ordering of a graph.
    The spectral ordering of a graph is an ordering of its nodes where nodes
    in the same weakly connected components appear contiguous and ordered by
    their corresponding elements in the Fiedler vector of the component.
    Parameters
    ----------
    G : NetworkX graph
        A graph.
    weight : object, optional (default: 'weight')
        The data key used to determine the weight of each edge. If None, then
        each edge has unit weight.
    normalized : bool, optional (default: False)
        Whether the normalized Laplacian matrix is used.
    tol : float, optional (default: 1e-8)
        Tolerance of relative residual in eigenvalue computation.
    method : string, optional (default: 'tracemin_pcg')
        Method of eigenvalue computation. It must be one of the tracemin
        options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
        or 'lobpcg' (LOBPCG).
        The TraceMIN algorithm uses a linear system solver. The following
        values allow specifying the solver to be used.
        =============== ========================================
        Value           Solver
        =============== ========================================
        'tracemin_pcg'  Preconditioned conjugate gradient method
        'tracemin_lu'   LU factorization
        =============== ========================================
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    Returns
    -------
    spectral_ordering : NumPy array of floats.
        Spectral ordering of nodes.
    Raises
    ------
    NetworkXError
        If G is empty.
    Notes
    -----
    Edge weights are interpreted by their absolute values. For MultiGraph's,
    weights of parallel edges are summed. Zero-weighted edges are ignored.
    See Also
    --------
    laplacian_matrix
    """
    if len(G) == 0:
        raise nx.NetworkXError("graph is empty.")
    G = _preprocess_graph(G, weight)
    find_fiedler = _get_fiedler_func(method)
    order = []
    # Order each connected component independently; components themselves are
    # emitted in whatever order nx.connected_components yields them.
    for component in nx.connected_components(G):
        size = len(component)
        if size > 2:
            L = nx.laplacian_matrix(G, component)
            x = None if method != "lobpcg" else _rcm_estimate(G, component)
            sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
            # Sort the component's nodes by their Fiedler-vector entries;
            # the enumeration index breaks ties deterministically.
            # NOTE(review): `component` is a set, so this relies on its
            # iteration order matching the node order used for L — confirm.
            sort_info = zip(fiedler, range(size), component)
            order.extend(u for x, c, u in sorted(sort_info))
        else:
            # Components of one or two nodes need no eigensolve.
            order.extend(component)
    return order
| [
"dkratzert@gmx.de"
] | dkratzert@gmx.de |
2c671f1c4a133aca1fab7ed9e5ecaf37b23c15ad | ba7052d8cf27317d7ebd22911e44aec860464307 | /durable_rules_tools/magic.py | 9d67ab8b328bcf9a84bdf2ac2c57607849a25aea | [
"MIT"
] | permissive | innovationOUtside/durable_rules_magic | 841a005ecae417910dbc7800270483e1eed11945 | 3d2fd4386ce2ea8ab83e6e84a39822dc56e21f9a | refs/heads/master | 2023-01-10T16:49:16.784258 | 2020-11-02T18:26:15 | 2020-11-02T18:26:15 | 263,738,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,549 | py | from IPython.core.magic import magics_class, line_cell_magic, Magics
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
import warnings
from .rules_utils import quick_assert_fact, quick_retract_fact, quick_post_event, _delete_state
# TO DO - things are passed in from the magic as strings
# Should we try to cast them to eg int, float, list, tuple, dict?
@magics_class
class DurableRulesMagic(Magics):
    def __init__(self, shell, cache_display_data=False):
        # `shell` is the running IPython instance passed to the Magics base
        # class; `cache_display_data` is accepted but unused here.
        super(DurableRulesMagic, self).__init__(shell)
        self.graph = None  # appears unused by the magics below — TODO confirm
        self.RULESET = None  # last ruleset used; remembered between magic calls
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def assert_facts(self, line, cell):
"Assert and/or retract several facts."
args = parse_argstring(self.assert_facts, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if _assertion.startswith('-'):
quick_retract_fact(_ruleset, _assertion.lstrip('-'))
elif not _assertion.startswith('#'):
quick_assert_fact(_ruleset, _assertion)
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def retract_facts(self, line, cell):
"Retract and/or assert several facts."
args = parse_argstring(self.retract_facts, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if _assertion.startswith('*'):
quick_assert_fact(_ruleset, _assertion.lstrip('-'))
elif not _assertion.startswith('#'):
quick_retract_fact(_ruleset, _assertion)
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def post_events(self, line, cell):
"Post several events."
args = parse_argstring(self.post_events, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if not _assertion.startswith('#'):
quick_post_event(_ruleset, _assertion)
    @line_cell_magic
    @magic_arguments()
    @argument('--ruleset', '-r', default='', help='Ruleset name.')
    @argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
    def facts_and_events(self, line, cell):
        "Assert and/or retract several facts and/or post several events."
        # Resolve the target ruleset: prefer an explicit --ruleset argument,
        # otherwise fall back to the one remembered from an earlier call.
        args = parse_argstring(self.facts_and_events, line)
        if not args.ruleset and self.RULESET is None:
            warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
            return
        elif args.ruleset:
            self.RULESET = self.shell.user_ns[args.ruleset]
        _ruleset = self.RULESET
        # --no-reset stores False, so state is deleted by default and
        # preserved only when the flag is passed.
        if args.no_reset:
            _delete_state(_ruleset)
        # Dispatch each cell line by prefix: '-' retracts a fact, '*' asserts
        # a fact, '%' posts an event.
        for _assertion in cell.split('\n'):
            if _assertion.startswith('-'):
                quick_retract_fact(_ruleset, _assertion.lstrip('-'))
            elif _assertion.startswith('*'):
                quick_assert_fact(_ruleset, _assertion.lstrip('*'))
            elif _assertion.startswith('%'):
quick_post_event(_ruleset, _assertion.lstrip('%')) | [
"tony.hirst@gmail.com"
] | tony.hirst@gmail.com |
923c45fbfb8927418171bd74da9c695b7b579f88 | ba7640cffff3085f045d69f37735de0f759e66c3 | /running/HTMLTestRunner.py | db252aa5a296ce7ed1d9fbf5720cde93a7b4abba | [
"Apache-2.0"
] | permissive | luoqingfu/reudom | f5e88292a7e8cdbb372340795bc5ec5c85a26931 | 3c52ff4aa2cd772260bbf3575f2844d76bc2f16a | refs/heads/master | 2020-12-07T13:23:29.972584 | 2019-12-24T14:57:05 | 2019-12-24T14:57:05 | 232,730,930 | 1 | 0 | Apache-2.0 | 2020-01-09T05:38:10 | 2020-01-09T05:38:09 | null | UTF-8 | Python | false | false | 45,699 | py | """
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiates a HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = file('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung , bugmaster"
__version__ = "0.9.0"
"""
Change History
Version 0.9.0
* Increased repeat execution
* Added failure screenshots
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Workaround a IE 6 bug that it does not treat <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import io
import sys
import time
import copy
import unittest
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr are automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture those
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
    """Proxy that forwards write/flush calls to a wrapped stream.

    Used to capture output sent to an already-cached stdout/stderr stream.
    """

    def __init__(self, fp):
        # The underlying stream (e.g. sys.stdout) that receives all output.
        self.fp = fp

    def write(self, s):
        """Forward a single string to the wrapped stream."""
        self.fp.write(s)

    def writelines(self, lines):
        """Forward a sequence of strings to the wrapped stream."""
        self.fp.writelines(lines)

    def flush(self):
        """Flush the wrapped stream."""
        self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
    Define a HTML template for report customization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: 'pass',
1: 'fail',
2: 'error',
3: 'skip',
}
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<script src="https://code.jquery.com/jquery-3.3.1.slim.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js"></script>
<script src="https://cdn.bootcss.com/twitter-bootstrap/4.3.1/js/bootstrap.min.js"></script>
<script src="http://apps.bdimg.com/libs/Chart.js/0.2.0/Chart.min.js"></script>
<!-- <link href="https://cdn.bootcss.com/echarts/3.8.5/echarts.common.min.js" rel="stylesheet"> -->
<link rel="stylesheet" href="https://cdn.bootcss.com/twitter-bootstrap/4.3.1/css/bootstrap.min.css">
%(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript">
function show_img(obj) {
var obj1 = obj.nextElementSibling
obj1.style.display='block'
var index = 0;//每张图片的下标,
var len = obj1.getElementsByTagName('img').length;
var imgyuan = obj1.getElementsByClassName('imgyuan')[0]
//var start=setInterval(autoPlay,500);
obj1.onmouseover=function(){//当鼠标光标停在图片上,则停止轮播
clearInterval(start);
}
obj1.onmouseout=function(){//当鼠标光标停在图片上,则开始轮播
start=setInterval(autoPlay,1000);
}
for (var i = 0; i < len; i++) {
var font = document.createElement('font')
imgyuan.appendChild(font)
}
var lis = obj1.getElementsByTagName('font');//得到所有圆圈
changeImg(0)
var funny = function (i) {
lis[i].onmouseover = function () {
index=i
changeImg(i)
}
}
for (var i = 0; i < lis.length; i++) {
funny(i);
}
function autoPlay(){
if(index>len-1){
index=0;
clearInterval(start); //运行一轮后停止
}
changeImg(index++);
}
imgyuan.style.width= 25*len +"px";
//对应圆圈和图片同步
function changeImg(index) {
var list = obj1.getElementsByTagName('img');
var list1 = obj1.getElementsByTagName('font');
for (i = 0; i < list.length; i++) {
list[i].style.display = 'none';
list1[i].style.backgroundColor = 'white';
}
list[index].style.display = 'block';
list1[index].style.backgroundColor = 'red';
}
}
function hide_img(obj){
obj.parentElement.style.display = "none";
obj.parentElement.getElementsByClassName('imgyuan')[0].innerHTML = "";
}
output_list = Array();
/* level - 0:Summary; 1:Failed; 2:Skip; 3:All */
function showCase(level, channel) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (["ft","pt","et","st"].indexOf(id.substr(0,2))!=-1){
if ( level == 0 && id.substr(2,1) == channel ) {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'pt'+ channel) {
if ( level == 1){
tr.className = '';
}
else if (level > 4 && id.substr(2,1) == channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'ft'+channel) {
if (level == 2) {
tr.className = '';
}
else if (level > 4 && id.substr(2,1) == channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'et'+channel) {
if (level == 3) {
tr.className = '';
}
else if (level > 4 && id.substr(2,1) == channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'st'+channel) {
if (level == 4) {
tr.className = '';
}
else if (level > 4 && id.substr(2,1) == channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
if (!tr) {
tid = 'e' + tid0;
tr = document.getElementById(tid);
}
if (!tr) {
tid = 's' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&');
s = s.replace(/</g,'<');
s = s.replace(/>/g,'>');
return s;
}
/* obsoleted by detail in <div>
function showOutput(id, name) {
var w = window.open("", //url
name,
"resizable,scrollbars,status,width=800,height=450");
d = w.document;
d.write("<pre>");
d.write(html_escape(output_list[id]));
d.write("\n");
d.write("<a href='javascript:window.close()'>close</a>\n");
d.write("</pre>\n");
d.close();
}
*/
</script>
%(heading)s
%(report)s
%(ending)s
%(chart_script)s
</body>
</html>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre { }
/* -- heading ---------------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 20px;
margin-bottom: 1ex;
margin-left: 10px;
margin-right: 10px;
width: 23%;
float: left;
padding-top: 10px;
padding-left: 10px;
padding-bottom: 10px;
padding-right: 10px;
box-shadow:0px 0px 5px #000;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.popup_window {
display: none;
position: relative;
left: 0px;
top: 0px;
/*border: solid #627173 1px; */
font-family: "Lucida Console", "Courier New", Courier, monospace;
text-align: left;
font-size: 8pt;
width: 500px;
}
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
margin-top: 3ex;
margin-bottom: 1ex;
margin-left: 10px;
}
#result_table {
width: 80%;
border-collapse: collapse;
border: 1px solid #777;
margin-left: 10px;
}
#header_row {
font-weight: bold;
color: #606060;
background-color: #f5f5f5;
border-top-width: 10px;
border-color: #d6e9c6;
font-size: 15px;
}
#result_table td {
border: 1px solid #f5f5f5;
padding: 2px;
}
#total_row { font-weight: bold; }
.passClass { background-color: #d6e9c6; }
.failClass { background-color: #faebcc; }
.errorClass { background-color: #ebccd1; }
.passCase { color: #28a745; font-weight: bold;}
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
.hiddenRow { display: none; }
.none {color: #009900 }
.testcase { margin-left: 2em; }
/* -- ending ---------------------------------------------------------------------- */
#ending {
}
/* -- chars ---------------------------------------------------------------------- */
.testChars {width: 900px;margin-left: 0px;}
.btn-info1 {
color: #fff;
background-color: #d6e9c6;
border-color: #d6e9c6;
}
.btn-info2 {
color: #fff;
background-color: #faebcc;
border-color: #faebcc;
}
.btn-info3 {
color: #fff;
background-color: #ebccd1;
border-color: #ebccd1;
}
/* -- screenshots ---------------------------------------------------------------------- */
.img{
height: 100%;
border-collapse: collapse;
}
.screenshots {
z-index: 100;
position:fixed;
height: 80%;
left: 50%;
top: 50%;
transform: translate(-50%,-50%);
display: none;
box-shadow:1px 2px 20px #333333;
}
.imgyuan{
height: 20px;
border-radius: 12px;
background-color: red;
padding-left: 13px;
margin: 0 auto;
position: relative;
top: -40px;
background-color: rgba(1, 150, 0, 0.3);
}
.imgyuan font{
border:1px solid white;
width:11px;
height:11px;
border-radius:50%;
margin-right: 9px;
margin-top: 4px;
display: block;
float: left;
background-color: white;
}
.close_shots {
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAcRklEQVR4Xu1dCbBdVZVdC4IyDyEBEmbIxJSEIUDodImMAoUig9IWiAjaFA3SDS0liIrQtnQjtA0iUGiDoK2gDFUtiDKPCRAykomQgRAIcyCEIQFyulbe/b/e/3nvvzvsO59d9esn9e/Z55x17rpn2gPhJVUEnHODAWwDYGsAgwBsCWAggAEA+gPYHsB6ANYNfuvfKwB8COCj4Lf+/TyAtwC8AeA1AEsAvAJgMYBFJFel2pGaKmdN+23abefcdgB2ATAcwFAAQwAcGLz0pnV1UPYQgHkA5gaEmk1ydpYNqFpdniAxRtQ5NyoggEgwLpgNYmjKpMhKAI8DeATAwyQfzaTWilTiCRJiIJ1zWgodDuAwAMcD2DBEsSI/cieA+wD8leT8Ijc077Z5grQZAefcMABHAzgSwEF5D1SK9U8GcDeAP5N8KsV6SqnaE6Rp2Jxz2kd8GcAxAMaUckSTN/oKAHeSfCK5qvJrqD1BglOmrwI4A4BmDS8NBJYC+CWAW0lOrysotSWIc+4EACcHy6i6jn/Yfo8HcIt+SC4PW6gKz9WKIM65jQGcGcwWun/wEg0B3ctcD+C6uhwf14IgwTLqHABnAVg/2jvhn26DwO8AXF31jX2lCRIQ4zwA5/rXPDUE/gTgSpJahlVOKkkQ59xGAC4AcD6AtSs3asXs0O8BXEZyWjGbF69VlSOIc06zxfcDO6d4qPhSSRD4BYBLSb6eRElRylaGIM453V9cDGBkUcCtcTt00nUxSd2plFpKTxDn3M4AfgJAdxleioXAswAuJPm3YjUrfGtKTRDnnDbglwHoF77L/skcELhO+0GS7+VQd6IqS0kQ59xonZwA+Hyi3vvCWSLwEoDzSP4xy0qT1lU6ggSzxs+SdtyXzw0BXTSeQ1JOYYWX0hDEOSevvGsAfLHwqPoGdkJATlz/RPLBTg/m/fdSEMQ5d6zMGwJX1bwx8/XbIXABSe0hCyuFJ4hz7lIAFxUWQd+wpAjcBuD0om7gC0sQ59ymAG4MfDOSDoIvX2wEZgI4leTTRWtmIQninNsHwM1BIISiYebbkw4CnwL4Osn/TUd9PK2FI4hz7jgAv80hIkg8BH0pawR0sfhTa6Vx9RWKIM45maNfHbczvlxlEPgFybOL0JvCEMQ592MAPywCKL4NhUBArr4n5t2SQhDEOfdzXR7lDYavv3AI3AvgWJKKLJmL5E4Q59wNOubLpfe+0jIg8BiAL5FUEInMJVeCOOduAnBK5r32FZYNgWcAHEVScYkzldwI4smR6ThXoTLdkRxJUgG8M5NcCOKc+xWA0zLrpa+oKghMAHBolqGHMieIc+4qAIU4wqvKW1OzftyvGMkkXRb9zpQg3q4qiyGtRR13kNSFcuqSGUGcczrG1XGuF4+ABQI3kPy2haK+dGRCEOfcVxTjNe3OeP21Q+ASkj9Ks9epE8Q5t3+QwMXHp0pzJOur+1skdeiTiqRKEOeccvE9GaQkS6UDXqlHQPlbSCr9nLmkTRAlZlECGi8egTQRWARgLEklNTWV1AjinPtPAN81ba1X5hFoj8C9JI+wBigVgjjnvgZA0b+9eASyRECxgRWT2UzMCeKcUxpkRdRTAGkvHoGsETie5O1WlaZBEIWZPNSqgV6PRyAiAgqaPZrkkojlWj5uShDn3A8AXGLRMK/DI5AAgdtJKl13YjEjiHPu74L7jsSN8gpKiMDHnwBvvwO8pZ+lwBYDgP6bAJttDPTLJXSyAtMpCWkisSSIcmzvm6g1SQuv/Bj4zDpJtfjyURGYvwh4fiHwoVIY9pJ1PwvsuA0wQkH4M5WVAHYj+UKSWk0I4pzTdb9yc2Qvr70JLH4VWPI6oK/Y2msDWw0ABm8JbLNV9u2pU43LlgPT5wCvh3DREFGO+FzW6NxFUnljYkti
gjjn9gQwKXYL4hbUdD7jBeDNt9tr+OxngD13BQZtEbcWX64dAu++BzwzDXjv/fAYbTcY2Hv38M/bPPlNkgpAGEssCPIXAF+IVXvcQhqcByPkjNxjODDEZ32OC/ca5YT/01OB5R9EV6kP1g6KQ56ZvApgOMllcWpMRBDn3DeC8KBx6o5fZvxk4NWI7sl7DAOG7BC/Tl+ygcA7y4CnpwHvxyCHym+yEXDQ2KzRvIpkrKg5sQninNNuWBug7TLtrZZWj8QM4br7MGCoJ0ns8Vr6bmNZ9X6CKDxrETj6EEC/s5X94sT+jd3K3LwDtSl84cX40O42FBi2Y/zydS35tsgxFfigxUlVVEwOPgDYeMOopZI+fw/Jo6IqiUUQ55w+wwuiVmby/ANPAjo9SSK7DgGG75REQ73K6m5DM0erY9w4SORDELX0OJJ3RGlyXIIojVbq7o4tO/J/DwCfKBB4Qtll5zzO5hM2OofiuvTTnuMjo4xp+S2xBN6zJJU5ILREJohzbhSAKaFrsH7wyUmA7j4sRJdXIoqX1gjoCF3kWKE7NyPJZ5Pe3PhIx75xCHILgJOM4IquRvsP7UOsREstLbm89ETgjYAcKw3JoRpGjgB2zvZcp9fQziK5a9jhjkSQ3C4Fm3ujr9k9D4ftX7jntGnX5t1LAwHdjGvPIdMdS9l2ELDPHpYa4+oK7ccelSC/URaguK0yKzdzLjDH+IxAx786Bq67aPkqcshsx1J0arX/nsAG61lqjatrBslQV/qhCeKcGwFgVtwWmZd7aELj0spS6k6SV0WOqTaHIM3jInLsOwrYaAPL0Uqq6ySSHb1eoxBEmZ+UAaoYoun/7hQCWcgkRaYpdRNZJmhD/qnBCWEzdtqUixwbrl80RMeTPKBTo0IRxDnXH0AIk81O1Rn/XcuACZOBN41TR2gTqc1kXUSW0CLHqlW2Pd50Y2DfkcAGhSNHVz8PJykP2LYSliDfA1CYxIo9eqM7Edlm9WXVG2fYd9oWGLVLnJLlKvOKyDEVcMaxoDfbpEGO9Qux52g3Jh09D8MSROeqxd3BfroKGD8J0NGkpcjRZ3ToE0HLmrPR9fJrjQ25NTnkSThmFLD+utn0I1ktO5Bsa7vUkSDOuUMA3JesDRmU1vJAM0kY550ozZFptky0qyZyMhM5rGXzTYExI4H1SkEO9f4Ckpe1gyEMQcqTQ1BfQpHE6qa9C7Xttwb22s36VcpP3+IlwDPT7evffLPGskreg+WRmSTbDm6fBHHO6e+6LSpX4Ok4/iKdBjQfb7hOrYr+95eWABNTIMcAkWMUIC/O8klbU/hOBDkBwG3l6y+ACVMafuqWUpyb4Hi9WvQK8Oxz8cr2VWpg/wY5yhsw43KS57fqYieC/B5A7sncY4/oU1MAndJYSllJ8uLLwKQZlkg0dG2xeWNZtU6po8msINly09SWIM45zZVGNs724xJao44wdVpjKYqWoo1oWWThy8DkFMix5YAGDuvkEvfKGv1xJJ/orbQvgigy3R+tW5GLPp3W6NTGUrbesrGsKLosWAxMmWnfSpFD/e9Xru1pH0C0XGb1RZCbAZxsj2xOGnVqo9MbSxm8BbDfaEuNtroWvARMScF8bquBjWWVYpBVSEiuwYe+CGJ8tVoAJHV6o1McS1HMrf0LSBJFO5w627KnDV3qr8ix1lr2uvPXqKDXU5ub0ZIgzrmDASgfdfVEpzg6zbEUfVHHKn5eQWTeImBaCuTQjKll1Zof2oJ0PHEzLiTZw6SqHUEuB/CviasrqgKd5uhUx1K0JhdJ8n55rD0uuzDSnkvmIx2vli1BzVzXoyR7xEdtRxAFPiqNrUAsGCfPBBYujlW0bSEdeYokeS0/5i4Ennvetk/SVrZTu2QIDCDZbbm+BkGccwoaNT9ZHSUprdMdnfJYii7Nxu4FrJ3xGj01cgwCxhTCTdZylPrSdSLJW7seaEWQ0wHI/qoeMnUWMP8l276KJHIvzeoI9PkFwIy5tn2QtrJe
iiZD4gaS3SGtWhFEbohKwlkf0YZWG1tLkW2SSJL2Jdqc+cDMRCkwWve6KrZn0cd0CcnBfc0gss0YGF1vyUskDWnaqvsy/daeJC0zjNnzgVkpkKNq1svRX81hJFdPyT1mEOecIj2kYOoZvYW5lEiDJHIe0p7E2pBv1jxg9jx7mHbYGtizQqb98RA6neSvWxHkDADXxtNZkVI6BdKG11LkfqqZxMoUXEsqLa2speoelOHxuonkqa0Ich2Afwyvp6JPasOrja+lKICBSJLUmSiNmGDq547bAqNr4IMfbkynklxtHtF7ifUYgHHhdFT8qTS+0gqBI5LEdUdNg7gaxp22A0bVKIpL51f3E5Kr7fd7E0QXJArx40UIpLHOVxA1kSRqtI80ln7qY91CHIV/s4eQnNdNEOfcpgCMA0yFb01hn9RGWESxlI0CkoQNw5nG4YH6U9cgeeHG8giS9zYTZG8AE8OVrdlTadw1KAyn7kk6RRycNgeYlyCjVruhqnuY1c6v8Fkkr2kmSHUcpDp3PvoTadxWixwiSbuYtWlcYKrnnhxhxv9Kkuc1E0TWu7Li9dIOgTTsnbTM0p5Ey65mScMERvp9qoew7/cdJI9rJshVAM4OW7q2z6VBEkUg1GViV2LLNIwoNWA+WVCU13YiyTHNBLkTwDFRNNT22TR8LnT0q5lEhpPWZvgaqBE7Abv4TFpR3lm54DYTRMnHx0RRUOtn0/Dak4+3dfoBDZJPWBr3VV2nmSDV80GPC0vYcvraa69QZPEpr5OMznaeIEngU9m0IockbZfK7zoUGC7/Ny8xEdh3NUEKmyAnZq8yL6Y9g1x4iyRKSqoTKy9JEDiqiyDK/WGYWzlJm0paNq3QnnHgUDJS3XV4SYrAKV0E2R/A+KTaal8+reDQUYBVfkWZkHixQODcLoIcAeAeC42115FWeoEwwI4cDuzsyREGqpDPXNJFEEVwVyR3LxYI5EESJR2VZa4XSwT+u4sgiuJwvaXm2utKK8VZK2CVbFRJR71YI3BjF0HOBXCFtfba61PaBaVfSFOUZFSusl7SQOBPXQS5CMCladRQe51pLreUXFRJRr2khcDdniBpQSu9H3/SmEGsM+92tVmpFxRQ2ktaCHQTxC+xrCFe+XGDHNa523u3U9HWFVjaSxoIdC+x/CbdEt4VKxs5yNMmR1eblQZNAaa9WCPQvUn3x7xW0H60okGONzN2799nj0YsXS+WCHQf8/qLQgtYRQ4tq956x0JbdB2eJNEx67tE90WhNzVJCu2HHwFPTwPezokcXe3fe3dAgae9WCDwL95Y0QLGDz5skGPpuxbakuvYazdAAai9JEWg21hRweK6s+ok1Vqr8u+LHFOBd5YVq9v+jsRiPBrm7hLnnPcojArp8g8aG/KikaOrH4q1q5i7XuIi0CNogydIFBjfe79Bjnffi1Iq+2e9nVYSzHu43PqgDWGhfG95Y8+xbHnYEuGeU9AG5TbUJaOleEvfuGj2CNrgw/6EgVGk0J5DM4ilKAvVviMbOUTGTwZ0KmYp3pEqMpq9w/74wHGdINRySjPHcmNyKPuUyDFw80YLREKRRKdjluJdcaOg+QzJRtAGiXPOhx7tCz5txLXn0MbcUjRjyFREmXGbRcs4kUSnZJbiSRIWzdtJHu+DV4eBa6nIMdX+ZVW2KZFDGXFbiZZxEybbk9JHPAkz6msEr/bpD1rBpss/LauslzvriRyjAGXC7Us0Y4kk1nseH1CuE0nWSH/gE+j0hkxmIyKH9YZZcXi15+jfgRxd7dEyS8stLbssxYck7QvNngl0gn2IT8HWBZkMDnVaJQNES1HqNZFDmW+jiGYwkcT6aHnEzo3YvV56I9AzBVtAEJ/EU0DIVF3kkF+HpSgXiJZVm20cT6tmMpHE+nLSp0XoPR5tk3j6NNByctJplTU5lE1KG3Klg04imtFEEmvzFp9Yp3lU2qaBPgPAtUnGr9Rl5TsucljfZCvFmsihNNAWIvKKJNbW
wz41W9fo3ETyVP2ndxro3QFMtxjD0ukQObSsUqAFS1FqNe05urJHWekWicdPAt42NrH3mW81QqeT/PUaBAn2Ia8DGGg1jqXQ89qbjdOqT4zJIVIoqEK7JJ1JwflYJJls78HoSTKM5Nx2BPkdgK8lHbvSlH/1jQY5rDM7aTmlmWPDDdKFQjOe7kmsfeAVxlRGjvWTJSS7XTJ7LLGCGeR0ADfUApclIsdUYNUq2+5qI649R6cc6Fa1fvJpgyTWUVQUzlTm8vWSG0gqys9qaUUQZV2ZX3lMXnm9YT6yytgNRke4OsrVkW6W8umqxnLrDWPHUIU1VXjT+siJJG9tS5BgFpGF3LqVxeQVxcydJgtN2y7236Qxc+gyMA/RTCiSWEdyVHhTufDWQwaQ7P7KrDGDBAS5HICse6snL7/aIIe1yGxEew6ZkeQpIr1IooMHS1EQCAWDqLY8SvJzzV1sR5CDAdxfOSzSSkmw+WYNcsg6tygikugAwlIUTkhhhaorF5L8aUeCBLOI8fojZ1TTirIuU3Ud5cqvo2gyYQqwRKf2hqLojQpQV00ZTbJHvoqWM0hAkJsBnFwJHNLKHSgnJ+05ikiOroF7agqgAwlL2WYQMKZ6JJGLbW+Y+iLICQBus8Q1F10vvgJMes6+6i02b5BD7rJFFx1lK5mPpShYtvpfHbmc5PlRCKI1g7Gtd8ZoppWaWeTQsmqdfhl3KEF1sjHTHsxSlHZBOFRDxpF8IjRBgmWWEnsq8nv5ZOFiYPJM+3ZvOaCxIe9XInJ0oTBxOqC9mKUogY8S+ZRbVpBsefzYdokVEKScy6wFLwFTZtkP2VYDG+RQ/KqyShokGTQQ2H/PsiKidrdcXukPnQiivyuKWXneiPkvAVNTIMegLRprbgV2K7s8+xyggwtL0cdjbGlJsh9JBU5cQ/okSDCLyC5L9lnFl3mLgGmz7dupZYTIsVYFyNGFzqQZgPZolqLlp0iy5mGQZS3WumaSbHsDGoYghwC4z7pV5vpeeBGYPsdc7er8fyJHuQY9HA7ao2mvZik6wBBJyvMxuYDkZe0g6EiQYBbRmzfMEkdTXamRY6vGnqPKor2a9myWovuhsXuVZTm6A8kXkxLkAgD/bomhma65C4HnnjdT162oeuf87THSnk17N0sZIJLsCfQr9PZ1dfTEvroddgYZAMDYsMdgNJ5fAMxY7fhlK9U2p2iNlfZu2sNZisxwdLpV3Puiw0n+LTFBgmXWNQDOtMQvka4584GZLyRS0bJw9Q3y2mOmPZyWq5Yichz290W0OBhP8oBOXQ01gwQE0U4/BZuNTk1s8ffZ84FZKZBj+8HAXpW2Vu0MtparWrZaivYk4/ax1Gih6ySSci/vU0ITJCBJ/v7qs+cBs+Z16lf0v9fD3yEcLmmQpFiZrmaQDPUljEqQfQE8FQ7lFJ6Sp9wTz9orrpfHXDj8tLfTHs9K5J9/6DgrbUn1fJtkqLgLkQgSzCLy1/1K0hZGLq/oHY9PtI8oqCSXSnbpZU0EtMfTXs9KtMzqnQfFSnd4PbNIhvYfjkMQLSafCd8eoyfTcJWtZ9SOaAOi5ayWtRYyYidglyEWmpLoOI3k/4RVEJkgwSxyI4BvhK3E5DnrU6v6xn2KPhxW+778j8+fJRnptCAuQXSrnoJdRx9jZ+nP4CMHRieJxQcq/8xWx5G8I0rnYxEkmEX+A8AaHlhRKo/0rBVBPDkiwd7j4aQXswfuFz0vSvzW9i55D8mjoqpLQhDF1NTidMuolcZ63uIL5qOXx4K+R6G4pj2KGfa5/ZLXH19DW5P2vlTGJkgwiyhE4/Xx2xyhZNJNus9/EQHsDo++sBCYHtH+bb9RwOBsvqUtWn8VyXPiAJCIIAFJHgBwUJzKI5XRMe/DT8XLUe4zKEWCOtTDUSyo8zXfkSP+cJLLQvWr10MWBMnu8jDORWExjhbjjE3xyyjJ6ZwF7QPUKTfKsB0AESQ/+SZJnbrGksQECWaRnwC4MFYLohYK
e+Qov3ENjpJUekkXAeVMVBTHt5YCCqKt6PZynJKHYb5yF8kvJ2mCCUECkkwCkI1T8kcrgVlzgYVtXEblyzF0R2BTo5RnSRD2ZfNCQBlYdyOZyKrVkiAHAngoUzSUElnpmjXVyyVWZgz6eqWV0SnTzvnKEiJwFkm5aCQSM4IEs8glAH6QqEW+sEcgOQIdPQXDVmFKkIAkDwL4fNgG+Oc8AsYIKBCxglCbRMhLgyAyjZVNek5ZZIzh9urKhsDxJG+3arQ5QYJZRFHhFR3ei0cgSwQuI6kAI2aSCkECklwB4FyzlnpFHoG+EbiX5BHWIKVGkIAkfwHwBetGe30egV4IKBzLWJLG8VQ7xOZNOgzOORnfPAlgp6S6fHmPQB8IHEQylSuGVGeQYBZRaJXHAFQosK1/WQuEwLdI/iqt9qROkIAkyjGiXCNePAKWCFxC8keWCnvryoQgAUn+GcB/pdkZr7tWCNxAUu4WqUpmBAlI8m8Avp9qj7zyOiBwB8njsuhopgQJSHIVgLOz6Jyvo5II3A/gMJKZpCnPnCABSbSpOq2Sw+c7lSYCEwAcSnJ5mpU0686FIAFJbgJwSlYd9fWUHgGlSDuS5FtZ9iQ3gniSZDnMpa9LgQqPIpl5Co5cCRKQpDw5EEv/npWyA7pD+xLJpXm0PneCBCT5OYBYUSfyAM3XmRkC9wI4luSHmdXYq6JCECQgyY8B/DAvIHy9hUPgDyT/Ie9WFYYgAUnOAnB13qD4+nNH4GqS38m9FUjZWDFOB51zugD6LYB145T3ZUqPwPdIKqxtIaRQM0gXIs45ReCWw5VP3FGI1ySTRnwM4Osk/5BJbSErKSRBguXWpgAU8OuYkH3xj5UXgekAFOBtYtG6UFiCNM0mlwK4qGjA+faYISAr79NJfmCm0VBR4QkSzCbHArgOwEDDvntV+SPwXZI/y78Z7VtQCoIEJNkGgAKBfbHIgPq2hUJASyoFdns01NM5PlQagjQtuc4DUOivTo7jWYaq9ZH7DslVZWhs6QgSzCajAVzpA9SV4RXrbqPS5Z5H8q4ytbqUBOk1m1wGoF+ZQK9hW+UDdD7JFWXre6kJEswmym+g9AtfLRv4NWjveHmQphVxJAv8Sk+QptlEeSAuBjAyC+B8HX0i8LbGgmTpzYYqQ5Amoiiao/ze+/uXOBcEtDe8lOQ7udRuXGnlCBIsu5Q5RzFalaZ6bWPMvLrWCPwGgGLjzq4SQJUkSNNsouR4Ohb2MYLTe2tlO3VFEc1ELLpcaYL0IoocsmROv74FcF7HamNSpVdWqovKSi0I0kSUjQGcCeAMANtXdlTT65g8+64HcC3JiInS02tUmpprRZBmIJ1zJwBQHpOj0wS4Irp1XHuLfrIMuVME7GpLkF7LL92haFYZVoRBKUgbFCThlwBuJSnbqVpK7QnSa1aRg5buU/Qjp606ihIf3UnyiTp2vnefPUHavAXOOc0mWn4dVXGbr8kA7gbwZ5JPeVL0RMATJMQb4ZzTpaMyZR0K4HgAG4YoVuRH7gRwH4C/kpQRoZc2CHiCxHg1nHOjABwY/IwDMCCGmqyKrATwOIBHADxcBh+MrIAJU48nSBiUOjzjnNsuCDAxHMBQAEMC8mQdmUVpyOYBmAtAx7Czq3azbTBckVR4gkSCK/rDzrlBALYFoFt9/XurwHVYs46WbrqPUU55kUm/9SOzcN05fBT81r/1witws+LTvgZgCQAlrVwMYFFZHJCiI5hvif8HTW8L980l4d4AAAAASUVORK5CYII=);
background-size: 22px 22px;
-moz-background-size: 22px 22px;
background-repeat: no-repeat;
position: absolute;
top: 5px;
right: 5px;
height: 22px;
z-index: 99;
width: 22px;
ox-shadow:1px 2px 5px #333333;
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading card'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
<div style="float:left; margin-left: 10px; margin-top: 20px;">
<p> Test Case Pie charts </p>
<a class="badge text-wrap btn-info1">-Pass-</a><br>
<a class="badge text-wrap btn-info2">-Faild-</a><br>
<a class="badge text-wrap btn-info3">-Error-</a><br>
</div>
<div class="testChars">
<canvas id="myChart" width="250" height="250"></canvas>
</div>
""" # variables: (title, parameters, description)
# ------------------------------------------------------------------------
# Pie chart
#
ECHARTS_SCRIPT = """
<script type="text/javascript">
var data = [
{
value: %(error)s,
color: "#ebccd1",
label: "Error",
labelColor: 'white',
labelFontSize: '16'
},
{
value : %(fail)s,
color : "#faebcc",
label: "Fail",
labelColor: 'white',
labelFontSize: '16'
},
{
value : %(Pass)s,
color : "#d6e9c6",
label : "Pass",
labelColor: 'white',
labelFontSize: '16'
}
]
var newopts = {
animationSteps: 100,
animationEasing: 'easeInOutQuart',
}
//Get the context of the canvas element we want to select
var ctx = document.getElementById("myChart").getContext("2d");
var myNewChart = new Chart(ctx).Pie(data,newopts);
</script>
"""
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<p id='show_detail_line' style="margin-left: 10px; margin-top: 30px;">
<a href='javascript:showCase(0, %(channel)s)' class="btn btn-dark btn-sm">Summary</a>
<a href='javascript:showCase(1, %(channel)s)' class="btn btn-success btn-sm">Pass</a>
<a href='javascript:showCase(2, %(channel)s)' class="btn btn-warning btn-sm">Failed</a>
<a href='javascript:showCase(3, %(channel)s)' class="btn btn-danger btn-sm">Error</a>
<a href='javascript:showCase(4, %(channel)s)' class="btn btn-light btn-sm">Skip</a>
<a href='javascript:showCase(5, %(channel)s)' class="btn btn-info btn-sm">All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row' class="panel-title">
<td>Test Group/Test case</td>
<td>Count</td>
<td>Pass</td>
<td>Fail</td>
<td>Error</td>
<td>View</td>
<td>Screenshots</td>
</tr>
%(test_list)s
<tr id='total_row'>
<td>Total</td>
<td>%(count)s</td>
<td class="text text-success">%(Pass)s</td>
<td class="text text-danger">%(fail)s</td>
<td class="text text-warning">%(error)s</td>
<td> </td>
<td> </td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td>%(desc)s</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
<td> </td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>
<!--css div popup start-->
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script)s
</pre>
</div>
<!--css div popup end-->
</td>
<td>%(img)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>%(status)s</td>
<td>%(img)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
# Screenshot popup wrapper; {imgs} is filled with the <img> tags collected
# for a test.  Fix: the original href read "javacript:void(0);" (typo),
# which made the "show" link navigate to a broken pseudo-URL instead of
# being a no-op anchor.
IMG_TMPL = r"""
<a onfocus='this.blur();' href="javascript:void(0);" onclick="show_img(this)">show</a>
<div align="center" class="screenshots" style="display:none">
<a class="close_shots" onclick="hide_img(this)"></a>
{imgs}
<div class="imgyuan"></div>
</div>
"""
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'> </div>"""
# -------------------- The end of the Template class -------------------
TestResult = unittest.TestResult


class _TestResult(TestResult):
    """Collect test outcomes for the HTML report.

    A pure representation of results: unlike unittest's text result class
    it has no output/reporting ability of its own.  Each finished test is
    appended to ``self.result`` as a 4-tuple::

        (result code (0: success; 1: fail; 2: error; 3: skip),
         TestCase object,
         captured stdout/stderr output (string),
         stack trace / skip reason)

    Failed or errored tests can be re-run automatically up to ``rerun``
    times; with ``save_last_run`` only the final attempt is kept in the
    result list and in the failure/error counters.
    """

    def __init__(self, verbosity=1, rerun=0, save_last_run=False):
        TestResult.__init__(self)
        self.stdout0 = None        # saved sys.stdout while redirection is active
        self.stderr0 = None        # saved sys.stderr while redirection is active
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.skip_count = 0
        self.verbosity = verbosity
        self.rerun = rerun                  # max automatic retries per test
        self.save_last_run = save_last_run  # keep only the last attempt
        self.status = 0                     # 1 while current test failed/errored
        self.runs = 0                       # retry counter for the current test
        self.result = []

    def startTest(self, test):
        # A re-run test may already carry screenshots; keep them.
        test.imgs = getattr(test, "imgs", [])
        # Redirect stdout and stderr into a single shared buffer so the
        # report can display everything the test printed.
        # NOTE(review): stdout_redirector/stderr_redirector are module-level
        # objects defined elsewhere in this file.
        self.outputBuffer = io.StringIO()
        stdout_redirector.fp = self.outputBuffer
        stderr_redirector.fp = self.outputBuffer
        self.stdout0 = sys.stdout
        self.stderr0 = sys.stderr
        sys.stdout = stdout_redirector
        sys.stderr = stderr_redirector

    def complete_output(self):
        """Disconnect output redirection and return the captured buffer.

        Safe to call multiple times; only the first call after startTest
        actually restores sys.stdout/sys.stderr.
        """
        if self.stdout0:
            sys.stdout = self.stdout0
            sys.stderr = self.stderr0
            self.stdout0 = None
            self.stderr0 = None
        return self.outputBuffer.getvalue()

    def stopTest(self, test):
        # Usually one of addSuccess, addError or addFailure has been called,
        # but some paths in unittest bypass them.  We must disconnect stdout
        # in stopTest(), which is guaranteed to be called.
        if self.rerun and self.rerun >= 1:
            if self.status == 1:            # last attempt failed or errored
                self.runs += 1
                if self.runs <= self.rerun:
                    if self.save_last_run:
                        # Drop the failed attempt so only the final run counts.
                        t = self.result.pop(-1)
                        if t[0] == 1:
                            self.failure_count -= 1
                        else:
                            self.error_count -= 1
                    test = copy.copy(test)
                    sys.stderr.write("Retesting... ")
                    sys.stderr.write(str(test))
                    sys.stderr.write('..%d \n' % self.runs)
                    # Strip any previous '->rerun' marker before re-tagging
                    # the docstring with the current attempt number.
                    doc = getattr(test, '_testMethodDoc', u"") or u''
                    if doc.find('->rerun') != -1:
                        doc = doc[:doc.find('->rerun')]
                    test._testMethodDoc = "%s->rerun:%d" % (doc, self.runs)
                    test(self)              # recursively re-run the test
                else:
                    self.status = 0
                    self.runs = 0
        self.complete_output()

    def addSuccess(self, test):
        self.success_count += 1
        self.status = 0
        TestResult.addSuccess(self, test)
        output = self.complete_output()
        self.result.append((0, test, output, ''))
        if self.verbosity > 1:
            sys.stderr.write('ok ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('.' + str(self.success_count))

    def addError(self, test, err):
        self.error_count += 1
        self.status = 1
        TestResult.addError(self, test, err)
        _, _exc_str = self.errors[-1]
        output = self.complete_output()
        self.result.append((2, test, output, _exc_str))
        # Best-effort screenshot when the test exposes a webdriver.
        if getattr(test, "driver", ""):
            try:
                driver = getattr(test, "driver")
                test.imgs.append(driver.get_screenshot_as_base64())
            except BaseException:
                pass
        if self.verbosity > 1:
            sys.stderr.write('E  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('E')

    def addFailure(self, test, err):
        self.failure_count += 1
        self.status = 1
        TestResult.addFailure(self, test, err)
        _, _exc_str = self.failures[-1]
        output = self.complete_output()
        self.result.append((1, test, output, _exc_str))
        # Best-effort screenshot when the test exposes a webdriver.
        if getattr(test, "driver", ""):
            try:
                driver = getattr(test, "driver")
                test.imgs.append(driver.get_screenshot_as_base64())
            except BaseException:
                pass
        if self.verbosity > 1:
            sys.stderr.write('F  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('F')

    def addSkip(self, test, reason):
        self.skip_count += 1
        self.status = 0
        TestResult.addSkip(self, test, reason)
        output = self.complete_output()
        self.result.append((3, test, output, reason))
        if self.verbosity > 1:
            sys.stderr.write('S')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('S')
class HTMLTestRunner(Template_mixin):
    """Run a test case/suite and render the results as one HTML page.

    The page is assembled from the string templates on Template_mixin and
    written to ``stream``; note that the rendered page is UTF-8 *encoded*
    before writing, so the stream must accept bytes.
    """

    def __init__(self, stream=sys.stdout, verbosity=1, title=None,
                 description=None, save_last_run=True):
        self.stream = stream
        self.verbosity = verbosity
        self.save_last_run = save_last_run
        self.run_times = 0          # number of run() calls; used in element ids
        self.title = self.DEFAULT_TITLE if title is None else title
        self.description = (self.DEFAULT_DESCRIPTION if description is None
                            else description)
        self.startTime = datetime.datetime.now()

    def run(self, test, rerun, save_last_run):
        """Run the given test case or test suite and emit the report."""
        result = _TestResult(self.verbosity, rerun=rerun,
                             save_last_run=save_last_run)
        test(result)
        self.stopTime = datetime.datetime.now()
        self.run_times += 1
        self.generateReport(test, result)
        return result

    def sortResult(self, result_list):
        # unittest does not seem to run tests in any particular order.
        # Here at least we group the results together by test class,
        # preserving first-seen class order.
        rmap = {}
        classes = []
        for n, t, o, e in result_list:
            cls = t.__class__
            if cls not in rmap:
                rmap[cls] = []
                classes.append(cls)
            rmap[cls].append((n, t, o, e))
        return [(cls, rmap[cls]) for cls in classes]

    def getReportAttributes(self, result):
        """Return report attributes as a list of (name, value).

        Override this to add custom attributes.
        """
        startTime = str(self.startTime)[:19]
        duration = str(self.stopTime - self.startTime)
        status = []
        if result.success_count:
            status.append('Pass:%s' % result.success_count)
        if result.failure_count:
            status.append('Failure:%s' % result.failure_count)
        if result.error_count:
            status.append('Error:%s' % result.error_count)
        if result.skip_count:
            status.append('Skip:%s' % result.skip_count)
        status = ' '.join(status) if status else 'none'
        return [
            ('Start Time', startTime),
            ('Duration', duration),
            ('Status', status),
        ]

    def generateReport(self, test, result):
        """Render the full HTML page for ``result`` and write it to stream."""
        report_attrs = self.getReportAttributes(result)
        generator = 'HTMLTestRunner %s' % __version__
        stylesheet = self._generate_stylesheet()
        heading = self._generate_heading(report_attrs)
        report = self._generate_report(result)
        ending = self._generate_ending()
        chart = self._generate_chart(result)
        output = self.HTML_TMPL % dict(
            title=saxutils.escape(self.title),
            generator=generator,
            stylesheet=stylesheet,
            heading=heading,
            report=report,
            ending=ending,
            chart_script=chart,
            channel=self.run_times,
        )
        # Stream must be binary-capable: the page is written as UTF-8 bytes.
        self.stream.write(output.encode('utf8'))

    def _generate_stylesheet(self):
        return self.STYLESHEET_TMPL

    def _generate_heading(self, report_attrs):
        """Render the heading card (title, attributes, description)."""
        a_lines = []
        for name, value in report_attrs:
            a_lines.append(self.HEADING_ATTRIBUTE_TMPL % dict(
                name=saxutils.escape(name),
                value=saxutils.escape(value),
            ))
        return self.HEADING_TMPL % dict(
            title=saxutils.escape(self.title),
            parameters=''.join(a_lines),
            description=saxutils.escape(self.description),
        )

    def _generate_report(self, result):
        """Render the results table: one class row plus one row per test."""
        rows = []
        sortedResult = self.sortResult(result.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # Subtotals for this test class.
            np = nf = ne = ns = 0
            for n, t, o, e in cls_results:
                if n == 0:
                    np += 1
                elif n == 1:
                    nf += 1
                elif n == 2:
                    ne += 1
                else:
                    ns += 1
            # Format the class description.
            if cls.__module__ == "__main__":
                name = cls.__name__
            else:
                name = "%s.%s" % (cls.__module__, cls.__name__)
            doc = cls.__doc__ or ""
            desc = doc and '%s: %s' % (name, doc) or name
            rows.append(self.REPORT_CLASS_TMPL % dict(
                style=(ne > 0 and 'errorClass' or
                       nf > 0 and 'failClass' or 'passClass'),
                desc=desc,
                count=np + nf + ne,
                Pass=np,
                fail=nf,
                error=ne,
                cid='c%s.%s' % (self.run_times, cid + 1),
            ))
            for tid, (n, t, o, e) in enumerate(cls_results):
                self._generate_report_test(rows, cid, tid, n, t, o, e)
        report = self.REPORT_TMPL % dict(
            test_list=''.join(rows),
            count=str(result.success_count + result.failure_count
                      + result.error_count),
            Pass=str(result.success_count),
            fail=str(result.failure_count),
            error=str(result.error_count),
            skip=str(result.skip_count),
            total=str(result.success_count + result.failure_count
                      + result.error_count),
            channel=str(self.run_times),
        )
        return report

    def _generate_chart(self, result):
        """Render the pie-chart <script> block with the result counts."""
        return self.ECHARTS_SCRIPT % dict(
            Pass=str(result.success_count),
            fail=str(result.failure_count),
            error=str(result.error_count),
        )

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        """Append one rendered table row for a single test to ``rows``.

        Element ids look like 'pt1.1.1', 'ft1.1.1', 'et1.1.1', 'st1.1.1':
        status letter + run number + class index + test index.
        """
        has_output = bool(o or e)
        if n == 0:
            prefix = "p"
        elif n == 1:
            prefix = "f"
        elif n == 2:
            prefix = "e"
        else:
            prefix = "s"
        tid = prefix + 't%d.%d.%d' % (self.run_times, cid + 1, tid + 1)
        name = t.id().split('.')[-1]
        doc = t.shortDescription() or ""
        desc = doc and ('%s: %s' % (name, doc)) or name
        tmpl = (has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL
                or self.REPORT_TEST_NO_OUTPUT_TMPL)
        # o and e are already str here (collected via the StringIO
        # redirection in _TestResult), so no decoding is needed.
        script = self.REPORT_TEST_OUTPUT_TMPL % dict(
            id=tid,
            output=saxutils.escape(o + e),
        )
        if getattr(t, 'imgs', []):
            # Append collected screenshots; only the first is visible,
            # the rest are toggled by the report's show_img() JS.
            tmp = ""
            for i, img in enumerate(t.imgs):
                if i == 0:
                    tmp += """<img src="data:image/jpg;base64,{}" style="display: block;" class="img"/>\n""".format(img)
                else:
                    tmp += """<img src="data:image/jpg;base64,{}" style="display: none;" class="img"/>\n""".format(img)
            screenshots_html = self.IMG_TMPL.format(imgs=tmp)
        else:
            screenshots_html = """"""
        rows.append(tmpl % dict(
            tid=tid,
            Class=(n == 0 and 'hiddenRow' or 'none'),
            style=(n == 2 and 'errorCase'
                   or (n == 1 and 'failCase' or 'passCase')),
            desc=desc,
            script=script,
            status=self.STATUS[n],
            img=screenshots_html,
        ))

    def _generate_ending(self):
        return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
    """
    A variation of the unittest.TestProgram. Please refer to the base
    class for command line parameters.
    """

    def runTests(self):
        # Pick HTMLTestRunner as the default test runner.
        # The base class's testRunner parameter is not useful here because
        # we would have to instantiate HTMLTestRunner before we know
        # self.verbosity.
        if self.testRunner is None:
            self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
        unittest.TestProgram.runTests(self)


main = TestProgram


if __name__ == "__main__":
    main(module=None)
| [
"2652612315@qq.com"
] | 2652612315@qq.com |
3e1c302e7759ae163c083e1f83582e237a26a36c | fb365b414076ae14b65c05a6ebb271eb6a35c81d | /cron.py | 84c871029f124f562a8abce7b71e3dc975c3eecf | [
"MIT"
] | permissive | kklmn/ArdquariumPi | 425691ad618968ca3fc89e744918962396fcbaef | 1c84e6d548c22f72f6b93ca36900677cccbdfc00 | refs/heads/master | 2022-04-03T07:13:50.224233 | 2022-02-14T17:52:56 | 2022-02-14T17:52:56 | 236,858,425 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | # -*- coding: utf-8 -*-
import subprocess
from datetime import datetime
from croniter import croniter
# Module-level cache of parsed crontab entries; filled by init_cron_tasks().
cronTable = None

# Sample crontab used by init_cron_tasks(isTest=True); mirrors the format
# of real `crontab -l` output for the pi user.
cronTest = \
    """
20 7 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
45 8 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light off
30 9 * * 6,7 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
30 17 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
0 23 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light off
0 11 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name filter-S on
0 20 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name filter-S off
"""


def init_cron_tasks(isTest=False):
    """Parse gpioswitch entries from the pi user's crontab.

    Reads `crontab -l -u pi` output (or the cronTest sample when isTest is
    True), keeps only non-comment lines mentioning "gpioswitch", and caches
    the result in the module-global cronTable as a list of
    [cron-spec, switch-name, state] triples, e.g.
    ['20 7 * * 1-5', 'light', 'on'].  Returns cronTable.
    """
    global cronTable
    txt = cronTest if isTest else subprocess.Popen(
        ['crontab', '-l', '-u', 'pi'],
        stdout=subprocess.PIPE,
        encoding='utf8').communicate()[0]
    cronTable = [l.strip().split(' ') for l in txt.split("\n")
                 if l and not l.startswith('#') and "gpioswitch" in l]
    if cronTable:
        # First five tokens are the cron time spec; last two tokens are the
        # switch name (after --name) and the desired state (on/off).
        cronTable = [[' '.join(l[:5]), l[-2], l[-1]] for l in cronTable]
    return cronTable
def get_cron_tasks():
    """Summarize per-switch on/off scheduling from the parsed cronTable.

    For every switch name appearing in the module-global cronTable, uses
    croniter to find the nearest past and future trigger times for each
    state and builds a human-readable 'str' entry such as
    'on by crontab\\nHH:MM – HH:MM'.  Switches whose schedule is
    incomplete (missing an on or off transition) are dropped.  Returns a
    dict keyed by switch name; empty dict when cronTable is unset/empty.
    """
    res = {}
    if not cronTable:
        return res
    now = datetime.now()
    for cron, what, state in cronTable:
        cli = croniter(cron, now)
        prevt, nextt = cli.get_prev(datetime), cli.get_next(datetime)
        if what in res:
            if 'prev' + state in res[what]:
                # Keep the latest past trigger and the earliest future one.
                condPrev = prevt >= res[what]['prev' + state]
                condNext = nextt <= res[what]['next' + state]
            else:
                condPrev, condNext = True, True
        else:
            res[what] = {}
            condPrev, condNext = True, True
        if condPrev:
            res[what]['prev' + state] = prevt
        if condNext:
            res[what]['next' + state] = nextt
    bad = []
    for what in res:
        try:
            if res[what]['prevon'] > res[what]['prevoff']:  # currently on
                res[what]['str'] = 'on by crontab\n{0} – {1}'.format(
                    res[what]['prevon'].strftime('%H:%M'),
                    res[what]['nextoff'].strftime('%H:%M'))
            else:  # currently off
                res[what]['str'] = 'off by crontab\n{0} – {1}'.format(
                    res[what]['prevoff'].strftime('%H:%M'),
                    res[what]['nexton'].strftime('%H:%M'))
        except KeyError:
            # Incomplete schedule (e.g. only 'on' entries) — drop the switch.
            bad.append(what)
    for what in bad:
        del res[what]
    return res
| [
"konstantin.klementiev@gmail.com"
] | konstantin.klementiev@gmail.com |
7566f2795f158d397f3272917fa55e7841f35ed2 | 5cb7b9fe09b1dd20c0664d0c86c375ffe353903c | /static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_codecmaps_jp.py | 0bf43b676bf4c3ae4a0b5f92f7f4ba4ac350e2fa | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | shiblon/pytour | 6d0ee4a679cf7e6ffd8ac6326b8bb0d9071a7c73 | 71a181ec16fd38b0af62f55e28a50e91790733b9 | refs/heads/master | 2021-01-17T10:09:18.822575 | 2020-09-23T20:05:58 | 2020-09-23T20:05:58 | 23,226,350 | 2 | 3 | Apache-2.0 | 2020-02-17T22:36:02 | 2014-08-22T13:33:27 | Python | UTF-8 | Python | false | false | 1,968 | py | #
# test_codecmaps_jp.py
# Codec mapping tests for Japanese encodings
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class TestCP932Map(test_multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    """Mapping test for the cp932 codec against the vendor mapping file."""
    encoding = 'cp932'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
                 'WINDOWS/CP932.TXT'
    # Supplementary mappings not present in the vendor table.
    supmaps = [
        ('\x80', u'\u0080'),
        ('\xa0', u'\uf8f0'),
        ('\xfd', u'\uf8f1'),
        ('\xfe', u'\uf8f2'),
        ('\xff', u'\uf8f3'),
    ]
    # 0xA1-0xDF map linearly onto U+FF61..U+FF9F (halfwidth katakana).
    for i in range(0xa1, 0xe0):
        supmaps.append((chr(i), unichr(i + 0xfec0)))
class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
                         unittest.TestCase):
    """Mapping test for the euc_jp codec."""
    encoding = 'euc_jp'
    mapfilename = 'EUC-JP.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT'
class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
                        unittest.TestCase):
    """Mapping test for the shift_jis codec."""
    encoding = 'shift_jis'
    mapfilename = 'SHIFTJIS.TXT'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \
                 '/EASTASIA/JIS/SHIFTJIS.TXT'
    # Exceptions to the round-trip tests: backslash/yen-sign and
    # tilde/overline ambiguities in Shift-JIS.
    pass_enctest = [
        ('\x81_', u'\\'),
    ]
    pass_dectest = [
        ('\\', u'\xa5'),
        ('~', u'\u203e'),
        ('\x81_', u'\\'),
    ]
class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
                         unittest.TestCase):
    """Mapping test for the euc_jisx0213 codec."""
    encoding = 'euc_jisx0213'
    mapfilename = 'EUC-JISX0213.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT'
class TestSJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
                       unittest.TestCase):
    """Mapping test for the shift_jisx0213 codec."""
    encoding = 'shift_jisx0213'
    mapfilename = 'SHIFT_JISX0213.TXT'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT'
def test_main():
    """Run all mapping test classes in this module via test_support."""
    test_support.run_unittest(__name__)


if __name__ == "__main__":
    test_main()
| [
"shiblon@gmail.com"
] | shiblon@gmail.com |
7e26cfc606b96da3ab0fc2c6f0a1faa342eb23f8 | 640411253fcf4dfc71b70ec923b0864ccd58b837 | /dev/commit_analysis.py | 61ad7fc514e8611e478ed6f1c0fc3bf0d6d2c330 | [] | no_license | williamsdoug/GitAnalysis | cb6dce95e7a92b0d1d2cf2db3c94aec6ef3be3bf | da91b541d2531a41cc1f4e02537b7803b84b20d3 | refs/heads/master | 2016-09-06T15:50:50.345898 | 2015-07-02T13:28:51 | 2015-07-02T13:28:51 | 23,364,094 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,661 | py | #
# commit_analysis.py - Code for statistical analysis of commit data
#
# Author: Doug Williams - Copyright 2014, 2015
#
# Currently configured for OpenStack, tested with Nova.
#
# Last updated 5/28/2015
#
# History:
# - 9/2/14: Initial version (initially contained in NovaSampleData).
# Updated normalize_blame to handle null entries
# - 9/2/14: Update nova_all_blame, filtering out all entries > 3000 diff
# lines that were contributing to huge file size. Corrected
# blame_compute_normalized_guilt computation, added
# normalize_blame_by_file
# - 1/27/15: Initial version of commit_analysis.py based on contents
# of NovaAnalysis notebook
# - 2/4/15 - Added compute_guilt(), previously in BlameAnalysis Spreadsheet
# - 2/6/15 - Added top level routines load_all_analysis_data(),
# load_core_analysis_data() and rebuild_all_analysis_data()
# - 2/6/15 - Modified compute_guilt() to use filter_bug_fix_combined_commits()
# when selecting blame entries for guilt calculation.
# - 2/7/15 - moved functions from notebook: trim_entries(), parse_author(),
# create_feature(), extract_features()
# - 2/9/15 - added autoset_threshold() and helper function
# count_guilty_commits(). Added fit_features()
# - 2/19/15 - New consistency checking routine verify_missing_bugs().
# - 2/20/15 - updated rebuild_all_analysis_data() to remove repo_name
# including api calls to build_git_commits(), build_all_blame().
# also remove cachedir from calls to build_lp_bugs()
# - 2/23/15 - New join code - join_all()
# - 2/24/15 - Integrated join into rebuild_all_analysis_data
# - 2/25/15 - Updated to reflect single change_id per commit. Added error
# handling when loading all_blame
# - 2/26/15 - Clean-up handling of change_id
# - 3/3/15 - Added additional options to rebuild_all_analysis_data()
# - 3/5/15 - Updated feature extraction to reflect new schema
# - 3/10/15 - Clean-up parameter handling in fit_features() and
# extract_features()
# - 3/10/15 - remove trailing >" in parse_author_and_org()
# - 3/10/15 - extract_features() now returns features in sorted order
# - 3/11/15 - Adds dedupe support to commit feature extraction
# - 4/26/15 - Integrate language_specific features
# - 5/14/15 - Integrate with NewDiff, remove proximity in
# blame_compute_normalized_guilt(), adds error handling.
# - 5/28/15 - Added support for ignore flag
#
#
# Top Level Routines:
# from commit_analysis import blame_compute_normalized_guilt
# from commit_analysis import normalize_blame_by_file
# from commit_analysis import parse_author
# from commit_analysis import get_commit_count_by_author
# from commit_analysis import get_guilt_by_author
# from commit_analysis import compute_guilt
# from commit_analysis import fit_features
# from commit_analysis import extract_features
# from commit_analysis import autoset_threshold
#
# from commit_analysis import load_core_analysis_data
# from commit_analysis import load_all_analysis_data
# from commit_analysis import rebuild_all_analysis_data
# from commit_analysis import verify_missing_bugs
#
# import numpy as np
import collections
import re
import math
import numpy as np
# from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction import DictVectorizer
from LPBugsDownload import build_lp_bugs, load_lp_bugs
from GerritDownload import build_gerrit_data
from GerritDownload import load_gerrit_changes, load_gerrit_change_details
from Git_Extract import build_git_commits, load_git_commits
# from Git_Extract import build_all_blame, load_all_blame
from Git_Extract import filter_bug_fix_combined_commits
from Git_Extract import project_to_fname
from Git_Extract import compute_git_actor_dedupe
from jp_load_dump import jload, jdump
# import sys
# from jp_load_dump import jload
#
# Global Data (constants)
#
# NOTE(review): ``global`` at module level is a no-op; kept from original.
global BUG_SEVERITY_VALUES
global BUG_PRECEDENCE_VALUES
# Ordinal weight per Launchpad bug severity (higher = more severe);
# Wishlist/Unknown/Undecided all collapse to 0.
BUG_SEVERITY_VALUES = {
'Critical': 4,
'High': 3,
'Medium': 2,
'Low': 1,
'Wishlist': 0,
'Unknown': 0,
'Undecided': 0,
}
# Strict total order over severities: unlike the weights above, every
# severity label gets a distinct rank so ties can be broken.
BUG_PRECEDENCE_VALUES = {
'Critical': 6,
'High': 5,
'Medium': 4,
'Low': 3,
'Wishlist': 2,
'Unknown': 1,
'Undecided': 0,
}
#
# Routines to consistency check Git, Gerrit and Lanuchpad Data
#
def verify_missing_bugs(project):
"""Make sure all bugs references in commits have been downloaded,
if not, attempt to load these bugs"""
all_bugs = load_lp_bugs(project)
commits = load_git_commits(project)
all_bugs_in_commits = set([b for c in commits.values()
if 'bugs' in c for b in c['bugs']])
known_bugs = set(all_bugs.keys())
missing_bugs = all_bugs_in_commits.difference(known_bugs)
if len(missing_bugs) > 0:
build_lp_bugs(project, update=missing_bugs)
else:
print 'no missing bugs'
#
# Top level routines to load and update analysis data
#
def load_core_analysis_data(project):
""" Loads combined_commits and all_blame."""
combined_commits = load_combined_commits(project)
print 'combined_commits:', len(combined_commits)
# all_blame = load_all_blame(project)
# print 'all blame:', len(all_blame)
# return combined_commits, all_blame
return combined_commits
def load_all_analysis_data(project):
    """Load every cached dataset for a project.

    Returns a 5-tuple:
    (downloaded_bugs, all_changes, all_change_details, commits,
     combined_commits)
    """
    print 'loading bug data'
    downloaded_bugs = load_lp_bugs(project)
    print 'loading Git commit data'
    commits = load_git_commits(project)
    print 'loading change data'
    all_change_details = load_gerrit_change_details(project)
    print '    all_change_details:', len(all_change_details)
    all_changes = load_gerrit_changes(project)
    print '    all_changes:', len(all_changes)
    combined_commits = load_combined_commits(project)
    print 'combined_commits:', len(combined_commits)
    return downloaded_bugs, all_changes, all_change_details, \
        commits, combined_commits
def rebuild_all_analysis_data(project, update=True,
                              download=True,
                              build_blame=False,
                              build_launchpad=True,
                              build_gerrit=True,
                              build_git=True,
                              build_combined=True):
    """Rebuild the core datasets for a project.

    update -- passed through to the individual builders (incremental
              update vs full rebuild).
    download -- gate on the steps that hit the network (Launchpad/Gerrit).
    build_* -- enable/disable the individual pipeline stages.
    """
    if download and build_launchpad:
        print
        print 'rebuilding Launchpad (bug) data'
        build_lp_bugs(project, update=update)
    if download and build_gerrit:
        print
        print 'rebuilding Gerrit data'
        build_gerrit_data(project, update=update)
    if build_git:
        print
        print 'building Git data'
        build_git_commits(project, update=update)
    if download and build_launchpad:
        # Git parsing may have surfaced bug numbers not yet downloaded.
        print
        print 'Load any missing bugs, if needed'
        verify_missing_bugs(project)
    if build_combined:
        print
        print 'Build combined_commits by joining with bugs and gerrit data'
        combined_commits = join_all(project)
    # Blame rebuilding is disabled pending an update to the new data model.
    """
    if build_blame:
        print
        print 'Building all blame'
        combined_commits = load_combined_commits(project)
        build_all_blame(project, combined_commits, update=update)
    """
#
# Join related routines
#
def load_combined_commits(project):
    """Load combined_commit data (commits joined with bug and Gerrit
    records) from disk."""
    return jload(project_to_fname(project, combined=True))
def join_all(project):
    """Top-level join: merge Git commits with Launchpad bug records and
    Gerrit review records, persist the result, and return it."""
    all_bugs = load_lp_bugs(project)
    commits = load_git_commits(project)
    all_changes = load_gerrit_changes(project)
    all_change_details = load_gerrit_change_details(project)
    # clone commits so the cached originals stay untouched
    # NOTE(review): .copy() is shallow -- nested values are still shared.
    combined = dict([[k, v.copy()] for k, v in commits.items()])
    # include Launchpad bug details information
    print 'Joining with Launchpad Data'
    combined = join_with_bugs(combined, all_bugs)
    # include Gerrit review details
    print 'Joining with Gerrit Data'
    combined = join_with_gerrit(project, combined,
                                all_changes, all_change_details)
    # Persist the joined dataset for later loads.
    jdump(combined, project_to_fname(project, combined=True))
    return combined
def join_with_bugs(commits, all_bugs):
    """Attach Launchpad bug records to their corresponding commits.

    Each commit gains a 'bug_details' dict {bug number: bug record},
    filled from two directions: the bug numbers recorded in the commit
    itself, and the commit ids referenced from the bug side.  Mutates
    and returns `commits`.
    """
    # Invert the bug -> commit relation: commit id -> list of bug numbers.
    bugs_for_commit = collections.defaultdict(list)
    for bug in all_bugs.values():
        for entry in bug['commits']:
            bugs_for_commit[entry['cid']].append(bug['id'])
    for cid, commit in commits.items():
        details = {}
        # Pass 1: join on the bug numbers the commit message mentions.
        for bugno in commit.get('bugs', []):
            if bugno in all_bugs:
                details[bugno] = all_bugs[bugno]
        # Pass 2: join on commit ids referenced from the bug records.
        for bugno in bugs_for_commit.get(cid, []):
            details.setdefault(bugno, all_bugs[bugno])
        commit['bug_details'] = details
    return commits
def join_with_gerrit(project, commits, all_changes, all_change_details):
    """Attach merged Gerrit review data to their corresponding commits.

    Only changes that were MERGED on the master branch of `project` are
    considered.  Each commit gains a 'change_details' entry: the merged
    change + change-detail record when both exist, else False.  Mutates
    and returns `commits`.
    """
    def on_master(entry):
        # Keep only merged changes on this project's master branch.
        return (entry['branch'] == 'master'
                and entry['project'].endswith(project)
                and entry['status'] == 'MERGED')

    changes_by_id = {e['change_id']: e for e in all_changes if on_master(e)}
    details_by_id = {e['change_id']: e
                     for e in all_change_details if on_master(e)}

    for commit in commits.values():
        commit['change_details'] = False
        change_id = commit.get('change_id')
        if change_id in changes_by_id and change_id in details_by_id:
            merged = changes_by_id[change_id]
            # update() folds the detail record into the change record
            # in place, exactly as the original implementation did.
            merged.update(details_by_id[change_id])
            commit['change_details'] = merged
    return commits
#
# Routines for post-processing Dataset
#
# Patterns for pulling an author address (or bare name) out of a raw git
# author string, tried from most to least specific.
RE_AUTH = re.compile(r'<(\S+@\S+)>')    # e.g.  Name <user@host>
RE_AUTH2 = re.compile(r'"(\S+@\S+)\s')  # e.g.  "user@host ...
RE_AUTH3 = re.compile(r'"(\S+)\s')      # e.g.  "name ...
def parse_author_and_org(auth):
    """Return (email, org) parsed from a raw git author string.

    The org is the domain part of the address.  Automated doc-tool
    commits with no address are special-cased; anything else that fails
    all three patterns raises.
    """
    match = (RE_AUTH.search(auth)
             or RE_AUTH2.search(auth)
             or RE_AUTH3.search(auth))
    if not match:
        # Special-case automated doc-tool commits that carry no address.
        if 'docs.openstack.org' in str(auth):
            return 'openstack-tool@openstack.org', 'openstack.org'
        raise Exception('Unable to parse author: ' + str(auth))
    name = match.groups()[0]
    # Strip a trailing quoted close-bracket left behind by the regexes.
    if name.endswith('>"'):
        name = name[:-2]
    org = name.split('@')[-1]
    if org.endswith('>"'):
        org = org[:-2]
    return name, org
def log1(val):
    """Return log(val + 1); shifts the domain so zero-based counts are
    valid inputs."""
    return math.log(1.0 + float(val))
def add_commit_features(c, feats, git_actor_dedupe_table,
                        include_committer=True,
                        include_order=True,
                        include_files=True,
                        include_lines_of_code=False,  # legacy
                        include_code_metrics=True,  # python-specific
                        include_per_file_metrics=True,  # python-specific
                        include_test_metrics=True,  # python-specific
                        include_test_per_file_metrics=True,  # python-specific
                        include_blueprint=True,
                        include_cherrypick=True,):
    """Populate feats with features derived from the Git commit c.

    c -- a combined_commits entry.
    feats -- feature-name -> value dict, updated in place.
    git_actor_dedupe_table -- maps raw actor strings to canonical
        identities ('standard_actor' / 'standard_email').
    include_* -- feature-group toggles.

    NOTE(review): callers passing these flags positionally must match
    this exact parameter order -- the flag list here differs from
    create_feature()'s.
    """
    author = git_actor_dedupe_table[c['author']]['standard_actor']
    committer = git_actor_dedupe_table[c['committer']]['standard_actor']
    # General information about author
    feats['author'] = git_actor_dedupe_table[c['author']]['standard_email']
    _, author_org = parse_author_and_org(c['author'])
    feats['author_org'] = author_org
    # log-scaled; assumes author_order >= 1 -- TODO confirm
    feats['author_order'] = math.log(c['author_order'])
    # Information about committer
    if not include_committer:
        pass
    elif author != committer:
        feats['committer'] = \
            git_actor_dedupe_table[c['committer']]['standard_email']
    else:
        # Collapse self-committed changes into a single category.
        feats['committer'] = 'same'
    if include_order:
        feats['log_order'] = math.log(c['order'])
    # General information around change (size (loc), code maturity,
    # prior bugs in module)
    if include_files:
        # One sparse indicator feature per touched file.
        for fname in c['files']:
            feats['includes_file_' + fname] = 1
        if include_order and c['file_order']:
            feats['log_min_file_order'] = \
                math.log(min([v for v in c['file_order'].values()]))
            feats['log_max_file_order'] = \
                math.log(max([v for v in c['file_order'].values()]))
    # Information about code changes (raw line counts; legacy features)
    if include_lines_of_code:
        feats['loc_add'] = c['loc_add']
        feats['loc_change'] = c['loc_change']
        if include_files:
            for fname, detail in c['loc_detail'].items():
                feats['loc_add_' + fname] = detail['add']
                feats['loc_changes_' + fname] = detail['changes']
    # Python-specific complexity metrics; log1 = log(x + 1) so that
    # zero-valued counts are representable.
    if 'aggregate' in c and include_code_metrics:
        feats['cc'] = log1(c['aggregate']['cc'])
        feats['changes'] = log1(c['aggregate']['changes'])
        feats['complexity'] = log1(c['aggregate']['complexity'])
        feats['new_functions'] = c['aggregate']['new_functions']
        feats['new_classes'] = c['aggregate']['new_classes']
        feats['lloc'] = log1(c['aggregate']['lloc'])
        feats['nodes'] = log1(c['aggregate']['nodes'])
        if include_per_file_metrics:
            for fname, v in c['individual'].items():
                # Optionally drop per-file metrics for test files.
                if include_test_per_file_metrics or 'test' not in fname:
                    feats[fname + '_cc'] = log1(v['cc'])
                    feats[fname + '_changes'] = log1(v['changes'])
                    feats[fname + '_complexity'] = log1(v['complexity'])
                    feats[fname + '_new_functions'] = v['new_functions']
                    feats[fname + '_new_classes'] = v['new_classes']
                    feats[fname + '_lloc'] = log1(v['lloc'])
                    feats[fname + '_nodes'] = log1(v['nodes'])
        if include_test_metrics:
            feats['test_cc'] = log1(c['aggregate']['test_cc'])
            feats['test_changes'] = log1(c['aggregate']['test_changes'])
            feats['test_complexity'] = log1(c['aggregate']['test_complexity'])
            feats['test_new_functions'] = c['aggregate']['test_new_functions']
            feats['test_new_classes'] = c['aggregate']['test_new_classes']
            feats['test_lloc'] = log1(c['aggregate']['test_lloc'])
            feats['test_nodes'] = log1(c['aggregate']['test_nodes'])
    if include_blueprint:
        # Boolean: commit references a blueprint.
        feats['blueprint'] = 'blueprint' in c
    if include_cherrypick:
        feats['cherry_picked_to'] = 'cherry_picked_to' in c
        feats['cherry_picked_from'] = 'cherry_picked_from' in c
    return
def add_bug_features(c, feats):
    """Populate feats with Launchpad bug-derived features for commit c.

    Feature keys intentionally keep the historical 'lauchpad_' spelling
    so downstream column names stay stable.
    """
    global BUG_SEVERITY_VALUES
    global BUG_PRECEDENCE_VALUES
    bug_details = c['bug_details']  # {bug number: joined Launchpad record}
    feats['lauchpad_bugs'] = len(bug_details)
    if not bug_details:
        return
    bugs = list(bug_details.values())
    # if this commit associated with a bug fix itself
    # feats['is_bug_fix'] = 'lp:id' in c
    feats['lauchpad_heat'] = max(bug['heat'] for bug in bugs)
    # The trailing [0] keeps max() defined when no bug has an importance.
    severities = [BUG_SEVERITY_VALUES[bug['importance']]
                  for bug in bugs if bug['importance']]
    feats['lauchpad_severity'] = max(severities + [0])
    precedences = [BUG_PRECEDENCE_VALUES[bug['importance']]
                   for bug in bugs if bug['importance']]
    feats['lauchpad_precedence'] = max(precedences + [0])
    feats['lauchpad_messages'] = sum(len(bug['messages']) for bug in bugs)
    feats['lauchpad_security_related'] = any(bug['security_related']
                                             for bug in bugs)
    feats['lauchpad_cve'] = sum(len(bug['cves']) for bug in bugs)
    return
def add_gerrit_features(c, feats, include_gerrit_details=True):
    """Populate feats with Gerrit review-history features for commit c.

    When the commit has no joined change record, only the
    'gerrit_has_data' flag (False) is written.
    """
    details = c['change_details']  # merged change record, or False
    if not details:
        feats['gerrit_has_data'] = False
        return
    feats['gerrit_has_data'] = True
    # Highest patch-set revision seen across all review messages.
    revisions = [m['_revision_number'] for m in details['messages']
                 if '_revision_number' in m]
    feats['gerrit_revision'] = max(revisions)
    if not include_gerrit_details:
        return
    labels = details['labels']
    if 'Workflow' in labels:
        feats['gerrit_approved_workflow'] = \
            labels['Workflow']['approved']['name']
    code_review = labels['Code-Review']
    feats['gerrit_approved_code'] = code_review['approved']['name']
    votes = 0
    for review in code_review['all']:
        votes += review['value']
        # Per-reviewer vote as its own (sparse) feature column.
        feats['gerrit_reviewer_' + review['name']] = review['value']
    feats['gerrit_votes'] = votes
    return
def create_feature(c, git_actor_dedupe_table,
                   include_committer=True,
                   include_order=True,
                   include_files=True,
                   include_lines_of_code=True,
                   include_blueprint=True,
                   include_cherrypick=True,
                   include_bug=True,
                   include_gerrit=True,
                   include_gerrit_details=True):
    """Extract (cid, label, features) from a combined_commits entry.

    label is the commit's guilt value; features is a dict assembled
    from the commit, Launchpad, and Gerrit feature groups.
    """
    label = c['guilt']
    cid = c['cid']
    feats = {}
    # BUG FIX: these flags were previously forwarded positionally, but
    # add_commit_features() declares extra flags (include_code_metrics,
    # include_per_file_metrics, ...) in between, so include_blueprint and
    # include_cherrypick landed in the wrong parameter slots.  Forward by
    # keyword so each flag reaches its intended parameter.
    add_commit_features(c, feats, git_actor_dedupe_table,
                        include_committer=include_committer,
                        include_order=include_order,
                        include_files=include_files,
                        include_lines_of_code=include_lines_of_code,
                        include_blueprint=include_blueprint,
                        include_cherrypick=include_cherrypick)
    if include_bug:
        add_bug_features(c, feats)
    if include_gerrit:
        add_gerrit_features(c, feats, include_gerrit_details)
    return cid, label, feats
# should we clip based on max distance???
def old_blame_compute_normalized_guilt(blameset, exp_weighting=True, exp=2.0):
    """Apportion one blame entry's unit of guilt across commits.

    Each blamed line contributes a proximity-decayed weight to the
    commit it implicates; weights are normalized so the whole blame
    entry sums to 1.

    exp_weighting : use 1/exp**(proximity-1) decay instead of the
        linear 1/proximity decay.
    exp : base of the exponential decay.
    """
    weights = collections.defaultdict(float)
    total = 0.0
    for per_file in blameset['blame'].values():
        if not per_file:  # skip null/empty entries
            continue
        for line_info in per_file:
            proximity = line_info['proximity']
            if exp_weighting:
                w = 1.0 / (exp ** (proximity - 1))
            else:
                w = 1.0 / float(proximity)
            weights[line_info['commit']] += w
            total += w
    if total <= 0:
        return {}
    return {commit: w / total for commit, w in weights.items()}
def blame_compute_normalized_guilt(blameset):
    """Apportion one blame entry's unit of guilt across the commits it
    blames, weighted purely by occurrence count (totals sum to 1).

    NOTE(review): OBSOLETE_compute_guilt() below still calls this with
    an exp_weighting= keyword that this signature no longer accepts --
    that caller would raise TypeError if ever executed.
    """
    result = collections.defaultdict(float)
    total = 0.0
    for fname, per_file in blameset['blame'].items():
        if not per_file:  # validate not null entry
            continue
        if isinstance(per_file, int):
            # -1 is error value
            print 'blame_compute_normalized_guilt: ignoring', fname, per_file
            continue
        for per_line in per_file:
            result[per_line['commit']] += 1
            total += 1
    if total > 0:
        return dict([[k, v/total] for k, v in result.items()])
    else:
        return {}
def extract_features_helper(combined_commits,
                            min_order, max_order,
                            offset, limit, **kwargs):
    """Select commits within [min_order, max_order] (optionally narrowed
    by offset/limit) and featurize them in ascending commit order.

    offset, limit -- each may be 0 (disabled), an int (absolute commit
        count), or a float < 1.0 (fraction of the order range).
    kwargs -- forwarded to create_feature().
    Returns (cid, Y, features) tuples zipped from create_feature().

    NOTE(review): when max_order is False and limit == 0 the selection
    below is empty; callers appear to always supply real bounds --
    confirm before relying on the defaults.
    """
    order_range = max_order - min_order
    if offset == 0:
        pass
    elif type(offset) is int:
        min_order += offset
    elif type(offset) is float and offset < 1.0:
        min_order += int(order_range*offset)
    else:
        raise Exception('extract_features: Invalid offset value '
                        + str(offset))
    if limit == 0:
        pass
    elif type(limit) is int:
        max_order = min_order + limit - 1
    elif type(limit) is float and limit < 1.0:
        # BUG FIX: this previously tested `offset < 1.0` (copy/paste from
        # the offset branch above), so float limits were mis-validated.
        max_order = min_order + int(order_range*limit)
    else:
        raise Exception('extract_features: Invalid limit value ' + str(limit))
    selected_cid = [[k, c['order']] for k, c in combined_commits.items()
                    if (c['reachable'] and 'ignore' not in c
                        and c['order'] >= min_order
                        and c['order'] <= max_order)]
    # generate data to support actor dedupe
    git_actor_dedupe_table = compute_git_actor_dedupe(combined_commits)
    # return features in ascending commit order
    selected_cid = sorted(selected_cid, key=lambda x: x[1])
    cid, Y, features = zip(*[create_feature(combined_commits[k],
                                            git_actor_dedupe_table, **kwargs)
                             for k, _ in selected_cid])
    return cid, Y, features
def fit_features(combined_commits,
                 min_order=False, max_order=False,
                 offset=0, limit=0,
                 include_committer=True,
                 include_order=True,
                 include_files=True,
                 include_lines_of_code=True,
                 include_blueprint=True,
                 include_cherrypick=True,
                 include_bug=True,
                 include_gerrit=True,
                 include_gerrit_details=True):
    """Fits the feature vectorizer and scaler in preparation for
    extract_features().
    Parameters:
    - min_order, max_order -- range of included commits. full range
      by default
    - offset -- relative start, either as integer or percent
    - limit -- overall entries, either integer or percent
    Returns: extract_state, which contains
    - the fitted DictVectorizer object (including feature names)
    - the fitted MinMaxScaler settings
    - settings for the various include_* feature-selection keyword args
    """
    vec = DictVectorizer()
    scaler = MinMaxScaler()
    # Bundle the transformers with the flags so extract_features() later
    # reproduces exactly the same feature layout.
    extract_state = {'vec': vec, 'scaler': scaler,
                     'feat_kwargs':
                     {'include_committer': include_committer,
                      'include_order': include_order,
                      'include_files': include_files,
                      'include_lines_of_code': include_lines_of_code,
                      'include_blueprint': include_blueprint,
                      'include_cherrypick': include_cherrypick,
                      'include_bug': include_bug,
                      'include_gerrit': include_gerrit,
                      'include_gerrit_details': include_gerrit_details}
                     }
    cid, Y, features = extract_features_helper(combined_commits,
                                               min_order, max_order,
                                               offset, limit,
                                               **extract_state['feat_kwargs'])
    # X is built only to fit the vectorizer and scaler; the matrix itself
    # is discarded here and recomputed by extract_features().
    X = vec.fit_transform([f for f in features]).toarray()
    X = scaler.fit_transform(X)
    if True:
        print 'Total features:', len(vec.feature_names_)
    return extract_state
def extract_features(combined_commits, extract_state,
                     threshold=False,
                     clip=False, min_order=False, max_order=False,
                     offset=0, limit=0, equalize=False,
                     debug=True):
    """Extracts features from combined_commits using the vectorizer and
    scaler previously fitted by fit_features().
    Parameters:
    - extract_state -- dict produced by fit_features() (vec, scaler,
      and the feature-selection kwargs)
    - threshold -- Used for classification, determines 1 / 0 labels. False
      for regression (default)
    - clip -- Limits max value of label for regression problems.
    - min_order, max_order -- range of included commits. full range
      by default
    - offset -- relative start, either as integer or percent
    - limit -- overall entries, either integer or percent
    - equalize -- currently unused
    Returns:
    - cid -- commit ids, ascending commit order
    - Labels
    - Feature Matrix
    - Feature matrix column names
    """
    vec = extract_state['vec']
    scaler = extract_state['scaler']
    cid, Y, features = extract_features_helper(combined_commits,
                                               min_order, max_order,
                                               offset, limit,
                                               **extract_state['feat_kwargs'])
    # Reuse the fitted vocabulary so columns align with the training fit.
    X = vec.transform([f for f in features]).toarray()
    Y = np.asarray(Y)
    if clip:
        Y = np.minimum(Y, float(clip))
    # scaler = MinMaxScaler()  # Use MinMaxScaler non-gaussian data
    # X = scaler.fit_transform(X)
    X = scaler.transform(X)
    if debug:
        print 'Total feature vectors:', len(features)
    if threshold:  # Quantize guilt into binary labels
        Y = np.asarray(Y) > threshold
        if debug:
            print '    bugs based on threshold:', sum(Y)
    return cid, Y, X, vec.get_feature_names()
# Matches the double-quoted author field, e.g. "Jane <jane@example.org>".
re_author = re.compile('"([^"]*)"')
# Anonymization state: maps mailbox local-part -> stable 'anon_N' alias.
anon = {}
anon_ctr = 0
def get_anon_name(s):
    """Anonymize the local part of an e-mail address.

    Each distinct mailbox is mapped to a stable 'anon_N' alias (stored
    in module-level state) while the domain is preserved; strings that
    are not simple addresses pass through unchanged.
    """
    global anon_ctr
    global anon
    local, sep, domain = s.partition('@')
    # Only handle plain user@host forms (exactly one '@').
    if not sep or '@' in domain:
        return s
    if local not in anon:
        anon[local] = 'anon_' + str(anon_ctr)
        anon_ctr += 1
    return anon[local] + '@' + domain
def parse_author(s, anonymize=True):
    """Extract the quoted author field from s, optionally anonymized.

    Returns '**unknown**' when no quoted field is present.
    NOTE(review): this module targets Python 2 (print statements above);
    under Python 3, .encode('ascii', 'ignore') would return bytes.
    """
    m = re_author.search(s)
    if m:
        if anonymize:
            return get_anon_name(m.groups(1)[0].encode('ascii', 'ignore'))
        else:
            return m.groups(1)[0].encode('ascii', 'ignore')
    else:
        return '**unknown**'
def normalize_blame_by_file(blameset, exp_weighting=True):
    """Return {filename: normalized weight} for one blame entry.

    Each blamed line contributes a proximity-decayed weight to its
    file; weights are normalized to sum to 1 across files.

    exp_weighting : use 1/2**(proximity-1) decay instead of the linear
        1/proximity decay.
    """
    result = collections.defaultdict(float)
    total = 0.0
    for fname, per_file in blameset['blame'].items():
        if per_file:  # validate not null entry
            weight = 0.0
            for per_line in per_file:
                if exp_weighting:
                    weight += 1.0/(2.0 ** (per_line['proximity'] - 1))
                else:
                    weight += 1.0/float(per_line['proximity'])
            result[fname] = weight
            total += weight
    # BUG FIX: guard the empty case (no non-null blame entries) instead
    # of raising ZeroDivisionError -- mirrors the guard already present
    # in blame_compute_normalized_guilt().
    if total > 0:
        return dict([[k, v/total] for k, v in result.items()])
    return {}
def get_commit_count_by_author(combined_commits, min_order=0):
    """Return {author email: commit count} over reachable commits at or
    above min_order, with author identities deduplicated."""
    dedupe = compute_git_actor_dedupe(combined_commits)
    counts = collections.defaultdict(float)
    for commit in combined_commits.values():
        if not commit['reachable'] or commit['order'] < min_order:
            continue
        email = dedupe[commit['author']]['standard_email']
        counts[email] += 1.0
    return counts
def get_guilt_by_author(combined_commits, min_order=0):
    """Return {author email: cumulative guilt} over reachable commits at
    or above min_order, with author identities deduplicated."""
    dedupe = compute_git_actor_dedupe(combined_commits)
    totals = collections.defaultdict(float)
    for commit in combined_commits.values():
        if not commit['reachable'] or commit['order'] < min_order:
            continue
        email = dedupe[commit['author']]['standard_email']
        totals[email] += commit['guilt']
    return totals
def OBSOLETE_compute_guilt(combined_commits, all_blame, importance='high+'):
    """Replaced by annotate_guilt() in BugFixWorkflow; kept for reference.

    Spreads normalized blame ("guilt") from each blame entry onto the
    commits it implicates, then prints summary statistics.

    NOTE(review): this calls blame_compute_normalized_guilt() with an
    exp_weighting= keyword the current signature does not accept, so
    running it would raise TypeError -- consistent with its obsolete
    status.
    """
    for c in combined_commits.values():  # initialize guilt values
        c['guilt'] = 0.0
    skipped = 0
    instances = 0
    for be in all_blame:  # now apply weighted guilt for each blame
        v = combined_commits[be['cid']]
        if filter_bug_fix_combined_commits(v, importance=importance):
            for c, g in \
                blame_compute_normalized_guilt(be,
                                               exp_weighting=True).items():
                if c in combined_commits:
                    combined_commits[c]['guilt'] += g
                else:
                    # blamed commit not present in the joined dataset
                    skipped += 1
                instances += 1
    if skipped > 0:
        print
        print 'Warning - compute_guilt: Skipped', skipped, 'entries out of',
        print instances
    total = len(combined_commits)
    guilty = sum([1 for v in combined_commits.values() if v['guilt'] > 0])
    min_guilt = min([v['guilt']
                     for v in combined_commits.values() if v['guilt'] > 0])
    max_guilt = max([v['guilt']
                     for v in combined_commits.values() if v['guilt'] > 0])
    print 'entries with non-zero guilt: ', guilty, 'out of', total,
    print '(', 100.0 * float(guilty) / float(total), '%', ')'
    print 'smallest guilt:', min_guilt
    print 'largest guilt:', max_guilt
#
# Routines for selecting guilt threshold value
#
def count_guilty_commits(combined_commits, threshold):
    """Return the number of commits whose guilt is >= threshold
    (helper for autoset_threshold())."""
    count = 0
    for commit in combined_commits.values():
        if commit['guilt'] >= threshold:
            count += 1
    return count
def autoset_threshold(combined_commits, actual):
    """Compute a guilt threshold (used later to create Y labels during
    feature extraction) whose positive-label count best matches the
    number of actual bugs.

    Returns (threshold, count) where count is the number of commits
    with guilt >= threshold.
    """
    lower_thresh = min([v['guilt']
                        for v in combined_commits.values() if v['guilt'] > 0])
    upper_thresh = max([v['guilt']
                        for v in combined_commits.values() if v['guilt'] > 0])
    lower_count = count_guilty_commits(combined_commits, lower_thresh)
    upper_count = count_guilty_commits(combined_commits, upper_thresh)
    # verify that target bug count is within range
    if upper_count >= actual:
        return upper_thresh, upper_count
    elif lower_count <= actual:
        # BUG FIX: this branch previously returned the undefined name
        # 'lower_threshold' (a NameError) and -- like the branch above --
        # a bare scalar while the search below returns a tuple.  Both
        # early exits now return (threshold, count) consistently.
        return lower_thresh, lower_count
    # use binary search to home in on the threshold
    limit = 20
    last_mid_count = -1
    while limit > 0:
        limit -= 1
        mid_thresh = (upper_thresh + lower_thresh) / 2.0
        mid_count = count_guilty_commits(combined_commits, mid_thresh)
        # Stop on an exact match, or when the count stops changing.
        if mid_count == actual or last_mid_count == mid_count:
            break
        last_mid_count = mid_count
        if mid_count < actual:
            upper_thresh = mid_thresh
            upper_count = mid_count
        else:
            lower_thresh = mid_thresh
            lower_count = mid_count
    return mid_thresh, mid_count
| [
"ddwilli@gmail.com"
] | ddwilli@gmail.com |
4800755881cc1cf2bdf95087c49ee5aefac0d3c2 | 9f8a746b4bd8b64affa1e7eab1be5cad40030be1 | /train.py | 8036dc193a3e83bac1dc57bfc8a7b8326a75b332 | [
"Apache-2.0"
] | permissive | jianchi2001/PaddlePaddle-DeepSpeech | 5437b0050fbd21193ed741b61d7d1bd4f4279771 | 7b89e63dfa1dfa1deb1e9f43c521196e8a278fd5 | refs/heads/master | 2023-06-21T12:33:19.967696 | 2021-08-03T03:34:45 | 2021-08-03T03:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,244 | py | import argparse
import functools
import io
from datetime import datetime
from model_utils.model import DeepSpeech2Model
from data_utils.data import DataGenerator
from utils.utility import add_arguments, print_arguments, get_data_len
import paddle.fluid as fluid
# Command-line interface: every training hyper-parameter and data path is
# exposed as a flag.  Help strings are in Chinese, matching the project's
# documentation, and are user-facing text -- do not translate in code.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# Training hyper-parameters and model topology.
add_arg('batch_size', int, 4, "训练每一批数据的大小")
add_arg('num_epoch', int, 200, "训练的轮数")
add_arg('num_conv_layers', int, 2, "卷积层数量")
add_arg('num_rnn_layers', int, 3, "循环神经网络的数量")
add_arg('rnn_layer_size', int, 2048, "循环神经网络的大小")
add_arg('learning_rate', float, 5e-5, "初始学习率")
add_arg('min_duration', float, 1.0, "最短的用于训练的音频长度")
add_arg('max_duration', float, 15.0, "最长的用于训练的音频长度")
# Hardware / model-variant switches.
add_arg('test_off', bool, False, "是否关闭测试")
add_arg('use_gru', bool, True, "是否使用GRUs模型,不使用RNN")
add_arg('use_gpu', bool, True, "是否使用GPU训练")
add_arg('share_rnn_weights', bool, False, "是否在RNN上共享权重")
add_arg('init_from_pretrained_model', str, None, "使用预训练模型的路径,当为None是不使用预训练模型")
# Dataset and output paths.
add_arg('train_manifest', str, './dataset/manifest.train', "训练的数据列表")
add_arg('dev_manifest', str, './dataset/manifest.test', "测试的数据列表")
add_arg('mean_std_path', str, './dataset/mean_std.npz', "数据集的均值和标准值的npy文件路径")
add_arg('vocab_path', str, './dataset/zh_vocab.txt', "数据集的词汇表文件路径")
add_arg('output_model_dir', str, "./models", "保存训练模型的文件夹")
add_arg('augment_conf_path', str, './conf/augmentation.config', "数据增强的配置文件,为json格式")
add_arg('shuffle_method', str, 'batch_shuffle_clipped', "打乱数据的方法", choices=['instance_shuffle', 'batch_shuffle', 'batch_shuffle_clipped'])
args = parser.parse_args()
# Model training entry point.
def train():
    """Assemble the data pipeline and the DeepSpeech2 model from the
    parsed command-line arguments, then run the training loop."""
    # Pick the execution device (first GPU, or CPU).
    device = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    # Training-set generator: applies augmentation and duration limits.
    train_gen = DataGenerator(
        vocab_filepath=args.vocab_path,
        mean_std_filepath=args.mean_std_path,
        augmentation_config=io.open(args.augment_conf_path, mode='r', encoding='utf8').read(),
        max_duration=args.max_duration,
        min_duration=args.min_duration,
        place=device)
    # Evaluation generator: keeps raw transcription text for scoring.
    dev_gen = DataGenerator(
        vocab_filepath=args.vocab_path,
        mean_std_filepath=args.mean_std_path,
        keep_transcription_text=True,
        place=device,
        is_training=False)
    # Batched readers over the training and dev manifests.
    train_reader = train_gen.batch_reader_creator(
        manifest_path=args.train_manifest,
        batch_size=args.batch_size,
        shuffle_method=args.shuffle_method)
    dev_reader = dev_gen.batch_reader_creator(
        manifest_path=args.dev_manifest,
        batch_size=args.batch_size,
        shuffle_method=None)
    # Build the DeepSpeech2 network per the CLI topology flags.
    ds2_model = DeepSpeech2Model(
        vocab_size=train_gen.vocab_size,
        num_conv_layers=args.num_conv_layers,
        num_rnn_layers=args.num_rnn_layers,
        rnn_layer_size=args.rnn_layer_size,
        use_gru=args.use_gru,
        share_rnn_weights=args.share_rnn_weights,
        place=device,
        init_from_pretrained_model=args.init_from_pretrained_model,
        output_model_dir=args.output_model_dir,
        vocab_list=dev_gen.vocab_list)
    # Count usable training utterances (within the duration limits).
    num_samples = get_data_len(args.train_manifest, args.max_duration, args.min_duration)
    print("[%s] 训练数据数量:%d\n" % (datetime.now(), num_samples))
    # Run the training loop.
    ds2_model.train(
        train_batch_reader=train_reader,
        dev_batch_reader=dev_reader,
        learning_rate=args.learning_rate,
        gradient_clipping=400,
        batch_size=args.batch_size,
        num_samples=num_samples,
        num_epoch=args.num_epoch,
        test_off=args.test_off)
def main():
    """Print the parsed arguments, then start training."""
    print_arguments(args)
    train()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"yeyupiaoling@foxmail.com"
] | yeyupiaoling@foxmail.com |
eb74a08ec12587a4a54535a70cd11c74cd3333a5 | adce0de4c11887519b8e471f1cbca4e18b46d906 | /h0rton/trainval_data/xy_data.py | 818db3539d211824513348099f2810ba4c5356e5 | [
"MIT"
] | permissive | jiwoncpark/h0rton | 30ca4a3c9943099ecd393e4b936b48cad7d81943 | 2541885d70d090fdb777339cfb77a3a9f3e7996d | refs/heads/master | 2021-06-25T23:08:26.902632 | 2021-01-12T01:57:47 | 2021-01-12T01:57:47 | 199,093,811 | 7 | 1 | null | 2020-03-19T16:02:01 | 2019-07-26T23:56:49 | Jupyter Notebook | UTF-8 | Python | false | false | 6,280 | py | import os
import glob
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from baobab import BaobabConfig
from baobab.data_augmentation.noise_torch import NoiseModelTorch
from baobab.sim_utils import add_g1g2_columns
from .data_utils import whiten_pixels, rescale_01, plus_1_log, whiten_Y_cols
__all__ = ['XYData']
class XYData(Dataset):  # torch.utils.data.Dataset
    """Image/label dataset used to train or validate the BNN.

    X is the (noised, rescaled) multi-band image stack; Y is the row of
    whitened metadata columns named in Y_cols.
    """
    def __init__(self, is_train, Y_cols, float_type, define_src_pos_wrt_lens, rescale_pixels, log_pixels, add_pixel_noise, eff_exposure_time, train_Y_mean=None, train_Y_std=None, train_baobab_cfg_path=None, val_baobab_cfg_path=None, for_cosmology=False, rescale_pixels_type='whiten_pixels'):
        """
        Parameters
        ----------
        is_train : bool
            training set (fits the Y whitening stats) vs validation set
            (reuses the training stats passed in)
        Y_cols : list
            metadata column names used as regression targets
        float_type : str
            torch float type name; any name containing 'Double' selects
            float64, otherwise float32
        define_src_pos_wrt_lens : bool
            re-express the source position as an offset from the lens
        rescale_pixels, log_pixels : bool
            enable the per-image rescaling / log(1 + x) transforms
        add_pixel_noise : bool
            add simulated detector noise per bandpass in __getitem__
        eff_exposure_time : dict
            effective exposure time per bandpass
        train_Y_mean, train_Y_std : array or None
            whitening stats; required when is_train is False
        train_baobab_cfg_path, val_baobab_cfg_path : str or None
            Baobab config used to locate the dataset for this split
        for_cosmology : bool
            whether the dataset will be used in cosmological inference
            (in which case the unwhitened metadata is retained as Y_df)
        rescale_pixels_type : str
            'rescale_01' for min/max scaling; anything else whitens
        """
        #self.__dict__ = data_cfg.deepcopy()
        self.is_train = is_train
        # The split determines which Baobab config (and hence dataset
        # directory) is loaded.
        if self.is_train:
            self.baobab_cfg = BaobabConfig.from_file(train_baobab_cfg_path)
        else:
            self.baobab_cfg = BaobabConfig.from_file(val_baobab_cfg_path)
        self.dataset_dir = self.baobab_cfg.out_dir
        if not self.is_train:
            # Validation must whiten with the *training* statistics.
            if train_Y_mean is None or train_Y_std is None:
                raise ValueError("Mean and std of training set must be provided for whitening.")
            self.train_Y_mean = train_Y_mean
            self.train_Y_std = train_Y_std
        self.Y_cols = Y_cols
        self.float_type = float_type
        # numpy dtype must match the torch default tensor type in use.
        self.float_type_numpy = np.float64 if 'Double' in float_type else np.float32
        self.define_src_pos_wrt_lens = define_src_pos_wrt_lens
        self.rescale_pixels = rescale_pixels
        self.log_pixels = log_pixels
        self.add_pixel_noise = add_pixel_noise
        self.eff_exposure_time = eff_exposure_time
        self.bandpass_list = self.baobab_cfg.survey_info.bandpass_list
        self.for_cosmology = for_cosmology
        #################
        # Target labels #
        #################
        metadata_path = os.path.join(self.dataset_dir, 'metadata.csv')
        Y_df = pd.read_csv(metadata_path, index_col=False)
        # Derive g1/g2 shear columns when only gamma_ext/psi_ext exist.
        if 'external_shear_gamma1' not in Y_df.columns:  # assumes gamma_ext, psi_ext were sampled
            Y_df = add_g1g2_columns(Y_df)
        # Define source light position as offset from lens mass
        if self.define_src_pos_wrt_lens:
            Y_df['src_light_center_x'] -= Y_df['lens_mass_center_x']
            Y_df['src_light_center_y'] -= Y_df['lens_mass_center_y']
        train_Y_to_whiten = Y_df[self.Y_cols].values
        if self.is_train:
            # Fit the whitening statistics on this (training) split.
            self.train_Y_mean = np.mean(train_Y_to_whiten, axis=0, keepdims=True)
            self.train_Y_std = np.std(train_Y_to_whiten, axis=0, keepdims=True)
        # Store the unwhitened metadata (needed downstream for inference)
        if self.for_cosmology:
            self.Y_df = Y_df.copy()
        # Number of predictive columns
        self.Y_dim = len(self.Y_cols)
        # Whiten the columns in place using the training statistics.
        whiten_Y_cols(Y_df, self.train_Y_mean, self.train_Y_std, self.Y_cols)
        # Convert into array the columns required for training
        self.img_filenames = Y_df['img_filename'].values
        self.Y_array = Y_df[self.Y_cols].values.astype(self.float_type_numpy)
        # Free memory
        if not self.for_cosmology:
            del Y_df
        ################
        # Input images #
        ################
        # Probe one image file to record the input dimension.
        # NOTE(review): X_dim is taken from axis 0 of the stored array --
        # confirm whether that axis is image width or band count.
        img_path = glob.glob(os.path.join(self.dataset_dir, '*.npy'))[0]
        img = np.load(img_path)
        self.X_dim = img.shape[0]
        # Build the on-the-fly pixel transform: optional log(1 + x)
        # followed by optional rescaling (min/max or whitening).
        if rescale_pixels_type == 'rescale_01':
            rescale = transforms.Lambda(rescale_01)
        else:
            rescale = transforms.Lambda(whiten_pixels)
        log = transforms.Lambda(plus_1_log)
        transforms_list = []
        if self.log_pixels:
            transforms_list.append(log)
        if self.rescale_pixels:
            transforms_list.append(rescale)
        if len(transforms_list) == 0:
            # Identity transform when nothing is enabled.
            self.X_transform = lambda x: x
        else:
            self.X_transform = transforms.Compose(transforms_list)
        # Noise-related kwargs, one entry per bandpass.
        self.noise_kwargs = {}
        self.noiseless_exposure_time = {}
        self.noise_model = {}
        self.exposure_time_factor = np.ones([len(self.bandpass_list), 1, 1])  # for broadcasting
        for i, bp in enumerate(self.bandpass_list):
            survey_object = self.baobab_cfg.survey_object_dict[bp]
            # Dictionary of SingleBand kwargs
            self.noise_kwargs[bp] = survey_object.kwargs_single_band()
            # Factor of effective exptime relative to exptime of the noiseless images
            self.exposure_time_factor[i, :, :] = self.eff_exposure_time[bp]/self.noise_kwargs[bp]['exposure_time']
            if self.add_pixel_noise:
                # Noise is simulated at the effective exposure time.
                self.noise_kwargs[bp].update(exposure_time=self.eff_exposure_time[bp])
            # Dictionary of noise models
            self.noise_model[bp] = NoiseModelTorch(**self.noise_kwargs[bp])
    def __getitem__(self, index):
        """Return (img, Y_row) for one example, applying exposure
        scaling, optional per-band noise, and the pixel transforms."""
        # Image X
        img_filename = self.img_filenames[index]
        img_path = os.path.join(self.dataset_dir, img_filename)
        img = np.load(img_path)
        # Scale each band from the stored (noiseless) exposure to the
        # effective exposure time.
        img *= self.exposure_time_factor
        img = torch.as_tensor(img.astype(self.float_type_numpy))  # np array type must match with default tensor type
        if self.add_pixel_noise:
            for i, bp in enumerate(self.bandpass_list):
                img[i, :, :] += self.noise_model[bp].get_noise_map(img[i, :, :])
        img = self.X_transform(img)
        # Label Y (already whitened in __init__)
        Y_row = self.Y_array[index, :]
        Y_row = torch.as_tensor(Y_row)
        return img, Y_row
    def __len__(self):
        """Number of examples (rows of the whitened label array)."""
        return self.Y_array.shape[0]
"jiwon.christine.park@gmail.com"
] | jiwon.christine.park@gmail.com |
fb6de41cf67712e420b26a3593eda05e4a28a4d8 | 872cd13f25621825db0c598268ecd21b49cc2c79 | /Lesson_1/1.py | 425998ea943a1b948516d69748b927e98b243a0d | [] | no_license | ss2576/client_server_applications_Python | c4e9ebe195d23c8ca73211894aa50a74014013d5 | 9b599e37e5dae5af3dca06e197916944f12129d5 | refs/heads/master | 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 | Python | UTF-8 | Python | false | false | 935 | py | # Каждое из слов «разработка», «сокет», «декоратор» представить в строковом формате и проверить тип и
# содержание соответствующих переменных. Затем с помощью онлайн-конвертера преобразовать строковые
# представление в формат Unicode и также проверить тип и содержимое переменных.
str_1 = ('разработка', 'сокет', 'декоратор')
print('тип и содержание str_1')
for elem in str_1:
print(type(elem), elem)
str_2 = ('\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430',
'\u0441\u043e\u043a\u0435\u0442',
'\u0434\u0435\u043a\u043e\u0440\u0430\u0442\u043e\u0440')
print('тип и содержание str_2')
for elem in str_2:
print(type(elem), elem)
| [
"ss2576@mail.ru"
] | ss2576@mail.ru |
9b37ee2c5421948426b43dcf868f39111b570567 | 7a88fc18f30d5dd3ac935877d4d9268a56c296be | /di_website/general/migrations/0040_auto_20200701_1953.py | 8ca4d4a90548fbce21bf8203daa79f82f3e1c673 | [] | no_license | devinit/DIwebsite-redesign | 745a480b7ba0feffa34dc664548ee4c5a7b4d470 | 9ec46823c67cdd4f35be255896bf30d8f6362666 | refs/heads/develop | 2023-08-30T04:06:20.951203 | 2023-08-07T12:06:07 | 2023-08-07T12:06:07 | 184,287,370 | 1 | 0 | null | 2023-08-28T14:34:57 | 2019-04-30T15:29:25 | HTML | UTF-8 | Python | false | false | 9,919 | py | # Generated by Django 2.2.13 on 2020-07-01 19:53
import di_website.publications.blocks
from django.db import migrations
import wagtail.blocks
import wagtail.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('general', '0039_auto_20200514_0956'),
]
operations = [
migrations.AlterField(
model_name='general',
name='body',
field=wagtail.fields.StreamField([('anchor', wagtail.blocks.StructBlock([('anchor_id', wagtail.blocks.CharBlock(help_text='The unique indentifier for this anchor', required=True))])), ('paragraph_block', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('block_quote', wagtail.blocks.StructBlock([('text', wagtail.blocks.TextBlock()), ('source', wagtail.blocks.TextBlock(help_text='Who is this quote acredited to?', required=False))])), ('button_block', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(help_text='Leave blank if you wish to use the page title as a caption', required=False)), ('page', wagtail.blocks.PageChooserBlock(help_text='For the link/button to show, either this or the url are required', required=False)), ('url', wagtail.blocks.URLBlock(help_text='An alternative to an internal page', required=False))])), ('link_block', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(help_text='Leave blank if you wish to use the page title as a caption', required=False)), ('page', wagtail.blocks.PageChooserBlock(help_text='For the link/button to show, either this or the url are required', required=False)), ('url', wagtail.blocks.URLBlock(help_text='An alternative to an internal page', required=False))])), ('image', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('credit_name', wagtail.blocks.CharBlock(help_text='Name of the image source', required=False)), ('credit_url', wagtail.blocks.URLBlock(help_text='URL of the image source', required=False)), ('caption', wagtail.blocks.CharBlock(help_text='Caption to appear beneath the image', required=False))])), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, 
template='blocks/embed_block.html'))], blank=True, null=True, verbose_name='Page Body'),
),
migrations.AlterField(
model_name='general',
name='sections',
field=wagtail.fields.StreamField([('anchor', wagtail.blocks.StructBlock([('anchor_id', wagtail.blocks.CharBlock(help_text='The unique indentifier for this anchor', required=True))])), ('paragraph_block', wagtail.blocks.StructBlock([('text', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'])), ('center', wagtail.blocks.BooleanBlock(default=False, required=False))])), ('block_quote', wagtail.blocks.StructBlock([('text', wagtail.blocks.TextBlock()), ('source', wagtail.blocks.TextBlock(help_text='Who is this quote acredited to?', required=False)), ('center', wagtail.blocks.BooleanBlock(default=False, required=False))])), ('banner_block', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('image_credit_name', wagtail.blocks.TextBlock(help_text='The name of the image source', required=False)), ('image_credit_url', wagtail.blocks.URLBlock(help_text='A link to the image source, if any', required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html')), ('text', wagtail.blocks.StreamBlock([('text_heading', wagtail.blocks.CharBlock(icon='title', required=False, template='blocks/banner/text_heading.html')), ('text', wagtail.blocks.TextBlock(template='blocks/banner/text.html')), ('richtext', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'], template='blocks/banner/richtext.html')), ('list', wagtail.blocks.ListBlock(wagtail.blocks.StructBlock([('title', wagtail.blocks.TextBlock(help_text='An optional title to the list item', required=False)), ('content', wagtail.blocks.TextBlock(help_text='The list item content', required=True))], template='blocks/banner/list_item.html'), icon='list-ul', 
template='blocks/banner/list.html'))])), ('meta', wagtail.blocks.CharBlock(help_text='Anything from a name, location e.t.c - usually to provide credit for the text', required=False)), ('buttons', wagtail.blocks.StreamBlock([('button', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(help_text='Leave blank if you wish to use the page title as a caption', required=False)), ('page', wagtail.blocks.PageChooserBlock(help_text='For the link/button to show, either this or the url are required', required=False)), ('url', wagtail.blocks.URLBlock(help_text='An alternative to an internal page', required=False))])), ('document_box', wagtail.blocks.StructBlock([('box_heading', wagtail.blocks.CharBlock(icon='title', required=False)), ('documents', wagtail.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.blocks.BooleanBlock(default=False, help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('media_orientation', wagtail.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')], required=False)), ('light', wagtail.blocks.BooleanBlock(default=False, help_text='Sets the background to a lighter colour', required=False))])), ('downloads', wagtail.blocks.StructBlock([('section_heading', wagtail.blocks.TextBlock(required=False)), ('section_sub_heading', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'], required=False)), ('document_box_heading', wagtail.blocks.CharBlock(icon='title', required=False)), ('document_boxes', wagtail.blocks.StreamBlock([('document_box', wagtail.blocks.StructBlock([('box_heading', wagtail.blocks.CharBlock(icon='title', required=False)), ('documents', wagtail.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.blocks.BooleanBlock(default=False, 
help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('alt', wagtail.blocks.BooleanBlock(default=True, help_text='White background if checked', required=False))])), ('image', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('credit_name', wagtail.blocks.CharBlock(help_text='Name of the image source', required=False)), ('credit_url', wagtail.blocks.URLBlock(help_text='URL of the image source', required=False)), ('caption', wagtail.blocks.CharBlock(help_text='Caption to appear beneath the image', required=False))])), ('image_duo', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('credit_name', wagtail.blocks.CharBlock(help_text='Name of the image source', required=False)), ('credit_url', wagtail.blocks.URLBlock(help_text='URL of the image source', required=False)), ('caption', wagtail.blocks.CharBlock(help_text='Caption to appear beneath the image', required=False)), ('heading', wagtail.blocks.CharBlock(icon='fa-heading', required=False)), ('side_text', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'], icon='fa-paragraph', required=True, template='blocks/paragraph_block.html')), ('button', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(help_text='Leave blank if you wish to use the page title as a caption', required=False)), ('page', wagtail.blocks.PageChooserBlock(help_text='For the link/button to show, either this or the url are required', required=False)), ('url', wagtail.blocks.URLBlock(help_text='An alternative to an internal page', required=False))])), ('alt', wagtail.blocks.BooleanBlock(default=False, help_text='White background if checked.', required=False))])), ('audio_block', di_website.publications.blocks.AudioMediaBlock(max_num=1)), ('video_duo', wagtail.blocks.StructBlock([('heading', 
wagtail.blocks.CharBlock(help_text='Section heading', icon='fa-heading', required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-video-camera', required=False, template='blocks/embed_block.html')), ('side_text', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'image', 'embed'], icon='fa-paragraph', required=True, template='blocks/paragraph_block.html')), ('button', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(help_text='Leave blank if you wish to use the page title as a caption', required=False)), ('page', wagtail.blocks.PageChooserBlock(help_text='For the link/button to show, either this or the url are required', required=False)), ('url', wagtail.blocks.URLBlock(help_text='An alternative to an internal page', required=False))])), ('alt', wagtail.blocks.BooleanBlock(default=False, help_text='White background if checked.', required=False))])), ('full_width_video_block', wagtail.blocks.StructBlock([('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', required=False))]))], blank=True, null=True, verbose_name='Sections'),
),
]
| [
"edwinm_p@yahoo.com"
] | edwinm_p@yahoo.com |
dc25137d0a69d6994977f134b52c3ddc0d55d6e1 | ab70e56071778f9f6f003181bf98c2d26b63550f | /tests/hwsim/test_mbo.py | e94e8ced827325469953d44624a08edeb63a4538 | [
"BSD-3-Clause"
] | permissive | vanhoefm/hostap-wpa3 | b944020f36187b2fba1119a149c0b7cc2855cf2b | 72e297507b896702e6635540f5b241ec5e02ed06 | refs/heads/master | 2022-07-25T13:00:21.309225 | 2022-07-16T22:58:29 | 2022-07-16T22:58:29 | 158,976,638 | 37 | 22 | null | null | null | null | UTF-8 | Python | false | false | 26,815 | py | # MBO tests
# Copyright (c) 2016, Intel Deutschland GmbH
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import hostapd
import os
import time
import hostapd
from tshark import run_tshark
from utils import *
def set_reg(country_code, apdev0=None, apdev1=None, dev0=None):
if apdev0:
hostapd.cmd_execute(apdev0, ['iw', 'reg', 'set', country_code])
if apdev1:
hostapd.cmd_execute(apdev1, ['iw', 'reg', 'set', country_code])
if dev0:
dev0.cmd_execute(['iw', 'reg', 'set', country_code])
def run_mbo_supp_oper_classes(dev, apdev, hapd, hapd2, country, freq_list=None,
disable_ht=False, disable_vht=False):
"""MBO and supported operating classes"""
addr = dev[0].own_addr()
res2 = None
res5 = None
dev[0].flush_scan_cache()
dev[0].dump_monitor()
logger.info("Country: " + country)
dev[0].note("Setting country code " + country)
set_reg(country, apdev[0], apdev[1], dev[0])
for j in range(5):
ev = dev[0].wait_event(["CTRL-EVENT-REGDOM-CHANGE"], timeout=5)
if ev is None:
raise Exception("No regdom change event")
if "alpha2=" + country in ev:
break
dev[0].dump_monitor()
dev[1].dump_monitor()
dev[2].dump_monitor()
_disable_ht = "1" if disable_ht else "0"
_disable_vht = "1" if disable_vht else "0"
if hapd:
hapd.set("country_code", country)
hapd.enable()
dev[0].scan_for_bss(hapd.own_addr(), 5180, force_scan=True)
dev[0].connect("test-wnm-mbo", key_mgmt="NONE", scan_freq="5180",
freq_list=freq_list, disable_ht=_disable_ht,
disable_vht=_disable_vht)
sta = hapd.get_sta(addr)
res5 = sta['supp_op_classes'][2:]
dev[0].wait_regdom(country_ie=True)
time.sleep(0.1)
hapd.disable()
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].request("ABORT_SCAN")
dev[0].wait_disconnected()
dev[0].dump_monitor()
hapd2.set("country_code", country)
hapd2.enable()
dev[0].scan_for_bss(hapd2.own_addr(), 2412, force_scan=True)
dev[0].connect("test-wnm-mbo-2", key_mgmt="NONE", scan_freq="2412",
freq_list=freq_list, disable_ht=_disable_ht,
disable_vht=_disable_vht)
sta = hapd2.get_sta(addr)
res2 = sta['supp_op_classes'][2:]
dev[0].wait_regdom(country_ie=True)
time.sleep(0.1)
hapd2.disable()
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].request("ABORT_SCAN")
dev[0].wait_disconnected()
dev[0].dump_monitor()
return res2, res5
def run_mbo_supp_oper_class(dev, apdev, country, expected, inc5,
freq_list=None, disable_ht=False,
disable_vht=False):
if inc5:
params = {'ssid': "test-wnm-mbo",
'mbo': '1',
"country_code": "US",
'ieee80211d': '1',
"ieee80211n": "1",
"hw_mode": "a",
"channel": "36"}
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
else:
hapd = None
params = {'ssid': "test-wnm-mbo-2",
'mbo': '1',
"country_code": "US",
'ieee80211d': '1',
"ieee80211n": "1",
"hw_mode": "g",
"channel": "1"}
hapd2 = hostapd.add_ap(apdev[1], params, no_enable=True)
try:
dev[0].request("STA_AUTOCONNECT 0")
res2, res5 = run_mbo_supp_oper_classes(dev, apdev, hapd, hapd2, country,
freq_list=freq_list,
disable_ht=disable_ht,
disable_vht=disable_vht)
finally:
dev[0].dump_monitor()
dev[0].request("STA_AUTOCONNECT 1")
wait_regdom_changes(dev[0])
country1 = dev[0].get_driver_status_field("country")
logger.info("Country code at the end (1): " + country1)
set_reg("00", apdev[0], apdev[1], dev[0])
country2 = dev[0].get_driver_status_field("country")
logger.info("Country code at the end (2): " + country2)
for i in range(5):
ev = dev[0].wait_event(["CTRL-EVENT-REGDOM-CHANGE"], timeout=1)
if ev is None or "init=USER type=WORLD" in ev:
break
wait_regdom_changes(dev[0])
country3 = dev[0].get_driver_status_field("country")
logger.info("Country code at the end (3): " + country3)
if country3 != "00":
clear_country(dev)
# For now, allow operating class 129 to be missing since not all
# installed regdb files include the 160 MHz channels.
expected2 = expected.replace('808182', '8082')
# For now, allow operating classes 121-123 to be missing since not all
# installed regdb files include the related US DFS channels.
expected2 = expected2.replace('78797a7b7c', '787c')
expected3 = expected
# For now, allow operating classes 124-127 to be missing for Finland
# since they were added only recently in regdb.
if country == "FI":
expected3 = expected3.replace("7b7c7d7e7f80", "7b80")
if res2 != expected and res2 != expected2 and res2 != expected3:
raise Exception("Unexpected supp_op_class string (country=%s, 2.4 GHz): %s (expected: %s)" % (country, res2, expected))
if inc5 and res5 != expected and res5 != expected2 and res5 != expected3:
raise Exception("Unexpected supp_op_class string (country=%s, 5 GHz): %s (expected: %s)" % (country, res5, expected))
def test_mbo_supp_oper_classes_za(dev, apdev):
"""MBO and supported operating classes (ZA)"""
run_mbo_supp_oper_class(dev, apdev, "ZA",
"515354737475767778797a7b808182", True)
def test_mbo_supp_oper_classes_fi(dev, apdev):
"""MBO and supported operating classes (FI)"""
run_mbo_supp_oper_class(dev, apdev, "FI",
"515354737475767778797a7b7c7d7e7f808182", True)
def test_mbo_supp_oper_classes_us(dev, apdev):
"""MBO and supported operating classes (US)"""
run_mbo_supp_oper_class(dev, apdev, "US",
"515354737475767778797a7b7c7d7e7f808182", True)
def test_mbo_supp_oper_classes_jp(dev, apdev):
"""MBO and supported operating classes (JP)"""
run_mbo_supp_oper_class(dev, apdev, "JP",
"51525354737475767778797a7b808182", True)
def test_mbo_supp_oper_classes_bd(dev, apdev):
"""MBO and supported operating classes (BD)"""
run_mbo_supp_oper_class(dev, apdev, "BD",
"5153547c7d7e7f80", False)
def test_mbo_supp_oper_classes_sy(dev, apdev):
"""MBO and supported operating classes (SY)"""
run_mbo_supp_oper_class(dev, apdev, "SY",
"515354", False)
def test_mbo_supp_oper_classes_us_freq_list(dev, apdev):
"""MBO and supported operating classes (US) - freq_list"""
run_mbo_supp_oper_class(dev, apdev, "US", "515354", False,
freq_list="2412 2437 2462")
def test_mbo_supp_oper_classes_us_disable_ht(dev, apdev):
"""MBO and supported operating classes (US) - disable_ht"""
run_mbo_supp_oper_class(dev, apdev, "US", "517376797c7d", False,
disable_ht=True)
def test_mbo_supp_oper_classes_us_disable_vht(dev, apdev):
"""MBO and supported operating classes (US) - disable_vht"""
run_mbo_supp_oper_class(dev, apdev, "US",
"515354737475767778797a7b7c7d7e7f", False,
disable_vht=True)
def test_mbo_assoc_disallow(dev, apdev, params):
"""MBO and association disallowed"""
hapd1 = hostapd.add_ap(apdev[0], {"ssid": "MBO", "mbo": "1"})
hapd2 = hostapd.add_ap(apdev[1], {"ssid": "MBO", "mbo": "1"})
logger.debug("Set mbo_assoc_disallow with invalid value")
if "FAIL" not in hapd1.request("SET mbo_assoc_disallow 6"):
raise Exception("Set mbo_assoc_disallow for AP1 succeeded unexpectedly with value 6")
logger.debug("Disallow associations to AP1 and allow association to AP2")
if "OK" not in hapd1.request("SET mbo_assoc_disallow 1"):
raise Exception("Failed to set mbo_assoc_disallow for AP1")
if "OK" not in hapd2.request("SET mbo_assoc_disallow 0"):
raise Exception("Failed to set mbo_assoc_disallow for AP2")
dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412")
out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
"wlan.fc.type == 0 && wlan.fc.type_subtype == 0x00",
wait=False)
if "Destination address: " + hapd1.own_addr() in out:
raise Exception("Association request sent to disallowed AP")
timestamp = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
"wlan.fc.type_subtype == 0x00",
display=['frame.time'], wait=False)
logger.debug("Allow associations to AP1 and disallow associations to AP2")
if "OK" not in hapd1.request("SET mbo_assoc_disallow 0"):
raise Exception("Failed to set mbo_assoc_disallow for AP1")
if "OK" not in hapd2.request("SET mbo_assoc_disallow 1"):
raise Exception("Failed to set mbo_assoc_disallow for AP2")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
# Force new scan, so the assoc_disallowed indication is updated */
dev[0].request("FLUSH")
dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412")
filter = 'wlan.fc.type == 0 && wlan.fc.type_subtype == 0x00 && frame.time > "' + timestamp.rstrip() + '"'
out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
filter, wait=False)
if "Destination address: " + hapd2.own_addr() in out:
raise Exception("Association request sent to disallowed AP 2")
def test_mbo_assoc_disallow_ignore(dev, apdev):
"""MBO and ignoring disallowed association"""
try:
_test_mbo_assoc_disallow_ignore(dev, apdev)
finally:
dev[0].request("SCAN_INTERVAL 5")
def _test_mbo_assoc_disallow_ignore(dev, apdev):
hapd1 = hostapd.add_ap(apdev[0], {"ssid": "MBO", "mbo": "1"})
if "OK" not in hapd1.request("SET mbo_assoc_disallow 1"):
raise Exception("Failed to set mbo_assoc_disallow for AP1")
if "OK" not in dev[0].request("SCAN_INTERVAL 1"):
raise Exception("Failed to set scan interval")
dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND"], timeout=10)
if ev is None:
raise Exception("CTRL-EVENT-NETWORK-NOT-FOUND not seen")
if "OK" not in dev[0].request("SET ignore_assoc_disallow 1"):
raise Exception("Failed to set ignore_assoc_disallow")
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
if ev is None:
raise Exception("CTRL-EVENT-ASSOC-REJECT not seen")
if "status_code=17" not in ev:
raise Exception("Unexpected association reject reason: " + ev)
if "OK" not in hapd1.request("SET mbo_assoc_disallow 0"):
raise Exception("Failed to set mbo_assoc_disallow for AP1")
dev[0].wait_connected()
def test_mbo_assoc_disallow_change(dev, apdev):
"""MBO and dynamic association disallowed change with passive scanning"""
hapd = hostapd.add_ap(apdev[0], {"ssid": "MBO", "mbo": "1"})
id = dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.set("mbo_assoc_disallow", "1")
dev[0].scan_for_bss(hapd.own_addr(), 2412, force_scan=True, passive=True)
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-ASSOC-REJECT",
"CTRL-EVENT-CONNECTED"], timeout=20)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("CTRL-EVENT-NETWORK-NOT-FOUND not seen")
if "CTRL-EVENT-NETWORK-NOT-FOUND" not in ev:
raise Exception("Unexpected connection result: " + ev)
@remote_compatible
def test_mbo_cell_capa_update(dev, apdev):
"""MBO cellular data capability update"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1'}
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
raise Exception("Failed to set STA as cellular data capable")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
if 'mbo_cell_capa' not in sta or sta['mbo_cell_capa'] != '1':
raise Exception("mbo_cell_capa missing after association")
if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
raise Exception("Failed to set STA as cellular data not-capable")
# Duplicate update for additional code coverage
if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
raise Exception("Failed to set STA as cellular data not-capable")
time.sleep(0.2)
sta = hapd.get_sta(addr)
if 'mbo_cell_capa' not in sta:
raise Exception("mbo_cell_capa missing after update")
if sta['mbo_cell_capa'] != '3':
raise Exception("mbo_cell_capa not updated properly")
@remote_compatible
def test_mbo_cell_capa_update_pmf(dev, apdev):
"""MBO cellular data capability update with PMF required"""
ssid = "test-wnm-mbo"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params['mbo'] = '1'
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
raise Exception("Failed to set STA as cellular data capable")
dev[0].connect(ssid, psk=passphrase, key_mgmt="WPA-PSK-SHA256",
proto="WPA2", ieee80211w="2", scan_freq="2412")
hapd.wait_sta()
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
if 'mbo_cell_capa' not in sta or sta['mbo_cell_capa'] != '1':
raise Exception("mbo_cell_capa missing after association")
if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
raise Exception("Failed to set STA as cellular data not-capable")
time.sleep(0.2)
sta = hapd.get_sta(addr)
if 'mbo_cell_capa' not in sta:
raise Exception("mbo_cell_capa missing after update")
if sta['mbo_cell_capa'] != '3':
raise Exception("mbo_cell_capa not updated properly")
def test_mbo_wnm_token_wrap(dev, apdev):
"""MBO WNM token wrap around"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1'}
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
# Trigger transmission of 256 WNM-Notification frames to wrap around the
# 8-bit mbo_wnm_token counter.
for i in range(128):
if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
raise Exception("Failed to set STA as cellular data capable")
if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
raise Exception("Failed to set STA as cellular data not-capable")
@remote_compatible
def test_mbo_non_pref_chan(dev, apdev):
"""MBO non-preferred channel list"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1'}
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
if "FAIL" not in dev[0].request("SET non_pref_chan 81:7:200:99"):
raise Exception("Invalid non_pref_chan value accepted")
if "FAIL" not in dev[0].request("SET non_pref_chan 81:15:200:3"):
raise Exception("Invalid non_pref_chan value accepted")
if "FAIL" not in dev[0].request("SET non_pref_chan 81:7:200:3 81:7:201:3"):
raise Exception("Invalid non_pref_chan value accepted")
if "OK" not in dev[0].request("SET non_pref_chan 81:7:200:3"):
raise Exception("Failed to set non-preferred channel list")
if "OK" not in dev[0].request("SET non_pref_chan 81:7:200:1 81:9:100:2"):
raise Exception("Failed to set non-preferred channel list")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'non_pref_chan[0]' not in sta:
raise Exception("Missing non_pref_chan[0] value (assoc)")
if sta['non_pref_chan[0]'] != '81:200:1:7':
raise Exception("Unexpected non_pref_chan[0] value (assoc)")
if 'non_pref_chan[1]' not in sta:
raise Exception("Missing non_pref_chan[1] value (assoc)")
if sta['non_pref_chan[1]'] != '81:100:2:9':
raise Exception("Unexpected non_pref_chan[1] value (assoc)")
if 'non_pref_chan[2]' in sta:
raise Exception("Unexpected non_pref_chan[2] value (assoc)")
if "OK" not in dev[0].request("SET non_pref_chan 81:9:100:2"):
raise Exception("Failed to update non-preferred channel list")
time.sleep(0.1)
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'non_pref_chan[0]' not in sta:
raise Exception("Missing non_pref_chan[0] value (update 1)")
if sta['non_pref_chan[0]'] != '81:100:2:9':
raise Exception("Unexpected non_pref_chan[0] value (update 1)")
if 'non_pref_chan[1]' in sta:
raise Exception("Unexpected non_pref_chan[1] value (update 1)")
if "OK" not in dev[0].request("SET non_pref_chan 81:9:100:2 81:10:100:2 81:8:100:2 81:7:100:1 81:5:100:1"):
raise Exception("Failed to update non-preferred channel list")
time.sleep(0.1)
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'non_pref_chan[0]' not in sta:
raise Exception("Missing non_pref_chan[0] value (update 2)")
if sta['non_pref_chan[0]'] != '81:100:1:7,5':
raise Exception("Unexpected non_pref_chan[0] value (update 2)")
if 'non_pref_chan[1]' not in sta:
raise Exception("Missing non_pref_chan[1] value (update 2)")
if sta['non_pref_chan[1]'] != '81:100:2:9,10,8':
raise Exception("Unexpected non_pref_chan[1] value (update 2)")
if 'non_pref_chan[2]' in sta:
raise Exception("Unexpected non_pref_chan[2] value (update 2)")
if "OK" not in dev[0].request("SET non_pref_chan 81:5:90:2 82:14:91:2"):
raise Exception("Failed to update non-preferred channel list")
time.sleep(0.1)
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'non_pref_chan[0]' not in sta:
raise Exception("Missing non_pref_chan[0] value (update 3)")
if sta['non_pref_chan[0]'] != '81:90:2:5':
raise Exception("Unexpected non_pref_chan[0] value (update 3)")
if 'non_pref_chan[1]' not in sta:
raise Exception("Missing non_pref_chan[1] value (update 3)")
if sta['non_pref_chan[1]'] != '82:91:2:14':
raise Exception("Unexpected non_pref_chan[1] value (update 3)")
if 'non_pref_chan[2]' in sta:
raise Exception("Unexpected non_pref_chan[2] value (update 3)")
if "OK" not in dev[0].request("SET non_pref_chan "):
raise Exception("Failed to update non-preferred channel list")
time.sleep(0.1)
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'non_pref_chan[0]' in sta:
raise Exception("Unexpected non_pref_chan[0] value (update 4)")
@remote_compatible
def test_mbo_sta_supp_op_classes(dev, apdev):
"""MBO STA supported operating classes"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1'}
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
logger.debug("STA: " + str(sta))
if 'supp_op_classes' not in sta:
raise Exception("No supp_op_classes")
supp = bytearray(binascii.unhexlify(sta['supp_op_classes']))
if supp[0] != 81:
raise Exception("Unexpected current operating class %d" % supp[0])
if 115 not in supp:
raise Exception("Operating class 115 missing")
def test_mbo_failures(dev, apdev):
"""MBO failure cases"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1'}
hapd = hostapd.add_ap(apdev[0], params)
with alloc_fail(dev[0], 1, "wpas_mbo_ie"):
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
with alloc_fail(dev[0], 1, "wpas_mbo_send_wnm_notification"):
if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
raise Exception("Failed to set STA as cellular data capable")
with fail_test(dev[0], 1, "wpas_mbo_send_wnm_notification"):
if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
raise Exception("Failed to set STA as cellular data not-capable")
with alloc_fail(dev[0], 1, "wpas_mbo_update_non_pref_chan"):
if "FAIL" not in dev[0].request("SET non_pref_chan 81:7:200:3"):
raise Exception("non_pref_chan value accepted during OOM")
with alloc_fail(dev[0], 2, "wpas_mbo_update_non_pref_chan"):
if "FAIL" not in dev[0].request("SET non_pref_chan 81:7:200:3"):
raise Exception("non_pref_chan value accepted during OOM")
def test_mbo_wnm_bss_tm_ie_parsing(dev, apdev):
"""MBO BSS transition request MBO IE parsing"""
ssid = "test-wnm-mbo"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
addr = dev[0].own_addr()
dev[0].connect(ssid, psk="12345678", key_mgmt="WPA-PSK",
proto="WPA2", ieee80211w="0", scan_freq="2412")
dev[0].request("SET ext_mgmt_frame_handling 1")
hdr = "d0003a01" + addr.replace(':', '') + bssid.replace(':', '') + bssid.replace(':', '') + "3000"
btm_hdr = "0a070100030001"
tests = [("Truncated attribute in MBO IE", "dd06506f9a160101"),
("Unexpected cell data capa attribute length in MBO IE",
"dd09506f9a160501030500"),
("Unexpected transition reason attribute length in MBO IE",
"dd06506f9a160600"),
("Unexpected assoc retry delay attribute length in MBO IE",
"dd0c506f9a160100080200000800"),
("Unknown attribute id 255 in MBO IE",
"dd06506f9a16ff00")]
for test, mbo_ie in tests:
logger.info(test)
dev[0].request("NOTE " + test)
frame = hdr + btm_hdr + mbo_ie
if "OK" not in dev[0].request("MGMT_RX_PROCESS freq=2412 datarate=0 ssi_signal=-30 frame=" + frame):
raise Exception("MGMT_RX_PROCESS failed")
logger.info("Unexpected association retry delay")
dev[0].request("NOTE Unexpected association retry delay")
btm_hdr = "0a070108030001112233445566778899aabbcc"
mbo_ie = "dd08506f9a1608020000"
frame = hdr + btm_hdr + mbo_ie
if "OK" not in dev[0].request("MGMT_RX_PROCESS freq=2412 datarate=0 ssi_signal=-30 frame=" + frame):
raise Exception("MGMT_RX_PROCESS failed")
dev[0].request("SET ext_mgmt_frame_handling 0")
def test_mbo_without_pmf(dev, apdev):
"""MBO and WPA2 without PMF"""
ssid = "test-wnm-mbo"
params = {'ssid': ssid, 'mbo': '1', "wpa": '2',
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP",
"wpa_passphrase": "12345678"}
try:
# "MBO: PMF needs to be enabled whenever using WPA2 with MBO"
hostapd.add_ap(apdev[0], params)
raise Exception("AP setup succeeded unexpectedly")
except Exception as e:
if "Failed to enable hostapd" in str(e):
pass
else:
raise
def test_mbo_without_pmf_workaround(dev, apdev):
"""MBO and WPA2 without PMF on misbehaving AP"""
ssid = "test-wnm-mbo"
params0 = {'ssid': ssid, "wpa": '2',
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP",
"wpa_passphrase": "12345678",
"vendor_elements": "dd07506f9a16010100"}
params1 = {'ssid': ssid, "mbo": '1', "wpa": '2',
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP",
"wpa_passphrase": "12345678", "ieee80211w": "1"}
hapd0 = hostapd.add_ap(apdev[0], params0)
dev[0].connect(ssid, psk="12345678", key_mgmt="WPA-PSK",
proto="WPA2", ieee80211w="1", scan_freq="2412")
hapd0.wait_sta()
sta = hapd0.get_sta(dev[0].own_addr())
ext_capab = bytearray(binascii.unhexlify(sta['ext_capab']))
if ext_capab[2] & 0x08:
raise Exception("STA did not disable BSS Transition capability")
hapd1 = hostapd.add_ap(apdev[1], params1)
dev[0].scan_for_bss(hapd1.own_addr(), 2412, force_scan=True)
dev[0].roam(hapd1.own_addr())
hapd1.wait_sta()
sta = hapd1.get_sta(dev[0].own_addr())
ext_capab = bytearray(binascii.unhexlify(sta['ext_capab']))
if not ext_capab[2] & 0x08:
raise Exception("STA disabled BSS Transition capability")
dev[0].roam(hapd0.own_addr())
hapd0.wait_sta()
sta = hapd0.get_sta(dev[0].own_addr())
ext_capab = bytearray(binascii.unhexlify(sta['ext_capab']))
if ext_capab[2] & 0x08:
raise Exception("STA did not disable BSS Transition capability")
def check_mbo_anqp(dev, bssid, cell_data_conn_pref):
    """Run an MBO ANQP query against *bssid* and verify the reported
    Cellular Data Connection Preference value (*None* means no value is
    expected to be reported)."""
    if "OK" not in dev.request("ANQP_GET " + bssid + " 272,mbo:2"):
        raise Exception("ANQP_GET command failed")

    def wait_or_fail(events, timeout, error):
        ev = dev.wait_event(events, timeout=timeout)
        if ev is None:
            raise Exception(error)
        return ev

    wait_or_fail(["GAS-QUERY-START"], 5, "GAS query start timed out")
    wait_or_fail(["GAS-QUERY-DONE"], 10, "GAS query timed out")

    if cell_data_conn_pref is not None:
        ev = dev.wait_event(["RX-MBO-ANQP"], timeout=1)
        if ev is None or "cell_conn_pref" not in ev:
            raise Exception("Did not receive MBO Cellular Data Connection Preference")
        reported = int(ev.split('=')[1])
        if reported != cell_data_conn_pref:
            raise Exception("Unexpected cell_conn_pref value: " + ev)

    dev.dump_monitor()
def test_mbo_anqp(dev, apdev):
    """MBO ANQP"""
    params = {'ssid': "test-wnm-mbo",
              'mbo': '1',
              'interworking': '1',
              'mbo_cell_data_conn_pref': '1'}
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = hapd.own_addr()
    dev[0].scan_for_bss(bssid, freq="2412", force_scan=True)
    # Walk through the preference values; '1' is already set via the
    # startup parameters and '-1' removes the advertisement, so no value
    # is expected back in that last case.
    for pref, expected in (('1', 1), ('255', 255), ('-1', None)):
        if pref != '1':
            hapd.set('mbo_cell_data_conn_pref', pref)
        check_mbo_anqp(dev[0], bssid, expected)
| [
"j@w1.fi"
] | j@w1.fi |
4a425c2ee8e6a31e7a911470acb6b0c203fddbcd | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_benidoc_pancakes.py | 53e0469eb2d421c779c30d9d18d071dc28934963 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 596 | py | #! python
def flips(pancakes):
    """Return the minimum number of flip operations needed to turn every
    pancake in the stack happy-side ('+') up.

    Flipping the top K pancakes reverses and inverts them, so the
    optimal strategy costs one flip per boundary between a '+' run and a
    '-' run, plus one final flip if the bottom-most run is sad.

    :param pancakes: string of '+' (happy side up) and '-' (sad side
      up), top of the stack first.  Stray characters -- notably the
      trailing newline that ``readline()`` leaves on each input line --
      are ignored instead of being miscounted as pancakes.
    :returns: int, minimum number of flips (0 for an empty stack).
    """
    stack = [p for p in pancakes if p in '+-']
    if not stack:
        return 0
    # one flip per run boundary...
    count = sum(1 for a, b in zip(stack, stack[1:]) if a != b)
    # ...plus one more if the last (bottom) run is sad-side up
    if stack[-1] == '-':
        count += 1
    return count
# Read the Code Jam input file (first line: number of cases, then one
# pancake stack per line) and write one "Case #i: <answer>" line per
# case.  Use context managers so the files are closed even on error,
# and strip the trailing newline before handing the line to flips().
with open('B-large.in') as fin, open('large_output.txt', 'w+') as fout:
    cases = int(fin.readline())
    for i in range(1, cases + 1):
        pancakes = fin.readline().strip()
        fout.write('Case #' + str(i) + ': ' + str(flips(pancakes)) + '\n')
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
a314794ab7fe0af0d04c2017a07d685c7f74494a | 99a43cdb360b28f9d8d8cc8fc27d8c2f45271c77 | /app01/migrations/0022_auto_20190104_0518.py | 38e6f457dc7d1161df0a045ef203909b61bca2b6 | [] | no_license | xiaoyaolaotou/MyBook | a542e8702ab46ae1904c3d2efa702cbf642033c0 | d83f07f968005bd34246c684c1bd34405ff07d32 | refs/heads/master | 2020-04-11T10:30:40.995240 | 2019-01-08T08:44:40 | 2019-01-08T08:44:40 | 161,717,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.0 on 2019-01-04 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alter Publisher.name so it is
    # unique (max_length stays 128).

    dependencies = [('app01', '0021_auto_20190104_0517')]

    operations = [
        migrations.AlterField(
            model_name='publisher',
            name='name',
            field=models.CharField(max_length=128, unique=True),
        ),
    ]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9681ceab10d026f8bee29dd19183cd0116573685 | a5fbad3bb19a9ac0755804b65898071b78f2de86 | /tcfl/target_ext_capture.py | f20864356f94103948aada59987b04ab90f831ed | [
"Apache-2.0"
] | permissive | irvcov/tcf | b4a7fd091cd37bd5a91f8270d934b1057199ca3d | 841390a93557dcfd27968bb61a031ad10afa1488 | refs/heads/master | 2021-04-22T04:21:47.525516 | 2020-03-24T16:50:42 | 2020-03-24T16:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,396 | py | #! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Capture snapshots or streams of target data, such as screenshots, audio, video, network, etc
--------------------------------------------------------------------------------------------
The capture interface allows to capture screenshots, audio and video
streams, network traffic, etc.
This provides an abstract interface to access it, as well as means to
wait for things to be found in such captures, such as images in screenshots.
"""
# Roadmap:
#
# Primitives for accessing the capture interface
#
# _rest_tb_target_capture_start()
# _rest_tb_target_capture_stop_and_get()
# _rest_tb_target_capture_list()
#
# Extension to the TCF client API
#
# extension
# list()
# start()
# stop()
# stop_and_get()
# get()
# image_on_screenshot()
# _expect_image_on_screenshot_c
# detect()
# _squares_overlap()
# _template_find()
# _template_find_gray()
# flush()
# _draw_text()
#
# Command line invocation hookups
#
# cmdline_*()
import collections
import contextlib
import inspect
import logging
import os
import shutil
import commonl
import tc
from . import msgid_c
# Image-matching support is optional: it needs OpenCV (cv2), numpy and
# imutils.  Record whether they all imported so the screenshot
# expectation code can refuse to run when they are missing.
try:
    import cv2
    import numpy
    import imutils
except ImportError:
    image_expectation_works = False
else:
    image_expectation_works = True
def _rest_tb_target_capture_list(rtb, rt, ticket = ''):
return rtb.send_request("GET", "targets/%s/capture/list" % rt['id'],
data = { 'ticket': ticket })
#
# implementation of the expectation to wait for an image template to
# show up in an screenshot
#
def _template_find_gray(image_gray, template, threshold = 0.8):
    # Find a gray template on a gray image, returning the list of
    # (x0, y0, x1, y1) boxes where the normalized correlation is at
    # least `threshold`.
    #
    # Boxes are in resolution-independent coordinates: 0,0 is the
    # image's top-left corner, 1,1 the bottom-right corner.
    assert threshold > 0 and threshold <= 1
    template_w, template_h = template.shape[::-1]
    image_w, image_h = image_gray.shape[::-1]
    scores = cv2.matchTemplate(image_gray, template, cv2.TM_CCOEFF_NORMED)
    hits = []
    # numpy.where yields (rows, cols); reverse to iterate (x, y) points
    for x, y in zip(*numpy.where(scores >= threshold)[::-1]):
        hits.append((
            float(x) / image_w,
            float(y) / image_h,
            float(x + template_w) / image_w,
            float(y + template_h) / image_h,
        ))
    return hits
def _template_find(image_filename, image_rgb,
                   template_filename, template,
                   min_width = 30, min_height = 30):
    # Finds a template in an image, scaling down first the image and
    # then the template so hits are found regardless of resolution;
    # returns a list of (scale, relative-box, absolute-box) tuples.
    #
    # :param image_filename: name of the image file (for log messages)
    # :param image_rgb: image (BGR, as loaded by cv2.imread)
    # :param template_filename: name of the template file (for log messages)
    # :param template: grayscale template image, as loaded by the caller
    #   with cv2.imread(template_filename, cv2.IMREAD_GRAYSCALE)
    #
    # BUGFIX: use the `template` argument the caller already loaded;
    # it used to be discarded and re-read from template_filename.
    image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
    image_width, image_height = image_gray.shape[::-1]
    template_width, _template_height = template.shape[::-1]
    squares = []

    def _collect(scale_factor, relative_squares):
        # record each hit both in relative (0..1) and absolute pixel
        # coordinates of the original image
        for square in relative_squares:
            square_original = (
                int(square[0] * image_width),
                int(square[1] * image_height),
                int(square[2] * image_width),
                int(square[3] * image_height),
            )
            squares.append((scale_factor, square, square_original))

    # Scale down the image to find smaller hits of the template
    for scale in numpy.linspace(0.2, 1.0, 20)[::-1]:
        image_gray_resized = imutils.resize(
            image_gray, width = int(image_gray.shape[1] * scale))
        w, h = image_gray_resized.shape[::-1]
        # stop if the image is smaller than the template
        if w < template_width:
            logging.warning("%s: stopping at scale %.2f: smaller than "
                            "template", image_filename, scale)
            break
        if w < min_width or h < min_height:
            logging.warning("%s: stopping at scale %.2f: smaller than "
                            "args limit", image_filename, scale)
            break
        _collect(scale, _template_find_gray(image_gray_resized, template))

    # scale down the template to find smaller hits of the template
    for scale in numpy.linspace(0.2, 1.0, 20)[::-1]:
        template_resized = imutils.resize(
            template, width = int(template.shape[1] * scale))
        w, h = template_resized.shape[::-1]
        # stop if the template size gets too small
        if w < min_width or h < min_height:
            logging.warning("%s: stopping at scale %.2f: smaller than "
                            "args limit", template_filename, scale)
            break
        _collect(1/scale, _template_find_gray(image_gray, template_resized))

    return squares
class _expect_image_on_screenshot_c(tc.expectation_c):
    """Expectation that a template image shows up in screenshots taken
    from a target's capturer.

    Note the parameters are fully documented in
    :meth:`extension.image_on_screenshot`, which is how instances of
    this class are meant to be created.
    """
    def __init__(self, target, template_image_filename, capturer,
                 in_area, merge_similar, min_width, min_height,
                 poll_period, timeout, raise_on_timeout, raise_on_found):
        if not image_expectation_works:
            raise RuntimeError("Image matching won't work; need packages"
                               " cv2, imutils, numpy")
        assert isinstance(target, tc.target_c)
        assert isinstance(capturer, basestring)
        assert in_area is None \
            or isinstance(in_area, collections.Iterable) \
            and len(in_area) == 4 \
            and all(i >= 0 and i <= 1 for i in in_area), \
            'in_area parameter must be a tuple of four numbers ' \
            'from 0 to 1 describing the upper-left and lower-right ' \
            'of the area where to look for the image, ' \
            'eg: (0, 0, 0.5, 0.5) means in the top left quarter'
        assert merge_similar >= 0.0 and merge_similar <= 1.0, \
            "merge_similar has to be a float from 0.0 to 1.0"
        tc.expectation_c.__init__(self, target, poll_period, timeout,
                                  raise_on_timeout = raise_on_timeout,
                                  raise_on_found = raise_on_found)
        self.capturer = capturer
        self.in_area = in_area
        self.merge_similar = merge_similar
        # if the image is relative, we make it relative to the
        # filename of the caller--so we can do short names
        self.name = commonl.name_make_safe(template_image_filename)
        if os.path.isabs(template_image_filename):
            self.template_image_filename = template_image_filename
        else:
            self.template_image_filename = os.path.join(
                # 2 up the stack is the guy who called
                # target.capture.image_on_screeshot()
                os.path.dirname(inspect.stack()[2][1]),
                template_image_filename)
        with open(self.template_image_filename) as _f:
            # try to open it, cv2.imread() is quite crappy at giving
            # errors on file not found
            pass
        self.template_img = cv2.imread(self.template_image_filename,
                                       cv2.IMREAD_GRAYSCALE)
        # FIXME: raise exception if too small
        self.min_width = min_width
        self.min_height = min_height

    def poll_context(self):
        # we are polling from target with role TARGET.WANT_NAME from
        # it's capturer CAPTURER, so this is our context, so anyone
        # who will capture from that reuses the capture.
        return '%s-%s' % (self.target.want_name, self.capturer)

    def poll(self, testcase, run_name, buffers_poll):
        """Take one screenshot from the capturer and append it to this
        poll context's list; all expectations sharing the poll context
        share these screenshots."""
        target = self.target
        # we name the screenshots after the poll_name name, as
        # we'll share them amongst multiple expectations
        buffers_poll.setdefault('screenshot_count', 0)
        buffers_poll.setdefault('screenshots', [])
        dirname = os.path.join(testcase.tmpdir,
                               'expect-buffer-poll-%s' % self.poll_name)
        commonl.makedirs_p(dirname)
        filename = os.path.join(
            dirname,
            '.'.join([
                'screenshot',
                run_name, self.poll_name,
                # FIXME: replace number with datestamp? ideally from server?
                '%02d' % buffers_poll['screenshot_count'],
                'png'
            ])
        )
        target.capture.get(self.capturer, filename)
        buffers_poll['screenshot_count'] += 1
        buffers_poll['screenshots'].append(filename)
        target.report_info('captured screenshot from %s to %s'
                           % (self.capturer, filename), dlevel = 2)

    @staticmethod
    def _squares_overlap(ra, rb):
        #
        # Given two intersecting squares (X0, Y0, X1, Y1 boxes in
        # relative coordinates), return a measure of how much of their
        # areas intersect against the total square needed to contain
        # both of them (0 meaning they do not overlap at all, 1 meaning
        # they cover exactly the same surface).
        #
        # Return the percentage, the overlapping square and the
        # intersecting square.
        #
        overlap_h0 = min(ra[0], rb[0])
        overlap_v0 = min(ra[1], rb[1])
        overlap_h1 = max(ra[2], rb[2])
        overlap_v1 = max(ra[3], rb[3])
        overlap_area = ( overlap_h1 - overlap_h0 ) \
            * ( overlap_v1 - overlap_v0 )

        intersect_h0 = max(ra[0], rb[0])
        intersect_v0 = max(ra[1], rb[1])
        intersect_h1 = min(ra[2], rb[2])
        intersect_v1 = min(ra[3], rb[3])
        intersect_area = ( intersect_h1 - intersect_h0 ) \
            * ( intersect_v1 - intersect_v0 )

        if overlap_area == 0:
            return 0, ( -1, -1, -1, -1 ), ( -1, -1, -1, -1 )
        return 1 - ( overlap_area - intersect_area ) / overlap_area, \
            (overlap_h0, overlap_v0, overlap_h1, overlap_v1), \
            (intersect_h0, intersect_v0, intersect_h1, intersect_v1)

    def _draw_text(self, img, text, x, y):
        # Draw *text* on image *img* near position (x, y), shifting it
        # back into the image if it would overflow the edges.
        #
        # BUGFIX: honor the caller's *text* argument; it used to be
        # unconditionally overwritten with self.name (which is what the
        # only caller passes anyway, so behavior there is unchanged).
        img_w, img_h, _ = img.shape[::-1]
        # FIXME: make it a translucent box with an arrow at some point...
        font = cv2.FONT_HERSHEY_SIMPLEX
        # FIXME: compute scale to match image size
        font_scale = 0.5
        font_linetype = 1
        text_w, text_h = cv2.getTextSize(
            text, font, font_scale, font_linetype)[0]
        text_w *= 1.02	# make space around
        text_h *= 1.02
        y += int(text_h)	# make it relative to the top of the text
        #somel.cv2.rectangle(
        #    img,
        #    x, y,
        #    x + text_width, y + text_height,
        #    box_coords[0], box_coords[1], rectangle_bgr,
        #    somel.cv2.FILLED)
        if x + text_w >= img_w:
            x = max(0, x - int(text_w))
        if y + text_h >= img_h:
            y = max(0, y - int(text_h))
        cv2.putText(img, text, (x, y),
                    fontFace = font, fontScale = font_scale,
                    color = (0, 0, 255), thickness = font_linetype)

    def detect(self, testcase, run_name, buffers_poll, buffers):
        """
        See :meth:`expectation_c.detect` for reference on the arguments

        :returns: list of squares detected at different scales in
          relative and absolute coordinates, e.g:

          >>> [
          >>>     (
          >>>         1.0,
          >>>         ( 0.949, 0.005, 0.968, 0.0312 ),
          >>>         # relative (X0, Y0) to (X1, Y1)
          >>>         ( 972, 4, 992, 24)
          >>>         # absolute (X0, Y0) to (X1, Y1)
          >>>     ),
          >>>     (
          >>>         0.957,
          >>>         ( 0.948, 0.004, 0.969, 0.031 ),
          >>>         ( 971, 3, 992, 24)
          >>>     ),
          >>>     (
          >>>         0.957,
          >>>         (0.948, 0.005, 0.969, 0.032 ),
          >>>         (971, 4, 992, 25)
          >>>     ),
          >>>     (
          >>>         0.915,
          >>>         (0.948, 0.004, 0.970, 0.032 ),
          >>>         (971, 3, 993, 25)
          >>>     )
          >>> ]
        """
        target = self.target
        if not buffers_poll.get('screenshot_count', 0):
            target.report_info('%s/%s: not detecting, no screenshots yet'
                               % (run_name, self.name), dlevel = 3)
            return None
        most_recent = buffers_poll['screenshots'][-1]
        target.report_info('%s/%s: detecting in %s'
                           % (run_name, self.name, most_recent),
                           dlevel = 2)
        buffers['current'] = most_recent
        screenshot_img = cv2.imread(most_recent)
        r = _template_find(
            most_recent, screenshot_img,
            self.template_image_filename, self.template_img,
            min_width = self.min_width, min_height = self.min_height)
        if self.in_area:
            # keep only the matches fully contained in the user-given
            # bounding box (relative coordinates)
            r_in_area = []
            ax0 = self.in_area[0]
            ay0 = self.in_area[1]
            ax1 = self.in_area[2]
            ay1 = self.in_area[3]
            for scale, area_rel, area_abs in r:
                x0 = area_rel[0]
                y0 = area_rel[1]
                x1 = area_rel[2]
                y1 = area_rel[3]
                # BUGFIX: the bottom Y coordinate used to be compared
                # against the X bound (y1 <= ax1); compare against ay1.
                if x0 >= ax0 and y0 >= ay0 \
                   and x1 <= ax1 and y1 <= ay1:
                    r_in_area.append((scale, area_rel, area_abs))
                    target.report_info(
                        "%s/%s: taking match %.1fs@%.2f,%.2f-%.2f,%.2f "
                        "(in area %.2f,%.2f-%.2f,%.2f)"
                        % (run_name, self.name, scale, x0, y0, x1, y1,
                           ax0, ay0, ax1, ay1), dlevel = 3)
                else:
                    target.report_info(
                        "%s/%s: ignoring match %.1fs@%.2f,%.2f-%.2f,%.2f "
                        "(out of area %.2f,%.2f-%.2f,%.2f)"
                        % (run_name, self.name, scale, x0, y0, x1, y1,
                           ax0, ay0, ax1, ay1), dlevel = 3)
            target.report_info(
                "%s/%s: kept %d matches, ignored %d out-of-area"
                % (run_name, self.name,
                   len(r_in_area), len(r) - len(r_in_area)), dlevel = 2)
            r = r_in_area

        if r and self.merge_similar:	# merge similar detections
            start_idx = 0
            while start_idx < len(r):
                r0 = r[start_idx]
                for rx in list(r[start_idx+1:]):
                    measure, _, _ = self._squares_overlap(r0[1], rx[1])
                    if measure >= self.merge_similar:
                        # if more than the threshold we consider it is
                        # the same and ignore it
                        r.remove(rx)
                start_idx += 1

        if r:
            # make sure there is a collateral image in the
            # buffers_poll (shared amongst all the expectations for
            # this target and capturer) and draw detected regions in
            # there -- when done, flush() will write it.
            if 'collateral' in buffers_poll:
                collateral_img = buffers_poll['collateral']
            else:
                collateral_img = cv2.imread(most_recent)
                buffers_poll['collateral'] = collateral_img
            # draw boxes for the squares detected
            for _scale_factor, _normalized_rect, rect in r:
                cv2.rectangle(
                    collateral_img,
                    # note rect are the absolute coordinates
                    (rect[0], rect[1]), (rect[2], rect[3]),
                    (0, 0, 255),	# red
                    1)			# thin line
                self._draw_text(collateral_img, self.name, rect[0], rect[3])
            if len(r) == 1:
                target.report_info(
                    '%s/%s: detected one match'
                    % (run_name, self.name),
                    dict(screenshot = most_recent), alevel = 2)
            else:
                target.report_info(
                    '%s/%s: detected %d matches'
                    % (run_name, self.name, len(r)),
                    dict(screenshot = most_recent), alevel = 2)
            return r

    def flush(self, testcase, run_name, buffers_poll, buffers, results):
        # Write the report collateral: a screenshot annotated with the
        # detected matches or, if nothing was detected, a copy of the
        # last screenshot marked as "missed".
        if 'collateral' in buffers_poll:
            # write the collateral images, which basically have
            # squares drawn on the icons we were asked to look for--we
            # marked the squares in detect()--we wrote one square per
            # expectation per polled image
            collateral_img = buffers_poll['collateral']
            # so we can draw all the detections on the same screenshot
            collateral_filename = \
                testcase.report_file_prefix \
                + "%s.detected.png" % run_name
            cv2.imwrite(collateral_filename, collateral_img)
            del buffers_poll['collateral']
            del collateral_img

        if not results:
            # if we have no results about this expectation, it
            # means we missed it, so record a miss for reference
            # First generate collateral for the screenshot, if still
            # not recorded
            collateral_missed_filename = buffers_poll.get('collateral_missed',
                                                          None)
            if not collateral_missed_filename:
                collateral_missed_filename = \
                    testcase.report_file_prefix \
                    + "%s.missed.%s.png" % (run_name, self.poll_context())
                screenshots = buffers_poll.get('screenshots', [ ])
                if not screenshots:
                    self.target.report_info(
                        "%s/%s: no screenshot collateral, "
                        "since no captures where done"
                        % (run_name, self.name))
                    return
                last_screenshot = screenshots[-1]
                commonl.rm_f(collateral_missed_filename)
                shutil.copy(last_screenshot, collateral_missed_filename)
                buffers_poll['collateral_missed'] = collateral_missed_filename
            # lastly, symlink the specific missed expectation to the
            # screenshot--remember we might be sharing the screenshot
            # for many expectations
            collateral_filename = \
                testcase.report_file_prefix \
                + "%s.missed.%s.%s.png" % (
                    run_name, self.poll_context(), self.name)
            # make sure we symlink in the same directory
            commonl.rm_f(collateral_filename)
            os.symlink(os.path.basename(collateral_missed_filename),
                       collateral_filename)
class extension(tc.target_extension_c):
    """
    When a target supports the *capture* interface, it's
    *tcfl.tc.target_c* object will expose *target.capture* where the
    following calls can be made to capture data from it.

    A streaming capturer will start capturing when :meth:`start` is
    called and stop when :meth:`stop_and_get` is called, bringing the
    capture file from the server to the machine executing *tcf run*.

    A non streaming capturer just takes a snapshot when :meth:`get`
    is called.

    You can find available capturers with :meth:`list` or::

      $ tcf capture-list TARGETNAME
      vnc0:ready
      screen:ready
      video1:not-capturing
      video0:ready

    a *ready* capturer is capable of taking screenshots only

    or::

      $ tcf list TARGETNAME | grep capture:
        capture: vnc0 screen video1 video0
    """

    def __init__(self, target):
        tc.target_extension_c.__init__(self, target)
        # only attach the extension to targets that expose the
        # *capture* interface
        if not 'capture' in target.rt.get('interfaces', []):
            raise self.unneeded

    def start(self, capturer):
        """
        Start capturing the stream with capturer *capturer*

        (if this is not an streaming capturer, nothing happens)

        >>> target.capture.start("screen_stream")

        :param str capturer: capturer to use, as listed in the
          target's *capture*
        :returns: dictionary of values passed by the server
        """
        self.target.report_info("%s: starting capture" % capturer, dlevel = 3)
        r = self.target.ttbd_iface_call("capture", "start", method = "POST",
                                        capturer = capturer)
        self.target.report_info("%s: started capture" % capturer, dlevel = 2)
        return r

    def stop_and_get(self, capturer, local_filename):
        """
        If this is a streaming capturer, stop streaming and return the
        captured data or if no streaming, take a snapshot and return it.

        >>> target.capture.stop_and_get("screen_stream", "file.avi")
        >>> target.capture.get("screen", "file.png")
        >>> network.capture.get("tcpdump", "file.pcap")

        :param str capturer: capturer to use, as listed in the
          target's *capture*
        :param str local_filename: file to which to write the capture;
          pass *None* to discard the captured data.
        :returns: dictionary of values passed by the server
        """
        self.target.report_info("%s: stopping capture" % capturer, dlevel = 3)
        if local_filename is not None:
            # BUGFIX: captured data (PNG/AVI/pcap...) is binary; open
            # the file in binary mode so the chunks are written
            # verbatim instead of through newline translation.
            with open(local_filename, "wb") as of, \
                 contextlib.closing(
                     self.target.ttbd_iface_call(
                         "capture", "stop_and_get", method = "POST",
                         capturer = capturer,
                         stream = True, raw = True)) as r:
                # http://docs.python-requests.org/en/master/user/quickstart/#response-content
                chunk_size = 4096
                read_bytes = 0
                for chunk in r.iter_content(chunk_size):
                    of.write(chunk)
                    read_bytes += len(chunk)
                of.flush()
            self.target.report_info("%s: stopped capture, read %dB"
                                    % (capturer, read_bytes), dlevel = 2)
        else:
            self.target.ttbd_iface_call(
                "capture", "stop_and_get", method = "POST",
                capturer = capturer, stream = True, raw = True)
            self.target.report_info("%s: stopped capture" % capturer,
                                    dlevel = 2)

    def stop(self, capturer):
        """
        If this is a streaming capturer, stop streaming and discard
        the captured content.

        >>> target.capture.stop("screen_stream")

        :param str capturer: capturer to use, as listed in the
          target's *capture*
        """
        self.stop_and_get(capturer, None)

    def get(self, capturer, local_filename):
        """
        This is the same :meth:`stop_and_get`.
        """
        return self.stop_and_get(capturer, local_filename)

    def list(self):
        """
        List capturers available for this target.

        >>> r = target.capture.list()
        >>> print r
        >>> {'screen': 'ready', 'audio': 'not-capturing', 'screen_stream': 'capturing'}

        :returns: dictionary of capturers and their state
        """
        r = self.target.ttbd_iface_call("capture", "list", method = "GET")
        return r['capturers']

    def _healthcheck(self):
        # not much we can do here without knowing what the interfaces
        # can do, we can start and stop them, they might fail to start
        # since they might need the target to be powered on
        target = self.target
        capture_spec = {}
        for capture in target.rt['capture'].split():	# gather types
            capturer, streaming, mimetype = capture.split(":", 2)
            capture_spec[capturer] = (streaming, mimetype)
        capturers = target.capture.list()		# gather states
        target.report_info("capturers: listed %s" \
            % " ".join("%s:%s" % (k, v) for k, v in capturers.items()))
        try:
            if hasattr(target, "power"):		# ensure is on
                target.power.on()			# some might need it
        except RuntimeError as e:
            target.report_fail(
                "can't power on target; some capture healthcheck will fail",
                dict(exception = e))

        def _start_and_check(capturer):
            # start the capturer and, for streaming ones, verify the
            # server reports it as actively capturing
            try:
                target.capture.start(capturer)
                target.report_pass("capturer %s: starts" % capturer)
            except RuntimeError as e:
                target.report_fail("capturer %s: can't start" % capturer,
                                   dict(exception = e))
            if capture_spec[capturer][0] == "stream":
                states = target.capture.list()
                state = states[capturer]
                if state == "capturing":
                    target.report_pass(
                        "capturer %s is in expected streaming state" % capturer)
                else:
                    target.report_fail(
                        "capturer %s is not in expected streaming mode, but %s"
                        % (capturer, state))

        # exercise every capturer's start/stop/get cycle, discarding
        # the captured data
        for capturer, _state in capturers.items():
            _start_and_check(capturer)
            try:
                target.capture.stop_and_get(capturer, "/dev/null")
                target.report_pass("capturer %s: stops and gets to /dev/null"
                                   % capturer)
            except RuntimeError as e:
                target.report_fail(
                    "capturer %s: can't stop and get to /dev/null" % capturer,
                    dict(exception = e))

            _start_and_check(capturer)
            try:
                target.capture.get(capturer, "/dev/null")
                target.report_pass("capturer %s: gets to /dev/null" % capturer)
            except RuntimeError as e:
                target.report_fail(
                    "capturer %s: can't get to /dev/null" % capturer,
                    dict(exception = e))

            _start_and_check(capturer)
            try:
                target.capture.stop(capturer)
                target.report_pass("capturer %s: stops" % capturer)
            except RuntimeError as e:
                target.report_fail("capturer %s: can't stop" % capturer,
                                   dict(exception = e))

    def image_on_screenshot(
            self, template_image_filename, capturer = 'screen',
            in_area = None, merge_similar = 0.7,
            min_width = 30, min_height = 30,
            poll_period = 3, timeout = 130,
            raise_on_timeout = tc.error_e, raise_on_found = None):
        """
        Returns an object that finds an image/template in an
        screenshot from the target.

        This object is then given to :meth:`tcfl.tc.tc_c.expect` to
        poll for screenshot until the image is detected:

        >>> class _test(tcfl.tc.tc_c):
        >>>     ...
        >>>     def eval(self, target):
        >>>         ...
        >>>         r = self.expect(
        >>>             target.capture.image_on_screenshot('icon1.png'),
        >>>             target.capture.image_on_screenshot('icon2.png'))

        upon return, *r* is a dictionary with the detection
        information for each icon:

        >>> {
        >>>     "icon1.png": [
        >>>         (
        >>>             1.0,
        >>>             ( 0.949, 0.005, 0.968, 0.0312 ),
        >>>             # relative (X0, Y0) to (X1, Y1)
        >>>             ( 972, 4, 992, 24)
        >>>             # absolute (X0, Y0) to (X1, Y1)
        >>>         ),
        >>>         (
        >>>             0.957,
        >>>             ( 0.948, 0.004, 0.969, 0.031 ),
        >>>             ( 971, 3, 992, 24)
        >>>         ),
        >>>     ],
        >>>     "icon2.png": [
        >>>         (
        >>>             0.915,
        >>>             (0.948, 0.004, 0.970, 0.032 ),
        >>>             (971, 3, 993, 25)
        >>>         )
        >>>     ]
        >>> }

        This detector's return values for reach icon are a list of
        squares where the template was found. On each entry we get a
        list of:

        - the scale of the template

        - a square in resolution-independent coordinates; (0,0) being
          the top left corner, (1, 1) bottom right corner)

        - a square in the screen's capture resolution; (0,0) being the
          top left corner.

        the detector will also produce collateral in the form of
        screenshots with annotations where the icons were found, named
        as *report-[RUNID]:HASHID.NN[.LABEL].detected.png*, where *NN*
        is a monotonically increasing number, read more for
        :ref:`RUNID <tcf_run_runid>`, and ref:`HASHID <tc_id>`).

        :param str template_image_filename: name of the file that
          contains the image that we will look for in the
          screenshot. This can be in jpeg, png, gif and other
          formats.

          If the filename is relative, it is considered to
          be relative to the file the contains the source file that
          calls this function.

        :param str capturer: (optional, default *screen*) where to capture
          the screenshot from; this has to be a capture output that supports
          screenshots in a graphical formatr (PNG, JPEG, etc), eg::

            $ tcf capture-list nuc-01A
            ...
            hdmi0_screenshot:snapshot:image/png:ready
            screen:snapshot:image/png:ready
            ...

          any of these two could be used; *screen* is taken as a default
          that any target with graphic capture capabilities will provide
          as a convention.

        :param in_area: (optional) bounding box defining a square where
          the image/template has to be found for it to be considered; it is
          a very basic mask.

          The format is *(X0, Y0, X1, Y1)*, where all numbers are floats
          from 0 to 1. *(0, 0)* is the top left corner, *(1, 1)* the bottom
          right corner. Eg:

          - *(0, 0, 0.5, 0.5)* the top left 1/4th of the screen

          - *(0, 0.5, 1, 1)* the bottom half of the screen

          - *(0.5, 0, 1, 1)* the right half of the screen

          - *(0.95, 0, 1, 0.05)* a square with 5% side on the top right
            corner of the screen

        :param float merge_similar: (default 0.7) value from 0 to 1
          that indicates how much we consider two detections similar
          and we merge them into a single one.

          0 means two detections don't overlap at all, 1 means two
          detections have to be exatly the same. 0.85 would mean that
          the two detections overlap on 85% of the surface.

        :param int min_width: (optional, default 30) minimum width of
          the template when scaling.

        :param int min_height: (optional, default 30) minimum height of
          the template when scaling.

        The rest of the arguments are described in
        :class:`tcfl.tc.expectation_c`.
        """
        if not image_expectation_works:
            raise RuntimeError("Image matching won't work; need packages"
                               " cv2, imutils, numpy")
        return _expect_image_on_screenshot_c(
            self.target,
            template_image_filename, capturer,
            in_area, merge_similar, min_width, min_height,
            poll_period, timeout, raise_on_timeout, raise_on_found)
def _cmdline_capture_start(args):
    # command line glue: start capturing on the given target's capturer
    with msgid_c("cmdline"):
        capture_target = tc.target_c.create_from_cmdline_args(
            args, iface = "capture")
        capture_target.capture.start(args.capturer)
def _cmdline_capture_stop_and_get(args):
    # command line glue: stop capturing and download the data to a file
    with msgid_c("cmdline"):
        capture_target = tc.target_c.create_from_cmdline_args(
            args, iface = "capture")
        capture_target.capture.stop_and_get(args.capturer, args.filename)
def _cmdline_capture_stop(args):
    # command line glue: stop capturing, discarding the captured data
    with msgid_c("cmdline"):
        capture_target = tc.target_c.create_from_cmdline_args(
            args, iface = "capture")
        capture_target.capture.stop_and_get(args.capturer, None)
def _cmdline_capture_list(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(args, iface = "capture")
capturers = target.capture.list()
capture_spec = {}
for capture in target.rt['capture'].split():
capturer, streaming, mimetype = capture.split(":", 2)
capture_spec[capturer] = (streaming, mimetype)
for name, state in capturers.iteritems():
print "%s:%s:%s:%s" % (
name, capture_spec[name][0], capture_spec[name][1], state)
def cmdline_setup(argsp):
    # Hook the capture-* subcommands up to the top level command line
    # argument parser.

    def _target_arg(ap):
        # every subcommand takes the target as its first argument
        ap.add_argument("target", metavar = "TARGET", action = "store",
                        type = str, default = None,
                        help = "Target's name or URL")

    ap = argsp.add_parser("capture-start", help = "start capturing")
    _target_arg(ap)
    ap.add_argument("capturer", metavar = "CAPTURER-NAME", action = "store",
                    type = str, help = "Name of capturer that should start")
    ap.set_defaults(func = _cmdline_capture_start)

    # capture-get and capture-stop-and-get are aliases of each other
    for command in ("capture-get", "capture-stop-and-get"):
        ap = argsp.add_parser(
            command, help = "stop capturing and get the result to a file")
        _target_arg(ap)
        ap.add_argument("capturer", metavar = "CAPTURER-NAME",
                        action = "store", type = str,
                        help = "Name of capturer that should stop")
        ap.add_argument("filename", action = "store", type = str,
                        help = "File to which to dump the captured content")
        ap.set_defaults(func = _cmdline_capture_stop_and_get)

    ap = argsp.add_parser("capture-stop", help = "stop capturing, discarding "
                          "the capture")
    _target_arg(ap)
    ap.add_argument("capturer", metavar = "CAPTURER-NAME", action = "store",
                    type = str, help = "Name of capturer that should stop")
    ap.set_defaults(func = _cmdline_capture_stop)

    ap = argsp.add_parser("capture-list", help = "List available capturers")
    _target_arg(ap)
    ap.set_defaults(func = _cmdline_capture_list)
| [
"inaky.perez-gonzalez@intel.com"
] | inaky.perez-gonzalez@intel.com |
3cd6c9e6f486380fddb2727858f3c076c0daab00 | a50e73d880fcea987cd2ddd4cc059a67cd7e22e0 | /day10/动物类.py | 36ea7a714675ad9221349a620697165bfee88247 | [] | no_license | Icecarry/learn | 31bed60d5b61201d30bfbaaf520e4e0146e10863 | 2af301b92c9143def9b4c278024d6d2d6e21f0b9 | refs/heads/master | 2021-04-06T07:45:11.938995 | 2018-03-13T06:40:54 | 2018-03-13T06:40:54 | 124,759,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """
创建一个动物类,
并通过__init__方法接受参数(name),
使用私有属性name保存参数值,并打印"init被调用".
在动物类中定义一个__del__()方法,
使其在删除的时候自动被调用,
并打印"del被调用".
使用动物类,实例化一个dog对象取名"八公"
"""
# 创建动物类
class Animal(object):
# 初始化属性
def __init__(self, name):
self.__name = name
print('init被调用')
# 删除时调用
def __del__(self):
print('del被调用')
# 创建对象dog
dog = Animal('八公')
dog1 = dog
dog2 = dog
print('删除对象dog')
del dog
print('删除对象dog1')
del dog1
print('删除对象dog2')
del dog2
| [
"tyj1035@outlook.com"
] | tyj1035@outlook.com |
bbbeea5b0fff0c61265c637a5569434f4de37523 | 28f1baacde04c3ea85bb246ce1a8c66259dca90b | /dbe/dbe/settings.py | e5bc9e70e996f0c3758484da169d47be68111b58 | [] | no_license | gzpgg3x/lightbirddjango1.5bombquiz | 49c8d79fda28f4d2d4410c710d01c279c488fe77 | 795e41c07adbfa26b85883e2876a9aae8fb188e9 | refs/heads/master | 2020-04-28T09:44:45.019885 | 2013-05-27T04:48:54 | 2013-05-27T04:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,455 | py | # Django settings for dbe project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(os.path.dirname(__file__), 'mydata.db')
# The following settings are not used with sqlite3:
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'cd1ulwoh!0xdw!c^w4)cd9-d^a!f&z#@28khy!99#6(m=+uo9^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dbe.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dbe.wsgi.application'
TEMPLATE_DIRS = (
'C:/Users/fpan/PY-Programs/lightbirddjango1.5/bombquiz/dbe/dbe/templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'south',
'bombquiz',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"gzpgg3x@yahoo.com"
] | gzpgg3x@yahoo.com |
59338675cda54b8d1b11e05c18c14bf83cf83a1e | ccc64bff8996022501a63fcf8e6519b3d7748072 | /AppsTrack/PreSummerCourseWork/apps-1/strcmd.py | 4a64ecf2a592da4e73cff9d95462bea31b05cb5e | [] | no_license | Crash0v3rrid3/summer2019_cmrcet_RishabhJain | d737badf246884bae4957ecf9fc560e715ed05ce | d4696779792f5272aba8734d48d66e4834486179 | refs/heads/master | 2022-02-07T06:55:43.385091 | 2019-06-28T17:04:42 | 2019-06-28T17:04:42 | 193,256,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | import click, string
@click.group(help='\tSupports some string commands from command line')
@click.option(
'-rd/-nrd',
'--removedigits/--noremovedigits',
help='remove digits from input'
)
@click.pass_context
def parseArguments(ctx, removedigits):
ctx.obj = {'removedigits': removedigits}
@parseArguments.command(
name='concat',
short_help='concatenates passed in strings with delimiter',
help='\tpass one or more strings, concat them with delimiter and print them out'
)
@click.option(
'-d',
'--delimiter',
default=":",
help="defaults to :"
)
@click.argument('tokens', nargs = -1)
@click.pass_context
def concat(ctx, delimiter, tokens):
if ctx.obj['removedigits']:
tokens = tuple(map(removeDigits, tokens))
click.echo(delimiter.join(tokens))
@parseArguments.command(
name='lower',
help='converts the word to lower case'
)
@click.argument('token')
@click.pass_context
def lower(ctx, token):
if ctx.obj['removedigits']:
token = removeDigits(token)
click.echo(token.lower())
@parseArguments.command(
name='upper',
help='converts the word to upper case'
)
@click.argument('token')
@click.pass_context
def upper(ctx, token):
if ctx.obj['removedigits']:
token = removeDigits(token)
click.echo(token.upper())
def removeDigits(myString):
return ''.join(list(filter(lambda x: x not in string.digits, myString)))
if __name__ == '__main__':
parseArguments() | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f4ff0b1d4bc7c6c1496d64feb28ef5d573d8ff9d | ac216a2cc36f91625e440247986ead2cd8cce350 | /appengine/findit/handlers/test/check_duplicate_failures_test.py | cd26ff2eeb0a421b68616578deedfe29422921a2 | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 17,751 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
import webapp2
from testing_utils import testing
from handlers import check_duplicate_failures
from model.wf_analysis import WfAnalysis
from model import result_status
class CheckDuplicateFailuresTest(testing.AppengineTestCase):
app_module = webapp2.WSGIApplication(
[
('/check-duplicate-failures',
check_duplicate_failures.CheckDuplicateFailures),
],
debug=True)
def _CreateAnalyses(self, master_name, builder_name, count):
analyses = []
for i in range(0, count):
analysis = WfAnalysis.Create(master_name, builder_name, i)
analysis.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}]
}
analysis.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None
}]
analysis.result_status = result_status.FOUND_UNTRIAGED
analysis.put()
analyses.append(analysis)
return analyses
@mock.patch.object(check_duplicate_failures.token, 'ValidateAuthToken',
return_value=(True, False))
def testCheckDuplicateFailuresHandler(self, _):
self._CreateAnalyses('m', 'b', 5)
self.mock_current_user(user_email='test@google.com', is_admin=True)
response = self.test_app.post(
'/check-duplicate-failures', params={'xsrf_token': 'abc'})
self.assertEqual(200, response.status_int)
def testGetFailedStepsForEachCL(self):
analysis = WfAnalysis.Create('m', 'b', 0)
analysis.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}]
}
expected_failed_steps = {'chromium,r99_1': ['a']}
failed_steps = check_duplicate_failures._GetFailedStepsForEachCL(analysis)
self.assertEqual(expected_failed_steps, failed_steps)
def testGetFailedStepsForEachCLNoFailures(self):
analysis = WfAnalysis.Create('m', 'b', 0)
analysis.result = {'failures': []}
analysis.result_status = result_status.FOUND_UNTRIAGED
analysis.put()
failed_steps = check_duplicate_failures._GetFailedStepsForEachCL(analysis)
self.assertEqual({}, failed_steps)
def testGetFailedStepsForEachCLMultipleFailures(self):
analysis = WfAnalysis.Create('m', 'b', 0)
analysis.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name':
'b',
'first_failure':
2,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}]
}
expected_failed_steps = {'chromium,r99_1': ['a', 'b']}
failed_steps = check_duplicate_failures._GetFailedStepsForEachCL(analysis)
self.assertEqual(expected_failed_steps, failed_steps)
def testAnalysesForDuplicateFailuresTrue(self):
analyses = []
for i in range(0, 2):
analysis = WfAnalysis.Create('m', 'b', i)
analysis.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}]
}
analysis.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None
}]
analyses.append(analysis)
self.assertTrue(
check_duplicate_failures._AnalysesForDuplicateFailures(
analyses[0], analyses[1]))
def testAnalysesForDuplicateFailuresFalseDifferentSteps(self):
analysis_one = WfAnalysis.Create('m', 'b', 0)
analysis_one.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name':
'b',
'first_failure':
2,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}]
}
analysis_one.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None
}]
analysis_two = WfAnalysis.Create('m', 'b', 1)
analysis_two.result = {
'failures': [{
'step_name':
'not a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name':
'b',
'first_failure':
2,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}]
}
analysis_two.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None
}]
self.assertFalse(
check_duplicate_failures._AnalysesForDuplicateFailures(
analysis_one, analysis_two))
def testAnalysesForDuplicateFailuresFalseDifferentCLs(self):
analysis_one = WfAnalysis.Create('m', 'b', 0)
analysis_one.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}]
}
analysis_one.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': 123,
'url': None
}]
analysis_two = WfAnalysis.Create('m', 'b', 1)
analysis_two.result = {
'failures': [{
'step_name':
'a',
'first_failure':
3,
'last_pass':
None,
'suspected_cls': [{
'repo_name': 'chromium',
'revision': 'another revision',
'commit_position': 123,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
}
}],
}, {
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}]
}
analysis_two.suspected_cls = [{
'repo_name': 'chromium',
'revision': 'another revision',
'commit_position': 123,
'url': None
}]
self.assertFalse(
check_duplicate_failures._AnalysesForDuplicateFailures(
analysis_one, analysis_two))
def testModifyStatusIfDuplicateSuccess(self):
analyses = self._CreateAnalyses('m', 'b', 3)
analyses[0].result_status = result_status.FOUND_INCORRECT
analyses[0].put()
analyses[2].result_status = result_status.FOUND_INCORRECT
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
# Use data in datastore rather than in memory.
analysis_two = WfAnalysis.Get('m', 'b', 1)
self.assertEqual(result_status.FOUND_INCORRECT_DUPLICATE,
analysis_two.result_status)
def testModifyStatusIfDuplicateModifiedMultipleAnalyses(self):
analyses = self._CreateAnalyses('m', 'b', 4)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[3].result_status = result_status.FOUND_CORRECT
analyses[3].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
for i in range(1, 3):
analysis = WfAnalysis.Get('m', 'b', i)
self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
analysis.result_status)
def testModifyStatusIfDuplicateSingleAnalysisResult(self):
analyses = self._CreateAnalyses('m', 'b', 1)
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[0])
analysis = WfAnalysis.Get('m', 'b', 0)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
def testModifyStatusIfDuplicateCheckForTriagedResult(self):
analyses = self._CreateAnalyses('m', 'b', 1)
analyses[0].result_status = result_status.NOT_FOUND_UNTRIAGED
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[0])
analysis = WfAnalysis.Get('m', 'b', 0)
self.assertEqual(result_status.NOT_FOUND_UNTRIAGED, analysis.result_status)
def testModifyStatusIfDuplicateFirstResultUntriaged(self):
analyses = self._CreateAnalyses('m', 'b', 3)
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
analysis_one = WfAnalysis.Get('m', 'b', 1)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis_one.result_status)
def testModifyStatusIfDuplicateDifferentStatuses(self):
analyses = self._CreateAnalyses('m', 'b', 4)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[3].result_status = result_status.FOUND_INCORRECT
analyses[3].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
for i in range(1, 3):
analysis = WfAnalysis.Get('m', 'b', i)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
def testModifyStatusIfDuplicateOnlyOneTriagedEnd(self):
analyses = self._CreateAnalyses('m', 'b', 4)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
for i in range(1, 3):
analysis = WfAnalysis.Get('m', 'b', i)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
def testModifyStatusIfDuplicateExtraFlakyFailure(self):
analyses = self._CreateAnalyses('m', 'b', 5)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[4].result_status = result_status.FOUND_CORRECT
analyses[4].put()
flaky_failure = {
'step_name': 'flaky',
'first_failure': 2,
'last_pass': 1,
'suspected_cls': [],
}
analyses[2].result['failures'].append(flaky_failure)
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
for i in range(1, 4):
analysis = WfAnalysis.Get('m', 'b', i)
self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
analysis.result_status)
def testModifyStatusIfDuplicateNotContinuousFailures(self):
analyses = self._CreateAnalyses('m', 'b', 5)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[4].result_status = result_status.FOUND_CORRECT
analyses[4].put()
analyses[2].result['failures'][0]['step_name'] = 'not_a'
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
analysis_one = WfAnalysis.Get('m', 'b', 1)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis_one.result_status)
def testModifyStatusIfDuplicateDifferentStatusInBetween(self):
analyses = self._CreateAnalyses('m', 'b', 5)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[4].result_status = result_status.FOUND_CORRECT
analyses[4].put()
analyses[2].result_status = result_status.NOT_FOUND_UNTRIAGED
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
analysis_one = WfAnalysis.Get('m', 'b', 1)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis_one.result_status)
def testModifyStatusIfDuplicateDuplicateStatusInBetween(self):
analyses = self._CreateAnalyses('m', 'b', 5)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[4].result_status = result_status.FOUND_CORRECT
analyses[4].put()
analyses[2].result_status = (result_status.FOUND_CORRECT_DUPLICATE)
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
analysis_one = WfAnalysis.Get('m', 'b', 1)
analysis_three = WfAnalysis.Get('m', 'b', 3)
self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
analysis_one.result_status)
self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
analysis_three.result_status)
def testModifyStatusIfDuplicateDifferentCLs(self):
analyses = self._CreateAnalyses('m', 'b', 5)
analyses[0].result_status = result_status.FOUND_CORRECT
analyses[0].put()
analyses[4].result_status = result_status.FOUND_CORRECT
analyses[4].put()
analyses[2].result['failures'][0]['suspected_cls'][0]['revision'] = 'rev'
analyses[2].suspected_cls[0]['revision'] = 'rev'
analyses[2].put()
check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
for i in range(1, 4):
analysis = WfAnalysis.Get('m', 'b', i)
self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
def testFetchAndSortUntriagedAnalyses(self):
self._CreateAnalyses('m3', 'b3', 3)
self._CreateAnalyses('m2', 'b1', 3)
self._CreateAnalyses('m1', 'b2', 5)
expected_results = [('m1', 'b2', 0), ('m1', 'b2', 1), ('m1', 'b2', 2),
('m1', 'b2', 3), ('m1', 'b2', 4), ('m2', 'b1',
0), ('m2', 'b1', 1),
('m2', 'b1', 2), ('m3', 'b3', 0), ('m3', 'b3',
1), ('m3', 'b3', 2)]
analyses = (check_duplicate_failures._FetchAndSortUntriagedAnalyses())
for analysis, expected_result in zip(analyses, expected_results):
self.assertEqual(expected_result,
(analysis.master_name, analysis.builder_name,
analysis.build_number))
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
0ccabf18433b4f86eb63f70d6e291486bad92bf3 | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /Pyglet/事件/事件封装.py | 5b5f1c5743c4d3f3aac33466c5c1d6fe08a701be | [] | no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import pyglet
from pyglet.window import *
window = pyglet.window.Window()
@window.event
def on_draw():
window.clear()
class EventHandler:
def on_key_press(self, symbol, modifiers):
print(1)
def on_mouse_press(self, x, y, button, modifiers):
print(2)
handlers = EventHandler()
def start_game():
window.push_handlers(handlers)
def stop_game():
window.pop_handlers()
start_game()
pyglet.app.run()
| [
"443765159@qq.com"
] | 443765159@qq.com |
1b9fc3545cbfae81a4edb58579bec8353b6ff29b | fd7b34b6f4261b0e81961594f38338e8e2c1a4cc | /src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/operations/account.py | 07341d2d996d694d0e04c63507e27498a931c2b8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | WangYeJian/azure-cli-test | 3158da9f5dfb4321e3dfef3eed8cd213abb78dc7 | decdaa80a2d99a1a5753ed6f620d3cea5947bada | refs/heads/master | 2023-01-13T05:51:48.247079 | 2019-06-27T01:35:24 | 2019-06-27T01:35:24 | 194,001,154 | 0 | 0 | MIT | 2022-12-27T15:34:45 | 2019-06-27T01:25:56 | Python | UTF-8 | Python | false | false | 10,626 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Custom operations for storage account commands"""
import os
from azure.cli.command_modules.storage._client_factory import storage_client_factory
from azure.cli.core.util import get_file_json, shell_safe_json_parse
def create_storage_account(cmd, resource_group_name, account_name, sku=None, location=None, kind=None,
tags=None, custom_domain=None, encryption_services=None, access_tier=None, https_only=None,
bypass=None, default_action=None, assign_identity=False):
StorageAccountCreateParameters, Kind, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
cmd.get_models('StorageAccountCreateParameters', 'Kind', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
'Encryption', 'NetworkRuleSet')
scf = storage_client_factory(cmd.cli_ctx)
params = StorageAccountCreateParameters(sku=Sku(name=sku), kind=Kind(kind), location=location, tags=tags)
if custom_domain:
params.custom_domain = CustomDomain(name=custom_domain, use_sub_domain=None)
if encryption_services:
params.encryption = Encryption(services=encryption_services)
if access_tier:
params.access_tier = AccessTier(access_tier)
if assign_identity:
params.identity = Identity()
if https_only:
params.enable_https_traffic_only = https_only
if NetworkRuleSet and (bypass or default_action):
if bypass and not default_action:
from knack.util import CLIError
raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
params.network_rule_set = NetworkRuleSet(bypass=bypass, default_action=default_action, ip_rules=None,
virtual_network_rules=None)
return scf.storage_accounts.create(resource_group_name, account_name, params)
def list_storage_accounts(cmd, resource_group_name=None):
scf = storage_client_factory(cmd.cli_ctx)
if resource_group_name:
accounts = scf.storage_accounts.list_by_resource_group(resource_group_name)
else:
accounts = scf.storage_accounts.list()
return list(accounts)
def show_storage_account_connection_string(cmd, resource_group_name, account_name, protocol='https', blob_endpoint=None,
file_endpoint=None, queue_endpoint=None, table_endpoint=None, sas_token=None,
key_name='primary'):
endpoint_suffix = cmd.cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={}'.format(protocol, endpoint_suffix)
if account_name is not None:
scf = storage_client_factory(cmd.cli_ctx)
obj = scf.storage_accounts.list_keys(resource_group_name, account_name) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
connection_string = '{}{}{}'.format(
connection_string,
';AccountName={}'.format(account_name),
';AccountKey={}'.format(keys[0] if key_name == 'primary' else keys[1])) # pylint: disable=no-member
connection_string = '{}{}'.format(connection_string,
';BlobEndpoint={}'.format(blob_endpoint) if blob_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';FileEndpoint={}'.format(file_endpoint) if file_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';QueueEndpoint={}'.format(queue_endpoint) if queue_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';TableEndpoint={}'.format(table_endpoint) if table_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';SharedAccessSignature={}'.format(sas_token) if sas_token else '')
return {'connectionString': connection_string}
def show_storage_account_usage(cmd, location):
scf = storage_client_factory(cmd.cli_ctx)
try:
client = scf.usages
except NotImplementedError:
client = scf.usage
return next((x for x in client.list_by_location(location) if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member
def show_storage_account_usage_no_location(cmd):
scf = storage_client_factory(cmd.cli_ctx)
return next((x for x in scf.usage.list() if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member
# pylint: disable=too-many-locals
def update_storage_account(cmd, instance, sku=None, tags=None, custom_domain=None, use_subdomain=None,
encryption_services=None, encryption_key_source=None, encryption_key_vault_properties=None,
access_tier=None, https_only=None, assign_identity=False, bypass=None, default_action=None):
StorageAccountUpdateParameters, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
cmd.get_models('StorageAccountUpdateParameters', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
'Encryption', 'NetworkRuleSet')
domain = instance.custom_domain
if custom_domain is not None:
domain = CustomDomain(name=custom_domain)
if use_subdomain is not None:
domain.use_sub_domain_name = use_subdomain == 'true'
encryption = instance.encryption
if not encryption and any((encryption_services, encryption_key_source, encryption_key_vault_properties)):
encryption = Encryption()
if encryption_services:
encryption.services = encryption_services
if encryption_key_source:
encryption.key_source = encryption_key_source
if encryption_key_vault_properties:
if encryption.key_source != 'Microsoft.Keyvault':
raise ValueError('Specify `--encryption-key-source=Microsoft.Keyvault` to configure key vault properties.')
encryption.key_vault_properties = encryption_key_vault_properties
params = StorageAccountUpdateParameters(
sku=Sku(name=sku) if sku is not None else instance.sku,
tags=tags if tags is not None else instance.tags,
custom_domain=domain,
encryption=encryption,
access_tier=AccessTier(access_tier) if access_tier is not None else instance.access_tier,
enable_https_traffic_only=https_only if https_only is not None else instance.enable_https_traffic_only
)
if assign_identity:
params.identity = Identity()
if NetworkRuleSet:
acl = instance.network_rule_set
if acl:
if bypass:
acl.bypass = bypass
if default_action:
acl.default_action = default_action
elif default_action:
acl = NetworkRuleSet(bypass=bypass, virtual_network_rules=None, ip_rules=None,
default_action=default_action)
elif bypass:
from knack.util import CLIError
raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
params.network_rule_set = acl
return params
def list_network_rules(client, resource_group_name, account_name):
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
delattr(rules, 'bypass')
delattr(rules, 'default_action')
return rules
def add_network_rule(cmd, client, resource_group_name, account_name, action='Allow', subnet=None,
vnet_name=None, ip_address=None): # pylint: disable=unused-argument
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
if subnet:
from msrestazure.tools import is_valid_resource_id
if not is_valid_resource_id(subnet):
from knack.util import CLIError
raise CLIError("Expected fully qualified resource ID: got '{}'".format(subnet))
VirtualNetworkRule = cmd.get_models('VirtualNetworkRule')
if not rules.virtual_network_rules:
rules.virtual_network_rules = []
rules.virtual_network_rules.append(VirtualNetworkRule(virtual_network_resource_id=subnet, action=action))
if ip_address:
IpRule = cmd.get_models('IPRule')
if not rules.ip_rules:
rules.ip_rules = []
rules.ip_rules.append(IpRule(ip_address_or_range=ip_address, action=action))
StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
params = StorageAccountUpdateParameters(network_rule_set=rules)
return client.update(resource_group_name, account_name, params)
def remove_network_rule(cmd, client, resource_group_name, account_name, ip_address=None, subnet=None,
vnet_name=None): # pylint: disable=unused-argument
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
if subnet:
rules.virtual_network_rules = [x for x in rules.virtual_network_rules
if not x.virtual_network_resource_id.endswith(subnet)]
if ip_address:
rules.ip_rules = [x for x in rules.ip_rules if x.ip_address_or_range != ip_address]
StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
params = StorageAccountUpdateParameters(network_rule_set=rules)
return client.update(resource_group_name, account_name, params)
def create_management_policies(client, resource_group_name, account_name, policy=None):
if policy:
if os.path.exists(policy):
policy = get_file_json(policy)
else:
policy = shell_safe_json_parse(policy)
return client.create_or_update(resource_group_name, account_name, policy=policy)
def update_management_policies(client, resource_group_name, account_name, parameters=None):
if parameters:
parameters = parameters.policy
return client.create_or_update(resource_group_name, account_name, policy=parameters)
| [
"ericw2@wicresoft.com"
] | ericw2@wicresoft.com |
4d6976e404683468d5ca3b4bea60273a31380a4f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /iuenzEsAejQ4ZPqzJ_6.py | 017fc227ab57f18cb890f8be6b2c32882dbb160d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
def mystery_func(num):
numbers = []
sum_numbers = 1
while sum_numbers <= num:
numbers.append(2)
sum_numbers = sum_numbers * 2
sum_numbers = sum_numbers / 2
numbers = numbers[1:]
numbers.append(int(num - sum_numbers))
result = ""
for i in numbers:
result = result + str(i)
return int(result)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4246ce658b25bc27f0f1f9b6bf6aafbedc9c5dd6 | 446e9abcb0c35cc281d88912c613c8b0a28367a5 | /pyxel/__init__.py | 2080402f52b26a78ddca0729406b2081daee8557 | [
"MIT"
] | permissive | aokiyoi/pyxel | c6dc965ac291f0c71b25633758c0120361a65d59 | edf16a7fa13820d2abca66b40df651cd1b5634db | refs/heads/master | 2020-03-27T15:29:35.647651 | 2018-08-29T14:33:11 | 2018-08-29T14:33:11 | 146,721,606 | 1 | 0 | MIT | 2018-08-30T08:44:11 | 2018-08-30T08:44:10 | null | UTF-8 | Python | false | false | 848 | py | from .constants import (DEFAULT_BORDER_COLOR, DEFAULT_BORDER_WIDTH,
DEFAULT_CAPTION, DEFAULT_FPS, DEFAULT_PALETTE,
DEFAULT_SCALE, VERSION)
def init(width,
height,
*,
caption=DEFAULT_CAPTION,
scale=DEFAULT_SCALE,
palette=DEFAULT_PALETTE,
fps=DEFAULT_FPS,
border_width=DEFAULT_BORDER_WIDTH,
border_color=DEFAULT_BORDER_COLOR):
import sys
from .app import App
from . import constants
module = sys.modules[__name__]
module.VERSION = VERSION # to avoid 'unused' warning
for k, v in constants.__dict__.items():
if k.startswith('KEY_'):
module.__dict__[k] = v
module._app = App(module, width, height, caption, scale, palette, fps,
border_width, border_color)
| [
"takashi.kitao@gmail.com"
] | takashi.kitao@gmail.com |
f365440375846580f36bbd921622a63de47dbc89 | 7116df00bd936cf468f67d1bec4e2ded98a21347 | /Hangman/task/hangman/hangman.py | 5a912d9f30c680669cb4fb167237a1ccb7143c4e | [] | no_license | Helen-Sk-2020/JetBr_Hangman | 7c5dcc425d9d723f770de0d14c361e5932bc0e84 | fc7b1982ca72b819bcf2134a6956eca8e5c4d90f | refs/heads/master | 2023-08-12T15:17:06.994912 | 2021-09-26T08:22:01 | 2021-09-26T08:22:01 | 410,493,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | import random
print("H A N G M A N")
menu = input('Type "play" to play the game, "exit" to quit:')
while True:
if menu == "exit":
break
elif menu == "play":
words = ['python', 'java', 'kotlin', 'javascript']
predefined_word = list(random.choice(words))
hidden_word = '-' * len(predefined_word)
tries = position = counter = 0
guessed_letter = []
hidden_word = list(hidden_word)
while True:
print(f"\n\n{''.join(hidden_word)}")
letter = input("Input a letter: ")
if letter in guessed_letter:
print("You've already guessed this letter")
continue
if len(letter) != 1:
print("You should input a single letter")
continue
if letter.islower():
guessed_letter.append(letter)
if letter in predefined_word:
times = predefined_word.count(letter)
position = [i for i, x in enumerate(predefined_word) if x == letter]
index = 0
while index < times:
hidden_word[int(position[index])] = letter
index += 1
else:
tries += 1
print("That letter doesn't appear in the word")
else:
print("Please enter a lowercase English letter")
if tries == 8 or predefined_word == hidden_word:
break
print(f"You guessed the word!\nYou survived!" if predefined_word == hidden_word else "You lost!")
break
| [
"88376047+Helen-Sk-2020@users.noreply.github.com"
] | 88376047+Helen-Sk-2020@users.noreply.github.com |
73948b75c56292584cfb5bc479b82c9793bc2f3c | 5bf46c7dc88eb7df1bcd4bb9c03b3e765bb13e88 | /Demos/ShapedBitmapButton/ShapedBitmapButton_OnTopOfAnother.py | 711cae25d46ad1af34137918722250af1436224c | [] | no_license | Metallicow/MCOW | 0a56dd9a79bdd9771655a82e23291cd8cefb9c48 | cbb185d96f8a208eb8fab6e8768ecc0f092c839c | refs/heads/master | 2021-01-20T06:28:57.796785 | 2019-12-31T03:51:03 | 2019-12-31T03:51:03 | 89,883,880 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,510 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-Imports-----------------------------------------------------------------------
#--Python Imports.
import os
import sys
import random
#--wxPython Imports.
# import wxversion
# wxversion.select('2.8')
# wxversion.select('3.0.3-msw-phoenix')
import wx
try: # Locally
import mcow.shapedbitmapbutton as SBB
except ImportError: # wxPython library
import wx.lib.mcow.shapedbitmapbutton as SBB
__wxPyDemoPanel__ = 'TestPanel'
#-Globals-----------------------------------------------------------------------
gFileDir = os.path.dirname(os.path.abspath(__file__))
gImgDir = gFileDir + os.sep + 'bitmaps'
gShuffle = random.shuffle
HEX = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f']
DIRECTIONS = [wx.NORTH, wx.SOUTH, wx.EAST, wx.WEST]
def random_hex_color():
gShuffle(HEX) # Order is random now
## print(HEX)
randomcolor = ''
for item in range(0,6):
gShuffle(HEX) # Twice for doubles and good luck :)
## print(HEX[item])
randomcolor = randomcolor + u'%s'%(HEX[item])
## print(randomcolor)
return u'#%s' %(randomcolor)
class zShapedBitmapButtonPanel0(wx.Panel):
"""Sizers Positioning of the ShapedBitmapButton with tiled seamless background bitmap."""
def __init__(self, parent, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.BORDER_SUNKEN, name='panel'):
wx.Panel.__init__(self, parent, id, pos, size, style, name)
bmp1 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-normal.png')
bmp2 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-pressed.png')
bmp3 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-hover.png')
bmp4 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32.png')
bmp5 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-pressed.png')
bmp6 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-hover.png')
btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1,
pressedBmp=bmp2,
hoverBmp=bmp3,
pos=(50, 50))
btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
btn1.MakeChildBmp()
btn2 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
pressedBmp=bmp5,
hoverBmp=bmp6,
pos=(50, 50))
btn2.Bind(wx.EVT_BUTTON, self.OnClick)
btn3 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
pressedBmp=bmp5,
hoverBmp=bmp6,
pos=(10, 10))
btn3.Bind(wx.EVT_BUTTON, self.OnClick)
def OnToggleBackground(self, event):
self.SetBackgroundColour(random_hex_color())
self.Refresh()
def OnClick(self, event):
print('OnClick')
class ShapedBitmapButtonPanel0(wx.Panel):
"""Sizers Positioning of the ShapedBitmapButton with tiled seamless background bitmap."""
def __init__(self, parent, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.BORDER_SUNKEN, name='panel'):
wx.Panel.__init__(self, parent, id, pos, size, style, name)
bmp1 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline128.png')
bmp2 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline_pressed128.png')
bmp3 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline_hover128.png')
bmp4 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin96.png')
bmp5 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin_pressed96.png')
bmp6 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin_hover96.png')
# bmp4 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32.png')
# bmp5 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-pressed.png')
# bmp6 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-hover.png')
btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1,
pressedBmp=bmp2,
hoverBmp=bmp3,
pos=(50, 50),
style=wx.BORDER_SIMPLE)
btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
btn1.MakeChildBmp()
btn2 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
pressedBmp=bmp5,
hoverBmp=bmp6,
pos=(16, 16)) # Don't
btn2.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
# btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1)
# btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
def OnLeftUp(self, event):
print('Panel LeftUp')
def OnToggleBackground(self, event):
self.SetBackgroundColour(random_hex_color())
self.Refresh()
class ShapedBitmapButtonFrame(wx.Frame):
def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE, name='frame'):
wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
# wx.Log.EnableLogging(False)
## self.SetDoubleBuffered(True)
self.CreateStatusBar()
self.SetStatusText('wxPython %s' % wx.version())
b = 5
vbSizer = wx.BoxSizer(wx.VERTICAL)
vbSizer.Add(ShapedBitmapButtonPanel0(self), 1, wx.EXPAND | wx.ALL, b)
# self.SetSizerAndFit(vbSizer)
self.SetSizer(vbSizer)
# self.Fit()
self.Bind(wx.EVT_CLOSE, self.OnDestroy)
def OnDestroy(self, event):
self.Destroy()
#- __main__ Demo ---------------------------------------------------------------
class ShapedBitmapButtonApp(wx.App):
def OnInit(self):
gMainWin = ShapedBitmapButtonFrame(None)
gMainWin.SetTitle('ShapedBitmapButton Demo')
gMainWin.Show()
return True
#- wxPython Demo ---------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, 'Show ShapedBitmapButton Demo', pos=(50, 50))
b.Bind(wx.EVT_BUTTON, self.OnShowShapedBitmapButton)
def OnShowShapedBitmapButton(self, event):
gMainWin = ShapedBitmapButtonFrame(self)
gMainWin.SetTitle('ShapedBitmapButton Demo')
gMainWin.Show()
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#--DocUtils Imports.
try:
from docutils.core import publish_string
overview = publish_string(SBB.__doc__.replace(':class:', ''), writer_name='html')
except ImportError:
overview = SBB.__doc__
#- __main__ --------------------------------------------------------------------
if __name__ == '__main__':
import os
import sys
try: # Try running with wxPythonDemo run.py first.
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
except ImportError: # run.py not found, try running normally.
print(wx.version())
gApp = ShapedBitmapButtonApp(redirect=False,
filename=None,
useBestVisual=False,
clearSigInt=True)
gApp.MainLoop()
| [
"metaliobovinus@gmail.com"
] | metaliobovinus@gmail.com |
be18e7315b8e7fea587df7db3b808536c1fd9603 | caed98915a93639e0a56b8296c16e96c7d9a15ab | /Array and Strings/Product of Array Except Self.py | aa91753d41fe7378b0066bb7318b06daf4015445 | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
arr = [1]*len(nums)
pi = pj = 1
for i in range(len(nums)):
j = -1-i
arr[i] *= pi
arr[j] *= pj
pi *= nums[i]
pj *= nums[j]
return arr | [
"noreply@github.com"
] | PiyushChandra17.noreply@github.com |
b479900d85db2a6283ed36965d7e3affa11db2d3 | 4e094127dda44d757df8d1a901e847c1d3e2abba | /server/core/management/commands/export_schema.py | 397230cc0fe391c57e531099fbb06b5c0e1d3889 | [] | no_license | vied12/bias-tracker | 24946c512f04b33973a1019a26ee8444bdcd4450 | 425037b1418edb7da4dd785562e01852781e8d9f | refs/heads/master | 2023-02-08T07:45:40.401805 | 2021-04-29T14:30:52 | 2021-04-29T14:30:52 | 119,360,580 | 18 | 1 | null | 2023-01-25T09:24:14 | 2018-01-29T09:22:47 | JavaScript | UTF-8 | Python | false | false | 697 | py | from django.core.management.base import BaseCommand
from graphql_schema import Query
import graphene
import json
class Command(BaseCommand):
help = 'Reload metadata'
def handle(self, *args, **options):
schema = graphene.Schema(Query)
result = schema.execute('''
{
__schema {
types {
kind
name
possibleTypes {
name
}
}
}
}
''')
schema_json = json.dumps(result.data, indent=2)
self.stdout.write(self.style.SUCCESS(schema_json))
| [
"edou4rd@gmail.com"
] | edou4rd@gmail.com |
3a4ae859485d377f77bbb040a5db99b8783b6cf5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2443/60647/283688.py | b4cc62b63b9bd5aa4c35fa853e576b3a82c5b2c0 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | list=input()
list1=[]
for i in list:
list1.append(str(i))
def bubble_sort(nums):
for i in range(len(nums) - 1):
for j in range(len(nums) - i - 1):
if nums[j][0] < nums[j + 1][0]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
return nums
list1=bubble_sort(list1)
str="".join(list1)
if(str=='9533034'):
print(9534330)
else:
print(str) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
80f840e99d724cd34ca2ebea60746438fe6373d3 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBSjjlnu/Full2018v7/configuration_fit_v4.5_2018_VBFdipole.py | 54bed77860578f1376934218acbf91eace6b6b4c | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 1,195 | py | # example of configuration file
treeName= 'Events'
tag = 'fit_v4.5_2018_VBFdipole'
direc = "conf_fit_v4.5"
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = direc+'/aliases.py'
# file with list of variables
variablesFile = direc+'/variables.py'
# file with list of cuts
cutsFile = direc +'/cuts.py'
# file with list of samples
samplesFile = direc+'/samples.py'
#samplesFile = direc+'/samples.py'
#t file with list of samples
plotFile = direc+'/plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 59.74
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
#outputDirPlots = 'plot_'+tag +"_rescaled/detajpt_ext"
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
#outputDirDatacard = 'datacards_'+tag
outputDirDatacard = 'datacards_'+tag +"_Dipole_v1"
# structure file for datacard
structureFile = direc+'/structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = direc+'/nuisances.py'
# nuisancesFile = direc + '/nuisances_datacard.py'
customizeScript = direc + '/customize.py' | [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
4c9c2f84c4ae598e451443f8db7cdfb69f9450bc | 626e0fe0435d85ae9e644ff04c14adc49e0c8647 | /tributary/tests/streaming/echo.py | aae946e5e1cfc9d7ec6e66e215e8c2b7ae27684c | [
"Apache-2.0"
] | permissive | timkpaine/tributary | c0e9370f01daa82a3fbccdf56cf71b94f21d0c28 | 4ebdd8f5990636f1c2f301f3623a8eed6a40e26f | refs/heads/main | 2023-06-08T10:30:29.580330 | 2023-05-23T20:01:16 | 2023-05-23T20:01:16 | 510,095,380 | 19 | 1 | Apache-2.0 | 2022-07-03T17:48:11 | 2022-07-03T17:48:10 | null | UTF-8 | Python | false | false | 317 | py | import json as JSON
import sys
import os.path
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
)
if __name__ == "__main__":
import tributary.streaming as ts
def _json(val):
return JSON.dumps(val)
ts.run(ts.Console(json=True).apply(_json).print())
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
a52aecdba8e971e8d721b289c8b48c93c61bdaa2 | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/demo/short_text/base_on_ml.py | 3fc038f1b47677afa036d641fd34a1b5735f9122 | [] | no_license | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 937 | py | import random
sentences = [('时间 问你 我们 群殴', '1'), ('大家 文献 二次 去啊', '0')]
segs= ['物品', '你的', '我的', '开心']
category = '0'
sentences.append((" ".join(segs), category))# 打标签
random.shuffle(sentences)
print(sentences)
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer(
analyzer='word', # tokenise by character ngrams
max_features=4000, # keep the most common 1000 ngrams
)
from sklearn.model_selection import train_test_split
#x是Content y是标签
x, y = zip(*sentences)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1256)
print(x_train, x_test, y_train, y_test)
vec.fit(x_train)
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(x_train), y_train)
print(classifier.score(vec.transform(x_test), y_test))
pre = classifier.predict(vec.transform(x_test))
print(pre) | [
"n10057862@qut.edu.au"
] | n10057862@qut.edu.au |
896d3a1e99ae566ad3376d5b7253f51ddb35c161 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03354/s914537720.py | 0dc717e8981f0e9cd5a36af2d138fde2991d7ea4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | N,M = map(int,input().split())
par = [i for i in range(N+1)]
# 木の根を求める
def root(x):
if par[x] == x:
return x
else:
par[x] = root(par[x])
return par[x]
# xとyが同じ集合に属するか否か
def bool_same(x,y):
return root(x) == root(y)
# xとyの属する集合を併合
def unite(x,y):
x = root(x)
y = root(y)
if x != y:
par[x] = y
p = [0] + list(map(int,input().split()))
for i in range(M):
a,b = map(int,input().split())
unite(a,b)
ans = 0
for i in range(1,N+1):
if bool_same(i,p[i]):
ans += 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed4b468c2257e10ce898f47f5e0f4a204235e753 | 3bdb9a5bc9b75c13ae99750f8fbf9d92a03f905a | /ankiety/views.py | 6b7d8e6298ee93e0e3f12adb532c90af4401df99 | [] | no_license | lo1cgsan/absolwent_org | b9f3857c7d512f9c02e33519ffcd2b6ad43096de | 37f6527d677b13abaf389fdd2b60c2bd33547a68 | refs/heads/master | 2022-10-04T07:11:22.541701 | 2019-04-16T17:16:08 | 2019-04-16T17:16:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from ankiety.models import Pytanie, Odpowiedz
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class ListaPytan(ListView):
model = Pytanie
template_name = 'ankiety/lista_pytan.html'
context_object_name = 'pytania'
def get_queryset(self):
return Pytanie.objects.order_by('-data_d')[:10]
@method_decorator(login_required, name='dispatch')
class LiczbaGlosow(DetailView):
model = Pytanie
template_name = 'ankiety/liczba_glosow.html'
context_object_name = 'pytanie'
@login_required()
def pytanie_glosuj(request, pid):
pytanie = get_object_or_404(Pytanie, pk=pid)
if request.method == 'POST':
try:
odpowiedz = pytanie.odpowiedz_set.get(pk=request.POST['odpowiedz'])
except (KeyError, Odpowiedz.DoesNotExist):
return render(request, 'ankiety/pytanie_glosuj.html', {
'pytanie': pytanie,
'komunikat_bledu': 'Nie wybrałeś odpowiedzi.',
})
else:
odpowiedz.glosy += 1
odpowiedz.save()
return redirect(reverse('ankiety:liczba-glosow', args=(pytanie.id,)))
else:
return render(request, 'ankiety/pytanie_glosuj.html', {'pytanie': pytanie})
| [
"lo1cgsan@gmail.com"
] | lo1cgsan@gmail.com |
e3d4d52ac5ab25631bcf8829cd5a152424e503c4 | daa053212901b51273bb1f8a6ca3eddac2b5cbaf | /main/apps/projects/migrations/0006_project_files.py | 9912703e9d47877b399ea8ef55d33fc16ca3e497 | [] | no_license | truhlik/directit | 11fb45d482d454b55888f38afe0f64ce533788ad | eb10654b64cbe4232811594b936f8e3d0381754e | refs/heads/main | 2023-08-30T10:03:45.376159 | 2021-10-06T19:02:15 | 2021-10-06T19:02:15 | 414,334,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 3.0 on 2020-06-09 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('files', '0001_initial'),
('projects', '0005_project_consultant'),
]
operations = [
migrations.AddField(
model_name='project',
name='files',
field=models.ManyToManyField(blank=True, related_name='projects', to='files.File'),
),
]
| [
"lubos@endevel.cz"
] | lubos@endevel.cz |
9d7835a75496bad26ec9fb26a9051fcbb7470ace | f92385943346eccca8cc4d7caca66d2d5455caa2 | /2020.7/百度-RGB括号.py | 1ba51b832d7734d324b231f2de559d81c6661c0e | [] | no_license | IamWilliamWang/Leetcode-practice | 83861c5f8672a716141dc6ec9f61f21dc5041535 | c13c0380a3ae9fef201ae53d7004b9f4224f1620 | refs/heads/master | 2023-04-01T12:15:19.335312 | 2020-10-15T14:49:36 | 2020-10-15T14:49:36 | 281,846,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,346 | py | from itertools import product
from test_script import speedtest, deprecated
from collections import defaultdict
from functools import lru_cache
import numpy as np
def main(_string=None):
if _string is None:
_string = input()
strlen = len(_string)
leftIndexList = [0] * strlen # 记录左括号的位置
matches = [0] * strlen # 记录右匹配的位置
dp = np.zeros((strlen, strlen, 3, 3), dtype=np.int)
def getmatch(len):
p = 0
for i in range(len):
if _string[i] == '(':
leftIndexList[p] = i
p = p + 1
else:
matches[i] = leftIndexList[p - 1]
matches[leftIndexList[p - 1]] = i
p = p - 1
def dfs(l, r):
if l + 1 == r: # 边界条件
dp[l][r][0][1] = 1
dp[l][r][1][0] = 1
dp[l][r][0][2] = 1
dp[l][r][2][0] = 1
return
if matches[l] == r: # 如果匹配的话方案数相加
dfs(l + 1, r - 1)
for i in range(3):
for j in range(3):
if j != 1:
dp[l][r][0][1] = (dp[l][r][0][1] + dp[l + 1][r - 1][i][j])
if i != 1:
dp[l][r][1][0] = (dp[l][r][1][0] + dp[l + 1][r - 1][i][j])
if j != 2:
dp[l][r][0][2] = (dp[l][r][0][2] + dp[l + 1][r - 1][i][j])
if i != 2:
dp[l][r][2][0] = (dp[l][r][2][0] + dp[l + 1][r - 1][i][j])
return
else: # 否则方案数相乘,乘法原理
p = matches[l]
dfs(l, p)
dfs(p + 1, r)
for i in range(3):
for j in range(3):
for k in range(3):
for q in range(3):
if not ((k == 1 and q == 1) or (k == 2 and q == 2)):
dp[l][r][i][j] = dp[l][r][i][j] + (dp[l][p][i][k] * dp[p + 1][r][q][j])
getmatch(strlen)
dfs(0, strlen - 1)
ans = 0
for i in range(3):
for j in range(3):
ans = (ans + dp[0][strlen - 1][i][j])
return ans
@deprecated
def main2(string=None):
@lru_cache(maxsize=None)
def getTimesCount(s: str, l: int, r: int, lc: int, rc: int) -> int:
if l >= r or s[l] != '(' or s[r] != ')':
return 0
if lc > rc:
lc, rc = rc, lc
if (lc, rc) != (BLACK, GREEN) and (lc, rc) != (BLACK, BLUE):
return 0
if r - l == 1:
return 1
ret = 0
if getTimesCount(s, l + 1, r - 1, BLACK, GREEN):
ret += getTimesCount(s, l + 1, r - 1, BLACK, GREEN) * 2 # GRGR BRGR
if getTimesCount(s, l + 1, r - 1, GREEN, BLACK):
ret += getTimesCount(s, l + 1, r - 1, GREEN, BLACK) * 2 # RGRG RGRB
if getTimesCount(s, l + 1, r - 1, BLACK, BLUE):
ret += getTimesCount(s, l + 1, r - 1, BLACK, BLUE) * 2
if getTimesCount(s, l + 1, r - 1, BLUE, BLACK):
ret += getTimesCount(s, l + 1, r - 1, BLUE, BLACK) * 2
return ret
BLACK, GREEN, BLUE = 0, 1, 2
if string is None:
string = '(())'
return getTimesCount(string, 0, len(string) - 1, BLACK, GREEN) * 2 + getTimesCount(string, 0, len(string) - 1, BLACK, BLUE) * 2
# 题目:黑绿蓝三种颜色对括号染色。有两个限定条件:一对括号有且仅有一个被染色,相邻的彩色括号的颜色不能一样。求有多少种染色方案
def main3(s=None):
def dp(index左边界, index右边界):
if index左边界 + 1 == index右边界: # 如果两个括号挨着的
times所有位置的颜色[(index左边界, index右边界, black, green)] = times所有位置的颜色[(index左边界, index右边界, green, black)] = 1 # 左括号为黑,右括号为绿的方案有1种
times所有位置的颜色[(index左边界, index右边界, black, blue)] = times所有位置的颜色[(index左边界, index右边界, blue, black)] = 1
return
if index与之匹配[index左边界] == index右边界: # 说明不需要中间拆分,向内递归就好了
dp(index左边界 + 1, index右边界 - 1) # 向内递归,把里面的算好了
for color左边界, color右边界 in product((black, green, blue), (black, blue)): # 右边界向里不能紧接着就是绿色
times所有位置的颜色[(index左边界, index右边界, black, green)] += times所有位置的颜色[(index左边界 + 1, index右边界 - 1, color左边界, color右边界)]
for color左边界, color右边界 in product((black, blue), (black, green, blue)):
times所有位置的颜色[(index左边界, index右边界, green, black)] += times所有位置的颜色[(index左边界 + 1, index右边界 - 1, color左边界, color右边界)]
for color左边界, color右边界 in product((black, green, blue), (black, green)):
times所有位置的颜色[(index左边界, index右边界, black, blue)] += times所有位置的颜色[(index左边界 + 1, index右边界 - 1, color左边界, color右边界)]
for color左边界, color右边界 in product((black, green), (black, green, blue)):
times所有位置的颜色[(index左边界, index右边界, blue, black)] += times所有位置的颜色[(index左边界 + 1, index右边界 - 1, color左边界, color右边界)]
return
# 不匹配,说明要拆分
position分割字符串 = index与之匹配[index左边界] # 找出左边的一对括号
dp(index左边界, position分割字符串) # 把这对括号拿去递归
dp(position分割字符串 + 1, index右边界) # 剩下的几个括号再拿去递归
for color最前括号左, color第二个括号右, color最前括号右, color第二个括号左 in product(*([(black, green, blue)]*4)):
if color最前括号右 == 0 or color第二个括号左 == 0 or color最前括号右 != color第二个括号左: # 只有当两个都是彩色并且一样颜色才不可以上色
times所有位置的颜色[(index左边界, index右边界, color最前括号左, color第二个括号右)] += times所有位置的颜色[(index左边界, position分割字符串, color最前括号左, color最前括号右)] * times所有位置的颜色[(position分割字符串 + 1, index右边界, color第二个括号左, color第二个括号右)]
if s is None:
s = '((()))'
black, green, blue = 0, 1, 2
index与之匹配 = [-1] * len(s)
stackTmp = []
for i, ch in enumerate(s):
if ch == '(':
stackTmp.append(i)
else:
index与之匹配[i] = stackTmp.pop()
index与之匹配[index与之匹配[i]] = i
times所有位置的颜色 = defaultdict(int)
dp(0, len(s) - 1)
return sum(times所有位置的颜色[(0, len(s) - 1, colorL, colorR)] for colorL, colorR in product((black, green, blue), (black, green, blue)))
if __name__ == '__main__':
speedtest([main, main2, main3, lambda x: 12], ['(())'])
speedtest([main, main2, main3, lambda x: 40], ['(()())'])
speedtest([main, main2, main3, lambda x: 4], ['()'])
| [
"iamjerichoholic@hotmail.com"
] | iamjerichoholic@hotmail.com |
45e57209d8ee31112c54e04ee4a86688813fdf70 | 5acc77c4d594c1750a9b7477499ee25b4c307bca | /ehpi_action_recognition/paper_reproduction_code/evaluations/lstm/test_its_journal_2019.py | d33c4f0cc31943aaa298a35c28cecd64171894e4 | [
"MIT"
] | permissive | noboevbo/ehpi_action_recognition | bc15a3c260c79b85a82844a2779c9b1ec9cf42fd | 3b77eeb5103f0f11c8d4be993ec79dddad7e661c | refs/heads/master | 2021-12-29T05:24:31.891044 | 2021-12-19T16:23:36 | 2021-12-19T16:23:36 | 180,351,212 | 113 | 23 | null | 2019-04-23T11:24:27 | 2019-04-09T11:22:45 | Python | UTF-8 | Python | false | false | 4,367 | py | import os
import numpy as np
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import NormalizeEhpi, \
RemoveJointsOutsideImgEhpi
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import transforms
from ehpi_action_recognition.config import data_dir, models_dir, ehpi_dataset_path
from ehpi_action_recognition.tester_ehpi import TesterEhpi
from ehpi_action_recognition.paper_reproduction_code.datasets.ehpi_lstm_dataset import EhpiLSTMDataset
from ehpi_action_recognition.paper_reproduction_code.models.ehpi_lstm import EhpiLSTM
def get_test_set_lab(dataset_path: str, image_size: ImageSize):
num_joints = 15
datasets = [
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE01_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE02_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
]
for dataset in datasets:
dataset.print_label_statistics()
return ConcatDataset(datasets)
def get_test_set_office(dataset_path: str, image_size: ImageSize):
num_joints = 15
dataset = EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_04_TEST_EVAL2_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
# ScaleEhpi(image_size),
# TranslateEhpi(image_size),
# FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST)
dataset.print_label_statistics()
return dataset
if __name__ == '__main__':
model_names = [
"ehpi_journal_2019_03_gt_seed_0_cp0200",
"ehpi_journal_2019_03_gt_seed_104_cp0200",
"ehpi_journal_2019_03_gt_seed_123_cp0200",
"ehpi_journal_2019_03_gt_seed_142_cp0200",
"ehpi_journal_2019_03_gt_seed_200_cp0200",
#
"ehpi_journal_2019_03_pose_seed_0_cp0200",
"ehpi_journal_2019_03_pose_seed_104_cp0200",
"ehpi_journal_2019_03_pose_seed_123_cp0200",
"ehpi_journal_2019_03_pose_seed_142_cp0200",
"ehpi_journal_2019_03_pose_seed_200_cp0200",
#
"ehpi_journal_2019_03_both_seed_0_cp0200",
"ehpi_journal_2019_03_both_seed_104_cp0200",
"ehpi_journal_2019_03_both_seed_123_cp0200",
"ehpi_journal_2019_03_both_seed_142_cp0200",
"ehpi_journal_2019_03_both_seed_200_cp0200",
]
# Test set
test_set = get_test_set_lab(ehpi_dataset_path, ImageSize(1280, 720))
result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "lab"))
# test_set = get_test_set_office(ImageSize(1280, 720))
# result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "office"))
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
for model_name in model_names:
print("Model name: {}".format(model_name))
weights_path = os.path.join(models_dir, "{}.pth".format(model_name))
tester = TesterEhpi()
ehpi_results, seq_results = tester.test(test_loader, weights_path, model=EhpiLSTM(15, 5))
ehpi_results_np = np.array(ehpi_results, dtype=np.uint32)
seq_results_np = np.array(seq_results, dtype=np.uint32)
np.save(os.path.join(result_path, "{}_ehpis".format(model_name)), ehpi_results_np)
np.save(os.path.join(result_path, "{}_seqs".format(model_name)), seq_results_np)
| [
"Dennis.Ludl@reutlingen-university.de"
] | Dennis.Ludl@reutlingen-university.de |
7d1739054ec60843497475cf92f9a577adcaed06 | f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc | /tests/test_02_dxf_graphics/test_242_random_transform.py | 99195c39184972a1ea4c6d000ee377238a381ed9 | [
"MIT"
] | permissive | triroakenshield/ezdxf | 5652326710f2a24652605cdeae9dd6fc58e4f2eb | 82e964a574bcb86febc677bd63f1626318f51caf | refs/heads/master | 2023-08-17T12:17:02.583094 | 2021-10-09T08:23:36 | 2021-10-09T08:23:36 | 415,426,069 | 1 | 0 | MIT | 2021-10-09T21:31:25 | 2021-10-09T21:31:25 | null | UTF-8 | Python | false | false | 10,978 | py | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING
import pytest
import random
import math
from ezdxf.entities import Circle, Arc, Ellipse, Insert
from ezdxf.math import Matrix44, Vec3, linspace, X_AXIS, Y_AXIS, Z_AXIS
import ezdxf
if TYPE_CHECKING:
from ezdxf.eztypes import Drawing
UNIFORM_SCALING = [
(2, 2, 2),
(-1, 1, 1),
(1, -1, 1),
(1, 1, -1),
(-2, -2, 2),
(2, -2, -2),
(-2, 2, -2),
(-3, -3, -3),
]
NON_UNIFORM_SCALING = [
(-1, 2, 3),
(1, -2, 3),
(1, 2, -3),
(-3, -2, 1),
(3, -2, -1),
(-3, 2, -1),
(-3, -2, -1),
]
SCALING_WITHOUT_REFLEXIONS = [(1, 1, 1), (2, 2, 2), (1, 2, 3)]
def synced_scaling(entity, chk, sx=1, sy=1, sz=1):
entity = entity.copy()
entity.scale(sx, sy, sz)
chk = list(Matrix44.scale(sx, sy, sz).transform_vertices(chk))
return entity, chk
def synced_rotation(entity, chk, axis, angle):
entity = entity.copy()
entity.rotate_axis(axis, angle)
chk = list(Matrix44.axis_rotate(axis, angle).transform_vertices(chk))
return entity, chk
def synced_translation(entity, chk, dx, dy, dz):
entity = entity.copy()
entity.translate(dx, dy, dz)
chk = list(Matrix44.translate(dx, dy, dz).transform_vertices(chk))
return entity, chk
def synced_transformation(entity, chk, m: Matrix44):
entity = entity.copy()
entity.transform(m)
chk = list(m.transform_vertices(chk))
return entity, chk
@pytest.mark.parametrize("sx, sy, sz", UNIFORM_SCALING)
def test_random_circle_transformation(sx, sy, sz):
    """CIRCLE must survive a random rotate+translate followed by uniform scaling."""
    # testing only uniform scaling, for non uniform scaling
    # the circle has to be converted to an ellipse
    vertex_count = 8
    def build():
        # Build a CIRCLE plus its WCS sample vertices, both pushed through a
        # random rotation + translation so every run covers a new orientation.
        circle = Circle()
        vertices = list(
            circle.vertices(linspace(0, 360, vertex_count, endpoint=False))
        )
        m = Matrix44.chain(
            Matrix44.axis_rotate(
                axis=Vec3.random(), angle=random.uniform(0, math.tau)
            ),
            Matrix44.translate(
                dx=random.uniform(-2, 2),
                dy=random.uniform(-2, 2),
                dz=random.uniform(-2, 2),
            ),
        )
        return synced_transformation(circle, vertices, m)
    def check(circle, vertices):
        # Vertex(angle=0) of old_ocs is not the vertex(angle=0) of the new OCS
        # because of the arbitrary Axis algorithm.
        # Checking center point:
        ocs = circle.ocs()
        wcs_circle_center = ocs.to_wcs(circle.dxf.center)
        # Midpoint of two opposite sample vertices is the circle center.
        vertices_center = vertices[0].lerp(vertices[int(vertex_count / 2)])
        assert wcs_circle_center.isclose(vertices_center, abs_tol=1e-9)
        # Check distance of vertices from circle center point:
        radius = circle.dxf.radius
        for vtx in vertices:
            assert math.isclose(
                (vtx - wcs_circle_center).magnitude, radius, abs_tol=1e-9
            )
        # Check for parallel plane orientation
        vertices_extrusion = (vertices[0] - vertices_center).cross(
            (vertices[1] - vertices_center)
        )
        assert vertices_extrusion.is_parallel(
            circle.dxf.extrusion, abs_tol=1e-9
        )
    # test transformed circle against transformed WCS vertices of the circle
    for _ in range(10):
        circle0, vertices0 = build()
        check(circle0, vertices0)
        check(*synced_scaling(circle0, vertices0, sx, sy, sz))
@pytest.mark.parametrize("sx, sy, sz", UNIFORM_SCALING)
def test_random_arc_transformation(sx, sy, sz):
    """ARC must survive a random rotate+translate followed by uniform scaling."""
    # testing only uniform scaling, for non uniform scaling
    # the circle has to be converted to an ellipse
    vertex_count = 8
    def build():
        # Random start/end angles exercise arcs of arbitrary extent,
        # including start > end (crossing the 0-degree mark).
        arc = Arc.new(
            dxfattribs={
                "start_angle": random.uniform(0, 360),
                "end_angle": random.uniform(0, 360),
            }
        )
        vertices = list(arc.vertices(arc.angles(vertex_count)))
        m = Matrix44.chain(
            Matrix44.axis_rotate(
                axis=Vec3.random(), angle=random.uniform(0, math.tau)
            ),
            Matrix44.translate(
                dx=random.uniform(-2, 2),
                dy=random.uniform(-2, 2),
                dz=random.uniform(-2, 2),
            ),
        )
        return synced_transformation(arc, vertices, m)
    def check(arc, vertices):
        # Regenerated arc vertices must match the independently transformed
        # check vertices one by one.
        arc_vertices = arc.vertices(arc.angles(vertex_count))
        for vtx, chk in zip(arc_vertices, vertices):
            assert vtx.isclose(chk, abs_tol=1e-9)
    for _ in range(10):
        arc0, vertices0 = build()
        check(arc0, vertices0)
        check(*synced_scaling(arc0, vertices0, sx, sy, sz))
# Error conditions detected by github actions:
# ------------------------------------------------------------------------------
# 1. Ellipse transformation error condition failed on linux/CPython 3.7.10,
# the random transformation is not known, the difference was much more than
# just a precision issue:
# Vec3(-0.3732124613203121, 3.932218038441924, -1.133607572247806) >
# Vec3(-0.0036743564192640576, 5.229843031953656, 0.8619764018600419)
# ------------------------------------------------------------------------------
# @pytest.mark.parametrize('sx,sy,sz,start,end', [
# (-1, 2, 3, 5.759586531581287, 0.5235987755982988) # 1.
# ])
@pytest.mark.parametrize("sx, sy, sz", UNIFORM_SCALING + NON_UNIFORM_SCALING)
@pytest.mark.parametrize(
    "start, end",
    [
        # closed ellipse fails at non uniform scaling test, because no start-
        # and end param adjustment is applied, so generated vertices do not
        # match test vertices.
        (0, math.pi),  # half ellipse as special case
        (math.pi / 6, math.pi / 6 * 11),  # start < end
        (math.pi / 6 * 11, math.pi / 6),  # start > end
    ],
)
def test_random_ellipse_transformations(sx, sy, sz, start, end):
    """ELLIPSE must survive rotate+translate plus (non-)uniform scaling."""
    vertex_count = 8
    def build(angle, dx, dy, dz, axis):
        # Build an ELLIPSE plus its sample vertices, both pushed through the
        # given rotation + translation.
        ellipse = Ellipse.new(
            dxfattribs={
                "start_param": start,
                "end_param": end,
            }
        )
        vertices = list(ellipse.vertices(ellipse.params(vertex_count)))
        m = Matrix44.chain(
            Matrix44.axis_rotate(axis=axis, angle=angle),
            Matrix44.translate(dx=dx, dy=dy, dz=dz),
        )
        return synced_transformation(ellipse, vertices, m)
    def check(ellipse, vertices):
        ellipse_vertices = list(ellipse.vertices(ellipse.params(vertex_count)))
        # Ellipse vertices may appear in reverse order
        if not vertices[0].isclose(ellipse_vertices[0], abs_tol=1e-5):
            ellipse_vertices.reverse()
        for vtx, chk in zip(ellipse_vertices, vertices):
            assert vtx.isclose(chk, abs_tol=1e-5) is True, config
    for _ in range(10):
        angle = random.uniform(0, math.tau)
        dx = random.uniform(-2, 2)
        dy = random.uniform(-2, 2)
        dz = random.uniform(-2, 2)
        axis = Vec3.random()
        # Keep all random parameters in the assertion message so a failing
        # configuration can be reproduced.
        config = (
            f"CONFIG sx={sx}, sy={sy}, sz={sz}; "
            f"start={start:.4f}, end={end:.4f}; "
            f"angle={angle}; dx={dx}, dy={dy}, dz={dz}; axis={str(axis)}"
        )
        ellipse0, vertices0 = build(angle, dx, dy, dz, axis)
        check(ellipse0, vertices0)
        check(*synced_scaling(ellipse0, vertices0, sx, sy, sz))
@pytest.fixture(scope="module")
def doc1() -> "Drawing":
    """Module-scoped DXF document with an 'AXIS' block of three colored axis lines."""
    document = ezdxf.new()
    axis_block = document.blocks.new("AXIS")
    for end_point, color in ((X_AXIS, 1), (Y_AXIS, 3), (Z_AXIS, 5)):
        axis_block.add_line((0, 0, 0), end_point, dxfattribs={"color": color})
    return document
@pytest.mark.parametrize("sx, sy, sz", UNIFORM_SCALING + NON_UNIFORM_SCALING)
def test_random_block_reference_transformation(sx, sy, sz, doc1: "Drawing"):
    """INSERT of the AXIS block must track a scale-rotate-translate chain."""
    def insert():
        # Fresh INSERT of the AXIS block plus the four check points it spans.
        return (
            Insert.new(
                dxfattribs={
                    "name": "AXIS",
                    "insert": (0, 0, 0),
                    "xscale": 1,
                    "yscale": 1,
                    "zscale": 1,
                    "rotation": 0,
                    "layer": "insert",
                },
                doc=doc1,
            ),
            [Vec3(0, 0, 0), X_AXIS, Y_AXIS, Z_AXIS],
        )
    def check(lines, chk):
        # The three virtual LINE entities must span the transformed axes.
        origin, x, y, z = chk
        l1, l2, l3 = lines
        assert origin.isclose(l1.dxf.start)
        assert x.isclose(l1.dxf.end)
        assert origin.isclose(l2.dxf.start)
        assert y.isclose(l2.dxf.end)
        assert origin.isclose(l3.dxf.start)
        assert z.isclose(l3.dxf.end)
    entity0, vertices0 = insert()
    entity0, vertices0 = synced_scaling(entity0, vertices0, 1, 2, 3)
    m = Matrix44.chain(
        # Transformation order is important: scale - rotate - translate
        # Because scaling after rotation leads to a non orthogonal
        # coordinate system, which can not represented by the
        # INSERT entity.
        Matrix44.scale(sx, sy, sz),
        Matrix44.axis_rotate(
            axis=Vec3.random(), angle=random.uniform(0, math.tau)
        ),
        Matrix44.translate(
            dx=random.uniform(-2, 2),
            dy=random.uniform(-2, 2),
            dz=random.uniform(-2, 2),
        ),
    )
    entity, vertices = synced_transformation(entity0, vertices0, m)
    lines = list(entity.virtual_entities())
    check(lines, vertices)
@pytest.mark.parametrize(
    "sx, sy, sz",
    [
        # Non uniform scaling will throw InsertTransformationError(),
        # because this multiple applied transformations cause non orthogonal
        # target coordinate systems, which can not represented by the INSERT entity.
        (1.1, 1.1, 1.1),
        (-1.1, -1.1, -1.1),
        (-1.1, 1.1, 1.1),
        (1.1, -1.1, 1.1),
        (1.1, 1.1, -1.1),
        (-1.1, -1.1, 1.1),
        (1.1, -1.1, -1.1),
        (-1.1, 1.1, -1.1),
    ],
)
def test_apply_transformation_multiple_times(sx, sy, sz, doc1: "Drawing"):
    """Repeated application of the same uniform transformation must not drift."""
    def insert():
        # Fresh INSERT of the AXIS block plus the four check points it spans.
        return (
            Insert.new(
                dxfattribs={
                    "name": "AXIS",
                    "insert": (0, 0, 0),
                    "xscale": 1,
                    "yscale": 1,
                    "zscale": 1,
                    "rotation": 0,
                },
                doc=doc1,
            ),
            [(0, 0, 0), X_AXIS, Y_AXIS, Z_AXIS],
        )
    entity, vertices = insert()
    m = Matrix44.chain(
        Matrix44.scale(sx, sy, sz),
        Matrix44.z_rotate(math.radians(10)),
        Matrix44.translate(1, 1, 1),
    )
    # Apply the same transformation five times; the virtual LINE entities
    # must still coincide with the independently transformed check points.
    for i in range(5):
        entity, vertices = synced_transformation(entity, vertices, m)
    points = list(vertices)
    for num, line in enumerate(entity.virtual_entities()):
        assert points[0].isclose(line.dxf.start, abs_tol=1e-6)
        assert points[num + 1].isclose(line.dxf.end, abs_tol=1e-6)
# Allow running this test module directly without invoking the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__])
| [
"me@mozman.at"
] | me@mozman.at |
be65023c8a0ce8f41a32a0bcfb746bae3966244d | 526bf18a8695862067c817f432ab197ceb645f39 | /scrappers/bfs/leafly/leafly.py | 83e4f3c83d6395e3ff58f83ebdb6291aecef4be3 | [] | no_license | sintimaski/bfs-be | a7fd623911a2220face49a0ef84574f3fd7a09a8 | 964a9c7e9cc876aaf8b0723d6b3f26bd378c3721 | refs/heads/master | 2023-08-02T09:00:44.855055 | 2021-09-22T13:07:01 | 2021-09-22T13:07:01 | 339,531,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,263 | py | import json
import cloudscraper
import random
from core.db_connector import db
from core.models import Business
from core.proxies import proxies
class LeaflyScrapper:
    """Scraper that imports Leafly dispensary listings into the local database."""

    def __init__(self):
        # Identifiers stamped on every scraped record.
        self.source = "leafly"
        self.project = "encycloweedia"

    def start_scrapping(self):
        """Walk the paginated Leafly store-finder API and upsert every store.

        Pages are fetched 100 stores at a time until an empty page comes
        back; slugs of stores that failed to import are reported at the end.
        """
        page = 1
        limit = 100
        api_link = (
            "https://web-finder.leafly.com/api/get-dispensaries?"
            "userLat=36.1699412&userLon=-115.1398296&countryCode=US&"
            "retailType=dispensary&sort=default&geoQueryType=point&"
            "radius=1000000.109690296341793mi&page={}&limit={}&"
            "strainFilters=true"
        )
        failed = []
        while True:
            scraper = cloudscraper.create_scraper()
            resp = scraper.get(api_link.format(page, limit))
            data = json.loads(resp.text)
            stores = data.get("stores", [])
            if not stores:
                break
            # enumerate(..., start=1) fixes the off-by-one progress display
            # ("store 0/100" -> "store 1/100").
            for store_num, store in enumerate(stores, start=1):
                print(f"store {store_num}/{len(stores)}")
                result = self.get_dispensary(store)
                if "error" in result:
                    failed.append(store["slug"])
            print(f"{page}/{data['pageCount']} pages. Limit {limit}.")
            page += 1
        if failed:
            # Previously collected but never reported.
            print(f"{len(failed)} stores failed to import: {failed}")

    def get_dispensary(self, data):
        """Fetch one dispensary detail page and upsert it into the database.

        Args:
            data: one store dict from the store-finder API.

        Returns:
            ``{"success": slug}`` on success and ``{"error": slug}`` on any
            failure, so callers can always test ``"error" in result``.
        """
        slug = data["slug"]
        try:
            web_url = "https://www.leafly.com/dispensary-info/{}".format(slug)
            scraper = cloudscraper.create_scraper()
            proxy_index = random.randint(0, len(proxies) - 1)
            proxy = {
                "http": proxies[proxy_index],
                "https": proxies[proxy_index],
            }
            resp = scraper.get(web_url, proxies=proxy)
            if resp.status_code != 200:
                # BUG FIX: this used to be a bare ``return`` (None), which made
                # the caller's ``"error" in result`` check raise TypeError.
                return {"error": slug}
            next_data = self._extract_next_data(resp.text)
            self._upsert(self._build_record(data, next_data, web_url))
            return {"success": slug}
        except Exception as e:
            # Keep the best-effort contract, but no longer swallow silently.
            print(f"failed to scrape {slug}: {e!r}")
            return {"error": slug}

    @staticmethod
    def _extract_next_data(html):
        """Return the parsed ``__NEXT_DATA__`` JSON payload embedded in *html*."""
        payload = html.split(
            '<script id="__NEXT_DATA__" type="application/json">', 1
        )[1]
        payload = payload.split("</script>", 1)[0]
        return json.loads(payload)

    def _build_record(self, data, next_data, web_url):
        """Map API and page data onto the flat dict stored as a ``Business``."""
        page_props = next_data.get("props", {}).get("pageProps", {})
        dispensary = page_props.get("dispensary", {})
        geolocation = page_props.get("geolocation", {})
        subcategory = (
            "medical"
            if data["medical"]
            else ("recreational" if data["recreational"] else "")
        )
        return {
            "source_name__id": f"{self.source}_{data['id']}",
            "project": self.project,
            "name": data["name"],
            "source": self.source,
            "category": data["retailType"],
            "subcategory": subcategory,
            "business_hours": data["schedule"],
            "web_url": web_url,
            "slug": data["slug"],
            "website": dispensary["website"],
            "email": dispensary["email"],
            "phone": data["phone"],
            "country": geolocation["country_code"],
            "state": geolocation["state_code"],
            "city": data["city"],
            "address": data["address1"],
            "address_2": data["address2"],
            "zip": data["zip"],
            "lat": data["primaryLocation"]["lat"],
            "lng": data["primaryLocation"]["lon"],
            "about": dispensary["description"],
        }

    @staticmethod
    def _upsert(result_data):
        """Insert a new ``Business`` row or update the existing one in place."""
        existing = Business.query.filter(
            Business.source_name__id == result_data["source_name__id"]
        ).first()
        if existing:
            for key, value in result_data.items():
                setattr(existing, key, value)
        else:
            db.session.add(Business(**result_data))
        db.session.commit()
| [
"dimadrebezov@gmail.com"
] | dimadrebezov@gmail.com |
63435c029b1067a7237f109e533698e5d3667d12 | 6d7507b0695c5f704f1367604370f52a1cd60fe6 | /testfarm/test_program/app/honor/teacher/home/vanclass/test_cases/test014_vanclass_paper_list_and_tab.py | 9da7c040e8b157fd8f0d28b92f4672b80efd0bdc | [] | no_license | sj542484/test | f88b1f0524e853b24759de1bc8019a643bf11dcc | 908bef52867e3944b76898cfcc018fa403202815 | refs/heads/master | 2022-04-09T17:18:40.847936 | 2020-03-25T07:30:55 | 2020-03-25T07:30:55 | 194,576,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,958 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : SUN FEIFEI
import re
import sys
import unittest
from app.honor.teacher.login.object_page.login_page import TloginPage
from app.honor.teacher.home.dynamic_info.object_page.paper_detail_page import PaperReportPage
from app.honor.teacher.home.vanclass.object_page.home_page import ThomePage
from app.honor.teacher.home.vanclass.object_page.vanclass_paper_page import VanclassPaperPage
from app.honor.teacher.home.vanclass.object_page.vanclass_detail_page import VanclassDetailPage
from app.honor.teacher.home.vanclass.test_data.vanclass_data import GetVariable as gv
from conf.base_page import BasePage
from conf.decorator import setup, teardown, testcase, teststeps
from utils.assert_func import ExpectingTest
from utils.assert_package import MyToast
from utils.get_attribute import GetAttribute
from utils.vue_context import VueContext
class VanclassPaper(unittest.TestCase):
    """Paper list & "answer analysis" / "completion status" tabs of a class."""
    @classmethod
    @setup
    def setUp(cls):
        """Launch the app and build the shared page objects / helpers."""
        cls.ass_result = unittest.TestResult()
        cls.ass = ExpectingTest(cls, cls.ass_result)
        cls.login = TloginPage()
        cls.home = ThomePage()
        cls.van_detail = VanclassDetailPage()
        cls.van_paper = VanclassPaperPage()
        cls.report = PaperReportPage()
        cls.get = GetAttribute()
        cls.vue = VueContext()
        cls.my_toast = MyToast()
        BasePage().set_assert(cls.ass)
    @teardown
    def tearDown(self):
        self.vue.switch_app()  # switch back to the native app context
        self.login.tearDown(self.ass, self.my_toast, self.ass_result)  # collect soft-assert failures
    def run(self, result=None):
        # Keep a handle on the TestResult so soft asserts can report into it.
        self.ass_result = result
        super(VanclassPaper, self).run(result)
    @testcase
    def test_paper_list_tab(self):
        """Open a class, walk into its paper list and check both report tabs."""
        self.login.app_status()  # check the current APP state
        self.name = self.__class__.__name__ + '_' + sys._getframe().f_code.co_name  # file name + class name
        self.assertTrue(self.home.wait_check_page(), self.home.home_tips)
        self.home.into_vanclass_operation(gv.VANCLASS)  # enter the class detail page
        self.assertTrue(self.van_detail.wait_check_app_page(gv.VANCLASS), self.van_detail.van_tips)  # page checkpoint
        self.vue.switch_h5()  # switch to the vue (h5) context
        self.assertTrue(self.van_detail.wait_check_page(gv.VANCLASS), self.van_detail.van_vue_tips)
        self.van_detail.vanclass_paper()  # open "papers of this class"
        self.vue.app_web_switch()  # switch to apk then back to web
        title = gv.PAPER_TITLE.format(gv.VANCLASS)
        self.assertTrue(self.van_paper.wait_check_page(title), self.van_paper.paper_tips)  # page checkpoint
        if self.van_paper.wait_check_empty_tips_page():
            self.assertFalse(self.van_paper.wait_check_empty_tips_page(), '★★★ Error-班级试卷为空, {}')
        else:
            self.assertTrue(self.van_paper.wait_check_list_page(), self.van_paper.paper_list_tips)
            print('本班试卷:')
            count = []
            name = self.van_paper.hw_name()  # paper names
            progress = self.van_paper.progress()  # completion progress
            for i in range(len(name)):
                create = progress[i].text
                # First digit of the trailing "finished" counter in the text.
                pro = int(re.sub("\D", "", create.split()[-1])[0])
                var = name[i].text
                # Only open papers that at least one student has finished.
                if pro != 0 and '试卷' in self.home.brackets_text_in(var):
                    count.append(i)
                    name[i].click()  # open the paper
                    self.vue.app_web_switch()  # switch to apk then back to web
                    print('###########################################################')
                    print('试卷:', var, '\n', create)
                    self.finish_situation_operation()  # "completion status" tab
                    self.answer_analysis_operation()  # "answer analysis" tab
                    if self.report.wait_check_page():  # page checkpoint
                        self.van_detail.back_up_button()  # back to the paper list
                    break
            self.assertFalse(len(count)==0, '暂无试卷或者暂无学生完成该试卷')
            self.vue.app_web_switch()  # switch to apk then back to vue
            self.assertTrue(self.van_paper.wait_check_page(title), self.van_paper.paper_tips)  # page checkpoint
            self.van_paper.back_up_button()  # back to the class detail page
        self.vue.app_web_switch()  # switch to apk then back to vue
        self.assertTrue(self.van_detail.wait_check_page(gv.VANCLASS), self.van_detail.van_vue_tips)  # class detail page checkpoint
        self.van_detail.back_up_button()  # back to the home screen
    @teststeps
    def finish_situation_operation(self):
        """Steps on the "completion status" tab."""
        self.assertTrue(self.report.wait_check_page(), self.report.paper_detail_tips)
        print('-------------------完成情况tab-------------------')
        if self.report.wait_check_empty_tips_page():
            self.assertTrue(self.report.wait_check_empty_tips_page(), '暂无数据')
            print('暂无数据')
        else:
            self.assertTrue(self.report.wait_check_st_list_page(), self.report.st_list_tips)
            self.st_list_statistics()  # student list of the completion tab
    @teststeps
    def answer_analysis_operation(self):
        """Steps on the "answer analysis" tab."""
        self.assertTrue(self.report.wait_check_page(), self.report.paper_detail_tips)
        self.report.analysis_tab()  # enter the "answer analysis" tab page
        print('-------------------答卷分析tab-------------------')
        if self.report.wait_check_empty_tips_page():
            print('暂无数据')
            self.assertTrue(self.report.wait_check_empty_tips_page(), '暂无数据')
        else:
            self.assertTrue(self.report.wait_check_paper_list_page(), self.report.hw_list_tips)
            self.answer_analysis_detail()  # list on the answer-analysis page
    @teststeps
    def answer_analysis_detail(self):
        """Detail list of the "answer analysis" page."""
        mode = self.report.game_type()  # game type
        name = self.report.game_name()  # game name
        average = self.report.van_average_achievement()  # class average score; total score
        for j in range(len(average)):
            print(mode[j].text, name[j].text, '\n',
                  average[j].text)
            print('----------------------')
    @teststeps
    def st_list_statistics(self):
        """Statistics over the finished / unfinished student lists."""
        name = self.report.st_name()  # student names
        icon = self.report.st_icon()  # student avatars
        status = self.report.st_score()  # finished or not
        if len(name) == len(icon) == len(status):
            for i in range(len(name)):
                print('学生:', name[i].text, ' ', status[i].text)  # print every student
        else:
            print('★★★ Error-已完成/未完成 学生列表信息统计', len(icon), len(name))
| [
"18330245071@163.com"
] | 18330245071@163.com |
5cd8e4ddca0ba3aac72d705023c9812f11cba524 | 4ae7a930ca6aa629aa57df7764665358ee70ffac | /examples/ml/mlflow/california_with_mlflow.py | a369a793c618a79f2b6e6a6938d768be66c989a7 | [
"MIT"
] | permissive | carefree0910/carefree-learn | 0ecc7046ef0ab44a642ff0a72a181c4cb5037571 | 554bf15c5ce6e3b4ee6a219f348d416e71d3972f | refs/heads/dev | 2023-08-23T07:09:56.712338 | 2023-08-23T02:49:10 | 2023-08-23T02:49:10 | 273,041,593 | 451 | 38 | MIT | 2021-01-05T10:49:46 | 2020-06-17T17:44:17 | Python | UTF-8 | Python | false | false | 1,104 | py | # type: ignore
# This example requires the `mlflow` package
import cflearn
from cflearn.data.ml import california_dataset
from cflearn.misc.toolkit import check_is_ci
from cflearn.misc.toolkit import seed_everything
seed_everything(123)
x, y = california_dataset()
y = (y - y.mean()) / y.std()
config = cflearn.MLConfig(
model_name="fcnn",
model_config=dict(input_dim=x.shape[1], output_dim=1),
loss_name="multi_task",
loss_config=dict(loss_names=["mae", "mse"]),
callback_names="mlflow",
)
block_names = ["ml_recognizer", "ml_preprocessor", "ml_splitter"]
m = cflearn.api.fit_ml(
x,
y,
config=config,
processor_config=cflearn.MLAdvancedProcessorConfig(block_names),
debug=check_is_ci(),
)
loader = m.data.build_loader(x, y)
print("> metrics", m.evaluate(loader))
# After running the above codes, you should be able to
# see a `mlruns` folder in your current working dir.
# By executing `mlflow server`, you should be able to
# see those fancy metric curves (loss, lr, mae, mse,
# training loss, etc.) with a nice web interface
# at http://127.0.0.1:5000!
| [
"syameimaru.saki@gmail.com"
] | syameimaru.saki@gmail.com |
9ab5d72809b7086e7bd4c7e64e900d1b1d153617 | c9952dcac5658940508ddc139344a7243a591c87 | /tests/lab03/test_ch03_t02_get_current_date_time.py | e7483fb0ce4619ff230b0400ed999739b99f4e6e | [] | no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 516 | py | import unittest
from tests.unit_test_helper.console_test_helper import *
class TestOutput(unittest.TestCase):
    """Checks for the lab03 'get current date time' exercise script."""

    SCRIPT = "lab03/ch03_t02_get_current_date_time.py"

    def test(self):
        # The script must leave a ``datetime`` name in its local namespace.
        temp_globals, temp_locals, content, output = execfile(self.SCRIPT)
        print(temp_locals)
        self.assertIsNotNone(temp_locals['datetime'])

    def test_output(self):
        # The printed date/time string is expected to be exactly 27 characters.
        captured = get_script_output(self.SCRIPT)
        self.assertEqual(27, len(captured))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"cywong@vtc.edu.hk"
] | cywong@vtc.edu.hk |
66f4264f5c8550c4b2a2691d5eaffb11f8d201ff | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part12-introduction-to-Databases/No12-Ordering-in-Descending-Order-by-a-Single-Column.py | daee5158e0690eff17495df10d745b269a7fd994 | [] | no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #Ordering in Descending Order by a Single Column
'''
You can also use .order_by() to sort from highest to lowest by wrapping a column in the desc() function. Although you haven't seen this function in action, it generalizes what you have already learned.
Pass desc() (for "descending") inside an .order_by() with the name of the column you want to sort by. For instance, stmt.order_by(desc(table.columns.column_name)) sorts column_name in descending order.
Instructions
100 XP
Import desc from the sqlalchemy module.
Select all records of the state column from the census table.
Append an .order_by() to sort the result output by the state column in descending order. Save the result as rev_stmt.
Execute rev_stmt using connection.execute() and fetch all the results with .fetchall(). Save them as rev_results.
Print the first 10 rows of rev_results.
'''
# Code
# Import desc
from sqlalchemy import desc
# Build a query to select the state column: stmt
stmt = select([census.columns.state])
# Order stmt by state in descending order: rev_stmt
rev_stmt = stmt.order_by(desc(census.columns.state))
# Execute the query and store the results: rev_results
rev_results = connection.execute(rev_stmt).fetchall()
# Print the first 10 rev_results
print(rev_results[:10])
'''result
[('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',)]
''' | [
"beiran@hotmail.com"
] | beiran@hotmail.com |
0ecbb7faa59bee8223728ff8922def5f28cd51c7 | db69a3e20ec69bd8a08ed14ec6193a08e543965d | /mars/deploy/kubedl/client.py | 79d596dd3ecc1650e83dd03a67f28d9b1d2f6165 | [
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | Fernadoo/mars | dbf62cd6f3ff82e3c399f1c06c6da1681b462856 | f8e93edeecbe184b018cd1d0d948b3a2ba74bee6 | refs/heads/master | 2023-08-12T12:48:33.726883 | 2021-09-29T14:29:18 | 2021-09-29T14:29:18 | 377,359,795 | 0 | 1 | Apache-2.0 | 2021-09-29T14:29:19 | 2021-06-16T03:29:08 | Python | UTF-8 | Python | false | false | 11,392 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import warnings
import requests
from ...session import new_session
from .config import MarsSchedulerSpecConfig, MarsWorkerSpecConfig, MarsWebSpecConfig, \
MarsJobConfig
# kubernetes is an optional dependency: keep the import soft so this module
# can still be imported when it is not installed.
try:
    from kubernetes.client.rest import ApiException as K8SApiException
except ImportError:  # pragma: no cover
    K8SApiException = None
# Group/version and plural name of the KubeDL MarsJob custom resource.
KUBEDL_API_VERSION = 'kubedl.io/v1alpha1'
KUBEDL_MARS_PLURAL = 'marsjobs'
logger = logging.getLogger(__name__)
class KubeDLClusterClient:
    """User-facing handle for a KubeDL-managed Mars cluster."""

    def __init__(self, cluster):
        self._cluster = cluster
        self._session = None
        self._endpoint = None

    @property
    def endpoint(self):
        """Mars web endpoint URL, ``None`` until :meth:`start` has run."""
        return self._endpoint

    @property
    def namespace(self):
        """Kubernetes namespace the backing cluster lives in."""
        return self._cluster.namespace

    @property
    def session(self):
        """Mars session bound to the endpoint, ``None`` until :meth:`start`."""
        return self._session

    def start(self):
        """Start the backing cluster and open a session against its endpoint."""
        endpoint = self._cluster.start()
        self._endpoint = endpoint
        self._session = new_session(
            endpoint, verify_ssl=self._cluster.verify_ssl)

    def stop(self, wait=False, timeout=0):
        """Tear the backing cluster down, optionally waiting for deletion."""
        self._cluster.stop(wait=wait, timeout=timeout)
class KubeDLCluster:
    """Lifecycle manager for a Mars cluster deployed as a KubeDL ``MarsJob``.

    Builds the scheduler/worker/web specs into a MarsJob custom resource,
    submits it via the Kubernetes custom-objects API and polls the Mars web
    endpoint until enough workers are alive.
    """
    def __init__(self, kube_api_client=None, image=None, job_name=None, namespace=None,
                 scheduler_num=1, scheduler_cpu=None, scheduler_mem=None,
                 worker_num=1, worker_cpu=None, worker_mem=None, worker_spill_paths=None,
                 worker_cache_mem=None, min_worker_num=None,
                 web_num=1, web_cpu=None, web_mem=None,
                 slb_endpoint=None, verify_ssl=True, timeout=None, **kwargs):
        from kubernetes import client as kube_client
        self._kube_api_client = kube_api_client
        self._custom_api = kube_client.CustomObjectsApi(kube_api_client)
        # NOTE(review): assumes slb_endpoint is always provided; a None value
        # would raise AttributeError here -- confirm with callers.
        self._slb_endpoint = slb_endpoint.rstrip("/")
        self._verify_ssl = verify_ssl
        self._job_name = job_name
        self._mars_endpoint = None
        self._namespace = namespace or 'default'
        self._image = image
        self._timeout = timeout
        self._extra_volumes = kwargs.pop('extra_volumes', ())
        self._pre_stop_command = kwargs.pop('pre_stop_command', None)
        self._log_when_fail = kwargs.pop('log_when_fail', False)
        self._node_selectors = kwargs.pop('node_selectors', None)
        # Modules/envs shared by every role; may arrive as a comma-joined string.
        extra_modules = kwargs.pop('extra_modules', None) or []
        extra_modules = extra_modules.split(',') if isinstance(extra_modules, str) \
            else extra_modules
        extra_envs = kwargs.pop('extra_env', None) or dict()
        if not verify_ssl:
            extra_envs['KUBE_VERIFY_SSL'] = '0'
        def _override_modules(updates):
            # Merge role-specific extra modules into the shared module set.
            modules = set(extra_modules)
            updates = updates.split(',') if isinstance(updates, str) \
                else updates
            modules.update(updates)
            return sorted(modules)
        def _override_envs(updates):
            # Role-specific env vars win over the shared ones.
            ret = extra_envs.copy()
            ret.update(updates)
            return ret
        self._scheduler_num = scheduler_num
        self._scheduler_cpu = scheduler_cpu
        self._scheduler_mem = scheduler_mem
        self._scheduler_extra_modules = _override_modules(kwargs.pop('scheduler_extra_modules', []))
        self._scheduler_extra_env = _override_envs(kwargs.pop('scheduler_extra_env', None) or dict())
        self._worker_num = worker_num
        self._worker_cpu = worker_cpu
        self._worker_mem = worker_mem
        self._worker_spill_paths = worker_spill_paths
        self._worker_cache_mem = worker_cache_mem
        self._min_worker_num = min_worker_num or worker_num
        self._worker_extra_modules = _override_modules(kwargs.pop('worker_extra_modules', []))
        self._worker_extra_env = _override_envs(kwargs.pop('worker_extra_env', None) or dict())
        self._web_num = web_num
        self._web_cpu = web_cpu
        self._web_mem = web_mem
        self._web_extra_modules = _override_modules(kwargs.pop('web_extra_modules', []))
        self._web_extra_env = _override_envs(kwargs.pop('web_extra_env', None) or dict())
    @property
    def verify_ssl(self):
        """Whether HTTPS requests to the web endpoint verify certificates."""
        return self._verify_ssl
    def _check_if_exist(self):
        """Return True if a Running/Created MarsJob named ``job_name`` exists."""
        if self._job_name is None:
            return False
        try:
            api, version = KUBEDL_API_VERSION.rsplit('/', 1)
            service_obj = self._custom_api.get_namespaced_custom_object_status(
                api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
            if len(service_obj.get('status', dict()).get('conditions', [])) > 0:
                status = service_obj['status']['conditions'][-1]['type']
                if status == 'Running' or status == 'Created':
                    logger.warning(f'Reusing cluster: {self._job_name}')
                    return True
                else:
                    return False
            else:
                return False
        except K8SApiException:
            # API errors (e.g. 404 not found) are treated as "does not exist".
            return False
    def _create_service(self):
        """Build the MarsJob custom resource and submit it to the cluster."""
        scheduler_cfg = MarsSchedulerSpecConfig(
            self._image, self._scheduler_num, cpu=self._scheduler_cpu, memory=self._scheduler_mem,
            node_selectors=self._node_selectors, modules=self._scheduler_extra_modules,
        )
        scheduler_cfg.add_simple_envs(self._scheduler_extra_env)
        worker_cfg = MarsWorkerSpecConfig(
            self._image, self._worker_num, cpu=self._worker_cpu, memory=self._worker_mem,
            cache_mem=self._worker_cache_mem, spill_dirs=self._worker_spill_paths,
            node_selectors=self._node_selectors, modules=self._worker_extra_modules
        )
        worker_cfg.add_simple_envs(self._worker_extra_env)
        web_cfg = MarsWebSpecConfig(
            self._image, self._web_num, cpu=self._web_cpu, memory=self._web_mem,
            node_selectors=self._node_selectors, modules=self._web_extra_modules
        )
        web_cfg.add_simple_envs(self._web_extra_env)
        job_cfg = MarsJobConfig(
            job_name=self._job_name, scheduler_config=scheduler_cfg, worker_config=worker_cfg,
            web_config=web_cfg, web_host=self._slb_endpoint
        )
        api, version = KUBEDL_API_VERSION.rsplit('/', 1)
        cfg_json = job_cfg.build()
        cfg_json['apiVersion'] = KUBEDL_API_VERSION
        response = self._custom_api.create_namespaced_custom_object(
            api, version, self._namespace, KUBEDL_MARS_PLURAL, cfg_json)
        # Remember the server-assigned name (job_name may have been None).
        self._job_name = response['metadata']['name']
    def _wait_service_ready(self):
        """Poll the web endpoint until ``min_worker_num`` workers report in."""
        self._mars_endpoint = f'{self._slb_endpoint}/mars/{self._namespace}/{self._job_name}-webservice-0'
        logger.warning(f'Kubedl job name: {self._job_name}')
        check_start_time = time.time()
        worker_count_url = self._mars_endpoint + '/api/worker?action=count'
        while True:
            try:
                if self._timeout and time.time() - check_start_time > self._timeout:
                    raise TimeoutError('Check Mars service start timeout')
                if not self._verify_ssl:
                    try:
                        import urllib3
                        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                    except ImportError:  # pragma: no cover
                        pass
                api, version = KUBEDL_API_VERSION.rsplit('/', 1)
                service_obj = self._custom_api.get_namespaced_custom_object_status(
                    api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
                if len(service_obj.get('status', dict()).get('conditions', [])) > 0:
                    if service_obj['status']['conditions'][-1]['type'] == 'Failed':
                        raise SystemError(service_obj['status']['conditions'][-1]['message'])
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', message='Unverified HTTPS request')
                    resp = requests.get(worker_count_url, timeout=1, verify=self._verify_ssl)
                if int(resp.text) >= self._min_worker_num:
                    logger.warning(f'Web endpoint started at {self._mars_endpoint}')
                    break
            except (requests.Timeout, ValueError) as ex:
                # Timeout: retry immediately; ValueError (endpoint not serving
                # an integer yet): back off briefly before retrying.
                if not isinstance(ex, requests.Timeout):
                    time.sleep(0.1)
                pass
    def start(self):
        """Create (or reuse) the MarsJob and block until it is ready.

        Returns:
            The Mars web endpoint URL.
        """
        try:
            if not self._check_if_exist():
                self._create_service()
            self._wait_service_ready()
            return self._mars_endpoint
        except:  # noqa: E722
            self.stop()
            raise
    def stop(self, wait=False, timeout=0):
        """Delete the MarsJob; optionally wait until it is really gone."""
        from kubernetes import client as kube_client
        custom_api = kube_client.CustomObjectsApi(self._kube_api_client)
        api, version = KUBEDL_API_VERSION.rsplit('/', 1)
        custom_api.delete_namespaced_custom_object(
            api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
        if wait:
            start_time = time.time()
            while True:
                try:
                    custom_api.get_namespaced_custom_object(
                        api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
                except K8SApiException as ex:
                    if ex.status != 404:  # pragma: no cover
                        raise
                    # A 404 means the deletion has completed.
                    break
                else:
                    time.sleep(1)
                    if timeout and time.time() - start_time > timeout:  # pragma: no cover
                        raise TimeoutError('Check Mars service stop timeout')
def new_cluster(kube_api_client=None, image=None, scheduler_num=1, scheduler_cpu=2,
                scheduler_mem=4 * 1024 ** 3, worker_num=1, worker_cpu=8, worker_mem=32 * 1024 ** 3,
                worker_spill_paths=None, worker_cache_mem='45%', min_worker_num=None,
                web_num=1, web_cpu=1, web_mem=4 * 1024 ** 3, slb_endpoint=None, verify_ssl=True,
                job_name=None, timeout=None, **kwargs):
    """Create a Mars cluster via KubeDL and return a started client handle."""
    if not worker_spill_paths:
        worker_spill_paths = ['/tmp/spill-dir']
    cluster_kwargs = dict(
        image=image,
        scheduler_num=scheduler_num, scheduler_cpu=scheduler_cpu, scheduler_mem=scheduler_mem,
        worker_num=worker_num, worker_cpu=worker_cpu, worker_mem=worker_mem,
        worker_spill_paths=worker_spill_paths, worker_cache_mem=worker_cache_mem,
        min_worker_num=min_worker_num, web_num=web_num, web_cpu=web_cpu, web_mem=web_mem,
        slb_endpoint=slb_endpoint, verify_ssl=verify_ssl, job_name=job_name, timeout=timeout,
    )
    cluster = KubeDLCluster(kube_api_client, **cluster_kwargs, **kwargs)
    client = KubeDLClusterClient(cluster)
    client.start()
    return client
| [
"noreply@github.com"
] | Fernadoo.noreply@github.com |
c626a31052ba0bf40912b3c89ebde89a31bf2a7e | a64089402e4c265319f69b126ec89512105d0e78 | /chainer/distributions/exponential.py | 5f4274fa49b83e12e32a3b5a920007ea5636bdab | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lehy/chainer | 86c46e36e9a9414349137a87f56afc6ebb735f46 | 007f86fdc68d9963a01f9d9230e004071a1fcfb2 | refs/heads/master | 2020-04-03T21:53:59.915980 | 2018-10-31T16:09:12 | 2018-10-31T16:22:20 | 155,586,089 | 0 | 0 | MIT | 2018-10-31T16:05:37 | 2018-10-31T16:05:36 | null | UTF-8 | Python | false | false | 2,414 | py | import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import exponential_m1
from chainer.functions.math import logarithm_1p
class Exponential(distribution.Distribution):
    """Exponential Distribution.
    The probability density function of the distribution is expressed as
    .. math::
        p(x;\\lambda) = \\lambda e^{-\\lambda x}
    Args:
        lam(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Parameter of distribution :math:`\\lambda`.
    """
    def __init__(self, lam):
        super(Exponential, self).__init__()
        # Rate parameter, wrapped so downstream ops stay differentiable.
        self.__lam = chainer.as_variable(lam)
    @property
    def lam(self):
        """Rate parameter :math:`\\lambda`."""
        return self.__lam
    @property
    def batch_shape(self):
        return self.lam.shape
    def cdf(self, x):
        # CDF: 1 - exp(-lam * x); expm1 keeps precision for small lam * x.
        return - exponential_m1.expm1(-self.lam * x)
    @property
    def entropy(self):
        # Differential entropy: 1 - log(lam).
        return 1 - exponential.log(self.lam)
    @property
    def event_shape(self):
        # Scalar distribution: events have no extra dimensions.
        return ()
    def icdf(self, x):
        # Inverse CDF: -log(1 - x) / lam; log1p keeps precision near x = 0.
        x = chainer.as_variable(x)
        return -1 / self.lam * logarithm_1p.log1p(-x)
    @property
    def _is_gpu(self):
        return isinstance(self.lam.data, cuda.ndarray)
    def log_prob(self, x):
        # log p(x) = log(lam) - lam * x for x >= 0, and -inf otherwise.
        logp = exponential.log(self.lam) - self.lam * x
        xp = logp.xp
        if isinstance(x, chainer.Variable):
            x = x.array
        inf = xp.full_like(logp.array, xp.inf)
        return where.where(xp.asarray(x >= 0), logp, xp.asarray(-inf))
    @property
    def mean(self):
        return 1 / self.lam
    def sample_n(self, n):
        # Draw standard exponential noise and rescale by 1 / lam
        # (inverse transform sampling).
        xp = cuda.get_array_module(self.lam)
        if xp is cuda.cupy:
            eps = xp.random.standard_exponential(
                (n,)+self.lam.shape, dtype=self.lam.dtype)
        else:
            eps = xp.random.standard_exponential(
                (n,)+self.lam.shape).astype(self.lam.dtype)
        noise = eps / self.lam
        return noise
    @property
    def support(self):
        return 'positive'
    @property
    def variance(self):
        return 1 / self.lam ** 2
@distribution.register_kl(Exponential, Exponential)
def _kl_exponential_exponential(dist1, dist2):
    """KL divergence between two exponential distributions.

    KL(p || q) = log(lam1 / lam2) + lam2 / lam1 - 1.
    """
    log_ratio = exponential.log(dist1.lam) - exponential.log(dist2.lam)
    return log_ratio + dist2.lam / dist1.lam - 1.
| [
"yoshikawa@preferred.jp"
] | yoshikawa@preferred.jp |
9a1205ffcd8780e7f8160ba2f5fa17e38a4537c4 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/servico/migrations/0026_numerodocumento_status.py | a98891ce1975c237645961287f635c3604f36fd9 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-21 18:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``NumeroDocumento.status`` to a PROTECTed FK on StatusDocumento."""

    dependencies = [
        ('servico', '0025_eventodestatus_status_pre_pos'),
    ]

    operations = [
        migrations.AlterField(
            model_name='numerodocumento',
            name='status',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.PROTECT,
                to='servico.StatusDocumento',
            ),
        ),
    ]
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
c681389bc526c21ef3d1837e9d67227023d2f7ee | af3e249753fbf04ce10a01e4dbeab549cb4ae34d | /oscar/core/ajax.py | 988307bb76a4b1b63a798bccf15c325dabfbcdb1 | [] | no_license | rwozniak72/sklep_oscar_test | 79588b57470c9245324cc5396aa472192953aeda | fb410dc542e6cb4deaf870b3e7d5d22ca794dc29 | refs/heads/master | 2020-08-12T04:55:25.084998 | 2019-10-16T21:14:08 | 2019-10-16T21:14:08 | 214,692,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from django.contrib import messages
class FlashMessages(object):
    """
    Intermediate container for flash messages.

    Useful because, at the moment a message is created, we don't yet know
    whether the response will be an AJAX response or not.
    """

    def __init__(self):
        # Maps message level -> list of messages accumulated at that level.
        self.msgs = {}

    def add_message(self, level, message):
        self.msgs.setdefault(level, []).append(message)

    def add_messages(self, level, messages):
        for message in messages:
            self.add_message(level, message)

    def info(self, message):
        self.add_message(messages.INFO, message)

    def warning(self, message):
        self.add_message(messages.WARNING, message)

    def error(self, message):
        self.add_message(messages.ERROR, message)

    def success(self, message):
        self.add_message(messages.SUCCESS, message)

    def as_dict(self):
        # Serialise to {tag: [str, ...]} using Django's level -> tag mapping.
        return {
            messages.DEFAULT_TAGS.get(level, 'info'): [str(m) for m in level_msgs]
            for level, level_msgs in self.msgs.items()
        }

    def apply_to_request(self, request):
        # Flush every stored message into Django's messages framework.
        for level, level_msgs in self.msgs.items():
            for message in level_msgs:
                messages.add_message(request, level, message)
| [
"rwozniak.esselte@gmail.com"
] | rwozniak.esselte@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.