code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
compressor = lzma.LZMACompressor(
    check=lzma.CHECK_CRC64,
    filters=[
        {"id": lzma.FILTER_X86},
        {"id": lzma.FILTER_LZMA2, "preset": lzma.PRESET_DEFAULT},
    ])
for block in src:
    encoded = compressor.compress(block)
    if encoded:
        yield encoded
yield compressor.flush()
def xz_compress_stream(src)
Compress data from `src`.

Args:
    src (iterable): iterable that yields blocks of data to compress

Yields:
    blocks of compressed data
2.81119
3.143712
0.894226
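A minimal usage sketch for the generator above (file names and chunk size are illustrative assumptions):

with open('input.bin', 'rb') as f, open('output.xz', 'wb') as out:
    blocks = iter(lambda: f.read(65536), b'')  # yield fixed-size blocks until EOF
    for chunk in xz_compress_stream(blocks):
        out.write(chunk)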
dec = lzma.LZMADecompressor()
for block in src:
    decoded = dec.decompress(block)
    if decoded:
        yield decoded
if dec.unused_data:  # pragma: nocover; can't figure out how to test this
    raise IOError('Read unused data at end of compressed stream')
def xz_decompress_stream(src)
Decompress data from `src`.

Args:
    src (iterable): iterable that yields blocks of compressed data

Yields:
    blocks of uncompressed data
4.84907
5.762189
0.841533
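And the matching decompression side, under the same illustrative assumptions:

with open('output.xz', 'rb') as f, open('roundtrip.bin', 'wb') as out:
    blocks = iter(lambda: f.read(65536), b'')
    for chunk in xz_decompress_stream(blocks):
        out.write(chunk)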
block = next(src)
compression = guess_compression(block)
if compression == 'bz2':
    src = bz2_decompress_stream(chain([block], src))
elif compression == 'xz':
    src = xz_decompress_stream(chain([block], src))
else:
    src = chain([block], src)
for block in src:
    yield block
def auto_decompress_stream(src)
Decompress data from `src` if required. If the first block of `src` appears to be compressed, then the entire stream will be decompressed. Otherwise the stream will be passed along as-is.

Args:
    src (iterable): iterable that yields blocks of data

Yields:
    blocks of uncompressed data
2.715586
2.781503
0.976302
path = os.path.abspath(path)
dirname = os.path.abspath(dirname)
while len(path) >= len(dirname):
    if path == dirname:
        return True
    newpath = os.path.dirname(path)
    if newpath == path:
        return False
    path = newpath
return False
def path_is_inside(path, dirname)
Return True if path is under dirname.
1.865071
1.75518
1.06261
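A quick sketch of the boundary behavior, derived from the code above (paths are illustrative):

assert path_is_inside('/srv/data/logs', '/srv/data')
assert path_is_inside('/srv/data', '/srv/data')          # a path is inside itself
assert not path_is_inside('/srv/database', '/srv/data')  # no string-prefix false positive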
# TODO: do we really want to be absolute here?
base = os.path.abspath(base)
path = os.path.join(base, *elements)
path = os.path.normpath(path)
if not path_is_inside(path, base):
    raise ValueError('target path is outside of the base path')
return path
def safejoin(base, *elements)
Safely joins paths together. The result will always be a subdirectory under `base`, otherwise ValueError is raised.

Args:
    base (str): base path
    elements (list of strings): path elements to join to base

Returns:
    elements joined to base
3.324847
3.909843
0.850379
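A sketch of how safejoin rejects traversal (names are illustrative):

safejoin('/srv/app', 'static', 'logo.png')   # -> '/srv/app/static/logo.png'
safejoin('/srv/app', '..', 'etc', 'passwd')  # normalizes outside base -> raises ValueError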
current = fileobj.tell()
fileobj.seek(0, 2)
end = fileobj.tell()
fileobj.seek(current)
return end
def filesize(fileobj)
Return the number of bytes in the fileobj. This function seeks to the end of the file, and then back to the original position.
2.23257
2.236308
0.998329
evt = WeakEvent(auto_reset=False)
# first ensure that any pending callbacks from worker threads have been delivered
# these are calls of _fromMain()
Callback(evt.Signal)
evt.Wait(timeout=timeout)
evt.Reset()  # reuse

# grab the current set of in-progress cothreads/events
wait4 = set(_handlers)
# because Spawn.Wait() can only be called once, remove them and
# use 'evt' as a proxy for what I'm waiting on so that overlapping
# calls to _sync() will wait for these as well.
# However, this means that our failure must cascade to subsequent
# calls to _sync() before we complete.
_handlers.clear()
_handlers.add(evt)

try:
    WaitForAll(wait4, timeout=timeout)
except Exception as e:
    evt.SignalException(e)  # pass along error to next concurrent _sync()
else:
    evt.Signal()
def _sync(timeout=None)
Wait until all pending handler cothreads have completed
15.358769
13.864692
1.107761
_SharedPV.close(self, destroy)
if sync:
    # TODO: still not syncing PVA workers...
    _sync()
    self._disconnected.Wait(timeout=timeout)
def close(self, destroy=False, sync=False, timeout=None)
Close PV, disconnecting any clients.

:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.

close() with destroy=True or sync=True will not prevent clients from re-connecting. New clients may prevent sync=True from succeeding. Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`, or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
28.208651
31.255098
0.90253
while True:
    # TODO: Queue.get() (and anything using thread.allocate_lock)
    # ignores signals :( so timeout periodically to allow delivery
    try:
        callable = None  # ensure no lingering references to past work while blocking
        callable = self._Q.get(True, 1.0)
    except Empty:
        continue  # retry on timeout
    try:
        if callable is self._stopit:
            break
        callable()
    except:
        _log.exception("Error from WorkQueue")
    finally:
        self._Q.task_done()
def handle(self)
Process queued work until interrupt() is called
13.730982
12.459557
1.102044
lvl = _lvlmap.get(lvl, lvl)
assert lvl in _lvls, lvl
_ClientProvider.set_debug(lvl)
def set_debug(lvl)
Set PVA global debug print level. This prints directly to stdout, bypassing eg. sys.stdout.

:param lvl: logging.* level or logLevel*
8.443244
15.983274
0.528255
_log.debug("P4P atexit begins") # clean provider registry from .server import clearProviders, _cleanup_servers clearProviders() # close client contexts from .client.raw import _cleanup_contexts _cleanup_contexts() # stop servers _cleanup_servers() # shutdown default work queue from .util import _defaultWorkQueue _defaultWorkQueue.stop() _log.debug("P4P atexit completes")
def cleanup()
P4P sequenced shutdown. Intended to be atexit. Idenpotent.
8.870333
7.238303
1.225471
with klass(*args, **kws):
    _log.info("Running server")
    try:
        while True:
            time.sleep(100)
    except KeyboardInterrupt:
        pass
    finally:
        _log.info("Stopping server")
def forever(klass, *args, **kws)
Create a server and block the calling thread until KeyboardInterrupt. Shorthand for: ::

    with Server(*args, **kws):
        try:
            time.sleep(99999999)
        except KeyboardInterrupt:
            pass
3.331849
3.065346
1.08694
isarray = valtype[:1] == 'a'
F = [
    ('value', valtype),
    ('alarm', alarm),
    ('timeStamp', timeStamp),
]
_metaHelper(F, valtype, display=display, control=control, valueAlarm=valueAlarm)
F.extend(extra)
return Type(id="epics:nt/NTScalarArray:1.0" if isarray else "epics:nt/NTScalar:1.0",
            spec=F)
def buildType(valtype, extra=[], display=False, control=False, valueAlarm=False)
Build a Type

:param str valtype: A type code to be used with the 'value' field. See :ref:`valuecodes`
:param list extra: A list of tuples describing additional non-standard fields
:param bool display: Include optional fields for display meta-data
:param bool control: Include optional fields for control meta-data
:param bool valueAlarm: Include optional fields for alarm level meta-data
:returns: A :py:class:`Type`
5.042182
5.243439
0.961618
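A minimal usage sketch, assuming this is the NTScalar helper importable from p4p.nt:

from p4p.nt import NTScalar

T = NTScalar.buildType('d', display=True)  # scalar double with display meta-data
Ta = NTScalar.buildType('ad')              # 'a' prefix selects NTScalarArray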
if isinstance(value, Value):
    return value
elif isinstance(value, ntwrappercommon):
    return value.raw
elif isinstance(value, dict):
    return self.Value(self.type, value)
else:
    S, NS = divmod(float(timestamp or time.time()), 1.0)
    return self.Value(self.type, {
        'value': value,
        'timeStamp': {
            'secondsPastEpoch': S,
            'nanoseconds': NS * 1e9,
        },
    })
def wrap(self, value, timestamp=None)
Pack python value into Value

Accepts dict to explicitly initialize fields by name. Any other type is assigned to the 'value' field.
4.209468
3.929127
1.07135
assert isinstance(value, Value), value
V = value.value
try:
    T = klass.typeMap[type(V)]
except KeyError:
    raise ValueError("Can't unwrap value of type %s" % type(V))
try:
    return T(value.value)._store(value)
except Exception as e:
    raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e))
def unwrap(klass, value)
Unpack a Value into an augmented python type (selected from the 'value' field)
4.147369
3.829566
1.082987
S = super(Value, self).changed
for fld in fields or (None,):  # no args tests for any change
    if S(fld):
        return True
return False
def changed(self, *fields)
Test if one or more fields have changed. A field is considered to have changed if it has been marked as changed, or if any of its parent or child fields have been marked as changed.
13.522643
14.610185
0.925563
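A short sketch of changed(), reusing the structure from the changedSet() example below (A is assumed to be that Value):

A.mark('z.a')
assert A.changed('z.a')       # marked directly
assert A.changed('z')         # a child of 'z' is marked
assert A.changed('x', 'z.a')  # True if any of the named fields changed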
return ValueBase.changedSet(self, expand, parents)
def changedSet(self, expand=False, parents=False)
Return a :py:class:`set` containing the names of all fields marked as changed.

:param bool expand: Whether to expand when entire sub-structures are marked as changed. If True, then sub-structures are expanded and only leaf fields will be included. If False, then a direct translation is made, which may include both leaf and sub-structure fields.
:param bool parents: If True, include fake entries for parent sub-structures with leaf fields marked as changed.
:returns: A :py:class:`set` of names of those fields marked as changed.

::

    A = Value(Type([
        ('x', 'i'),
        ('z', ('S', None, [
            ('a', 'i'),
            ('b', 'i'),
        ])),
    ]), {})

    A.mark('z')
    assert A.changedSet(expand=False) == {'z'}          # only shows fields explicitly marked
    assert A.changedSet(expand=True) == {'z.a', 'z.b'}  # actually used during network transmission
    A.mark('z.a')  # redundant
    assert A.changedSet(expand=False) == {'z', 'z.a'}
    assert A.changedSet(expand=True) == {'z.a', 'z.b'}
    A.unmark('z')
    assert A.changedSet(expand=False) == {'z.a'}
    assert A.changedSet(expand=True) == {'z.a'}
    assert A.changedSet(expand=False, parents=True) == {'z', 'z.a'}
    assert A.changedSet(expand=True, parents=True) == {'z', 'z.a'}

* expand=False, parents=False gives a direct mapping of the underlying BitSet as it would be (get/monitor), or has been (put/rpc), moved over the network.
* expand=True, parents=False gives the effective set of leaf fields which will be moved over the network, taking into account the use of whole sub-structure compress/shorthand bits.
* expand=False, parents=True gives a way of testing if anything changed within a set of interesting fields (cf. set.intersect).
10.038246
14.034819
0.715239
attrib = getattr(value, 'attrib', {})
S, NS = divmod(time.time(), 1.0)
value = numpy.asarray(value)  # loses any special/augmented attributes
dims = list(value.shape)
dims.reverse()  # inner-most sent as left

if 'ColorMode' not in attrib:
    # attempt to infer color mode from shape
    if value.ndim == 2:
        attrib['ColorMode'] = 0  # gray
    elif value.ndim == 3:
        for idx, dim in enumerate(dims):
            if dim == 3:  # assume it's a color
                attrib['ColorMode'] = 2 + idx  # 2 - RGB1, 3 - RGB2, 4 - RGB3
                break  # assume that the first is color, and any subsequent dim==3 is a thin ROI

dataSize = value.nbytes

return Value(self.type, {
    'value': (self._code2u[value.dtype.char], value.flatten()),
    'compressedSize': dataSize,
    'uncompressedSize': dataSize,
    'uniqueId': 0,
    'timeStamp': {
        'secondsPastEpoch': S,
        'nanoseconds': NS * 1e9,
    },
    'attribute': [{'name': K, 'value': V} for K, V in attrib.items()],
    'dimension': [{'size': N, 'offset': 0, 'fullSize': N, 'binning': 1, 'reverse': False}
                  for N in dims],
})
def wrap(self, value)
Wrap numpy.ndarray as Value
6.440309
6.208402
1.037354
V = value.value
if V is None:
    # Union empty.  treat as zero-length char array
    V = numpy.zeros((0,), dtype=numpy.uint8)
return V.view(klass.ntndarray)._store(value)
def unwrap(klass, value)
Unwrap Value as NTNDArray
16.800642
12.144394
1.383407
def dounwrap(code, msg, val):
    _log.debug("Handler (%s, %s, %s) -> %s", code, msg, LazyRepr(val), handler)
    try:
        if code == 0:
            handler(RemoteError(msg))
        elif code == 1:
            handler(Cancelled())
        else:
            if val is not None:
                val = nt.unwrap(val)
            handler(val)
    except:
        _log.exception("Exception in Operation handler")
return dounwrap
def unwrapHandler(handler, nt)
Wrap get/rpc handler to unwrap Value
5.056723
4.998942
1.011558
if callable(value):
    def logbuilder(V):
        try:
            value(V)
        except:
            _log.exception("Error in Builder")
            raise  # will be logged again
    return logbuilder

def builder(V):
    try:
        if isinstance(value, Value):
            V[None] = value
        elif isinstance(value, dict):
            for k, v in value.items():
                V[k] = v
        else:
            nt.assign(V, value)
    except:
        _log.exception("Exception in Put builder")
        raise  # will be printed to stdout from extension code.
return builder
def defaultBuilder(value, nt)
Reasonably sensible default handling of put builder
5.30233
5.061324
1.047617
if name is None:
    self._channels = {}
else:
    self._channels.pop(name)
if self._ctxt is not None:
    self._ctxt.disconnect(name)
def disconnect(self, name=None)
Clear internal Channel cache, allowing currently unused channels to be implicitly closed.

:param str name: None, to clear the entire cache, or a name string to clear only a certain entry.
3.754851
3.890074
0.965239
opts = []
if process is not None:
    opts.append('process=%s' % process)
if wait is not None:
    if wait:
        opts.append('wait=true')
    else:
        opts.append('wait=false')
return 'field()record[%s]' % (','.join(opts))
def _request(self, process=None, wait=None)
helper for building pvRequests

:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
3.425488
3.804564
0.900363
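The resulting pvRequest strings follow directly from the code above; for example (ctxt stands in for a Context instance):

ctxt._request()                              # -> 'field()record[]'
ctxt._request(process='passive', wait=True)  # -> 'field()record[process=passive,wait=true]'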
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
                            pvRequest=wrapRequest(request), get=True, put=False)
def get(self, name, handler, request=None)
Begin Fetch of current value of a PV

:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:returns: An object with a method cancel() which may be used to abort the operation.
18.705502
18.211067
1.02715
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
                            builder=defaultBuilder(builder, self._nt),
                            pvRequest=wrapRequest(request), get=get, put=True)
def put(self, name, handler, builder=None, request=None, get=True)
Write a new value to a PV.

:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with None (success), RemoteError, or Cancelled
:param callable builder: Called when the PV Put type is known. A builder is responsible for filling in the Value to be sent. builder(value)
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: An object with a method cancel() which may be used to abort the operation.
11.900261
11.823713
1.006474
chan = self._channel(name)
if value is None:
    value = Value(Type([]))
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
                            value=value, pvRequest=wrapRequest(request), rpc=True)
def rpc(self, name, handler, value, request=None)
Perform RPC operation on PV

:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: An object with a method cancel() which may be used to abort the operation.
14.736665
14.088939
1.045974
chan = self._channel(name)
return Subscription(context=self, nt=self._nt, channel=chan,
                    handler=monHandler(handler), pvRequest=wrapRequest(request),
                    **kws)
def monitor(self, name, handler, request=None, **kws)
Begin subscription to named PV

:param str name: PV name string
:param callable handler: Completion notification. Called with None (FIFO not empty), RemoteError, Cancelled, or Disconnected
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: Whether disconnect (and done) notifications are delivered to the callback (as None).
:returns: A Subscription
15.089999
16.384089
0.921015
_SharedPV.close(self, destroy)
if sync:
    return self._wait_closed()
def close(self, destroy=False, sync=False)
Close PV, disconnecting any clients.

:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.

close() with destroy=True or sync=True will not prevent clients from re-connecting. New clients may prevent sync=True from succeeding. Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`, or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
14.815814
18.87916
0.784771
def decorate(fn):
    assert asyncio.iscoroutinefunction(fn), "Place @timesout before @coroutine"
    @wraps(fn)
    @asyncio.coroutine
    def wrapper(*args, timeout=deftimeout, **kws):
        loop = kws.get('loop')
        fut = fn(*args, **kws)
        if timeout is None:
            yield from fut
        else:
            yield from asyncio.wait_for(fut, timeout=timeout, loop=loop)
    return wrapper
return decorate
def timesout(deftimeout=5.0)
Decorate a coroutine to implement an overall timeout. The decorated coroutine will have an additional keyword argument 'timeout=' which gives a timeout in seconds, or None to disable timeout.

:param float deftimeout: The default timeout= for the decorated coroutine.

It is suggested to perform one overall timeout at a high level rather than multiple timeouts on low-level operations. ::

    @timesout()
    @asyncio.coroutine
    def dostuff(ctxt):
        yield from ctxt.put('msg', 'Working')
        A, B = yield from ctxt.get(['foo', 'bar'])
        yield from ctxt.put('bar', A+B, wait=True)
        yield from ctxt.put('msg', 'Done')

    @asyncio.coroutine
    def exec():
        with Context('pva') as ctxt:
            yield from dostuff(ctxt, timeout=5)
3.039705
2.931181
1.037024
singlepv = isinstance(name, (bytes, str))
if singlepv:
    return (yield from self._get_one(name, request=request))
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)

futs = [self._get_one(N, request=R) for N, R in zip(name, request)]

ret = yield from asyncio.gather(*futs, loop=self.loop)
return ret
def get(self, name, request=None)
Fetch current value of some number of PVs.

:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A p4p.Value, or list of same. Subject to :py:ref:`unwrap`.

When invoked with a single name, a single value is returned. When invoked with a list of names, a list of values is returned. ::

    with Context('pva') as ctxt:
        V = yield from ctxt.get('pv:name')
        A, B = yield from ctxt.get(['pv:1', 'pv:2'])
3.649881
3.398225
1.074055
if request and (process or wait is not None):
    raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
    request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')

singlepv = isinstance(name, (bytes, str))
if singlepv:
    return (yield from self._put_one(name, values, request=request, get=get))
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)

futs = [self._put_one(N, V, request=R, get=get) for N, V, R in zip(name, values, request)]

yield from asyncio.gather(*futs, loop=self.loop)
def put(self, name, values, request=None, process=None, wait=None, get=True)
Write a new value to some number of PVs.

:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.

When invoked with a single name, a single value is returned. When invoked with a list of names, a list of values is returned. If 'wait' or 'process' is specified, then 'request' must be omitted or None. ::

    with Context('pva') as ctxt:
        yield from ctxt.put('pv:name', 5.0)
        yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
        yield from ctxt.put('pv:name', {'value':5})

The provided value(s) will be automatically coerced to the target type. If this is not possible then an Exception is raised/returned. Unless the provided value is a dict, it is assumed to be a plain value and an attempt is made to store it in the '.value' field.
3.806618
3.45541
1.10164
assert asyncio.iscoroutinefunction(cb), "monitor callback must be coroutine"
R = Subscription(name, cb, notify_disconnect=notify_disconnect, loop=self.loop)
cb = partial(self.loop.call_soon_threadsafe, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R
def monitor(self, name, cb, request=None, notify_disconnect=False)
Create a subscription.

:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception. Specifically: Disconnected, RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance

The callable will be invoked with one argument which is either:

* A p4p.Value (subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
5.307518
5.652083
0.939038
if self._S is not None:  # after .close() self._event should never be called
    self._S.close()
    self._S = None
    self._Q.put_nowait(None)
def close(self)
Begin closing subscription.
8.081368
7.266387
1.112158
if self._S is not None:  # after .close() self._event should never be called
    self._S.close()
    # wait for Cancelled to be delivered
    self._evt.wait()
    self._S = None
def close(self)
Close subscription.
12.433917
10.919276
1.138713
if self._Q is not None:
    for T in self._T:
        self._Q.interrupt()
    for n, T in enumerate(self._T):
        _log.debug('Join Context worker %d', n)
        T.join()
    _log.debug('Joined Context workers')
    self._Q, self._T = None, None
super(Context, self).close()
def close(self)
Force close all Channels and cancel all Operations
4.694443
4.638888
1.011976
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
    name = [name]
    request = [request]
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)

# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)

raw_get = super(Context, self).get
try:
    for i, (N, req) in enumerate(izip(name, request)):
        def cb(value, i=i):
            try:
                if not isinstance(value, Cancelled):
                    done.put_nowait((value, i))
                _log.debug('get %s Q %s', N, LazyRepr(value))
            except:
                _log.exception("Error queuing get result %s", value)
        _log.debug('get %s w/ %s', N, req)
        ops[i] = raw_get(N, cb, request=req)

    for _n in range(len(name)):
        try:
            value, i = done.get(timeout=timeout)
        except Empty:
            if throw:
                _log.debug('timeout %s after %s', name[i], timeout)
                raise TimeoutError()
            break
        _log.debug('got %s %s', name[i], LazyRepr(value))
        if throw and isinstance(value, Exception):
            raise value
        result[i] = value
finally:
    [op and op.close() for op in ops]

if singlepv:
    return result[0]
else:
    return result
def get(self, name, request=None, timeout=5.0, throw=True)
Fetch current value of some number of PVs.

:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.

When invoked with a single name, a single value is returned. When invoked with a list of names, a list of values is returned.

>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
3.491789
3.416465
1.022047
if request and (process or wait is not None):
    raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
    request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')

singlepv = isinstance(name, (bytes, unicode))
if singlepv:
    name = [name]
    values = [values]
    request = [request]
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)

# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)

raw_put = super(Context, self).put
try:
    for i, (n, value, req) in enumerate(izip(name, values, request)):
        if isinstance(value, (bytes, unicode)) and value[:1] == '{':
            try:
                value = json.loads(value)
            except ValueError:
                raise ValueError("Unable to interpret '%s' as json" % value)

        # completion callback
        def cb(value, i=i):
            try:
                done.put_nowait((value, i))
            except:
                _log.exception("Error queuing put result %s", LazyRepr(value))

        ops[i] = raw_put(n, cb, builder=value, request=req, get=get)

    for _n in range(len(name)):
        try:
            value, i = done.get(timeout=timeout)
        except Empty:
            if throw:
                raise TimeoutError()
            break
        if throw and isinstance(value, Exception):
            raise value
        result[i] = value

    if singlepv:
        return result[0]
    else:
        return result
finally:
    [op and op.close() for op in ops]
def put(self, name, values, request=None, timeout=5.0, throw=True, process=None, wait=None, get=True)
Write a new value to some number of PVs.

:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same

When invoked with a single name, a single result is returned. When invoked with a list of names, a list of results is returned. If 'wait' or 'process' is specified, then 'request' must be omitted or None.

>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>

The provided value(s) will be automatically coerced to the target type. If this is not possible then an Exception is raised/returned. Unless the provided value is a dict, it is assumed to be a plain value and an attempt is made to store it in the '.value' field.
3.739486
3.505281
1.066815
done = Queue()

op = super(Context, self).rpc(name, done.put_nowait, value, request=request)
try:
    try:
        result = done.get(timeout=timeout)
    except Empty:
        result = TimeoutError()
    if throw and isinstance(result, Exception):
        raise result
    return result
except:
    op.close()
    raise
def rpc(self, name, value, request=None, timeout=5.0, throw=True)
Perform a Remote Procedure Call (RPC) operation

:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A Value or Exception. Subject to :py:ref:`unwrap`.

>>> ctxt = Context('pva')
>>> ctxt.rpc('pv:name:add', {'A':5, 'B':6})
>>>

The provided value(s) will be automatically coerced to the target type. If this is not possible then an Exception is raised/returned. Unless the provided value is a dict, it is assumed to be a plain value and an attempt is made to store it in the '.value' field.
3.778261
4.739265
0.797225
R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
R._S = super(Context, self).monitor(name, R._event, request)
return R
def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None)
Create a subscription.

:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception. Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance

The callable will be invoked with one argument which is either:

* A p4p.Value (subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
6.78181
9.908398
0.684451
self._wrap = wrap or (nt and nt.wrap) or self._wrap
self._unwrap = unwrap or (nt and nt.unwrap) or self._unwrap

_SharedPV.open(self, self._wrap(value))
def open(self, value, nt=None, wrap=None, unwrap=None)
Mark the PV as opened and provide its initial value. This initial value is later updated with post().

:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).

Any clients which began connecting while this PV was in the closed state will complete connecting. Only those fields of the value which are marked as changed will be stored.
4.907651
4.650928
1.055198
wrap = None
if rtype is None or isinstance(rtype, Type):
    pass
elif isinstance(rtype, (list, tuple)):
    rtype = Type(rtype)
elif hasattr(rtype, 'type'):  # eg. one of the NT* helper classes
    wrap = rtype.wrap
    rtype = rtype.type
else:
    raise TypeError("Not supported")

def wrapper(fn):
    if wrap is not None:
        orig = fn
        @wraps(orig)
        def wrapper2(*args, **kws):
            return wrap(orig(*args, **kws))
        fn = wrapper2
    fn._reply_Type = rtype
    return fn
return wrapper
def rpc(rtype=None)
Decorator marks a method for export.

:param rtype: Specifies which :py:class:`Type` this method will return.

The return type (rtype) must be one of:

- An instance of :py:class:`p4p.Type`
- None, in which case the method must return a :py:class:`p4p.Value`
- One of the NT helper classes (eg :py:class:`p4p.nt.NTScalar`).
- A list or tuple used to construct a :py:class:`p4p.Type`.

Exported methods raise an :py:class:`Exception` to indicate an error to the remote caller. :py:class:`RemoteError` may be raised to send a specific message describing the error condition.

>>> class Example(object):
...     @rpc(NTScalar.buildType('d'))
...     def add(self, lhs, rhs):
...         return {'value':float(lhs)+float(rhs)}
4.59833
4.445351
1.034413
def wrapper(fn):
    fn._call_PV = pvname
    fn._call_Request = request
    fn._reply_Type = rtype
    return fn
return wrapper
def rpccall(pvname, request=None, rtype=None)
Decorator marks a client proxy method.

:param str pvname: The PV name, which will be formatted using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.

The method to be decorated must have all keyword arguments, where the keywords are type code strings or :class:`~p4p.Type`.
5.842131
8.363667
0.698513
from p4p.server import Server
import time

queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
threads = []
server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
with server, queue:
    while True:
        time.sleep(10.0)
def quickRPCServer(provider, prefix, target, maxsize=20, workers=1, useenv=True, conf=None, isolate=False)
Run an RPC server in the current thread. Calls are handled sequentially, and always in the current thread, if workers=1 (the default). If workers>1 then calls are handled concurrently by a pool of worker threads. Requires NTURI style argument encoding.

:param str provider: A provider name. Must be unique in this process.
:param str prefix: PV name prefix. Along with method names, must be globally unique.
:param target: The object which is exporting methods. (use the :func:`rpc` decorator)
:param int maxsize: Number of pending RPC calls to be queued.
:param int workers: Number of worker threads (default 1)
:param useenv: Passed to :class:`~p4p.server.Server`
:param conf: Passed to :class:`~p4p.server.Server`
:param isolate: Passed to :class:`~p4p.server.Server`
5.204589
4.325024
1.203366
# inject our ctor first so we don't have to worry about super() non-sense.
def _proxyinit(self, context=None, format={}, **kws):
    assert context is not None, context
    self.context = context
    self.format = format
    spec.__init__(self, **kws)
obj = {'__init__': _proxyinit}

for K, V in inspect.getmembers(spec, lambda M: hasattr(M, '_call_PV')):
    obj[K] = _wrapMethod(K, V)

return type(spec.__name__, (RPCProxyBase, spec), obj)
def rpcproxy(spec)
Decorator to enable this class to proxy RPC client calls

The decorated class constructor takes two additional arguments: `context=` is required to be a :class:`~p4p.client.thread.Context`; `format=` can be a string, tuple, or dictionary and is applied to PV name strings given to :py:func:`rpccall`. Other arguments are passed to the user class constructor. ::

    @rpcproxy
    class MyProxy(object):
        @rpccall("%s:add")
        def add(lhs='d', rhs='d'):
            pass

    ctxt = Context('pva')
    proxy = MyProxy(context=ctxt, format="tst:")  # evaluates "%s:add"%"tst:"

The decorated class will be a sub-class of the provided class and :class:`RPCProxyBase`.
7.084614
6.812403
1.039958
if val.getID() != self.id:
    self._update(val)
return self._unwrap(val)
def unwrap(self, val)
Unpack a Value as some other python type
9.468219
9.839257
0.96229
assert valtype[:1] == 'a', 'valtype must be an array'
return Type(id="epics:nt/NTMultiChannel:1.0",
            spec=[
                ('value', valtype),
                ('channelName', 'as'),
                ('descriptor', 's'),
                ('alarm', alarm),
                ('timeStamp', timeStamp),
                ('severity', 'ai'),
                ('status', 'ai'),
                ('message', 'as'),
                ('secondsPastEpoch', 'al'),
                ('nanoseconds', 'ai'),
                ('userTag', 'ai'),
                ('isConnected', 'a?'),
            ] + extra)
def buildType(valtype, extra=[])
Build a Type

:param str valtype: A type code to be used with the 'value' field. Must be an array
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
5.858145
5.666719
1.033781
return Type(id="epics:nt/NTTable:1.0",
            spec=[
                ('labels', 'as'),
                ('value', ('S', None, columns)),
                ('descriptor', 's'),
                ('alarm', alarm),
                ('timeStamp', timeStamp),
            ] + extra)
def buildType(columns=[], extra=[])
Build a table

:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
10.197242
11.75409
0.867548
if isinstance(values, Value):
    return values
cols = dict([(L, []) for L in self.labels])
try:
    # unzip list of dict
    for V in values:
        for L in self.labels:
            try:
                cols[L].append(V[L])
            except (IndexError, KeyError):
                pass
    # allow omit empty columns
    for L in self.labels:
        V = cols[L]
        if len(V) == 0:
            del cols[L]
    try:
        return self.Value(self.type, {
            'labels': self.labels,
            'value': cols,
        })
    except:
        _log.error("Failed to encode '%s' with %s", cols, self.labels)
        raise
except:
    _log.exception("Failed to wrap: %s", values)
    raise
def wrap(self, values)
Pack an iterable of dict into a Value

>>> T = NTTable([('A', 'ai'), ('B', 'as')])
>>> V = T.wrap([
...     {'A':42, 'B':'one'},
...     {'A':43, 'B':'two'},
... ])
3.825415
3.860298
0.990964
ret = []

# build lists of column names, and values
lbl, cols = [], []
for cname, cval in value.value.items():
    lbl.append(cname)
    cols.append(cval)

# zip together column arrays to iterate over rows
for rval in izip(*cols):
    # zip together column names and row values
    ret.append(OrderedDict(zip(lbl, rval)))

return ret
def unwrap(value)
Iterate an NTTable

:returns: A list of OrderedDict, one per row
6.705609
6.201201
1.08134
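A round-trip sketch pairing wrap() with unwrap(), mirroring the doctest in the wrap() record above:

T = NTTable([('A', 'ai'), ('B', 'as')])
V = T.wrap([{'A': 42, 'B': 'one'}, {'A': 43, 'B': 'two'}])
for row in NTTable.unwrap(V):
    print(row['A'], row['B'])  # one OrderedDict per row: 42 one, then 43 two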
try:
    return Type(id="epics:nt/NTURI:1.0",
                spec=[
                    ('scheme', 's'),
                    ('authority', 's'),
                    ('path', 's'),
                    ('query', ('S', None, args)),
                ])
except Exception as e:
    raise ValueError('Unable to build NTURI compatible type from %s' % args)
def buildType(args)
Build NTURI

:param list args: A list of tuples of query argument name and PVD type code.

>>> I = NTURI([
...     ('arg_a', 'I'),
...     ('arg_two', 's'),
... ])
6.038023
5.83213
1.035303
# build dict of argument name+value
AV = {}
AV.update([A for A in kws.items() if A[1] is not None])
AV.update([(N, V) for (N, _T), V in zip(self._args, args)])

# list of argument name+type tuples for which a value was provided
AT = [A for A in self._args if A[0] in AV]

T = self.buildType(AT)
try:
    return Value(T, {
        'scheme': scheme,
        'authority': authority,
        'path': path,
        'query': AV,
    })
except Exception as e:
    raise ValueError('Unable to initialize NTURI %s from %s using %s' % (AT, AV, self._args))
def wrap(self, path, args=(), kws={}, scheme='', authority='')
Wrap argument values (tuple/list with optional dict) into Value

:param str path: The PV name to which this call is made
:param tuple args: Ordered arguments
:param dict kws: Keyword arguments
:rtype: Value
5.605492
5.620483
0.997333
V = self.type()
S, NS = divmod(float(timestamp or time.time()), 1.0)
V.timeStamp = {
    'secondsPastEpoch': S,
    'nanoseconds': NS * 1e9,
}
if isinstance(value, dict):
    # assume dict of index and choices list
    V.value = value
    self._choices = V['value.choices']
else:
    # index or string
    self.assign(V, value)
return V
def wrap(self, value, timestamp=None)
Pack python value into Value
7.625154
7.26262
1.049918
if value.changed('value.choices'):
    self._choices = value['value.choices']

idx = value['value.index']
ret = ntenum(idx)._store(value)
try:
    ret.choice = self._choices[idx]
except IndexError:
    pass  # leave it as None
return ret
def unwrap(self, value)
Unpack a Value into an augmented python type (selected from the 'value' field)
10.713772
9.876798
1.084741
if isinstance(py, (bytes, unicode)):
    for i, C in enumerate(V['value.choices'] or self._choices):
        if py == C:
            V['value.index'] = i
            return
# attempt to parse as integer
V['value.index'] = py
def assign(self, V, py)
Store python value in Value
8.047751
7.338928
1.096584
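A sketch of assign() resolving a string against the choice list; the dict form passed to wrap() follows the NTEnum wrap() record above, and NT is assumed to be an NTEnum helper instance:

V = NT.wrap({'index': 0, 'choices': ['STOP', 'RUN']})  # hypothetical helper instance
NT.assign(V, 'RUN')  # matched against choices -> value.index == 1
NT.assign(V, 0)      # non-string falls through and is stored as the index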
_SharedPV.close(self, destroy)
if sync:
    # TODO: still not syncing PVA workers...
    self._queue.sync()
    self._disconnected.wait()
def close(self, destroy=False, sync=False, timeout=None)
Close PV, disconnecting any clients.

:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.

close() with destroy=True or sync=True will not prevent clients from re-connecting. New clients may prevent sync=True from succeeding. Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`, or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
24.84416
27.259512
0.911394
all = gc.get_objects()
_stats = {}
for obj in all:
    K = type(obj)
    if K is StatsDelta:
        continue  # avoid counting ourselves
    elif K is InstanceType:  # instance of an old-style class
        K = getattr(obj, '__class__', K)
    # Track types as strings to avoid holding references
    K = str(K)
    try:
        _stats[K] += 1
    except KeyError:
        _stats[K] = 1

# explicitly break the reference loop between the list and this frame,
# which is contained in the list
# This would otherwise prevent the list from being free'd
del all
return _stats
def gcstats()
Count the number of instances of each type/class

:returns: A dict() mapping type (as a string) to an integer number of references
7.37515
7.761699
0.950198
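A minimal usage sketch for gcstats():

stats = gcstats()
for typename, count in sorted(stats.items(), key=lambda kv: -kv[1])[:5]:
    print(typename, count)  # the five most numerous types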
import threading
import time

S = _StatsThread(period=period, file=file)
T = threading.Thread(target=S)
T.daemon = True
T.start()
def periodic(period=60.0, file=sys.stderr)
Start a daemon thread which will periodically print GC stats

:param period: Update period in seconds
:param file: A writable file-like object
4.650147
5.023879
0.925609
cur = gcstats()
Ncur = len(cur)
if self.stats is not None and file is not None:
    prev = self.stats
    Nprev = self.ntypes  # may be less than len(prev)

    if Ncur != Nprev:
        print("# Types %d -> %d" % (Nprev, Ncur), file=file)

    Scur, Sprev, first = set(cur), set(prev), True
    for T in Scur - Sprev:  # new types
        if first:
            print('New Types', file=file)
            first = False
        print(' ', T, cur[T], file=file)

    first = True
    for T in Sprev - Scur:  # collected types
        if first:
            print('Cleaned Types', file=file)
            first = False
        print(' ', T, -prev[T], file=file)

    first = True
    for T in Scur & Sprev:
        if cur[T] == prev[T]:
            continue
        if first:
            print('Known Types', file=file)
            first = False
        print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file)

else:  # first call
    print("All Types", file=file)
    for T, C in cur.items():
        print(' ', T, C, file=file)

self.stats, self.ntypes = cur, len(cur)
def collect(self, file=sys.stderr)
Collect stats and print results to file

:param file: A writable file-like object
3.125672
3.247894
0.962369
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
    return self._get_one(name, request=request, timeout=timeout, throw=throw)
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)

return cothread.WaitForAll(
    [cothread.Spawn(self._get_one, N, request=R, timeout=timeout, throw=throw,
                    raise_on_wait=True)
     for N, R in zip(name, request)]
)
def get(self, name, request=None, timeout=5.0, throw=True)
Fetch current value of some number of PVs.

:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.

When invoked with a single name, a single value is returned. When invoked with a list of names, a list of values is returned.

>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
3.976739
4.42289
0.899127
if request and (process or wait is not None):
    raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
    request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')

singlepv = isinstance(name, (bytes, unicode))
if singlepv:
    return self._put_one(name, values, request=request, timeout=timeout, throw=throw, get=get)
elif request is None:
    request = [None] * len(name)

assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)

return cothread.WaitForAll(
    [cothread.Spawn(self._put_one, N, V, request=R, timeout=timeout, throw=throw,
                    get=get, raise_on_wait=True)
     for N, V, R in zip(name, values, request)]
)
def put(self, name, values, request=None, process=None, wait=None, timeout=5.0, get=True, throw=True)
Write a new value to some number of PVs.

:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same

When invoked with a single name, a single result is returned. When invoked with a list of names, a list of results is returned. If 'wait' or 'process' is specified, then 'request' must be omitted or None.

>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>

The provided value(s) will be automatically coerced to the target type. If this is not possible then an Exception is raised/returned. Unless the provided value is a dict, it is assumed to be a plain value and an attempt is made to store it in the '.value' field.
4.052863
3.912603
1.035848
R = Subscription(name, cb, notify_disconnect=notify_disconnect)
cb = partial(cothread.Callback, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R
def monitor(self, name, cb, request=None, notify_disconnect=False)
Create a subscription.

:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception. Specifically: Disconnected, RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance

The callable will be invoked with one argument which is either:

* A p4p.Value (subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
9.767743
11.640135
0.839143
if self._S is not None:  # after .close() self._event should never be called
    self._S.close()
    self._S = None
    self._Q.Signal(None)
    self._T.Wait()
def close(self)
Close subscription.
12.167932
10.877541
1.118629
check_chamber(chamber)

kwargs.update(chamber=chamber, congress=congress)

if 'state' in kwargs and 'district' in kwargs:
    path = ("members/{chamber}/{state}/{district}/"
            "current.json").format(**kwargs)
elif 'state' in kwargs:
    path = ("members/{chamber}/{state}/"
            "current.json").format(**kwargs)
else:
    path = ("{congress}/{chamber}/"
            "members.json").format(**kwargs)

return self.fetch(path, parse=lambda r: r['results'])
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs)
Takes a chamber and Congress, OR state and district, returning a list of members
2.726276
2.562781
1.063796
"Same as BillsClient.by_member" path = "members/{0}/bills/{1}.json".format(member_id, type) return self.fetch(path)
def bills(self, member_id, type='introduced')
Same as BillsClient.by_member
7.477584
4.230176
1.767677
check_chamber(chamber)
path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
path = path.format(first=first, second=second, type=type,
                   congress=congress, chamber=chamber)
return self.fetch(path)
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS)
See how often two members voted together in a given Congress. Takes two member IDs, a chamber and a Congress number.
3.134657
2.986945
1.049453
path = "members/{member_id}/bills/{type}.json".format( member_id=member_id, type=type) return self.fetch(path)
def by_member(self, member_id, type='introduced')
Takes a bioguide ID and a type: (introduced|updated|cosponsored|withdrawn) Returns recent bills
3.691669
3.471364
1.063463
"Shortcut for upcoming bills" path = "bills/upcoming/{chamber}.json".format(chamber=chamber) return self.fetch(path)
def upcoming(self, chamber, congress=CURRENT_CONGRESS)
Shortcut for upcoming bills
7.511097
5.554422
1.352273
check_chamber(chamber)

now = datetime.datetime.now()
year = year or now.year
month = month or now.month

path = "{chamber}/votes/{year}/{month}.json".format(
    chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results'])
def by_month(self, chamber, year=None, month=None)
Return votes for a single month, defaulting to the current month.
3.050464
2.679508
1.138442
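An illustrative call; the client attribute name is an assumption about how this class is exposed:

votes = client.votes.by_month('house', year=2017, month=6)  # hypothetical client instance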
check_chamber(chamber)

start, end = parse_date(start), parse_date(end)
if start > end:
    start, end = end, start

path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format(
    chamber=chamber, start=start, end=end)
return self.fetch(path, parse=lambda r: r['results'])
def by_range(self, chamber, start, end)
Return votes cast in a chamber between two dates, up to one month apart.
2.605852
2.488953
1.046967
"Return votes cast in a chamber on a single day" date = parse_date(date) return self.by_range(chamber, date, date)
def by_date(self, chamber, date)
Return votes cast in a chamber on a single day
5.711632
3.734192
1.52955
"Return today's votes in a given chamber" now = datetime.date.today() return self.by_range(chamber, now, now)
def today(self, chamber)
Return today's votes in a given chamber
6.530143
5.703763
1.144883
"Return votes by type: missed, party, lone no, perfect" check_chamber(chamber) path = "{congress}/{chamber}/votes/{type}.json".format( congress=congress, chamber=chamber, type=type) return self.fetch(path)
def by_type(self, chamber, type, congress=CURRENT_CONGRESS)
Return votes by type: missed, party, lone no, perfect
7.221801
2.859424
2.525614
"Return votes on nominations from a given Congress" path = "{congress}/nominations.json".format(congress=congress) return self.fetch(path)
def nominations(self, congress=CURRENT_CONGRESS)
Return votes on nominations from a given Congress
6.364917
4.015357
1.585143
url = self.BASE_URI + path
headers = {'X-API-Key': self.apikey}
log.debug(url)
resp, content = self.http.request(url, headers=headers)
content = u(content)
content = json.loads(content)

# handle errors
if not content.get('status') == 'OK':
    if "errors" in content and content['errors'][0]['error'] == "Record not found":
        raise NotFound(path)
    if content.get('status') == '404':
        raise NotFound(path)
    raise CongressError(content, resp, url)

if callable(parse):
    content = parse(content)

return content
def fetch(self, path, parse=lambda r: r['results'][0])
Make an API request, with authentication. This method can be used directly to fetch new endpoints or customize parsing. ::

    >>> from congress import Congress
    >>> client = Congress()
    >>> senate = client.fetch('115/senate/members.json')
    >>> print(senate['num_results'])
    101
3.722414
3.919763
0.949653
if isinstance(s, (datetime.datetime, datetime.date)):
    return s
try:
    from dateutil.parser import parse
except ImportError:
    parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d")
return parse(s)
def parse_date(s)
Parse a date using dateutil.parser.parse if available, falling back to datetime.datetime.strptime if not
2.318273
2.323321
0.997828
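A behavior sketch for parse_date(), following directly from the code above:

parse_date('2013-01-04')               # -> datetime.datetime(2013, 1, 4, 0, 0)
parse_date(datetime.date(2013, 1, 4))  # date/datetime inputs pass through unchanged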
'''
>>> d = DiskVarArray('/tmp/test3', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.__getitem__(0)
memmap([1, 2, 3, 4], dtype=uint32)
>>> d.append([5, 6, 7, 8])
>>> d.__getitem__(1)
memmap([5, 6, 7, 8], dtype=uint32)
>>> shutil.rmtree('/tmp/test3', ignore_errors=True)
'''
self.index.append(len(self.data))
self.data.extend(v)
def append(self, v)
>>> d = DiskVarArray('/tmp/test3', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.__getitem__(0)
memmap([1, 2, 3, 4], dtype=uint32)
>>> d.append([5, 6, 7, 8])
>>> d.__getitem__(1)
memmap([5, 6, 7, 8], dtype=uint32)
>>> shutil.rmtree('/tmp/test3', ignore_errors=True)
2.728655
1.381803
1.974706
'''
>>> import numpy as np
>>> d = DiskVarArray('/tmp/test4', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.destroy # doctest:+ELLIPSIS
<bound method DiskVarArray.destroy of <diskarray.vararray.DiskVarArray object at 0x...>>
>>> shutil.rmtree('/tmp/test4', ignore_errors=True)
'''
self.data.destroy()
self.data = None

self.index.destroy()
self.index = None
def destroy(self)
>>> import numpy as np
>>> d = DiskVarArray('/tmp/test4', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.destroy # doctest:+ELLIPSIS
<bound method DiskVarArray.destroy of <diskarray.vararray.DiskVarArray object at 0x...>>
>>> shutil.rmtree('/tmp/test4', ignore_errors=True)
3.857712
1.510887
2.553276
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), growby=3, dtype=np.float32)
>>> print(da[:])
[]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.append(data[0])
>>> print(da[:])
[[2. 3. 4.]
 [0. 0. 0.]
 [0. 0. 0.]]
'''
# FIXME: for now we only support
# append along axis 0 and only
# for 1d and 2d arrays

# FIXME: for now we only support
# appending one item at a time

nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]

if nrows == nrows_capacity:
    self._capacity_shape = self._incr_shape(self._capacity_shape, self._growby)
    self._update_ndarray()

shapelen = len(self._shape)
if shapelen not in (1, 2):
    raise AppendNotSupported(shapelen)

self.data[nrows] = v
self._shape = self._incr_shape(self._shape, 1)
def append(self, v)
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), growby=3, dtype=np.float32)
>>> print(da[:])
[]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.append(data[0])
>>> print(da[:])
[[2. 3. 4.]
 [0. 0. 0.]
 [0. 0. 0.]]
3.785548
2.513223
1.506252
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), capacity=(10, 3), dtype=np.float32)
>>> print(da[:])
[[2. 3. 4.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.extend(data)
>>> print(da[:])
[[2. 3. 4.]
 [1. 2. 3.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
>>> os.remove('/tmp/test.array')
'''
nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]
remaining_capacity = nrows_capacity - nrows

if remaining_capacity < len(v):
    diff = len(v) - remaining_capacity
    self._capacity_shape = self._incr_shape(self._capacity_shape, diff)
    self._update_ndarray()

self.data[nrows:nrows+len(v)] = v
self._shape = self._incr_shape(self._shape, len(v))
def extend(self, v)
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), capacity=(10, 3), dtype=np.float32)
>>> print(da[:])
[[2. 3. 4.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.extend(data)
>>> print(da[:])
[[2. 3. 4.]
 [1. 2. 3.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
>>> os.remove('/tmp/test.array')
1.959003
1.405483
1.393829
# Strip the index info
non_index_columns = filter(lambda x: x not in self._prdd._index_names,
                           self._prdd._column_names())
self._grouped_spark_sql = (self._prdd.to_spark_sql()
                           .select(non_index_columns)
                           .groupBy(self._by))
self._columns = filter(lambda x: x != self._by, non_index_columns)
def _prep_spark_sql_groupby(self)
Use the Spark SQL group-by approach.
5.304926
5.126567
1.034791
myargs = self._myargs
mykwargs = self._mykwargs

def extract_keys(groupedFrame):
    for key, group in groupedFrame:
        yield (key, group)

def group_and_extract(frame):
    return extract_keys(frame.groupby(*myargs, **mykwargs))

self._baseRDD = self._prdd._rdd()
self._distributedRDD = self._baseRDD.flatMap(group_and_extract)
self._mergedRDD = self._sortIfNeeded(
    self._group(self._distributedRDD))
def _prep_pandas_groupby(self)
Prepare the old-school pandas groupby-based approach.
6.027497
5.732729
1.051419
return rdd.reduceByKey(lambda x, y: x.append(y))
def _group(self, rdd)
Group together the values with the same key.
5.560222
3.902011
1.424963
self._prep_pandas_groupby()

def extract_group_labels(frame):
    return (frame[0], frame[1].index.values)

return self._mergedRDD.map(extract_group_labels).collectAsMap()
def groups(self)
Returns dict {group name -> group labels}.
10.953206
8.761948
1.250088
if self._can_use_new_school():
    return self._grouped_spark_sql.count()
self._prep_pandas_groupby()
return self._mergedRDD.count()
def ngroups(self)
Number of groups.
20.676849
19.488468
1.060979
self._prep_pandas_groupby()

def extract_group_indices(frame):
    return (frame[0], frame[1].index)

return self._mergedRDD.map(extract_group_indices).collectAsMap()
def indices(self)
Returns dict {group name -> group indices}.
10.669307
8.412019
1.268341
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
    self._regroup_mergedRDD().values().map(
        lambda x: x.median()),
    self.sql_ctx)
def median(self)
Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex.
19.510612
17.972054
1.085608
if self._can_use_new_school():
    self._prep_spark_sql_groupby()
    import pyspark.sql.functions as func
    return self._use_aggregation(func.mean)
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
    self._regroup_mergedRDD().values().map(
        lambda x: x.mean()),
    self.sql_ctx)
def mean(self)
Compute mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex.
10.609351
9.958145
1.065394
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
    self._regroup_mergedRDD().values().map(
        lambda x: x.var(ddof=ddof)),
    self.sql_ctx)
def var(self, ddof=1)
Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex.
13.80889
14.83413
0.930886
if self._can_use_new_school():
    self._prep_spark_sql_groupby()
    import pyspark.sql.functions as func
    return self._use_aggregation(func.sum)
self._prep_pandas_groupby()

myargs = self._myargs
mykwargs = self._mykwargs

def create_combiner(x):
    return x.groupby(*myargs, **mykwargs).sum()

def merge_value(x, y):
    return pd.concat([x, create_combiner(y)])

def merge_combiner(x, y):
    return x + y

rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
    create_combiner,
    merge_value,
    merge_combiner)).values()

return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx)
def sum(self)
Compute the sum for each group.
5.864257
5.565516
1.053677
expressions = map(lambda c: f(c).alias(c), self._columns)
return expressions
def _create_exprs_using_func(self, f, columns)
Create aggregate expressions using the provided function with the result coming back as the original column name.
9.94746
9.223942
1.078439
if not columns:
    columns = self._columns
from pyspark.sql import functions as F
aggs = map(lambda column: agg(column).alias(column), self._columns)
aggRdd = self._grouped_spark_sql.agg(*aggs)
df = DataFrame.from_schema_rdd(aggRdd, self._by)
return df
def _use_aggregation(self, agg, columns=None)
Compute the result using the aggregation function provided. The aggregation name must also be provided so we can strip off the extra name that Spark SQL adds.
5.345678
5.241404
1.019894
myargs = self._myargs
mykwargs = self._mykwargs
self._prep_pandas_groupby()

def regroup(df):
    return df.groupby(*myargs, **mykwargs)

return self._mergedRDD.mapValues(regroup)
def _regroup_mergedRDD(self)
A common pattern is that we want to call groupby again on the dataframes so we can use the groupby functions.
5.682367
5.013714
1.133365
# TODO: Stop collecting the entire frame for each key.
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
nthRDD = self._regroup_mergedRDD().mapValues(
    lambda r: r.nth(n, *args, **kwargs)).values()
return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx)
def nth(self, n, *args, **kwargs)
Take the nth element of each groupby.
11.909569
10.438528
1.140924
if self._can_use_new_school() and f == pd.Series.kurtosis:
    self._prep_spark_sql_groupby()
    import custom_functions as CF
    return self._use_aggregation(CF.kurtosis)
else:
    self._prep_pandas_groupby()
    return DataFrame.fromDataFrameRDD(
        self._regroup_mergedRDD().values().map(
            lambda g: g.aggregate(f)),
        self.sql_ctx)
def aggregate(self, f)
Apply the aggregation function. Note: This implementation does not take advantage of partial aggregation unless we have one of the special cases. Currently the only special case is Series.kurtosis - and even that doesn't properly do partial aggregations, but we can improve it to do this eventually!
11.415647
9.680187
1.17928
self._prep_pandas_groupby()

def key_by_index(data):
    # TODO: Is there a better way to do this?
    for key, row in data.iterrows():
        yield (key, pd.DataFrame.from_dict(
            dict([(key, row)]), orient='index'))

myargs = self._myargs
mykwargs = self._mykwargs

regroupedRDD = self._distributedRDD.mapValues(
    lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
    lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
dataframe = self._sortIfNeeded(reKeyedRDD).values()
return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
def apply(self, func, *args, **kwargs)
Apply the provided function and combine the results together in the same way as apply from groupby in pandas. This returns a DataFrame.
5.161427
5.283984
0.976806
def _(col):
    spark_ctx = SparkContext._active_spark_context
    java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions, name)
                (col._java_ctx if isinstance(col, Column) else col))
    return Column(java_ctx)
_.__name__ = name
_.__doc__ = doc
return _
def _create_function(name, doc="")
Create a function for aggregator by name
5.027864
4.951739
1.015373
for column, values in frame.iteritems():
    # Temporary hack, fix later
    counter = self._counters.get(column)
    for value in values:
        if counter is not None:
            counter.merge(value)
def merge(self, frame)
Add another DataFrame to the PStatCounter.
7.30637
5.195086
1.4064
if not isinstance(other, PStatCounter):
    raise Exception("Can only merge PStatcounters!")

for column, counter in self._counters.items():
    other_counter = other._counters.get(column)
    self._counters[column] = counter.mergeStats(other_counter)

return self
def merge_pstats(self, other)
Merge all of the stats counters of the other PStatCounter with our counters.
3.911093
3.246351
1.204766
for column_name, _ in self._column_stats.items():
    data_arr = frame[[column_name]].values
    count, min_max_tup, mean, _, _, _ = \
        scistats.describe(data_arr)

    stats_counter = StatCounter()
    stats_counter.n = count
    stats_counter.mu = mean
    stats_counter.m2 = np.sum((data_arr - mean) ** 2)
    stats_counter.minValue, stats_counter.maxValue = min_max_tup

    self._column_stats[column_name] = self._column_stats[
        column_name].mergeStats(stats_counter)

return self
def merge(self, frame)
Add another DataFrame to the accumulated stats for each column.

Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
3.77852
3.48472
1.084311
for column_name, _ in self._column_stats.items():
    self._column_stats[column_name] = self._column_stats[column_name] \
        .mergeStats(other_col_counters._column_stats[column_name])

return self
def merge_stats(self, other_col_counters)
Merge statistics from a different column stats counter in to this one.

Parameters
----------
other_col_counters: Other col_stat_counter to merge in to this one.
2.65907
2.609235
1.019099