Dataset columns: sentence1 (string, 52 to 3.87M chars) · sentence2 (string, 1 to 47.2k chars) · label (string, 1 class)
def items(self) -> Tuple[Tuple[str, "Package"], ...]:  # type: ignore
    """
    Return an iterable containing package name and corresponding
    `Package` instance that are available.
    """
    item_dict = {
        name: self.build_dependencies.get(name)
        for name in self.build_dependencies
    }
    return tuple(item_dict.items())
Return an iterable containing package name and corresponding `Package` instance that are available.
entailment
def values(self) -> List["Package"]:  # type: ignore
    """
    Return an iterable of the available `Package` instances.
    """
    values = [
        self.build_dependencies.get(name)
        for name in self.build_dependencies
    ]
    return values
Return an iterable of the available `Package` instances.
entailment
def get_dependency_package(
    self, package_name: str
) -> "Package":  # type: ignore # noqa: F821
    """
    Return the dependency Package for a given package name.
    """
    self._validate_name(package_name)
    return self.build_dependencies.get(package_name)
Return the dependency Package for a given package name.
entailment
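Taken together, the three accessors above give the build-dependencies container a small dict-like read API. A minimal usage sketch, assuming `deps` is an instance of that (unnamed here) container and "owned" is a registered dependency name; both are assumptions:

for name, package in deps.items():
    print(name, package)                      # every available (name, Package) pair
packages = deps.values()                      # just the Package instances
owned = deps.get_dependency_package("owned")  # one Package; the name is validated first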
def add(app: web.Application,
        feature: Any,
        key: Hashable = None,
        exist_ok: bool = False):
    """
    Adds a new feature to the app.

    Features can either be registered as the default feature for the class,
    or be given an explicit name.

    Args:
        app (web.Application):
            The current Aiohttp application.

        feature (Any):
            The new feature that should be registered.
            It is recommended, but not required to use a `ServiceFeature`.

        key (Hashable, optional):
            The key under which the feature should be registered.
            Defaults to `type(feature)`.

        exist_ok (bool):
            If truthy, this function will do nothing if a feature was already
            registered for `key`. Otherwise, an exception is raised.
    """
    if FEATURES_KEY not in app:
        app[FEATURES_KEY] = dict()

    key = key or type(feature)

    if key in app[FEATURES_KEY]:
        if exist_ok:
            return
        else:
            raise KeyError(f'Feature "{key}" already registered')

    app[FEATURES_KEY][key] = feature
Adds a new feature to the app. Features can either be registered as the default feature for the class, or be given an explicit name.

Args:
    app (web.Application): The current Aiohttp application.
    feature (Any): The new feature that should be registered. It is recommended, but not required to use a `ServiceFeature`.
    key (Hashable, optional): The key under which the feature should be registered. Defaults to `type(feature)`.
    exist_ok (bool): If truthy, this function will do nothing if a feature was already registered for `key`. Otherwise, an exception is raised.
entailment
def get(app: web.Application,
        feature_type: Type[Any] = None,
        key: Hashable = None
        ) -> Any:
    """
    Finds declared feature. Identification is done based on feature type and key.

    Args:
        app (web.Application):
            The current Aiohttp application.

        feature_type (Type[Any]):
            The Python type of the desired feature.
            If specified, it will be checked against the found feature.

        key (Hashable):
            A specific identifier for the desired feature.
            Defaults to `feature_type`.

    Returns:
        Any: The feature found for the combination of `feature_type` and `key`.
    """
    key = key or feature_type

    if not key:
        raise AssertionError('No feature identifier provided')

    try:
        found = app[FEATURES_KEY][key]
    except KeyError:
        raise KeyError(f'No feature found for "{key}"')

    if feature_type and not isinstance(found, feature_type):
        raise AssertionError(f'Found {found} did not match type "{feature_type}"')

    return found
Finds declared feature. Identification is done based on feature type and key.

Args:
    app (web.Application): The current Aiohttp application.
    feature_type (Type[Any]): The Python type of the desired feature. If specified, it will be checked against the found feature.
    key (Hashable): A specific identifier for the desired feature. Defaults to `feature_type`.

Returns:
    Any: The feature found for the combination of `feature_type` and `key`.
entailment
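A minimal pairing of the two helpers above, assuming both functions and the FEATURES_KEY constant are in scope; the Publisher class is a hypothetical stand-in for a feature:

from aiohttp import web

class Publisher:
    """Stand-in feature class (hypothetical)."""

app = web.Application()
add(app, Publisher())                  # registered under key=type(feature) -> Publisher
publisher = get(app, Publisher)        # found by type, isinstance-checked
add(app, Publisher(), exist_ok=True)   # no-op instead of KeyError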
def validate_minimal_contract_factory_data(contract_data: Dict[str, str]) -> None:
    """
    Validate that contract data in a package contains at least an "abi" and
    "deployment_bytecode" necessary to generate a deployable contract factory.
    """
    if not all(key in contract_data.keys() for key in ("abi", "deployment_bytecode")):
        raise InsufficientAssetsError(
            "Minimum required contract data to generate a deployable "
            "contract factory (abi & deployment_bytecode) not found."
        )
Validate that contract data in a package contains at least an "abi" and "deployment_bytecode" necessary to generate a deployable contract factory.
entailment
def generate_contract_factory_kwargs(
    contract_data: Dict[str, Any]
) -> Generator[Tuple[str, Any], None, None]:
    """
    Build a dictionary of kwargs to be passed into contract factory.
    """
    if "abi" in contract_data:
        yield "abi", contract_data["abi"]

    if "deployment_bytecode" in contract_data:
        yield "bytecode", contract_data["deployment_bytecode"]["bytecode"]
        if "link_references" in contract_data["deployment_bytecode"]:
            yield "unlinked_references", tuple(
                contract_data["deployment_bytecode"]["link_references"]
            )

    if "runtime_bytecode" in contract_data:
        yield "bytecode_runtime", contract_data["runtime_bytecode"]["bytecode"]
        if "link_references" in contract_data["runtime_bytecode"]:
            yield "linked_references", tuple(
                contract_data["runtime_bytecode"]["link_references"]
            )
Build a dictionary of kwargs to be passed into contract factory.
entailment
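Since generate_contract_factory_kwargs yields (key, value) pairs, it is naturally consumed by feeding it straight into dict(). A short sketch with a hypothetical contract_data payload:

contract_data = {
    "abi": [],
    "deployment_bytecode": {"bytecode": "0x6080..."},  # hypothetical payload
}
validate_minimal_contract_factory_data(contract_data)  # raises InsufficientAssetsError if keys are missing
factory_kwargs = dict(generate_contract_factory_kwargs(contract_data))
# factory_kwargs == {'abi': [], 'bytecode': '0x6080...'}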
async def post_publish(request):
    """
    ---
    tags:
    - Events
    summary: Publish event.
    description: Publish a new event message to the event bus.
    operationId: events.publish
    produces:
    - text/plain
    parameters:
    - in: body
      name: body
      description: Event message
      required: true
      schema:
        type: object
        properties:
          exchange:
            type: string
          routing:
            type: string
          message:
            type: object
    """
    args = await request.json()
    try:
        await get_publisher(request.app).publish(
            args['exchange'],
            args['routing'],
            args['message']
        )
        return web.Response()
    except Exception as ex:
        warnings.warn(f'Unable to publish {args}: {ex}')
        return web.Response(body='Event bus connection refused', status=500)
---
tags:
- Events
summary: Publish event.
description: Publish a new event message to the event bus.
operationId: events.publish
produces:
- text/plain
parameters:
- in: body
  name: body
  description: Event message
  required: true
  schema:
    type: object
    properties:
      exchange:
        type: string
      routing:
        type: string
      message:
        type: object
entailment
async def post_subscribe(request):
    """
    ---
    tags:
    - Events
    summary: Subscribe to events.
    operationId: events.subscribe
    produces:
    - text/plain
    parameters:
    - in: body
      name: body
      description: Event message
      required: true
      schema:
        type: object
        properties:
          exchange:
            type: string
          routing:
            type: string
    """
    args = await request.json()
    get_listener(request.app).subscribe(
        args['exchange'],
        args['routing']
    )
    return web.Response()
---
tags:
- Events
summary: Subscribe to events.
operationId: events.subscribe
produces:
- text/plain
parameters:
- in: body
  name: body
  description: Event message
  required: true
  schema:
    type: object
    properties:
      exchange:
        type: string
      routing:
        type: string
entailment
async def _relay(self,
                 channel: aioamqp.channel.Channel,
                 body: str,
                 envelope: aioamqp.envelope.Envelope,
                 properties: aioamqp.properties.Properties):
    """Relays incoming messages between the queue and the user callback"""
    try:
        await channel.basic_client_ack(envelope.delivery_tag)
        await self.on_message(self, envelope.routing_key, json.loads(body))
    except Exception as ex:
        LOGGER.error(f'Exception relaying message in {self}: {ex}')
Relays incoming messages between the queue and the user callback
entailment
def _lazy_listen(self):
    """
    Ensures that the listener task only runs when actually needed.
    This function is a no-op if any of the preconditions is not met.

    Preconditions are:
    * The application is running (self._loop is set)
    * The task is not already running
    * There are subscriptions: either pending, or active
    """
    if all([
        self._loop,
        not self.running,
        self._subscriptions or (self._pending and not self._pending.empty()),
    ]):
        self._task = self._loop.create_task(self._listen())
Ensures that the listener task only runs when actually needed. This function is a no-op if any of the preconditions is not met. Preconditions are: * The application is running (self._loop is set) * The task is not already running * There are subscriptions: either pending, or active
entailment
def subscribe(self,
              exchange_name: str,
              routing: str,
              exchange_type: ExchangeType_ = 'topic',
              on_message: EVENT_CALLBACK_ = None
              ) -> EventSubscription:
    """Adds a new event subscription to the listener.

    Actual queue declaration to the remote message server is done when connected.
    If the listener is not currently connected, it defers declaration.

    All existing subscriptions are redeclared on the remote
    if `EventListener` loses and recreates the connection.

    Args:
        exchange_name (str):
            Name of the AMQP exchange. Messages are always published to a specific exchange.

        routing (str):
            Filter messages passing through the exchange.
            A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.

        exchange_type (ExchangeType_, optional):
            If the exchange does not yet exist, it will be created with this type.
            Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.

        on_message (EVENT_CALLBACK_, optional):
            The function to be called when a new message is received.
            If `on_message` is none, it will default to logging the message.

    Returns:
        EventSubscription:
            The newly created subscription.
            This value can safely be discarded: EventListener keeps its own reference.
    """
    sub = EventSubscription(
        exchange_name,
        routing,
        exchange_type,
        on_message=on_message
    )

    if self._pending is not None:
        self._pending.put_nowait(sub)
    else:
        self._pending_pre_async.append(sub)
        LOGGER.info(f'Deferred event bus subscription: [{sub}]')

    self._lazy_listen()
    return sub
Adds a new event subscription to the listener.

Actual queue declaration to the remote message server is done when connected. If the listener is not currently connected, it defers declaration.

All existing subscriptions are redeclared on the remote if `EventListener` loses and recreates the connection.

Args:
    exchange_name (str): Name of the AMQP exchange. Messages are always published to a specific exchange.
    routing (str): Filter messages passing through the exchange. A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.
    exchange_type (ExchangeType_, optional): If the exchange does not yet exist, it will be created with this type. Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.
    on_message (EVENT_CALLBACK_, optional): The function to be called when a new message is received. If `on_message` is none, it will default to logging the message.

Returns:
    EventSubscription: The newly created subscription. This value can safely be discarded: EventListener keeps its own reference.
entailment
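A subscriber-side sketch built only from the documented signature; the listener instance and the callback name are assumptions. Per _relay above, the callback is awaited with (listener, routing_key, decoded body):

async def log_state(listener, routing_key, message):
    print(routing_key, message)

sub = listener.subscribe(
    'brewcast',        # exchange name
    'controller.#',    # '.'-separated routing key; '#' wildcard
    on_message=log_state,
)
# sub may be discarded; the listener keeps its own reference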
async def publish(self,
                  exchange: str,
                  routing: str,
                  message: Union[str, dict],
                  exchange_type: ExchangeType_ = 'topic'):
    """
    Publish a new event message.

    Connections are created automatically when calling `publish()`,
    and will attempt to reconnect if connection was lost.

    For more information on publishing AMQP messages, see
    https://www.rabbitmq.com/tutorials/tutorial-three-python.html

    Args:
        exchange (str):
            The AMQP message exchange to publish the message to.
            A new exchange will be created if it does not yet exist.

        routing (str):
            The routing identification with which the message should be published.
            Subscribers use routing information for fine-grained filtering.
            Routing can be expressed as a '.'-separated path.

        message (Union[str, dict]):
            The message body. It will be serialized before transmission.

        exchange_type (ExchangeType_, optional):
            When publishing to a previously undeclared exchange, it will be created.
            `exchange_type` defines how the exchange distributes messages between subscribers.
            The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.

    Raises:
        aioamqp.exceptions.AioamqpException:
            * Failed to connect to AMQP host
            * Failed to send message
            * `exchange` already exists, but has a different `exchange_type`
    """
    try:
        await self._ensure_channel()
    except Exception:
        # If server has restarted since our last attempt, ensure channel will fail
        # (old connection invalid). Retry once to check whether a new connection can be made.
        await self._ensure_channel()

    # json.dumps() also correctly handles strings
    data = json.dumps(message).encode()

    await self._channel.exchange_declare(
        exchange_name=exchange,
        type_name=exchange_type,
        auto_delete=True
    )

    await self._channel.basic_publish(
        payload=data,
        exchange_name=exchange,
        routing_key=routing
    )
Publish a new event message.

Connections are created automatically when calling `publish()`, and will attempt to reconnect if connection was lost.

For more information on publishing AMQP messages, see https://www.rabbitmq.com/tutorials/tutorial-three-python.html

Args:
    exchange (str): The AMQP message exchange to publish the message to. A new exchange will be created if it does not yet exist.
    routing (str): The routing identification with which the message should be published. Subscribers use routing information for fine-grained filtering. Routing can be expressed as a '.'-separated path.
    message (Union[str, dict]): The message body. It will be serialized before transmission.
    exchange_type (ExchangeType_, optional): When publishing to a previously undeclared exchange, it will be created. `exchange_type` defines how the exchange distributes messages between subscribers. The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.

Raises:
    aioamqp.exceptions.AioamqpException:
        * Failed to connect to AMQP host
        * Failed to send message
        * `exchange` already exists, but has a different `exchange_type`
entailment
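The matching publisher-side sketch, again using only the documented signature (the publisher instance is an assumption):

await publisher.publish(
    'brewcast',                 # exchange, declared on first use
    'controller.state.update',  # '.'-separated routing path
    {'setpoint': 21.5},         # dict body, serialized with json.dumps()
)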
def is_ens_domain(authority: str) -> bool:
    """
    Return false if authority is not a valid ENS domain.
    """
    # check that authority ends with the tld '.eth'
    # check that there are either 2 or 3 subdomains in the authority
    # i.e. zeppelinos.eth or packages.zeppelinos.eth
    if authority[-4:] != ".eth" or len(authority.split(".")) not in [2, 3]:
        return False
    return True
Return false if authority is not a valid ENS domain.
entailment
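Doctest-style checks of the rule the function encodes:

assert is_ens_domain('zeppelinos.eth')                   # two labels
assert is_ens_domain('packages.zeppelinos.eth')          # three labels
assert not is_ens_domain('sub.packages.zeppelinos.eth')  # too many labels
assert not is_ens_domain('zeppelinos.com')               # wrong TLD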
def splitPrefix(name):
    """
    Split the name into a tuple (I{prefix}, I{name}). The first element in
    the tuple is I{None} when the name doesn't have a prefix.
    @param name: A node name containing an optional prefix.
    @type name: basestring
    @return: A tuple containing the (2) parts of I{name}
    @rtype: (I{prefix}, I{name})
    """
    if isinstance(name, basestring) and ':' in name:
        return tuple(name.split(':', 1))
    else:
        return (None, name)
Split the name into a tuple (I{prefix}, I{name}). The first element in the tuple is I{None} when the name doesn't have a prefix.
@param name: A node name containing an optional prefix.
@type name: basestring
@return: A tuple containing the (2) parts of I{name}
@rtype: (I{prefix}, I{name})
entailment
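Expected behavior, doctest-style (the surrounding code targets Python 2, hence basestring):

assert splitPrefix('soap:Envelope') == ('soap', 'Envelope')
assert splitPrefix('Envelope') == (None, 'Envelope')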
def unwrap(self, d, item):
    """ translate (unwrap) using an optional wrapper function """
    nopt = (lambda x: x)
    try:
        md = d.__metadata__
        pmd = getattr(md, '__print__', None)
        if pmd is None:
            return item
        wrappers = getattr(pmd, 'wrappers', {})
        fn = wrappers.get(item[0], nopt)
        return (item[0], fn(item[1]))
    except:
        pass
    return item
translate (unwrap) using an optional wrapper function
entailment
def exclude(self, d, item):
    """ check metadata for excluded items """
    try:
        md = d.__metadata__
        pmd = getattr(md, '__print__', None)
        if pmd is None:
            return False
        excludes = getattr(pmd, 'excludes', [])
        return (item[0] in excludes)
    except:
        pass
    return False
check metadata for excluded items
entailment
def mangle(self, name, x):
    """
    Mangle the name by hashing the I{name} and appending I{x}.
    @return: the mangled name.
    """
    h = abs(hash(name))
    return '%s-%s' % (h, x)
Mangle the name by hashing the I{name} and appending I{x}. @return: the mangled name.
entailment
def find(self, name, resolved=True):
    """
    Get the definition object for the schema object by name.
    @param name: The name of a schema object.
    @type name: basestring
    @param resolved: A flag indicating that the fully resolved type should be returned.
    @type resolved: boolean
    @return: The found schema I{type}
    @rtype: L{xsd.sxbase.SchemaObject}
    """
    #log.debug('searching schema for (%s)', name)
    qref = qualify(name, self.schema.root, self.schema.tns)
    query = BlindQuery(qref)
    result = query.execute(self.schema)
    if result is None:
        log.error('(%s) not-found', name)
        return None
    #log.debug('found (%s) as (%s)', name, Repr(result))
    if resolved:
        result = result.resolve()
    return result
Get the definition object for the schema object by name. @param name: The name of a schema object. @type name: basestring @param resolved: A flag indicating that the fully resolved type should be returned. @type resolved: boolean @return: The found schema I{type} @rtype: L{xsd.sxbase.SchemaObject}
entailment
def push(self, x):
    """
    Push an I{object} onto the stack.
    @param x: An object to push.
    @type x: L{Frame}
    @return: The pushed frame.
    @rtype: L{Frame}
    """
    if isinstance(x, Frame):
        frame = x
    else:
        frame = Frame(x)
    self.stack.append(frame)
    #log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
    return frame
Push an I{object} onto the stack. @param x: An object to push. @type x: L{Frame} @return: The pushed frame. @rtype: L{Frame}
entailment
def pop(self):
    """
    Pop the frame at the top of the stack.
    @return: The popped frame, else None.
    @rtype: L{Frame}
    """
    if len(self.stack):
        popped = self.stack.pop()
        #log.debug('pop: (%s)\n%s', Repr(popped), Repr(self.stack))
        return popped
    else:
        #log.debug('stack empty, not-popped')
        pass
    return None
Pop the frame at the top of the stack. @return: The popped frame, else None. @rtype: L{Frame}
entailment
def getchild(self, name, parent):
    """ get a child by name """
    #log.debug('searching parent (%s) for (%s)', Repr(parent), name)
    if name.startswith('@'):
        return parent.get_attribute(name[1:])
    else:
        return parent.get_child(name)
get a child by name
entailment
def query(self, name, node):
    """ blindly query the schema by name """
    #log.debug('searching schema for (%s)', name)
    qref = qualify(name, node, node.namespace())
    query = BlindQuery(qref)
    result = query.execute(self.schema)
    return (result, [])
blindly query the schema by name
entailment
def query(self, name):
    """ blindly query the schema by name """
    #log.debug('searching schema for (%s)', name)
    schema = self.schema
    wsdl = self.wsdl()
    if wsdl is None:
        qref = qualify(name, schema.root, schema.tns)
    else:
        qref = qualify(name, wsdl.root, wsdl.tns)
    query = BlindQuery(qref)
    result = query.execute(schema)
    return (result, [])
blindly query the schema by name
entailment
def resolvesoapbody(self, definitions, op):
    """
    Resolve soap body I{message} parts by
    cross-referencing with operation defined in port type.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    @param op: An I{operation} object.
    @type op: I{operation}
    """
    ptop = self.type.operation(op.name)
    if ptop is None:
        raise Exception, \
            "operation '%s' not defined in portType" % op.name
    soap = op.soap
    parts = soap.input.body.parts
    if len(parts):
        pts = []
        for p in ptop.input.parts:
            if p.name in parts:
                pts.append(p)
        soap.input.body.parts = pts
    else:
        soap.input.body.parts = ptop.input.parts
    parts = soap.output.body.parts
    if len(parts):
        pts = []
        for p in ptop.output.parts:
            if p.name in parts:
                pts.append(p)
        soap.output.body.parts = pts
    else:
        soap.output.body.parts = ptop.output.parts
Resolve soap body I{message} parts by cross-referencing with operation defined in port type. @param definitions: A definitions object. @type definitions: L{Definitions} @param op: An I{operation} object. @type op: I{operation}
entailment
def resolveheaders(self, definitions, op):
    """
    Resolve soap header I{message} references.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    @param op: An I{operation} object.
    @type op: I{operation}
    """
    soap = op.soap
    headers = soap.input.headers + soap.output.headers
    for header in headers:
        mn = header.message
        ref = qualify(mn, self.root, definitions.tns)
        message = definitions.messages.get(ref)
        if message is None:
            raise Exception, "message '%s', not-found" % mn
        pn = header.part
        for p in message.parts:
            if p.name == pn:
                header.part = p
                break
        if pn == header.part:
            raise Exception, \
                "message '%s' has no part named '%s'" % (ref, pn)
Resolve soap header I{message} references. @param definitions: A definitions object. @type definitions: L{Definitions} @param op: An I{operation} object. @type op: I{operation}
entailment
def resolvefaults(self, definitions, op):
    """
    Resolve soap fault I{message} references by
    cross-referencing with operation defined in port type.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    @param op: An I{operation} object.
    @type op: I{operation}
    """
    ptop = self.type.operation(op.name)
    if ptop is None:
        raise Exception, \
            "operation '%s' not defined in portType" % op.name
    soap = op.soap
    for fault in soap.faults:
        for f in ptop.faults:
            if f.name == fault.name:
                fault.parts = f.message.parts
                continue
        if hasattr(fault, 'parts'):
            continue
        raise Exception, \
            "fault '%s' not defined in portType '%s'" % (fault.name, self.type.name)
Resolve soap fault I{message} references by cross-referencing with operation defined in port type. @param definitions: A definitions object. @type definitions: L{Definitions} @param op: An I{operation} object. @type op: I{operation}
entailment
def find(self, location):
    """
    Find the specified location in the store.
    @param location: The I{location} part of a URL.
    @type location: str
    @return: An input stream to the document.
    @rtype: StringIO
    """
    try:
        content = self.store[location]
        return StringIO(content)
    except:
        reason = 'location "%s" not in document store' % location
        raise Exception, reason
Find the specified location in the store. @param location: The I{location} part of a URL. @type location: str @return: An input stream to the document. @rtype: StringIO
entailment
def publictypes(self):
    """ get all public types """
    for t in self.wsdl.schema.types.values():
        if t in self.params:
            continue
        if t in self.types:
            continue
        item = (t, t)
        self.types.append(item)
    tc = lambda x, y: cmp(x[0].name, y[0].name)
    self.types.sort(cmp=tc)
get all public types
entailment
def encode(self, s):
    """
    Encode special characters found in string I{s}.
    @param s: A string to encode.
    @type s: str
    @return: The encoded string.
    @rtype: str
    """
    if isinstance(s, basestring) and self.needsEncoding(s):
        for x in self.encodings:
            s = s.replace(x[0], x[1])
    return s
Encode special characters found in string I{s}. @param s: A string to encode. @type s: str @return: The encoded string. @rtype: str
entailment
def decode(self, s):
    """
    Decode special characters encodings found in string I{s}.
    @param s: A string to decode.
    @type s: str
    @return: The decoded string.
    @rtype: str
    """
    if isinstance(s, basestring) and '&' in s:
        for x in self.decodings:
            s = s.replace(x[0], x[1])
    return s
Decode special characters encodings found in string I{s}. @param s: A string to decode. @type s: str @return: The decoded string. @rtype: str
entailment
def parse(self, file=None, string=None):
    """
    SAX parse XML text.
    @param file: Parse a python I{file-like} object.
    @type file: I{file-like} object.
    @param string: Parse string XML.
    @type string: str
    """
    timer = metrics.Timer()
    timer.start()
    sax, handler = self.saxparser()
    if file is not None:
        sax.parse(file)
        timer.stop()
        metrics.log.debug('sax (%s) duration: %s', file, timer)
        return handler.nodes[0]
    if string is not None:
        source = InputSource(None)
        source.setByteStream(StringIO(string))
        sax.parse(source)
        timer.stop()
        metrics.log.debug('%s\nsax duration: %s', string, timer)
        return handler.nodes[0]
SAX parse XML text. @param file: Parse a python I{file-like} object. @type file: I{file-like} object. @param string: Parse string XML. @type string: str
entailment
def get_message(self, method, args, kwargs, options=None):
    """
    Get the soap message for the specified method, args and soapheaders.
    This is the entry point for creating the outbound soap message.
    @param method: The method being invoked.
    @type method: I{service.Method}
    @param args: A list of args for the method invoked.
    @type args: list
    @param kwargs: Named (keyword) args for the method invoked.
    @type kwargs: dict
    @return: The soap envelope.
    @rtype: L{Document}
    """
    content = self.headercontent(method, options=options)
    header = self.header(content)
    content = self.bodycontent(method, args, kwargs)
    body = self.body(content)
    env = self.envelope(header, body)
    if self.options().prefixes:
        body.normalizePrefixes()
        env.promotePrefixes()
    else:
        env.refitPrefixes()
    return Document(env)
Get the soap message for the specified method, args and soapheaders. This is the entry point for creating the outbound soap message. @param method: The method being invoked. @type method: I{service.Method} @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The soap envelope. @rtype: L{Document}
entailment
def invoke(self, args, kwargs):
    """
    Send the required soap message to invoke the specified method
    @param args: A list of args for the method invoked.
    @type args: list
    @param kwargs: Named (keyword) args for the method invoked.
    @type kwargs: dict
    @return: The result of the method invocation.
    @rtype: I{builtin}|I{subclass of} L{Object}
    """
    timer = metrics.Timer()
    timer.start()
    result = None
    binding = self.method.binding.input
    soapenv = binding.get_message(self.method, args, kwargs,
                                  options=self.options)
    timer.stop()
    metrics.log.debug("message for '%s' created: %s",
                      self.method.name, timer)
    timer.start()
    result = self.send(soapenv)
    timer.stop()
    metrics.log.debug("method '%s' invoked: %s",
                      self.method.name, timer)
    return result
Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin}|I{subclass of} L{Object}
entailment
def send(self, soapenv):
    """
    Send soap message.
    @param soapenv: A soap envelope to send.
    @type soapenv: L{Document}
    @return: The reply to the sent message.
    @rtype: I{builtin} or I{subclass of} L{Object}
    """
    result = None
    location = self.location()
    binding = self.method.binding.input
    transport = self.options.transport
    retxml = self.options.retxml
    prettyxml = self.options.prettyxml
    log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
    try:
        self.last_sent(soapenv)
        plugins = PluginContainer(self.options.plugins)
        plugins.message.marshalled(envelope=soapenv.root())
        if prettyxml:
            soapenv = soapenv.str()
        else:
            soapenv = soapenv.plain()
        soapenv = soapenv.encode('utf-8')
        plugins.message.sending(envelope=soapenv)
        request = Request(location, soapenv)
        request.headers = self.headers()
        reply = transport.send(request)
        ctx = plugins.message.received(reply=reply.message)
        reply.message = ctx.reply
        if retxml:
            result = reply.message
        else:
            timer = metrics.Timer()
            timer.start()
            result = self.succeeded(binding, reply.message)
            #cProfile.runctx("result = self.succeeded(binding, reply.message)",
            #                globals(), locals(), "unmarshal_prof")
            timer.stop()
            metrics.log.debug("succeeded took: %s", timer)
    except TransportError, e:
        if e.httpcode in (202, 204):
            result = None
        else:
            log.error(self.last_sent())
            result = self.failed(binding, e)
    return result
Send soap message. @param soapenv: A soap envelope to send. @type soapenv: L{Document} @return: The reply to the sent message. @rtype: I{builtin} or I{subclass of} L{Object}
entailment
def headers(self):
    """
    Get http headers for the http/https request.
    @return: A dictionary of header/values.
    @rtype: dict
    """
    action = self.method.soap.action
    stock = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': action,
    }
    result = dict(stock, **self.options.headers)
    log.debug('headers = %s', result)
    return result
Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
entailment
def succeeded(self, binding, reply):
    """
    Request succeeded, process the reply
    @param binding: The binding to be used to process the reply.
    @type binding: L{bindings.binding.Binding}
    @param reply: The raw reply text.
    @type reply: str
    @return: The method result.
    @rtype: I{builtin}, L{Object}
    @raise WebFault: On server.
    """
    log.debug('http succeeded:\n%s', reply)
    plugins = PluginContainer(self.options.plugins)
    if len(reply) > 0:
        with LocalTimer() as lt:
            reply, result = binding.get_reply(self.method, reply)
        self.last_received(reply)
        metrics.log.debug("Calling binding.get_reply took: %.03f" % lt.interval)
    else:
        result = None
    ctx = plugins.message.unmarshalled(reply=result)
    result = ctx.reply
    if self.options.faults:
        return result
    else:
        return (200, result)
Request succeeded, process the reply @param binding: The binding to be used to process the reply. @type binding: L{bindings.binding.Binding} @param reply: The raw reply text. @type reply: str @return: The method result. @rtype: I{builtin}, L{Object} @raise WebFault: On server.
entailment
def failed(self, binding, error):
    """
    Request failed, process reply based on reason
    @param binding: The binding to be used to process the reply.
    @type binding: L{suds.bindings.binding.Binding}
    @param error: The http error message
    @type error: L{transport.TransportError}
    """
    status, reason = (error.httpcode, tostr(error))
    reply = error.fp.read()
    log.debug('http failed:\n%s', reply)
    if status == 500:
        if len(reply) > 0:
            r, p = binding.get_fault(reply)
            self.last_received(r)
            return (status, p)
        else:
            return (status, None)
    if self.options.faults:
        raise HttpWebFault(status, reason)
    else:
        return (status, None)
Request failed, process reply based on reason @param binding: The binding to be used to process the reply. @type binding: L{suds.bindings.binding.Binding} @param error: The http error message @type error: L{transport.TransportError}
entailment
def _get_line_no_from_comments(py_line):
    """Return the line number parsed from the comment or 0."""
    matched = LINECOL_COMMENT_RE.match(py_line)
    if matched:
        return int(matched.group(1))
    else:
        return 0
Return the line number parsed from the comment or 0.
entailment
def _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no):
    """Searches before and after in the python source to find comments
    which denote cheetah line numbers.

    If a lower bound is not found, 0 is substituted.
    If an upper bound is not found, len(cheetah lines) is returned.
    The result is a lower-inclusive upper-exclusive range: [..., ...)
    """
    # Find lower bound
    for line_no in range(py_line_no, 0, -1):
        lower_bound = _get_line_no_from_comments(py_by_line_no[line_no])
        if lower_bound != 0:
            break
    else:
        lower_bound = 0

    # Find upper bound
    for line_no in range(py_line_no, len(py_by_line_no)):
        upper_bound = _get_line_no_from_comments(py_by_line_no[line_no])
        if upper_bound != 0:
            # Since we'll eventually be building a range(), let's make this
            # the non-inclusive upper-bound
            upper_bound += 1
            break
    else:
        upper_bound = len(cheetah_by_line_no)

    return lower_bound, upper_bound
Searches before and after in the python source to find comments which denote cheetah line numbers. If a lower bound is not found, 0 is substituted. If an upper bound is not found, len(cheetah lines) is returned. The result is a lower-inclusive upper-exclusive range: [..., ...)
entailment
def _find_fuzzy_line(
        py_line_no, py_by_line_no, cheetah_by_line_no, prefer_first
):
    """Attempt to fuzzily find matching lines."""
    stripped_line = _fuzz_py_line(py_by_line_no[py_line_no])
    cheetah_lower_bound, cheetah_upper_bound = _find_bounds(
        py_line_no, py_by_line_no, cheetah_by_line_no,
    )
    sliced = list(enumerate(cheetah_by_line_no))[
        cheetah_lower_bound:cheetah_upper_bound
    ]
    if not prefer_first:
        sliced = reversed(sliced)

    for line_no, line in sliced:
        if stripped_line in _fuzz_cheetah_line(line):
            return line_no
    else:
        # We've failed to find a matching line
        return 0
Attempt to fuzzily find matching lines.
entailment
def perform_step(file_contents, step):
    """Performs a step of the transformation.

    :param text file_contents: Contents of the cheetah template
    :param function step: Function taking xmldoc and returning new contents
    :returns: new contents of the file.
    """
    assert type(file_contents) is not bytes
    xmldoc = parse(file_contents)
    return step(xmldoc)
Performs a step of the transformation.
:param text file_contents: Contents of the cheetah template
:param function step: Function taking xmldoc and returning new contents
:returns: new contents of the file.
entailment
def _get_sender(*sender_params, **kwargs):
    """
    Utility function acting as a Sender factory - ensures senders
    don't get created twice or more for the same target server
    """
    notify_func = kwargs['notify_func']
    with _sender_instances_lock:
        existing_sender = _sender_instances.get(sender_params, None)
        if existing_sender:
            sender = existing_sender
            sender._notify = notify_func
        else:
            sender = _Sender(*sender_params, notify=notify_func)
            _sender_instances[sender_params] = sender
    return sender
Utility function acting as a Sender factory - ensures senders don't get created twice or more for the same target server
entailment
def terminate():
    """
    Stops all the active Senders by flushing the buffers
    and closing the underlying sockets
    """
    with _sender_instances_lock:
        for sender_key, sender in _sender_instances.items():
            sender.close()
        _sender_instances.clear()
Stops all the active Senders by flushing the buffers and closing the underlying sockets
entailment
def _format_event(self, orig_event, external_metadata=None):
    """
    Format the event to the expected Alooma format, packing it into a
    message field and adding metadata

    :param orig_event:        The original event that was sent, should be
                              dict, str or unicode.
    :param external_metadata: (Optional) a dict containing metadata to add
                              to the event
    :return: a dict with the original event in a 'message' field and all
             the supplied metadata
    """
    event_wrapper = {}

    # Add ISO 8601 timestamp and frame info
    timestamp = datetime.datetime.utcnow().isoformat()
    event_wrapper[consts.WRAPPER_REPORT_TIME] = timestamp

    # Add the enclosing frame
    frame = inspect.currentframe().f_back.f_back
    filename = frame.f_code.co_filename
    line_number = frame.f_lineno
    event_wrapper[consts.WRAPPER_CALLING_FILE] = str(filename)
    event_wrapper[consts.WRAPPER_CALLING_LINE] = str(line_number)

    # Add the UUID to the event
    event_wrapper[consts.WRAPPER_UUID] = str(uuid.uuid4())

    # Try to set event type. If it throws, put the input label
    try:
        event_wrapper[consts.WRAPPER_EVENT_TYPE] = \
            self._get_event_type(orig_event)
    except Exception:
        pass  # The event type will be the input name, added by Alooma

    # Optionally add external metadata
    if external_metadata and isinstance(external_metadata, dict):
        event_wrapper.update(external_metadata)

    # Wrap the event with metadata
    event_wrapper[consts.WRAPPER_MESSAGE] = orig_event

    return json_dumps(event_wrapper)
Format the event to the expected Alooma format, packing it into a message field and adding metadata :param orig_event: The original event that was sent, should be dict, str or unicode. :param external_metadata: (Optional) a dict containing metadata to add to the event :return: a dict with the original event in a 'message' field and all the supplied metadata
entailment
def report(self, event, metadata=None, block=None):
    """
    Reports an event to Alooma by formatting it properly and placing it
    in the buffer to be sent by the Sender instance

    :param event:    A dict / string representing an event
    :param metadata: (Optional) A dict with extra metadata to be attached
                     to the event
    :param block:    (Optional) If True, the function will block the thread
                     until the event buffer has space for the event.
                     If False, reported events are discarded if the queue
                     is full. Defaults to None, which uses the global
                     `block` parameter given in the `init`.
    :return: True if the event was successfully enqueued, else False
    """
    # Don't allow reporting if the underlying sender is terminated
    if self._sender.is_terminated:
        self._notify(logging.ERROR,
                     consts.LOG_MSG_REPORT_AFTER_TERMINATION)
        return False

    # Send the event to the queue if it is a dict or a string.
    if isinstance(event, (dict,) + py2to3.basestring):
        formatted_event = self._format_event(event, metadata)
        should_block = block if block is not None else self.is_blocking
        return self._sender.enqueue_event(formatted_event, should_block)
    else:
        # Event is not a dict nor a string. Deny it.
        error_message = (consts.LOG_MSG_BAD_EVENT % (type(event), event))
        self._notify(logging.ERROR, error_message)
        return False
Reports an event to Alooma by formatting it properly and placing it in the buffer to be sent by the Sender instance :param event: A dict / string representing an event :param metadata: (Optional) A dict with extra metadata to be attached to the event :param block: (Optional) If True, the function will block the thread until the event buffer has space for the event. If False, reported events are discarded if the queue is full. Defaults to None, which uses the global `block` parameter given in the `init`. :return: True if the event was successfully enqueued, else False
entailment
def report_many(self, event_list, metadata=None, block=None):
    """
    Reports all the given events to Alooma by formatting them properly and
    placing them in the buffer to be sent by the Sender instance

    :param event_list: A list of dicts / strings representing events
    :param metadata:   (Optional) A dict with extra metadata to be attached
                       to the event
    :param block:      (Optional) If True, the function will block the
                       thread until the event buffer has space for the
                       event. If False, reported events are discarded if
                       the queue is full. Defaults to None, which uses the
                       global `block` parameter given in the `init`.
    :return: A list with tuples, each containing a failed event and its
             original index. An empty list means success
    """
    failed_list = []
    for index, event in enumerate(event_list):
        queued_successfully = self.report(event, metadata, block)
        if not queued_successfully:
            failed_list.append((index, event))
    return failed_list
Reports all the given events to Alooma by formatting them properly and placing them in the buffer to be sent by the Sender instance :param event_list: A list of dicts / strings representing events :param metadata: (Optional) A dict with extra metadata to be attached to the event :param block: (Optional) If True, the function will block the thread until the event buffer has space for the event. If False, reported events are discarded if the queue is full. Defaults to None, which uses the global `block` parameter given in the `init`. :return: A list with tuples, each containing a failed event and its original index. An empty list means success
entailment
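A sketch of the reporting flow, assuming sdk is an already-initialized instance of the class these methods belong to (the variable name is an assumption):

events = [{'user': 'a'}, {'user': 'b'}, 'plain-string-event']
failed = sdk.report_many(events, metadata={'env': 'staging'}, block=True)
for index, event in failed:
    # each tuple is (original index, event); an empty list means all were enqueued
    print('could not enqueue event #%d: %r' % (index, event))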
def _notify(self, log_level, message):
    """
    Calls the callback function and logs messages using the PySDK logger

    :param log_level: An integer representing the log level, as specified
                      in the Python `logging` library
    :param message:   The actual message to be sent to the logger and the
                      `callback` function
    """
    timestamp = datetime.datetime.utcnow()
    logger.log(log_level, str(message))
    try:
        self._callback(log_level, message, timestamp)
    except Exception as ex:
        logger.warning(consts.LOG_MSG_CALLBACK_FAILURE % str(ex))
Calls the callback function and logs messages using the PySDK logger :param log_level: An integer representing the log level, as specified in the Python `logging` library :param message: The actual message to be sent to the logger and the `callback` function
entailment
def _choose_host(self):
    """
    This method randomly chooses a server from the server list given as a
    parameter to the parent PythonSDK

    :return: The selected host to which the Sender will attempt to connect
    """
    # If a host hasn't been chosen yet or there is only one host
    if len(self._hosts) == 1 or self._http_host is None:
        self._http_host = self._hosts[0]
    else:
        # There is a list of hosts to choose from, pick a random one
        choice = self._http_host
        while choice == self._http_host:
            choice = random.choice(self._hosts)
        self._http_host = choice
        self._notify(logging.INFO,
                     consts.LOG_MSG_NEW_SERVER % self._http_host)

    # Set the validation and the REST URLs
    secure = 's' if self._use_ssl else ''
    self._connection_validation_url = \
        consts.CONN_VALIDATION_URL_TEMPLATE.format(host=self._http_host,
                                                   secure=secure)
    self._rest_url = consts.REST_URL_TEMPLATE.format(host=self._http_host,
                                                     token=self._token,
                                                     secure=secure)
    self._token_verification_url = \
        consts.TOKEN_VERIFICATION_URL_TEMPLATE.format(host=self._http_host,
                                                      token=self._token,
                                                      secure=secure)
This method randomly chooses a server from the server list given as a parameter to the parent PythonSDK :return: The selected host to which the Sender will attempt to connect
entailment
def _verify_connection(self):
    """
    Checks availability of the Alooma server

    :return: If the server is reachable, returns True
    :raises: If connection fails, raises exceptions.ConnectionFailed
    """
    try:
        res = self._session.get(self._connection_validation_url, json={})
        logger.debug(consts.LOG_MSG_VERIFYING_CONNECTION,
                     self._connection_validation_url,
                     res if res else 'No result from backend')
        if not res.ok:
            raise requests.exceptions.RequestException(res.content)
        remote_batch_size = res.json().get(consts.MAX_REQUEST_SIZE_FIELD,
                                           consts.DEFAULT_BATCH_SIZE)
        if remote_batch_size < self._batch_max_size:
            self._batch_max_size = remote_batch_size
            self._notify(logging.INFO,
                         consts.LOG_MSG_NEW_BATCH_SIZE % remote_batch_size)
        self._is_connected.set()
        return True
    except requests.exceptions.RequestException as ex:
        msg = consts.LOG_MSG_CONNECTION_FAILED % str(ex)
        self._notify(logging.ERROR, msg)
        raise exceptions.ConnectionFailed(msg)
Checks availability of the Alooma server :return: If the server is reachable, returns True :raises: If connection fails, raises exceptions.ConnectionFailed
entailment
def _verify_token(self):
    """
    Verifies the validity of the token against the remote server

    :return: True if the token is valid, else raises exceptions.BadToken
    """
    res = self._session.get(self._token_verification_url)
    if not res.ok:
        raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)
    return True
Verifies the validity of the token against the remote server :return: True if the token is valid, else raises exceptions.BadToken
entailment
def _send_batch(self, batch):
    """Sends a batch to the destination server via HTTP REST API"""
    try:
        json_batch = '[' + ','.join(batch) + ']'  # Make JSON array string
        logger.debug(consts.LOG_MSG_SENDING_BATCH, len(batch),
                     len(json_batch), self._rest_url)
        res = self._session.post(self._rest_url, data=json_batch,
                                 headers=consts.CONTENT_TYPE_JSON)
        logger.debug(consts.LOG_MSG_BATCH_SENT_RESULT,
                     res.status_code, res.content)
        if res.status_code == 400:
            self._notify(logging.CRITICAL, consts.LOG_MSG_BAD_TOKEN)
            raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)
        elif not res.ok:
            raise exceptions.SendFailed(
                "Got bad response code - %s: %s" %
                (res.status_code, res.content if res.content else 'No info'))
    except broken_pipe_errors as ex:
        self._is_connected.clear()
        raise exceptions.BatchTooBig(consts.LOG_MSG_BATCH_TOO_BIG % str(ex))
    except requests.exceptions.RequestException as ex:
        raise exceptions.SendFailed(str(ex))
Sends a batch to the destination server via HTTP REST API
entailment
def _sender_main(self):
    """
    Runs on a pysdk_sender_thread and handles sending events to the Alooma
    server. Events are sent every <self._batch_interval> seconds or
    whenever batch size reaches <self._batch_size>
    """
    if not self._http_host:
        self._choose_host()
    last_batch_time = datetime.datetime.utcnow()

    while not (self._is_terminated.is_set() and self._event_queue.empty()):
        batch = None
        try:
            if not self._is_connected.is_set():
                self._verify_connection()
            batch = self._get_batch(last_batch_time)
            self._send_batch(batch)
        except exceptions.ConnectionFailed:
            # Failed to connect to server
            time.sleep(consts.NO_CONNECTION_SLEEP_TIME)
            self._is_connected.clear()
        except exceptions.EmptyBatch:
            # No events in queue, go to sleep
            time.sleep(consts.EMPTY_BATCH_SLEEP_TIME)
        except exceptions.SendFailed as ex:
            # Failed to send an event batch
            self._notify(ex.severity, str(ex))
            self._is_connected.clear()
            if batch:
                # Failed after pulling a batch from the queue
                self._enqueue_batch(batch)
                logger.debug(consts.LOG_MSG_ENQUEUED_FAILED_BATCH, len(batch))
        else:
            # We sent a batch successfully, server is reachable
            self._is_connected.set()
        finally:
            # Advance last batch time
            last_batch_time = datetime.datetime.utcnow()
Runs on a pysdk_sender_thread and handles sending events to the Alooma server. Events are sent every <self._batch_interval> seconds or whenever batch size reaches <self._batch_size>
entailment
def enqueue_event(self, event, block):
    """
    Enqueues an event in the buffer to be sent to the Alooma server

    :param event: A dict representing a formatted event to be sent by
                  the sender
    :param block: Whether or not we should block if the event buffer
                  is full
    :return: True if the event was enqueued successfully, else False
    """
    try:
        self._event_queue.put_nowait(event)
        if self._notified_buffer_full:
            # Non-blocking and buffer was full
            self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FREED)
            self._notified_buffer_full = False
    except py2to3.queue.Full:
        if block:
            # Blocking - should block until space is freed
            self._event_queue.put(event)
        elif not self._notified_buffer_full:
            # Don't block, msg not emitted
            self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FULL)
            self._notified_buffer_full = True
            return False
    return True
Enqueues an event in the buffer to be sent to the Alooma server :param event: A dict representing a formatted event to be sent by the sender :param block: Whether or not we should block if the event buffer is full :return: True if the event was enqueued successfully, else False
entailment
def __get_event(self, block=True, timeout=1):
    """
    Retrieves an event. If self._exceeding_event is not None, it'll be
    returned. Otherwise, an event is dequeued from the event buffer.
    If the event which was retrieved is bigger than the permitted batch
    size, it'll be omitted, and the next event in the event buffer is
    returned
    """
    while True:
        if self._exceeding_event:
            # An event was omitted from last batch
            event = self._exceeding_event
            self._exceeding_event = None
        else:
            # No omitted event, get an event from the queue
            event = self._event_queue.get(block, timeout)

        event_size = len(event)
        # If the event is bigger than the permitted batch size, ignore it.
        # The ( - 2 ) accounts for the parentheses enclosing the batch
        if event_size - 2 >= self._batch_max_size:
            self._notify(logging.WARNING,
                         consts.LOG_MSG_OMITTED_OVERSIZED_EVENT % event_size)
        else:
            # Event is of valid size, return it
            return event
Retrieves an event. If self._exceeding_event is not None, it'll be returned. Otherwise, an event is dequeued from the event buffer. If The event which was retrieved is bigger than the permitted batch size, it'll be omitted, and the next event in the event buffer is returned
entailment
def get_links(self, request=None):
    """
    Return a dictionary containing all the links that should be
    included in the API schema.
    """
    links = LinkNode()

    # Generate (path, method, view) given (path, method, callback).
    paths = []
    view_endpoints = []
    for path, method, callback in self.endpoints:
        view = self.create_view(callback, method, request)
        if getattr(view, 'exclude_from_schema', False):
            continue
        path = self.coerce_path(path, method, view)
        paths.append(path)
        view_endpoints.append((path, method, view))

    # Only generate the path prefix for paths that will be included
    if not paths:
        return None
    prefix = self.determine_path_prefix(paths)

    for path, method, view in view_endpoints:
        if not self.has_view_permissions(path, method, view):
            continue
        link = self.get_link(path, method, view,
                             version=getattr(request, 'version', None))
        subpath = path[len(prefix):]
        keys = self.get_keys(subpath, method, view)
        try:
            insert_into(links, keys, link)
        except Exception:
            continue
    return links
Return a dictionary containing all the links that should be included in the API schema.
entailment
def get_path_fields(self, path, method, view):
    """
    Return a list of `coreapi.Field` instances corresponding to any
    templated path variables.
    """
    model = getattr(getattr(view, 'queryset', None), 'model', None)
    fields = []

    for variable in uritemplate.variables(path):
        if variable == 'version':
            continue

        title = ''
        description = ''
        schema_cls = coreschema.String
        kwargs = {}
        if model is not None:
            # Attempt to infer a field description if possible.
            try:
                model_field = model._meta.get_field(variable)
            except:
                model_field = None

            if model_field is not None and model_field.verbose_name:
                title = force_text(model_field.verbose_name)

            if model_field is not None and model_field.help_text:
                description = force_text(model_field.help_text)
            elif model_field is not None and model_field.primary_key:
                description = get_pk_description(model, model_field)

            if hasattr(view, 'lookup_value_regex') and view.lookup_field == variable:
                kwargs['pattern'] = view.lookup_value_regex
            elif isinstance(model_field, models.AutoField):
                schema_cls = coreschema.Integer

        field = Field(
            name=variable,
            location='path',
            required=True,
            schema=schema_cls(title=title, description=description, **kwargs)
        )
        fields.append(field)

    return fields
Return a list of `coreapi.Field` instances corresponding to any templated path variables.
entailment
def get_serializer_class(self, view, method_func):
    """
    Try to get the serializer class from the view method. If the view
    method doesn't have a request serializer, fall back to
    `serializer_class` on the view class.
    """
    if hasattr(method_func, 'request_serializer'):
        return getattr(method_func, 'request_serializer')
    if hasattr(view, 'serializer_class'):
        return getattr(view, 'serializer_class')
    if hasattr(view, 'get_serializer_class'):
        return view.get_serializer_class()
    return None
Try to get the serializer class from the view method. If the view method doesn't have a request serializer, fall back to `serializer_class` on the view class.
entailment
def fallback_schema_from_field(self, field):
    """
    Fallback schema for a field that isn't inspected properly by DRF and
    probably won't land in upstream canon due to its hacky nature; only
    for doc purposes.
    """
    title = force_text(field.label) if field.label else ''
    description = force_text(field.help_text) if field.help_text else ''
    # since we can't really inspect DictField and JSONField, at least
    # display object as type instead of string
    if isinstance(field, (serializers.DictField, serializers.JSONField)):
        return coreschema.Object(
            properties={},
            title=title,
            description=description
        )
Fallback schema for a field that isn't inspected properly by DRF and probably won't land in upstream canon due to its hacky nature; only for doc purposes.
entailment
def get_serializer_fields(self, path, method, view, version=None, method_func=None):
    """
    Return a list of `coreapi.Field` instances corresponding to any
    request body input, as determined by the serializer class.
    """
    if method in ('PUT', 'PATCH', 'POST'):
        location = 'form'
    else:
        location = 'query'

    serializer_class = self.get_serializer_class(view, method_func)
    if not serializer_class:
        return []
    serializer = serializer_class()

    if isinstance(serializer, serializers.ListSerializer):
        return [
            Field(
                name='data',
                location=location,
                required=True,
                schema=coreschema.Array()
            )
        ]

    if not isinstance(serializer, serializers.Serializer):
        return []

    fields = []
    for field in serializer.fields.values():
        if field.read_only or isinstance(field, serializers.HiddenField):
            continue

        required = field.required and method != 'PATCH'
        # if the attribute ('help_text') of this field is a lazy translation
        # object, force it to generate a string
        description = str(field.help_text) if isinstance(field.help_text, Promise) else field.help_text
        fallback_schema = self.fallback_schema_from_field(field)

        field = Field(
            name=field.field_name,
            location=location,
            required=required,
            schema=fallback_schema if fallback_schema else field_to_schema(field),
            description=description,
        )
        fields.append(field)

    return fields
Return a list of `coreapi.Field` instances corresponding to any request body input, as determined by the serializer class.
entailment
def update(self, instance, validated_data):
    """
    Update and return an existing `Snippet` instance, given the validated data.
    """
    instance.title = validated_data.get('title', instance.title)
    instance.code = validated_data.get('code', instance.code)
    instance.linenos = validated_data.get('linenos', instance.linenos)
    instance.language = validated_data.get('language', instance.language)
    instance.style = validated_data.get('style', instance.style)
    instance.save()
    return instance
Update and return an existing `Snippet` instance, given the validated data.
entailment
def _generate_openapi_object(document):
    """
    Generates root of the Swagger spec.
    """
    parsed_url = urlparse.urlparse(document.url)

    swagger = OrderedDict()

    swagger['swagger'] = '2.0'
    swagger['info'] = OrderedDict()
    swagger['info']['title'] = document.title
    swagger['info']['description'] = document.description
    swagger['info']['version'] = document.version

    if parsed_url.netloc:
        swagger['host'] = parsed_url.netloc
    if parsed_url.scheme:
        swagger['schemes'] = [parsed_url.scheme]

    swagger['paths'] = _get_paths_object(document)

    return swagger
Generates root of the Swagger spec.
entailment
def _get_responses(link):
    """
    Returns an OpenApi-compliant response
    """
    template = link.response_schema
    template.update({'description': 'Success'})
    res = {200: template}
    res.update(link.error_status_codes)
    return res
Returns an OpenApi-compliant response
entailment
def _get_parameters(link, encoding):
    """
    Generates Swagger Parameter Item object.
    """
    parameters = []
    properties = {}
    required = []
    for field in link.fields:
        parser = OpenApiFieldParser(link, field)
        if parser.location == 'form':
            if encoding in ('multipart/form-data', 'application/x-www-form-urlencoded'):
                # 'formData' in swagger MUST be one of these media types.
                parameters.append(parser.as_parameter())
            else:
                # Expand coreapi fields with location='form' into a single
                # swagger parameter, with a schema containing multiple properties.
                properties[field.name] = parser.as_schema_property()
                if field.required:
                    required.append(field.name)
        elif parser.location == 'body':
            parameters.append(parser.as_body_parameter(encoding))
        else:
            parameters.append(parser.as_parameter())

    if properties:
        parameter = {
            'name': 'data',
            'in': 'body',
            'schema': {
                'type': 'object',
                'properties': properties
            }
        }
        if required:
            parameter['schema']['required'] = required
        parameters.append(parameter)

    return parameters
Generates Swagger Parameter Item object.
entailment
def auth_uri(self, redirect_uri=None, scope=None, scope_delim=None,
             state=None, **kwargs):
    """ Builds the auth URI for the authorization endpoint

    :param scope: (optional) The `scope` parameter to pass for
                  authorization. The format should match that expected by
                  the provider (i.e. Facebook expects comma-delimited,
                  while Google expects space-delimited)
    :param state: (optional) The `state` parameter to pass for
                  authorization. If the provider follows the OAuth 2.0
                  spec, this will be returned to your `redirect_uri` after
                  authorization. Generally used for CSRF protection.
    :param **kwargs: Any other querystring parameters to be passed to the
                     provider.
    """
    kwargs.update({
        'client_id': self.client_id,
        'response_type': 'code',
    })

    if scope is not None:
        kwargs['scope'] = scope

    if state is not None:
        kwargs['state'] = state

    if redirect_uri is not None:
        kwargs['redirect_uri'] = redirect_uri

    return '%s?%s' % (self.auth_endpoint, urlencode(kwargs))
Builds the auth URI for the authorization endpoint :param scope: (optional) The `scope` parameter to pass for authorization. The format should match that expected by the provider (i.e. Facebook expects comma-delimited, while Google expects space-delimited) :param state: (optional) The `state` parameter to pass for authorization. If the provider follows the OAuth 2.0 spec, this will be returned to your `redirect_uri` after authorization. Generally used for CSRF protection. :param **kwargs: Any other querystring parameters to be passed to the provider.
entailment
def request_token(self, parser=None, redirect_uri=None, **kwargs):
    """ Request an access token from the token endpoint.
    This is largely a helper method and expects the client code to
    understand what the server expects. Anything that's passed into
    ``**kwargs`` will be sent (``urlencode``d) to the endpoint. Client
    secret and client ID are automatically included, so are not required
    as kwargs. For example::

        # if requesting access token from auth flow:
        {
            'code': rval_from_auth,
        }

        # if refreshing access token:
        {
            'refresh_token': stored_refresh_token,
            'grant_type': 'refresh_token',
        }

    :param parser: Callback to deal with returned data. Not all providers
                   use JSON.
    """
    kwargs = kwargs and kwargs or {}
    parser = parser or _default_parser

    kwargs.update({
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'grant_type': 'grant_type' in kwargs and kwargs['grant_type']
                      or 'authorization_code'
    })
    if redirect_uri is not None:
        kwargs.update({'redirect_uri': redirect_uri})

    # TODO: maybe raise an exception here if status code isn't 200?
    msg = urlopen(self.token_endpoint,
                  urlencode(kwargs).encode('utf-8'))
    data = parser(msg.read().decode(
        msg.info().get_content_charset() or 'utf-8'))

    for key in data:
        setattr(self, key, data[key])

    # expires_in is RFC-compliant. if anything else is used by the
    # provider, token_expires must be set manually
    if hasattr(self, 'expires_in'):
        try:
            # python3 doesn't support long
            seconds = long(self.expires_in)
        except:
            seconds = int(self.expires_in)
        self.token_expires = mktime((datetime.utcnow() +
                                     timedelta(seconds=seconds)).timetuple())
Request an access token from the token endpoint. This is largely a helper method and expects the client code to understand what the server expects. Anything that's passed into ``**kwargs`` will be sent (``urlencode``d) to the endpoint. Client secret and client ID are automatically included, so are not required as kwargs. For example:: # if requesting access token from auth flow: { 'code': rval_from_auth, } # if refreshing access token: { 'refresh_token': stored_refresh_token, 'grant_type': 'refresh_token', } :param parser: Callback to deal with returned data. Not all providers use JSON.
entailment
def request(self, url, method=None, data=None, headers=None, parser=None):
    """ Request user data from the resource endpoint
    :param url: The path to the resource and querystring if required
    :param method: HTTP method. Defaults to ``GET`` unless data is not
                   None, in which case it defaults to ``POST``
    :param data: Data to be POSTed to the resource endpoint
    :param headers: (optional) Additional headers to send with the request
    :param parser: Parser callback to deal with the returned data.
                   Defaults to ``json.loads``.
    """
    assert self.access_token is not None
    parser = parser or loads

    if not method:
        method = 'GET' if not data else 'POST'

    req = self.token_transport('{0}{1}'.format(self.resource_endpoint,
                                               url), self.access_token,
                               data=data, method=method, headers=headers)

    resp = urlopen(req)
    data = resp.read()
    try:
        # try to decode it first using either the content charset, falling
        # back to utf-8
        return parser(data.decode(resp.info().get_content_charset() or
                                  'utf-8'))
    except UnicodeDecodeError:
        # if we've gotten a decoder error, the calling code better know how
        # to deal with it. some providers (i.e. stackexchange) like to gzip
        # their responses, so this allows the client code to handle it
        # directly.
        return parser(data)
Request user data from the resource endpoint :param url: The path to the resource and querystring if required :param method: HTTP method. Defaults to ``GET`` unless data is not None, in which case it defaults to ``POST`` :param data: Data to be POSTed to the resource endpoint :param headers: (optional) Additional headers to send with the request :param parser: Parser callback to deal with the returned data. Defaults to ``json.loads``.
entailment
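A hedged usage sketch: `client` stands in for an already-authorized instance of the class above, and the resource paths are invented:

# GET by default; switches to POST automatically when data is supplied.
profile = client.request('/me')
created = client.request('/posts', data={'title': 'hello'})

# A custom parser handles providers that do not return JSON.
raw = client.request('/export.csv', parser=lambda payload: payload)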
def build_srcdict(gta, prop): """Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property """ o = {} for s in gta.roi.sources: o[s.name] = s[prop] return o
Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property
entailment
def get_src_names(gta):
    """Build and return a list of source names

    Parameters
    ----------

    gta : `fermipy.GTAnalysis`
        The analysis object

    Returns
    -------

    l : list
        Names of the sources
    """
    o = []
    for s in gta.roi.sources:
        o += [s.name]
    return sorted(o)
Build and return a list of source names Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object Returns ------- l : list Names of the sources
entailment
def set_wts_get_npred_wt(gta, maskname): """Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred """ if is_null(maskname): maskname = None gta.set_weights_map(maskname) for name in gta.like.sourceNames(): gta._init_source(name) gta._update_roi() return build_srcdict(gta, 'npred_wt')
Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred
entailment
def snapshot(gta, plotter, key, do_weighted=True, make_plots=True): """Take a snapshot of the ROI Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object plotter : `fermipy.plotting.AnalysisPlotter` The object that makes the plots key : str Key for this snapshot, used to create filenames do_weighted : bool If True, include weighted version of outputs make_plots : bool If True, make plots """ gta.write_roi(key, save_model_map=True, make_plots=make_plots, save_weight_map=do_weighted) if make_plots: o = gta.residmap(key) plotter.make_residmap_plots(o, gta.roi) if do_weighted: gta.make_plots("%s_wt"%key, weighted=True) o = gta.residmap("%s_wt"%key, use_weights=True) plotter.make_residmap_plots(o, gta.roi)
Take a snapshot of the ROI Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object plotter : `fermipy.plotting.AnalysisPlotter` The object that makes the plots key : str Key for this snapshot, used to create filenames do_weighted : bool If True, include weighted version of outputs make_plots : bool If True, make plots
entailment
def get_unchanged(src_list, npred_dict_new, npred_dict_old,
                  npred_threshold=1e4, frac_threshold=0.9):
    """Compare two dictionaries of npreds, and get the list of sources
    that have changed by less than the set thresholds

    Parameters
    ----------

    src_list : list
        List of sources to examine

    npred_dict_new : dict
        Dictionary mapping source name to npred for the current weights file

    npred_dict_old : dict
        Dictionary mapping source name to npred for the previous weights file

    npred_threshold : float
        Minimum value of npred above which to consider sources changed

    frac_threshold : float
        Value of npred_old / npred_new above which to consider sources
        unchanged

    Returns
    -------

    l : list
        Names of 'unchanged' sources
    """
    o = []
    for s in src_list:
        npred_new = npred_dict_new[s]
        if npred_new < npred_threshold:
            o += [s]
            continue
        if npred_dict_old is None:
            npred_old = 0.
        else:
            npred_old = npred_dict_old[s]
        frac = npred_old / npred_new
        if frac > frac_threshold:
            o += [s]
    return o
Compare two dictionaries of npreds, and get the list of sources that have changed by less than the set thresholds Parameters ---------- src_list : list List of sources to examine npred_dict_new : dict Dictionary mapping source name to npred for the current weights file npred_dict_old : dict Dictionary mapping source name to npred for the previous weights file npred_threshold : float Minimum value of npred above which to consider sources changed frac_threshold : float Value of npred_old / npred_new above which to consider sources unchanged Returns ------- l : list Names of 'unchanged' sources
entailment
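A small worked example of the two thresholds in `get_unchanged`, with made-up npred values:

src_list = ['srcA', 'srcB', 'srcC']
npred_new = {'srcA': 5.0e3, 'srcB': 2.0e4, 'srcC': 3.0e4}
npred_old = {'srcA': 4.0e3, 'srcB': 1.9e4, 'srcC': 1.0e4}

# srcA: npred_new < 1e4, so it is unchanged regardless of the ratio.
# srcB: 1.9e4 / 2.0e4 = 0.95 > 0.9, so it counts as unchanged.
# srcC: 1.0e4 / 3.0e4 = 0.33 < 0.9, so it counts as changed.
print(get_unchanged(src_list, npred_new, npred_old))  # ['srcA', 'srcB']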
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")

    if args.load_baseline:
        gta = GTAnalysis.create(args.roi_baseline,
                                args.config)
    else:
        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
        gta.setup()
        if is_not_null(args.input_pars):
            gta.load_parameters_from_yaml(args.input_pars)
        gta.write_roi(args.roi_baseline,
                      save_model_map=True,
                      save_weight_map=True,
                      make_plots=args.make_plots)

    src_list = get_src_names(gta)
    plotter = plotting.AnalysisPlotter(gta.config['plotting'],
                                       fileio=gta.config['fileio'],
                                       logging=gta.config['logging'])

    if is_null(args.fit_strategy):
        return

    fit_strategy = load_yaml(args.fit_strategy)
    npred_current = None
    npred_prev = None

    # Set to True to remake plots from saved ROIs without re-fitting.
    plots_only = False

    for fit_stage in fit_strategy:
        mask = fit_stage.get('mask', None)
        npred_threshold = fit_stage.get('npred_threshold', 1.0e4)
        frac_threshold = fit_stage.get('frac_threshold', 0.5)
        npred_frac = fit_stage.get('npred_frac', 0.9999)

        if plots_only:
            gta.load_roi("%s.npy" % fit_stage['key'])
            npred_current = set_wts_get_npred_wt(gta, mask)
            skip_list_region = get_unchanged(src_list,
                                             npred_current,
                                             npred_prev,
                                             frac_threshold=frac_threshold)
        else:
            npred_current = set_wts_get_npred_wt(gta, mask)
            skip_list_region = get_unchanged(src_list,
                                             npred_current,
                                             npred_prev,
                                             frac_threshold=frac_threshold)
            gta.optimize(npred_frac=npred_frac,
                         npred_threshold=npred_threshold,
                         skip=skip_list_region)

        snapshot(gta, plotter, fit_stage['key'], make_plots=args.make_plots)
        npred_prev = npred_current
        npred_current = build_srcdict(gta, 'npred_wt')
Run this analysis
entailment
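The `fit_strategy` file is read with `load_yaml`, so it deserializes to a list of stage dicts; a hypothetical two-stage example using the keys consumed above (`key`, `mask`, `npred_threshold`, `frac_threshold`, `npred_frac`):

# What load_yaml(args.fit_strategy) might return; the stage keys, mask
# filename, and threshold values are illustrative, not a recommendation.
fit_strategy = [
    dict(key='fit_nomask', mask=None,
         npred_threshold=1.0e4, frac_threshold=0.5, npred_frac=0.9999),
    dict(key='fit_masked', mask='srcmask.fits',
         npred_threshold=1.0e4, frac_threshold=0.5, npred_frac=0.9999),
]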
def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} # Tweak the batch job args try: self._interface._lsf_args.update(dict(n=2)) self._interface._lsf_args.update(dict(R='\"select[rhel60&&!fell] -R span[hosts=1]\"')) except AttributeError: pass models = load_yaml(args['models']) base_config = dict(fit_strategy=args['fit_strategy'], input_pars=args['input_pars'], load_baseline=args['load_baseline'], make_plots=args['make_plots']) for modelkey in models: config_file = os.path.join('analysis', 'model_%s' % modelkey, args['config']) #roi_baseline = os.path.join('analysis', 'model_%s' % modelkey, # args['roi_baseline']) roi_baseline = args['roi_baseline'] logfile = os.path.join('analysis', 'model_%s' % modelkey, 'fit_%s.log' % modelkey) job_config = base_config.copy() job_config.update(dict(config=config_file, roi_baseline=roi_baseline, logfile=logfile)) job_configs[modelkey] = job_config return job_configs
Hook to build job configurations
entailment
def main(): """ Main function for command line usage """ usage = "usage: %(prog)s [options] " description = "Merge a set of Fermi-LAT files." parser = argparse.ArgumentParser(usage=usage, description=description) parser.add_argument('-o', '--output', default=None, type=str, help='Output file.') parser.add_argument('--clobber', default=False, action='store_true', help='Overwrite output file.') parser.add_argument('files', nargs='+', default=None, help='List of input files.') args = parser.parse_args() proj, f, hdu = fits_utils.read_projection_from_fits(args.files[0]) if isinstance(proj, WCS): hdulist = merge_utils.merge_wcs_counts_cubes(args.files) elif isinstance(proj, HPX): hdulist = merge_utils.merge_hpx_counts_cubes(args.files) else: raise TypeError("Could not read projection from file %s" % args.files[0]) if args.output: hdulist.writeto(args.output, clobber=args.clobber, output_verify='silentfix')
Main function for command line usage
entailment
def get_native_default_args(): """ Get the correct set of batch jobs arguments. """ native_default_args = dict(max_jobs=500, time_per_cycle=15, jobs_per_cycle=20, max_job_age=90, no_batch=False) return native_default_args.copy()
Get the correct set of batch jobs arguments.
entailment
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
    """Send a single job to be executed

    Parameters
    ----------

    link : `fermipy.jobs.chain.Link`
        The link used to invoke the command we are running

    key : str
        A string that identifies this particular instance of the job

    job_config : dict
        A dictionary with the arguments for the job. Used with
        the self._command_template job template

    logfile : str
        The logfile for this job, may be used to check for success/ failure
    """
    full_sub_dict = job_config.copy()
    # Redirect both stdout and stderr to the logfile; the POSIX-sh form is
    # used here because os.system() runs the command under /bin/sh.
    full_command = "%s > %s 2>&1" % (
        link.command_template().format(**full_sub_dict), logfile)
    logdir = os.path.dirname(logfile)

    if self._dry_run:
        sys.stdout.write("%s\n" % full_command)
    else:
        try:
            os.makedirs(logdir)
        except OSError:
            pass
        os.system(full_command)
Send a single job to be executed Parameters ---------- link : `fermipy.jobs.chain.Link` The link used to invoke the command we are running key : str A string that identifies this particular instance of the job job_config : dict A dictionary with the arguments for the job. Used with the self._command_template job template logfile : str The logfile for this job, may be used to check for success/ failure
entailment
def log_level(level):
    """Convert a HEASOFT-like verbosity level (0-4) into the
    corresponding Python logging level.
    """
    levels_dict = {0: 50,
                   1: 40,
                   2: 30,
                   3: 20,
                   4: 10}
    if not isinstance(level, int):
        level = int(level)
    if level > 4:
        level = 4

    return levels_dict[level]
Convert a HEASOFT-like verbosity level (0-4) into the corresponding Python logging level.
entailment
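The mapping simply inverts the ordering of the two conventions and clamps anything above 4; for example:

import logging

assert log_level(0) == logging.CRITICAL  # 50
assert log_level(3) == logging.INFO      # 20
assert log_level(9) == logging.DEBUG     # 9 clamps to 4, which maps to 10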
def setup(config=None, logfile=None):
    """This method sets up the default configuration of the logger. Once
    this method is called, all subsequently created Logger instances will
    inherit this configuration."""
    if config is None:
        configpath = os.path.join(fermipy.PACKAGE_ROOT,
                                  'config',
                                  'logging.yaml')
        with open(configpath, 'r') as f:
            # safe_load is sufficient for a plain logging config and avoids
            # yaml.load's arbitrary object construction.
            config = yaml.safe_load(f)

    # Update configuration
    if logfile:
        for name, h in config['handlers'].items():
            if 'file_handler' in name:
                config['handlers'][name]['filename'] = logfile

    logging.config.dictConfig(config)
This method sets up the default configuration of the logger. Once this method is called, all subsequently created Logger instances will inherit this configuration.
entailment
def configure(name, logfile, loglevel=logging.DEBUG):
    """Create a python logger instance and configure it.

    Parameters
    ----------
    name : str
        Logger name.

    logfile : str
        Path to the log file.

    loglevel : int
        Default log level for STDOUT.
    """
    # logging.config.dictConfig({
    # 'version': 1,
    # 'disable_existing_loggers': False})
    logger = logging.getLogger(name)

    # Don't propagate to root logger
    logger.propagate = False

    logger.setLevel(logging.DEBUG)
    datefmt = '%Y-%m-%d %H:%M:%S'
    # Note the trailing space after the level field: without it the logger
    # name runs into eight-character level names like CRITICAL.
    format_stream = ('%(asctime)s %(levelname)-8s '
                     '%(name)s.%(funcName)s(): %(message)s')
    format_file = ('%(asctime)s %(levelname)-8s '
                   '%(name)s.%(funcName)s(): %(message)s')
    # format_file = ('%(asctime)s %(levelname)-8s '
    #                '%(name)s.%(funcName)s() '
    #                '[%(filename)s:%(lineno)d]: %(message)s')

    if not logger.handlers:

        # Add a file handler
        if logfile is not None:
            logfile = logfile.replace('.log', '') + '.log'
            fh = logging.FileHandler(logfile)
            fh.setLevel(logging.DEBUG)
            fh.setFormatter(logging.Formatter(format_file, datefmt))
            logger.addHandler(fh)

        # Add a stream handler
        ch = logging.StreamHandler()
        ch.setLevel(loglevel)
        ch.setFormatter(logging.Formatter(format_stream, datefmt))
        logger.addHandler(ch)
    else:
        logger.handlers[-1].setLevel(loglevel)

    return logger
Create a python logger instance and configure it. Parameters ---------- name : str Logger name. logfile : str Path to the log file. loglevel : int Default log level for STDOUT.
entailment
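A minimal usage sketch; the logger name and log file are arbitrary:

import logging

# DEBUG and above go to example.log; INFO and above go to the console.
logger = configure('fermipy.example', 'example.log', loglevel=logging.INFO)
logger.info('starting analysis')
logger.debug('only recorded in the log file')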
def extract_arguments(args, defaults):
    """Extract a set of arguments from a large dictionary

    Parameters
    ----------

    args : dict
        Dictionary with the argument values to use

    defaults : dict
        Dictionary with all the arguments to extract, and default values
        for each

    Returns
    -------

    out_dict : dict
        A dictionary with only the extracted arguments
    """
    out_dict = convert_option_dict_to_dict(defaults)
    for key in defaults.keys():
        mapped_val = args.get(key, None)
        if mapped_val is not None:
            out_dict[key] = mapped_val
    return out_dict
Extract a set of arguments from a large dictionary Parameters ---------- args : dict Dictionary with the argument values to use defaults : dict Dictionary with all the arguments to extract, and default values for each Returns ------- out_dict : dict A dictionary with only the extracted arguments
entailment
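For example, with option tuples in the (default, help string, type) convention used throughout this module (the argument names here are invented):

defaults = {
    'nbins': (10, 'Number of energy bins', int),
    'outfile': (None, 'Output file', str),
}
args = {'nbins': 24, 'unrelated': 'ignored'}
print(extract_arguments(args, defaults))  # {'nbins': 24, 'outfile': None}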
def check_files(filelist,
                file_stage_manager=None,
                return_found=True,
                return_missing=True):
    """Check that all files in a list exist

    Parameters
    ----------

    filelist : list
        The list of files we are checking for.

    file_stage_manager : `fermipy.jobs.file_archive.FileStageManager`
        An object that maps files to scratch space if needed.

    return_found : bool
        If True, return the list of files that were found.

    return_missing : bool
        If True, return the list of files that were missing.

    Returns
    -------

    found : list
        List of the found files, if requested, otherwise `None`

    missing : list
        List of the missing files, if requested, otherwise `None`
    """
    found = []
    missing = []
    none_count = 0
    for fname in filelist:
        if fname is None:
            none_count += 1
            continue
        if fname[0] == '@':
            fname = fname[1:]
        if os.path.exists(fname):
            found.append(fname)
            continue
        if os.path.exists(fname + '.gz'):
            found.append(fname)
            continue
        if file_stage_manager is not None:
            fname = file_stage_manager.get_scratch_path(fname)
            if os.path.exists(fname):
                found.append(fname)
                continue
        missing.append(fname)
    if return_found and return_missing:
        return found, missing
    elif return_found:
        return found
    elif return_missing:
        return missing
    return None
Check that all files in a list exist Parameters ---------- filelist : list The list of files we are checking for. file_stage_manager : `fermipy.jobs.file_archive.FileStageManager` An object that maps files to scratch space if needed. return_found : bool If True, return the list of files that were found. return_missing : bool If True, return the list of files that were missing. Returns ------- found : list List of the found files, if requested, otherwise `None` missing : list List of the missing files, if requested, otherwise `None`
entailment
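With both return flags left at their defaults a 2-tuple comes back; the filenames below are made up:

found, missing = check_files(['counts.fits', 'no_such_file.fits'])
# Two conveniences are visible in the loop above: an '@' prefix
# (an ftool-style file list) is stripped before checking, and a
# gzipped copy ('name.gz') also counts as found.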
def add_argument(parser, dest, info): """ Add an argument to an `argparse.ArgumentParser` object Parameters ---------- parser : `argparse.ArgumentParser` The parser in question dest : str The destination for the argument info : `tuple` The information associated with the argument in question. """ default, helpstr, typeinfo = info if dest == 'args': parser.add_argument('args', nargs='+', default=None, help=helpstr) elif typeinfo == list: parser.add_argument('--%s' % dest, action='append', help=helpstr) elif typeinfo == bool: parser.add_argument('--%s' % dest, action='store_true', help=helpstr) else: parser.add_argument('--%s' % dest, action='store', type=typeinfo, default=default, help=helpstr)
Add an argument to an `argparse.ArgumentParser` object Parameters ---------- parser : `argparse.ArgumentParser` The parser in question dest : str The destination for the argument info : `tuple` The information associated with the argument in question.
entailment
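A short self-contained example of feeding (default, help string, type) tuples to `add_argument`; the option names are invented:

import argparse

parser = argparse.ArgumentParser()
add_argument(parser, 'nbins', (10, 'Number of energy bins', int))
add_argument(parser, 'overwrite', (False, 'Overwrite outputs', bool))

# bool options become store_true flags; everything else takes a typed value.
ns = parser.parse_args(['--nbins', '24', '--overwrite'])
print(ns.nbins, ns.overwrite)  # 24 True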
def convert_dict_to_option_dict(input_dict): """Convert a simple key-value dictionary to a dictionary of options tuples""" ret_dict = {} for key, value in input_dict.items(): ret_dict[key] = convert_value_to_option_tuple(value) return ret_dict
Convert a simple key-value dictionary to a dictionary of options tuples
entailment
def convert_option_dict_to_dict(option_dict): """Convert a dictionary of options tuples to a simple key-value dictionary""" ret_dict = {} for key, value in option_dict.items(): if is_null(value): ret_dict[key] = None elif isinstance(value, tuple): ret_dict[key] = value[0] else: ret_dict[key] = value return ret_dict
Convert a dictionary of options tuples to a simple key-value dictionary
entailment
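Together with `convert_dict_to_option_dict` this gives a round trip, assuming `convert_value_to_option_tuple` (defined elsewhere in the module) wraps each value as `(value, ...)`:

simple = {'nbins': 10, 'outfile': 'out.fits'}
opts = convert_dict_to_option_dict(simple)   # values become option tuples
assert convert_option_dict_to_dict(opts) == simple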
def reduce_by_keys(orig_dict, keys, default=None): """Reduce a dictionary by selecting a set of keys """ ret = {} for key in keys: ret[key] = orig_dict.get(key, default) return ret
Reduce a dictionary by selecting a set of keys
entailment
def construct_docstring(options): """Construct a docstring for a set of options""" s = "\nParameters\n" s += "----------\n\n" for key, opt in options.items(): s += "%s : %s\n %s [%s]\n" % (key, str(opt[2]), str(opt[1]), str(opt[0])) return s
Construct a docstring for a set of options
entailment
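For the option-tuple convention above, the generated docstring fragment looks like this:

print(construct_docstring({'nbins': (10, 'Number of energy bins', int)}))
# Output (note the single-space indent the format string produces):
#
# Parameters
# ----------
#
# nbins : <class 'int'>
#  Number of energy bins [10]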
def register_class(cls):
    """Register this class in the `LinkFactory` """
    if cls.appname in LinkFactory._class_dict:
        return
    LinkFactory.register(cls.appname, cls)
Register this class in the `LinkFactory`
entailment
def _fill_argparser(self, parser):
    """Fill an `argparse.ArgumentParser` with the options from this chain
    """
    for key, val in self._options.items():
        add_argument(parser, key, val)
Fill an `argparse.ArgumentParser` with the options from this chain
entailment
def _run_argparser(self, argv):
    """Initialize a link with a set of arguments using an
    `argparse.ArgumentParser`
    """
    if self._parser is None:
        raise ValueError('Link was not given a parser on initialization')
    args = self._parser.parse_args(argv)
    self.update_args(args.__dict__)
    return args
Initialize a link with a set of arguments using an `argparse.ArgumentParser`
entailment
def _latch_file_info(self): """Internal function to update the dictionaries keeping track of input and output files """ self.files.file_dict.clear() self.files.latch_file_info(self.args)
Internal function to update the dictionaries keeping track of input and output files
entailment
def _update_sub_file_dict(self, sub_files): """Update a file dict with information from self""" sub_files.file_dict.clear() for job_details in self.jobs.values(): if job_details.file_dict is not None: sub_files.update(job_details.file_dict) if job_details.sub_file_dict is not None: sub_files.update(job_details.sub_file_dict)
Update a file dict with information from self
entailment
def _pre_run_checks(self, stream=sys.stdout, dry_run=False):
    """Do some checks before running this link

    This checks if input and output files are present.

    If input files are missing this will raise `OSError` if dry_run is
    False

    If all output files are present this returns False.

    Parameters
    ----------

    stream : `file`
        Stream that this function will print to,
        Must have 'write' function

    dry_run : bool
        Print command but do not run it

    Returns
    -------

    status : bool
        True if it is ok to proceed with running the link
    """
    input_missing = self.check_input_files(return_found=False)
    if input_missing:
        if dry_run:
            stream.write("Input files are missing: %s: %i\n" %
                         (self.linkname, len(input_missing)))
        else:
            print(self.args)
            raise OSError("Input files are missing: %s" % input_missing)

    output_found, output_missing = self.check_output_files()
    if output_found and not output_missing:
        stream.write("All output files for %s already exist: %i %i %i\n" %
                     (self.linkname, len(output_found),
                      len(output_missing), len(self.files.output_files)))
        if dry_run:
            pass
        else:
            pass
            # return False
    return True
Do some checks before running this link This checks if input and output files are present. If input files are missing this will raise `OSError` if dry_run is False If all output files are present this returns False. Parameters ---------- stream : `file` Stream that this function will print to, Must have 'write' function dry_run : bool Print command but do not run it Returns ------- status : bool True if it is ok to proceed with running the link
entailment
def _create_job_details(self, key, job_config, logfile, status):
    """Create a `JobDetails` for a single job

    Parameters
    ----------

    key : str
        Key used to identify this particular job

    job_config : dict
        Dictionary with arguments passed to this particular job

    logfile : str
        Name of the associated log file

    status : int
        Current status of the job

    Returns
    -------

    job_details : `fermipy.jobs.JobDetails`
        Object with the details about a particular job.
    """
    self.update_args(job_config)
    job_details = JobDetails(jobname=self.full_linkname,
                             jobkey=key,
                             appname=self.appname,
                             logfile=logfile,
                             job_config=job_config,
                             timestamp=get_timestamp(),
                             file_dict=copy.deepcopy(self.files),
                             sub_file_dict=copy.deepcopy(self.sub_files),
                             status=status)
    return job_details
Create a `JobDetails` for a single job Parameters ---------- key : str Key used to identify this particular job job_config : dict Dictionary with arguments passed to this particular job logfile : str Name of the associated log file status : int Current status of the job Returns ------- job_details : `fermipy.jobs.JobDetails` Object with the details about a particular job.
entailment
def _map_scratch_files(self, file_dict): """Build and return the mapping for copying files to and from scratch area""" if self._file_stage is None: return ({}, {}) input_files = file_dict.input_files_to_stage output_files = file_dict.output_files_to_stage input_file_mapping = self._file_stage.map_files(input_files) output_file_mapping = self._file_stage.map_files(output_files) self._update_file_args(input_file_mapping) self._update_file_args(output_file_mapping) return input_file_mapping, output_file_mapping
Build and return the mapping for copying files to and from scratch area
entailment
def _update_file_args(self, file_mapping): """Adjust the arguments to deal with staging files to the scratch area""" for key, value in self.args.items(): new_value = file_mapping.get(value, value) if new_value != value: self.args[key] = new_value
Adjust the arguments to deal with staging files to the scratch area
entailment
def _stage_input_files(self, file_mapping, dry_run=True): """Stage the input files to the scratch area and adjust the arguments accordingly""" # print ("Staging input ", file_mapping) if self._file_stage is None: return self._file_stage.copy_to_scratch(file_mapping, dry_run)
Stage the input files to the scratch area and adjust the arguments accordingly
entailment
def _stage_output_files(self, file_mapping, dry_run=True):
    """Stage the output files out of the scratch area and adjust the
    arguments accordingly"""
    # print ("Staging output ", file_mapping)
    if self._file_stage is None:
        return
    self._file_stage.copy_from_scratch(file_mapping, dry_run)
Stage the output files out of the scratch area and adjust the arguments accordingly
entailment
def _run_link(self, stream=sys.stdout, dry_run=False, stage_files=True,
              resubmit_failed=False):
    """Internal function that actually runs this link.

    This checks if input and output files are present.

    If input files are missing this will raise `OSError` if dry_run is
    False

    If all output files are present this will skip execution.

    Parameters
    ----------

    stream : `file`
        Stream that this `Link` will print to,
        must have 'write' function.

    dry_run : bool
        Print command but do not run it.

    stage_files : bool
        Stage files to and from the scratch area.

    resubmit_failed : bool
        Resubmit failed jobs.
    """
    check_ok = self._pre_run_checks(stream, dry_run)
    if not check_ok:
        return

    if self._file_stage is not None:
        input_file_mapping, output_file_mapping = self._map_scratch_files(
            self.files)
        if stage_files:
            self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)
            self._file_stage.make_scratch_dirs(
                output_file_mapping, dry_run)
            self._stage_input_files(input_file_mapping, dry_run)

    return_code = self.run_command(stream, dry_run)
    print("return code ", return_code)
    if return_code == 0:
        status = JobStatus.done
        if self._file_stage is not None and stage_files:
            self._stage_output_files(output_file_mapping, dry_run)
        self._finalize(dry_run)
    else:
        if resubmit_failed:
            # A plain Link has no resubmission machinery, so just note
            # that the failed link will not be resubmitted.
            print("Not resubmitting failed link %s" % (self.linkname))
        status = JobStatus.failed

    if dry_run:
        return
    self._write_status_to_log(return_code, stream)
    self._set_status_self(status=status)
Internal function that actually runs this link. This checks if input and output files are present. If input files are missing this will raise `OSError` if dry_run is False If all output files are present this will skip execution. Parameters ---------- stream : `file` Stream that this `Link` will print to, must have 'write' function. dry_run : bool Print command but do not run it. stage_files : bool Stage files to and from the scratch area. resubmit_failed : bool Resubmit failed jobs.
entailment
def _register_job(self, key, job_config, logfile, status): """Create a `JobDetails` for this link and add it to the self.jobs dictionary. Parameters ---------- key : str Key used to identify this particular job job_config : dict Dictionary with arguments passed to this particular job logfile : str Name of the associated log file status : int Current status of the job Returns ------- job_details : `fermipy.jobs.JobDetails` Object with the details about this particular job. """ job_details = self._create_job_details( key, job_config, logfile, status) self.jobs[job_details.fullkey] = job_details return job_details
Create a `JobDetails` for this link and add it to the self.jobs dictionary. Parameters ---------- key : str Key used to identify this particular job job_config : dict Dictionary with arguments passed to this particular job logfile : str Name of the associated log file status : int Current status of the job Returns ------- job_details : `fermipy.jobs.JobDetails` Object with the details about this particular job.
entailment