code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
#log.debug('searching schema for (%s)', name)
qref = qualify(name, self.schema.root, self.schema.tns)
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
return None
#log.debug('found (%s) as (%s)', name, Repr(result))
if resolved:
result = result.resolve()
return result | def find(self, name, resolved=True) | Get the definition object for the schema object by name.
@param name: The name of a schema object.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject} | 5.610582 | 5.416219 | 1.035885 |
if isinstance(x, Frame):
frame = x
else:
frame = Frame(x)
self.stack.append(frame)
#log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
return frame | def push(self, x) | Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame} | 4.150942 | 3.84187 | 1.080448 |
if len(self.stack):
popped = self.stack.pop()
#log.debug('pop: (%s)\n%s', Repr(popped), Repr(self.stack))
return popped
else:
#log.debug('stack empty, not-popped')
pass
return None | def pop(self) | Pop the frame at the top of the stack.
@return: The popped frame, else None.
@rtype: L{Frame} | 5.127289 | 5.216693 | 0.982862 |
#log.debug('searching parent (%s) for (%s)', Repr(parent), name)
if name.startswith('@'):
return parent.get_attribute(name[1:])
else:
return parent.get_child(name) | def getchild(self, name, parent) | get a child by name | 5.933013 | 6.400992 | 0.92689 |
#log.debug('searching schema for (%s)', name)
qref = qualify(name, node, node.namespace())
query = BlindQuery(qref)
result = query.execute(self.schema)
return (result, []) | def query(self, name, node) | blindly query the schema by name | 10.724172 | 9.210355 | 1.16436 |
#log.debug('searching schema for (%s)', name)
schema = self.schema
wsdl = self.wsdl()
if wsdl is None:
qref = qualify(name, schema.root, schema.tns)
else:
qref = qualify(name, wsdl.root, wsdl.tns)
query = BlindQuery(qref)
result = query.execute(schema)
return (result, []) | def query(self, name) | blindly query the schema by name | 6.22122 | 5.611879 | 1.108581 |
ptop = self.type.operation(op.name)
if ptop is None:
raise Exception, \
"operation '%s' not defined in portType" % op.name
soap = op.soap
parts = soap.input.body.parts
if len(parts):
pts = []
for p in ptop.input.parts:
if p.name in parts:
pts.append(p)
soap.input.body.parts = pts
else:
soap.input.body.parts = ptop.input.parts
parts = soap.output.body.parts
if len(parts):
pts = []
for p in ptop.output.parts:
if p.name in parts:
pts.append(p)
soap.output.body.parts = pts
else:
soap.output.body.parts = ptop.output.parts | def resolvesoapbody(self, definitions, op) | Resolve soap body I{message} parts by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation} | 2.318398 | 2.129515 | 1.088698 |
soap = op.soap
headers = soap.input.headers + soap.output.headers
for header in headers:
mn = header.message
ref = qualify(mn, self.root, definitions.tns)
message = definitions.messages.get(ref)
if message is None:
raise Exception, "message'%s', not-found" % mn
pn = header.part
for p in message.parts:
if p.name == pn:
header.part = p
break
if pn == header.part:
raise Exception, \
"message '%s' has not part named '%s'" % (ref, pn) | def resolveheaders(self, definitions, op) | Resolve soap header I{message} references.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation} | 4.56538 | 4.421212 | 1.032608 |
ptop = self.type.operation(op.name)
if ptop is None:
raise Exception, \
"operation '%s' not defined in portType" % op.name
soap = op.soap
for fault in soap.faults:
for f in ptop.faults:
if f.name == fault.name:
fault.parts = f.message.parts
continue
if hasattr(fault, 'parts'):
continue
raise Exception, \
"fault '%s' not defined in portType '%s'" % (fault.name, self.type.name) | def resolvefaults(self, definitions, op) | Resolve soap fault I{message} references by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation} | 3.704058 | 3.221193 | 1.149902 |
try:
content = self.store[location]
return StringIO(content)
except:
reason = 'location "%s" not in document store' % location
raise Exception, reason | def find(self, location) | Find the specified location in the store.
@param location: The I{location} part of a URL.
@type location: str
@return: An input stream to the document.
@rtype: StringIO | 7.227798 | 5.444132 | 1.327631 |
for t in self.wsdl.schema.types.values():
if t in self.params: continue
if t in self.types: continue
item = (t, t)
self.types.append(item)
tc = lambda x,y: cmp(x[0].name, y[0].name)
self.types.sort(cmp=tc) | def publictypes(self) | get all public types | 4.369124 | 4.295936 | 1.017037 |
if isinstance(s, basestring) and self.needsEncoding(s):
for x in self.encodings:
s = s.replace(x[0], x[1])
return s | def encode(self, s) | Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str | 3.410726 | 3.47859 | 0.980491 |
if isinstance(s, basestring) and '&' in s:
for x in self.decodings:
s = s.replace(x[0], x[1])
return s | def decode(self, s) | Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str | 3.747661 | 3.966689 | 0.944783 |
timer = metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(StringIO(string))
sax.parse(source)
timer.stop()
metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0] | def parse(self, file=None, string=None) | SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str | 3.307703 | 3.260815 | 1.014379 |
content = self.headercontent(method, options=options)
header = self.header(content)
content = self.bodycontent(method, args, kwargs)
body = self.body(content)
env = self.envelope(header, body)
if self.options().prefixes:
body.normalizePrefixes()
env.promotePrefixes()
else:
env.refitPrefixes()
return Document(env) | def get_message(self, method, args, kwargs, options=None) | Get the soap message for the specified method, args and soapheaders.
This is the entry point for creating the outbound soap message.
@param method: The method being invoked.
@type method: I{service.Method}
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The soap envelope.
@rtype: L{Document} | 6.980622 | 5.837481 | 1.195828 |
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs,
options=self.options)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result | def invoke(self, args, kwargs) | Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object} | 4.260905 | 3.976663 | 1.071477 |
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
timer = metrics.Timer()
timer.start()
result = self.succeeded(binding, reply.message)
#cProfile.runctx("result = self.succeeded(binding, reply.message)", globals(), locals(), "unmarshal_prof")
timer.stop()
metrics.log.debug(
"succeeded took: %s",
timer)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result | def send(self, soapenv) | Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object} | 4.489464 | 4.513371 | 0.994703 |
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result | def headers(self) | Get http headers or the http/https request.
@return: A dictionary of header/values.
@rtype: dict | 6.853081 | 6.46695 | 1.059708 |
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
with LocalTimer() as lt:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
metrics.log.debug("Calling binding.get_reply took: %.03f" % lt.interval)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result) | def succeeded(self, binding, reply) | Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server. | 9.021653 | 8.872894 | 1.016766 |
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise HttpWebFault(status, reason)
else:
return (status, None) | def failed(self, binding, error) | Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError} | 6.148388 | 5.776987 | 1.06429 |
matched = LINECOL_COMMENT_RE.match(py_line)
if matched:
return int(matched.group(1))
else:
return 0 | def _get_line_no_from_comments(py_line) | Return the line number parsed from the comment or 0. | 4.344598 | 3.407048 | 1.27518 |
# Find lower bound
for line_no in range(py_line_no, 0, -1):
lower_bound = _get_line_no_from_comments(py_by_line_no[line_no])
if lower_bound != 0:
break
else:
lower_bound = 0
# Find upper bound
for line_no in range(py_line_no, len(py_by_line_no)):
upper_bound = _get_line_no_from_comments(py_by_line_no[line_no])
if upper_bound != 0:
# Since we'll eventually be building a range(), let's make this
# the non-inclusive upper-bound
upper_bound += 1
break
else:
upper_bound = len(cheetah_by_line_no)
return lower_bound, upper_bound | def _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no) | Searches before and after in the python source to find comments which
denote cheetah line numbers. If a lower bound is not found, 0 is
substituted. If an upper bound is not found, len(cheetah lines) is
returned. The result is a lower-inclusive upper-exclusive range:
[..., ...) | 2.271749 | 2.096427 | 1.083629 |
stripped_line = _fuzz_py_line(py_by_line_no[py_line_no])
cheetah_lower_bound, cheetah_upper_bound = _find_bounds(
py_line_no, py_by_line_no, cheetah_by_line_no,
)
sliced = list(enumerate(cheetah_by_line_no))[
cheetah_lower_bound:cheetah_upper_bound
]
if not prefer_first:
sliced = reversed(sliced)
for line_no, line in sliced:
if stripped_line in _fuzz_cheetah_line(line):
return line_no
else:
# We've failed to find a matching line
return 0 | def _find_fuzzy_line(
py_line_no, py_by_line_no, cheetah_by_line_no, prefer_first
) | Attempt to fuzzily find matching lines. | 2.743883 | 2.637037 | 1.040517 |
assert type(file_contents) is not bytes
xmldoc = parse(file_contents)
return step(xmldoc) | def perform_step(file_contents, step) | Performs a step of the transformation.
:param text file_contents: Contends of the cheetah template
:param function step: Function taking xmldoc and returning new contents
:returns: new contents of the file. | 7.48834 | 6.99704 | 1.070215 |
notify_func = kwargs['notify_func']
with _sender_instances_lock:
existing_sender = _sender_instances.get(sender_params, None)
if existing_sender:
sender = existing_sender
sender._notify = notify_func
else:
sender = _Sender(*sender_params, notify=notify_func)
_sender_instances[sender_params] = sender
return sender | def _get_sender(*sender_params, **kwargs) | Utility function acting as a Sender factory - ensures senders don't get
created twice of more for the same target server | 2.901707 | 2.618862 | 1.108003 |
with _sender_instances_lock:
for sender_key, sender in _sender_instances.items():
sender.close()
_sender_instances.clear() | def terminate() | Stops all the active Senders by flushing the buffers and closing the
underlying sockets | 4.97882 | 4.59414 | 1.083733 |
event_wrapper = {}
# Add ISO6801 timestamp and frame info
timestamp = datetime.datetime.utcnow().isoformat()
event_wrapper[consts.WRAPPER_REPORT_TIME] = timestamp
# Add the enclosing frame
frame = inspect.currentframe().f_back.f_back
filename = frame.f_code.co_filename
line_number = frame.f_lineno
event_wrapper[consts.WRAPPER_CALLING_FILE] = str(filename)
event_wrapper[consts.WRAPPER_CALLING_LINE] = str(line_number)
# Add the UUID to the event
event_wrapper[consts.WRAPPER_UUID] = str(uuid.uuid4())
# Try to set event type. If it throws, put the input label
try:
event_wrapper[consts.WRAPPER_EVENT_TYPE] = \
self._get_event_type(orig_event)
except Exception:
pass # The event type will be the input name, added by Alooma
# Optionally add external metadata
if external_metadata and isinstance(external_metadata, dict):
event_wrapper.update(external_metadata)
# Wrap the event with metadata
event_wrapper[consts.WRAPPER_MESSAGE] = orig_event
return json_dumps(event_wrapper) | def _format_event(self, orig_event, external_metadata=None) | Format the event to the expected Alooma format, packing it into a
message field and adding metadata
:param orig_event: The original event that was sent, should be
dict, str or unicode.
:param external_metadata: (Optional) a dict containing metadata to add
to the event
:return: a dict with the original event in a 'message'
field and all the supplied metadata | 3.552793 | 3.44937 | 1.029983 |
# Don't allow reporting if the underlying sender is terminated
if self._sender.is_terminated:
self._notify(logging.ERROR, consts.LOG_MSG_REPORT_AFTER_TERMINATION)
return False
# Send the event to the queue if it is a dict or a string.
if isinstance(event, (dict,) + py2to3.basestring):
formatted_event = self._format_event(event, metadata)
should_block = block if block is not None else self.is_blocking
return self._sender.enqueue_event(formatted_event, should_block)
else: # Event is not a dict nor a string. Deny it.
error_message = (consts.LOG_MSG_BAD_EVENT % (type(event), event))
self._notify(logging.ERROR, error_message)
return False | def report(self, event, metadata=None, block=None) | Reports an event to Alooma by formatting it properly and placing it in
the buffer to be sent by the Sender instance
:param event: A dict / string representing an event
:param metadata: (Optional) A dict with extra metadata to be attached to
the event
:param block: (Optional) If True, the function will block the thread
until the event buffer has space for the event.
If False, reported events are discarded if the queue is
full. Defaults to None, which uses the global `block`
parameter given in the `init`.
:return: True if the event was successfully enqueued, else False | 4.705466 | 4.267093 | 1.102734 |
failed_list = []
for index, event in enumerate(event_list):
queued_successfully = self.report(event, metadata, block)
if not queued_successfully:
failed_list.append((index, event))
return failed_list | def report_many(self, event_list, metadata=None, block=None) | Reports all the given events to Alooma by formatting them properly and
placing them in the buffer to be sent by the Sender instance
:param event_list: A list of dicts / strings representing events
:param metadata: (Optional) A dict with extra metadata to be attached to
the event
:param block: (Optional) If True, the function will block the thread
until the event buffer has space for the event.
If False, reported events are discarded if the queue is
full. Defaults to None, which uses the global `block`
parameter given in the `init`.
:return: A list with tuples, each containing a failed event
and its original index. An empty list means success | 3.148441 | 3.162215 | 0.995644 |
timestamp = datetime.datetime.utcnow()
logger.log(log_level, str(message))
try:
self._callback(log_level, message, timestamp)
except Exception as ex:
logger.warning(consts.LOG_MSG_CALLBACK_FAILURE % str(ex)) | def _notify(self, log_level, message) | Calls the callback function and logs messages using the PySDK logger
:param log_level: An integer representing the log level, as specified
in the Python `logging` library
:param message: The actual message to be sent to the logger and the
`callback` function | 4.364048 | 4.391598 | 0.993727 |
# If a host hasn't been chosen yet or there is only one host
if len(self._hosts) == 1 or self._http_host is None:
self._http_host = self._hosts[0]
else: # There is a list of hosts to choose from, pick a random one
choice = self._http_host
while choice == self._http_host:
choice = random.choice(self._hosts)
self._http_host = choice
self._notify(logging.INFO,
consts.LOG_MSG_NEW_SERVER % self._http_host)
# Set the validation and the REST URLs
secure = 's' if self._use_ssl else ''
self._connection_validation_url = \
consts.CONN_VALIDATION_URL_TEMPLATE.format(host=self._http_host,
secure=secure)
self._rest_url = consts.REST_URL_TEMPLATE.format(host=self._http_host,
token=self._token,
secure=secure)
self._token_verification_url = \
consts.TOKEN_VERIFICATION_URL_TEMPLATE.format(host=self._http_host,
token=self._token,
secure=secure) | def _choose_host(self) | This method randomly chooses a server from the server list given as
a parameter to the parent PythonSDK
:return: The selected host to which the Sender will attempt to
connect | 2.909925 | 2.896007 | 1.004806 |
try:
res = self._session.get(self._connection_validation_url, json={})
logger.debug(consts.LOG_MSG_VERIFYING_CONNECTION,
self._connection_validation_url,
res if res else 'No result from backend')
if not res.ok:
raise requests.exceptions.RequestException(res.content)
remote_batch_size = res.json().get(consts.MAX_REQUEST_SIZE_FIELD,
consts.DEFAULT_BATCH_SIZE)
if remote_batch_size < self._batch_max_size:
self._batch_max_size = remote_batch_size
self._notify(logging.INFO,
consts.LOG_MSG_NEW_BATCH_SIZE % remote_batch_size)
self._is_connected.set()
return True
except requests.exceptions.RequestException as ex:
msg = consts.LOG_MSG_CONNECTION_FAILED % str(ex)
self._notify(logging.ERROR, msg)
raise exceptions.ConnectionFailed(msg) | def _verify_connection(self) | Checks availability of the Alooma server
:return: If the server is reachable, returns True
:raises: If connection fails, raises exceptions.ConnectionFailed | 3.329785 | 3.261678 | 1.020881 |
res = self._session.get(self._token_verification_url)
if not res.ok:
raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)
return True | def _verify_token(self) | Verifies the validity of the token against the remote server
:return: True if the token is valid, else raises exceptions.BadToken | 6.643524 | 5.124132 | 1.296517 |
try:
json_batch = '[' + ','.join(batch) + ']' # Make JSON array string
logger.debug(consts.LOG_MSG_SENDING_BATCH, len(batch),
len(json_batch), self._rest_url)
res = self._session.post(self._rest_url, data=json_batch,
headers=consts.CONTENT_TYPE_JSON)
logger.debug(consts.LOG_MSG_BATCH_SENT_RESULT, res.status_code,
res.content)
if res.status_code == 400:
self._notify(logging.CRITICAL, consts.LOG_MSG_BAD_TOKEN)
raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)
elif not res.ok:
raise exceptions.SendFailed("Got bad response code - %s: %s" % (
res.status_code, res.content if res.content else 'No info'))
except broken_pipe_errors as ex:
self._is_connected.clear()
raise exceptions.BatchTooBig(consts.LOG_MSG_BATCH_TOO_BIG % str(ex))
except requests.exceptions.RequestException as ex:
raise exceptions.SendFailed(str(ex)) | def _send_batch(self, batch) | Sends a batch to the destination server via HTTP REST API | 3.283901 | 3.168672 | 1.036365 |
if not self._http_host:
self._choose_host()
last_batch_time = datetime.datetime.utcnow()
while not (self._is_terminated.is_set() and self._event_queue.empty()):
batch = None
try:
if not self._is_connected.is_set():
self._verify_connection()
batch = self._get_batch(last_batch_time)
self._send_batch(batch)
except exceptions.ConnectionFailed: # Failed to connect to server
time.sleep(consts.NO_CONNECTION_SLEEP_TIME)
self._is_connected.clear()
except exceptions.EmptyBatch: # No events in queue, go to sleep
time.sleep(consts.EMPTY_BATCH_SLEEP_TIME)
except exceptions.SendFailed as ex: # Failed to send an event batch
self._notify(ex.severity, str(ex))
self._is_connected.clear()
if batch: # Failed after pulling a batch from the queue
self._enqueue_batch(batch)
logger.debug(consts.LOG_MSG_ENQUEUED_FAILED_BATCH,
len(batch))
else: # We sent a batch successfully, server is reachable
self._is_connected.set()
finally: # Advance last batch time
last_batch_time = datetime.datetime.utcnow() | def _sender_main(self) | Runs on a pysdk_sender_thread and handles sending events to the Alooma
server. Events are sent every <self._batch_interval> seconds or whenever
batch size reaches <self._batch_size> | 3.926685 | 3.754879 | 1.045755 |
try:
self._event_queue.put_nowait(event)
if self._notified_buffer_full: # Non-blocking and buffer was full
self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FREED)
self._notified_buffer_full = False
except py2to3.queue.Full:
if block: # Blocking - should block until space is freed
self._event_queue.put(event)
elif not self._notified_buffer_full: # Don't block, msg not emitted
self._notify(logging.WARNING, consts.LOG_MSG_BUFFER_FULL)
self._notified_buffer_full = True
return False
return True | def enqueue_event(self, event, block) | Enqueues an event in the buffer to be sent to the Alooma server
:param event: A dict representing a formatted event to be sent by the
sender
:param block: Whether or not we should block if the event buffer is full
:return: True if the event was enqueued successfully, else False | 4.241601 | 4.113111 | 1.031239 |
while True:
if self._exceeding_event: # An event was omitted from last batch
event = self._exceeding_event
self._exceeding_event = None
else: # No omitted event, get an event from the queue
event = self._event_queue.get(block, timeout)
event_size = len(event)
# If the event is bigger than the permitted batch size, ignore it
# The ( - 2 ) accounts for the parentheses enclosing the batch
if event_size - 2 >= self._batch_max_size:
self._notify(logging.WARNING,
consts.LOG_MSG_OMITTED_OVERSIZED_EVENT
% event_size)
else: # Event is of valid size, return it
return event | def __get_event(self, block=True, timeout=1) | Retrieves an event. If self._exceeding_event is not None, it'll be
returned. Otherwise, an event is dequeued from the event buffer. If
The event which was retrieved is bigger than the permitted batch size,
it'll be omitted, and the next event in the event buffer is returned | 5.963173 | 4.447302 | 1.340852 |
links = LinkNode()
# Generate (path, method, view) given (path, method, callback).
paths = []
view_endpoints = []
for path, method, callback in self.endpoints:
view = self.create_view(callback, method, request)
if getattr(view, 'exclude_from_schema', False):
continue
path = self.coerce_path(path, method, view)
paths.append(path)
view_endpoints.append((path, method, view))
# Only generate the path prefix for paths that will be included
if not paths:
return None
prefix = self.determine_path_prefix(paths)
for path, method, view in view_endpoints:
if not self.has_view_permissions(path, method, view):
continue
link = self.get_link(path, method, view, version=getattr(request, 'version', None))
subpath = path[len(prefix):]
keys = self.get_keys(subpath, method, view)
try:
insert_into(links, keys, link)
except Exception:
continue
return links | def get_links(self, request=None) | Return a dictionary containing all the links that should be
included in the API schema. | 3.67306 | 3.648351 | 1.006773 |
model = getattr(getattr(view, 'queryset', None), 'model', None)
fields = []
for variable in uritemplate.variables(path):
if variable == 'version':
continue
title = ''
description = ''
schema_cls = coreschema.String
kwargs = {}
if model is not None:
# Attempt to infer a field description if possible.
try:
model_field = model._meta.get_field(variable)
except:
model_field = None
if model_field is not None and model_field.verbose_name:
title = force_text(model_field.verbose_name)
if model_field is not None and model_field.help_text:
description = force_text(model_field.help_text)
elif model_field is not None and model_field.primary_key:
description = get_pk_description(model, model_field)
if hasattr(view, 'lookup_value_regex') and view.lookup_field == variable:
kwargs['pattern'] = view.lookup_value_regex
elif isinstance(model_field, models.AutoField):
schema_cls = coreschema.Integer
field = Field(
name=variable,
location='path',
required=True,
schema=schema_cls(title=title, description=description, **kwargs)
)
fields.append(field)
return fields | def get_path_fields(self, path, method, view) | Return a list of `coreapi.Field` instances corresponding to any
templated path variables. | 2.425537 | 2.263759 | 1.071464 |
if hasattr(method_func, 'request_serializer'):
return getattr(method_func, 'request_serializer')
if hasattr(view, 'serializer_class'):
return getattr(view, 'serializer_class')
if hasattr(view, 'get_serializer_class'):
return getattr(view, 'get_serializer_class')()
return None | def get_serializer_class(self, view, method_func) | Try to get the serializer class from view method.
If view method don't have request serializer, fallback to serializer_class on view class | 2.032566 | 1.760705 | 1.154405 |
title = force_text(field.label) if field.label else ''
description = force_text(field.help_text) if field.help_text else ''
# since we can't really inspect dictfield and jsonfield, at least display object as type
# instead of string
if isinstance(field, (serializers.DictField, serializers.JSONField)):
return coreschema.Object(
properties={},
title=title,
description=description
) | def fallback_schema_from_field(self, field) | Fallback schema for field that isn't inspected properly by DRF
and probably won't land in upstream canon due to its hacky nature only for doc purposes | 5.546886 | 4.950449 | 1.120481 |
if method in ('PUT', 'PATCH', 'POST'):
location = 'form'
else:
location = 'query'
serializer_class = self.get_serializer_class(view, method_func)
if not serializer_class:
return []
serializer = serializer_class()
if isinstance(serializer, serializers.ListSerializer):
return [
Field(
name='data',
location=location,
required=True,
schema=coreschema.Array()
)
]
if not isinstance(serializer, serializers.Serializer):
return []
fields = []
for field in serializer.fields.values():
if field.read_only or isinstance(field, serializers.HiddenField):
continue
required = field.required and method != 'PATCH'
# if the attribute ('help_text') of this field is a lazy translation object, force it to generate a string
description = str(field.help_text) if isinstance(field.help_text, Promise) else field.help_text
fallback_schema = self.fallback_schema_from_field(field)
field = Field(
name=field.field_name,
location=location,
required=required,
schema=fallback_schema if fallback_schema else field_to_schema(field),
description=description,
)
fields.append(field)
return fields | def get_serializer_fields(self, path, method, view, version=None, method_func=None) | Return a list of `coreapi.Field` instances corresponding to any
request body input, as determined by the serializer class. | 2.771935 | 2.622067 | 1.057156 |
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance | def update(self, instance, validated_data) | Update and return an existing `Snippet` instance, given the validated data. | 1.574335 | 1.364772 | 1.153552 |
parsed_url = urlparse.urlparse(document.url)
swagger = OrderedDict()
swagger['swagger'] = '2.0'
swagger['info'] = OrderedDict()
swagger['info']['title'] = document.title
swagger['info']['description'] = document.description
swagger['info']['version'] = document.version
if parsed_url.netloc:
swagger['host'] = parsed_url.netloc
if parsed_url.scheme:
swagger['schemes'] = [parsed_url.scheme]
swagger['paths'] = _get_paths_object(document)
return swagger | def _generate_openapi_object(document) | Generates root of the Swagger spec. | 1.884472 | 1.789119 | 1.053296 |
template = link.response_schema
template.update({'description': 'Success'})
res = {200: template}
res.update(link.error_status_codes)
return res | def _get_responses(link) | Returns an OpenApi-compliant response | 8.321598 | 7.390176 | 1.126035 |
parameters = []
properties = {}
required = []
for field in link.fields:
parser = OpenApiFieldParser(link, field)
if parser.location == 'form':
if encoding in ('multipart/form-data', 'application/x-www-form-urlencoded'):
# 'formData' in swagger MUST be one of these media types.
parameters.append(parser.as_parameter())
else:
# Expand coreapi fields with location='form' into a single swagger
# parameter, with a schema containing multiple properties.
properties[field.name] = parser.as_schema_property()
if field.required:
required.append(field.name)
elif parser.location == 'body':
parameters.append(parser.as_body_parameter(encoding))
else:
parameters.append(parser.as_parameter())
if properties:
parameter = {
'name': 'data',
'in': 'body',
'schema': {
'type': 'object',
'properties': properties
}
}
if required:
parameter['schema']['required'] = required
parameters.append(parameter)
return parameters | def _get_parameters(link, encoding) | Generates Swagger Parameter Item object. | 3.326411 | 3.11317 | 1.068496 |
kwargs.update({
'client_id': self.client_id,
'response_type': 'code',
})
if scope is not None:
kwargs['scope'] = scope
if state is not None:
kwargs['state'] = state
if redirect_uri is not None:
kwargs['redirect_uri'] = redirect_uri
return '%s?%s' % (self.auth_endpoint, urlencode(kwargs)) | def auth_uri(self, redirect_uri=None, scope=None, scope_delim=None,
state=None, **kwargs) | Builds the auth URI for the authorization endpoint
:param scope: (optional) The `scope` parameter to pass for
authorization. The format should match that expected by
the provider (i.e. Facebook expects comma-delimited,
while Google expects space-delimited)
:param state: (optional) The `state` parameter to pass for
authorization. If the provider follows the OAuth 2.0
spec, this will be returned to your `redirect_uri` after
authorization. Generally used for CSRF protection.
:param **kwargs: Any other querystring parameters to be passed to the
provider. | 1.56978 | 1.670762 | 0.939559 |
kwargs = kwargs and kwargs or {}
parser = parser or _default_parser
kwargs.update({
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'grant_type' in kwargs and kwargs['grant_type'] or \
'authorization_code'
})
if redirect_uri is not None:
kwargs.update({'redirect_uri': redirect_uri})
# TODO: maybe raise an exception here if status code isn't 200?
msg = urlopen(self.token_endpoint, urlencode(kwargs).encode(
'utf-8'))
data = parser(msg.read().decode(msg.info().get_content_charset() or
'utf-8'))
for key in data:
setattr(self, key, data[key])
# expires_in is RFC-compliant. if anything else is used by the
# provider, token_expires must be set manually
if hasattr(self, 'expires_in'):
try:
# python3 dosn't support long
seconds = long(self.expires_in)
except:
seconds = int(self.expires_in)
self.token_expires = mktime((datetime.utcnow() + timedelta(
seconds=seconds)).timetuple()) | def request_token(self, parser=None, redirect_uri=None, **kwargs) | Request an access token from the token endpoint.
This is largely a helper method and expects the client code to
understand what the server expects. Anything that's passed into
``**kwargs`` will be sent (``urlencode``d) to the endpoint. Client
secret and client ID are automatically included, so are not required
as kwargs. For example::
# if requesting access token from auth flow:
{
'code': rval_from_auth,
}
# if refreshing access token:
{
'refresh_token': stored_refresh_token,
'grant_type': 'refresh_token',
}
:param parser: Callback to deal with returned data. Not all providers
use JSON. | 3.214971 | 3.256207 | 0.987336 |
method = 'GET' if not data else 'POST'
req = self.token_transport('{0}{1}'.format(self.resource_endpoint,
url), self.access_token, data=data, method=method, headers=headers)
resp = urlopen(req)
data = resp.read()
try:
return parser(data.decode(resp.info().get_content_charset() or
'utf-8'))
# try to decode it first using either the content charset, falling
# back to utf-8
except UnicodeDecodeError:
# if we've gotten a decoder error, the calling code better know how
# to deal with it. some providers (i.e. stackexchange) like to gzip
# their responses, so this allows the client code to handle it
# directly.
return parser(data) | def request(self, url, method=None, data=None, headers=None, parser=None):
assert self.access_token is not None
parser = parser or loads
if not method | Request user data from the resource endpoint
:param url: The path to the resource and querystring if required
:param method: HTTP method. Defaults to ``GET`` unless data is not None
in which case it defaults to ``POST``
:param data: Data to be POSTed to the resource endpoint
:param parser: Parser callback to deal with the returned data. Defaults
to ``json.loads`.` | 6.358964 | 6.255289 | 1.016574 |
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o | def build_srcdict(gta, prop) | Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property | 6.537753 | 6.319592 | 1.034521 |
o = []
for s in gta.roi.sources:
o += [s.name]
return sorted(o) | def get_src_names(gta) | Build and return a list of source name
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
Returns
-------
l : list
Names of the source | 6.482912 | 7.785829 | 0.832655 |
if is_null(maskname):
maskname = None
gta.set_weights_map(maskname)
for name in gta.like.sourceNames():
gta._init_source(name)
gta._update_roi()
return build_srcdict(gta, 'npred_wt') | def set_wts_get_npred_wt(gta, maskname) | Set a weights file and get the weighted npred for all the sources
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
maskname : str
The path to the file with the mask
Returns
-------
odict : dict
Dictionary mapping from source name to weighted npred | 8.738318 | 8.848705 | 0.987525 |
gta.write_roi(key, save_model_map=True, make_plots=make_plots, save_weight_map=do_weighted)
if make_plots:
o = gta.residmap(key)
plotter.make_residmap_plots(o, gta.roi)
if do_weighted:
gta.make_plots("%s_wt"%key, weighted=True)
o = gta.residmap("%s_wt"%key, use_weights=True)
plotter.make_residmap_plots(o, gta.roi) | def snapshot(gta, plotter, key, do_weighted=True, make_plots=True) | Take a snapshot of the ROI
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
plotter : `fermipy.plotting.AnalysisPlotter`
The object that makes the plots
key : str
Key for this snapshot, used to create filenames
do_weighted : bool
If True, include weighted version of outputs
make_plots : bool
If True, make plots | 3.994908 | 4.032674 | 0.990635 |
o = []
for s in src_list:
npred_new = npred_dict_new[s]
if npred_new < npred_threshold:
o += [s]
continue
if npred_dict_old is None:
npred_old = 0.
else:
npred_old = npred_dict_old[s]
frac = npred_old / npred_new
if frac > frac_threshold:
o += [s]
return o | def get_unchanged(src_list, npred_dict_new,
npred_dict_old,
npred_threshold=1e4,
frac_threshold=0.9) | Compare two dictionarys of npreds, and get the list of sources
than have changed less that set thresholds
Parameters
----------
src_list : list
List of sources to examine
npred_dict_new : dict
Dictionary mapping source name to npred for the current weights file
npred_dict_old : dict
Dictionary mapping source name to npred for the previous weights file
npred_threshold : float
Minimum value of npred above which to consider sources changed
frac_threshold : float
Value of npred_old / npred_new above which to consider sources unchanged
Returns
-------
l : list
Names of 'unchanged' sources | 1.916255 | 2.146005 | 0.892941 |
args = self._parser.parse_args(argv)
if not HAVE_ST:
raise RuntimeError(
"Trying to run fermipy analysis, but don't have ST")
if args.load_baseline:
gta = GTAnalysis.create(args.roi_baseline,
args.config)
else:
gta = GTAnalysis(args.config,
logging={'verbosity': 3},
fileio={'workdir_regex': '\.xml$|\.npy$'})
gta.setup()
if is_not_null(args.input_pars):
gta.load_parameters_from_yaml(args.input_pars)
gta.write_roi(args.roi_baseline,
save_model_map=True,
save_weight_map=True,
make_plots=args.make_plots)
src_list = get_src_names(gta)
plotter = plotting.AnalysisPlotter(gta.config['plotting'],
fileio=gta.config['fileio'],
logging=gta.config['logging'])
if is_null(args.fit_strategy):
return
fit_strategy = load_yaml(args.fit_strategy)
npred_current = None
npred_prev = None
plots_only = False
for fit_stage in fit_strategy:
mask = fit_stage.get('mask', None)
npred_threshold = fit_stage.get('npred_threshold', 1.0e4)
frac_threshold = fit_stage.get('frac_threshold', 0.5)
npred_frac = fit_stage.get('npred_frac', 0.9999)
if plots_only:
gta.load_roi("%s.npy" % fit_stage['key'])
npred_current = set_wts_get_npred_wt(gta, mask)
skip_list_region = get_unchanged(src_list,
npred_current,
npred_prev,
frac_threshold=frac_threshold)
else:
npred_current = set_wts_get_npred_wt(gta, mask)
skip_list_region = get_unchanged(src_list,
npred_current,
npred_prev,
frac_threshold=frac_threshold)
gta.optimize(npred_frac=npred_frac,
npred_threshold=npred_threshold,
skip=skip_list_region)
snapshot(gta, plotter, fit_stage['key'], make_plots=args.make_plots)
npred_prev = npred_current
npred_current = build_srcdict(gta, 'npred_wt') | def run_analysis(self, argv) | Run this analysis | 3.977385 | 3.976118 | 1.000319 |
job_configs = {}
# Tweak the batch job args
try:
self._interface._lsf_args.update(dict(n=2))
self._interface._lsf_args.update(dict(R='\"select[rhel60&&!fell] -R span[hosts=1]\"'))
except AttributeError:
pass
models = load_yaml(args['models'])
base_config = dict(fit_strategy=args['fit_strategy'],
input_pars=args['input_pars'],
load_baseline=args['load_baseline'],
make_plots=args['make_plots'])
for modelkey in models:
config_file = os.path.join('analysis', 'model_%s' % modelkey,
args['config'])
#roi_baseline = os.path.join('analysis', 'model_%s' % modelkey,
# args['roi_baseline'])
roi_baseline = args['roi_baseline']
logfile = os.path.join('analysis', 'model_%s' % modelkey,
'fit_%s.log' % modelkey)
job_config = base_config.copy()
job_config.update(dict(config=config_file,
roi_baseline=roi_baseline,
logfile=logfile))
job_configs[modelkey] = job_config
return job_configs | def build_job_configs(self, args) | Hook to build job configurations | 4.179306 | 4.182489 | 0.999239 |
usage = "usage: %(prog)s [options] "
description = "Merge a set of Fermi-LAT files."
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('-o', '--output', default=None, type=str,
help='Output file.')
parser.add_argument('--clobber', default=False, action='store_true',
help='Overwrite output file.')
parser.add_argument('files', nargs='+', default=None,
help='List of input files.')
args = parser.parse_args()
proj, f, hdu = fits_utils.read_projection_from_fits(args.files[0])
if isinstance(proj, WCS):
hdulist = merge_utils.merge_wcs_counts_cubes(args.files)
elif isinstance(proj, HPX):
hdulist = merge_utils.merge_hpx_counts_cubes(args.files)
else:
raise TypeError("Could not read projection from file %s" %
args.files[0])
if args.output:
hdulist.writeto(args.output, clobber=args.clobber, output_verify='silentfix') | def main() | Main function for command line usage | 2.800778 | 2.770768 | 1.010831 |
native_default_args = dict(max_jobs=500,
time_per_cycle=15,
jobs_per_cycle=20,
max_job_age=90,
no_batch=False)
return native_default_args.copy() | def get_native_default_args() | Get the correct set of batch jobs arguments. | 5.391871 | 4.854611 | 1.11067 |
full_sub_dict = job_config.copy()
full_command = "%s >& %s" % (
link.command_template().format(**full_sub_dict), logfile)
logdir = os.path.dirname(logfile)
if self._dry_run:
sys.stdout.write("%s\n" % full_command)
else:
try:
os.makedirs(logdir)
except OSError:
pass
os.system(full_command) | def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout) | Send a single job to be executed
Parameters
----------
link : `fermipy.jobs.chain.Link`
The link used to invoke the command we are running
key : str
A string that identifies this particular instance of the job
job_config : dict
A dictionrary with the arguments for the job. Used with
the self._command_template job template
logfile : str
The logfile for this job, may be used to check for success/ failure | 3.409976 | 3.713017 | 0.918384 |
levels_dict = {0: 50,
1: 40,
2: 30,
3: 20,
4: 10}
if not isinstance(level, int):
level = int(level)
if level > 4:
level = 4
return levels_dict[level] | def log_level(level) | This is a function that returns a python like
level from a HEASOFT like level. | 2.371691 | 2.306114 | 1.028436 |
if config is None:
configpath = os.path.join(fermipy.PACKAGE_ROOT, 'config',
'logging.yaml')
with open(configpath, 'r') as f:
config = yaml.load(f)
# Update configuration
if logfile:
for name, h in config['handlers'].items():
if 'file_handler' in name:
config['handlers'][name]['filename'] = logfile
logging.config.dictConfig(config) | def setup(config=None, logfile=None) | This method sets up the default configuration of the
logger. Once this method is called all subsequent instances
Logger instances will inherit this configuration. | 3.518562 | 3.59154 | 0.979681 |
# logging.config.dictConfig({
# 'version': 1,
# 'disable_existing_loggers': False})
logger = logging.getLogger(name)
# Don't propagate to root logger
logger.propagate = False
logger.setLevel(logging.DEBUG)
datefmt = '%Y-%m-%d %H:%M:%S'
format_stream = ('%(asctime)s %(levelname)-8s'
'%(name)s.%(funcName)s(): %(message)s')
format_file = ('%(asctime)s %(levelname)-8s'
'%(name)s.%(funcName)s(): %(message)s')
# format_file = ('%(asctime)s %(levelname)-8s '
# '%(name)s.%(funcName)s() '
# '[%(filename)s:%(lineno)d]: %(message)s')
if not logger.handlers:
# Add a file handler
if logfile is not None:
logfile = logfile.replace('.log', '') + '.log'
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter(format_file, datefmt))
logger.addHandler(fh)
# Add a stream handler
ch = logging.StreamHandler()
ch.setLevel(loglevel)
ch.setFormatter(logging.Formatter(format_stream, datefmt))
logger.addHandler(ch)
else:
logger.handlers[-1].setLevel(loglevel)
return logger | def configure(name, logfile, loglevel=logging.DEBUG) | Create a python logger instance and configure it.
Parameters
----------
name : str
Logger name.
logfile : str
Path to the log file.
loglevel : int
Default log level for STDOUT. | 1.872477 | 1.913004 | 0.978815 |
out_dict = convert_option_dict_to_dict(defaults)
for key in defaults.keys():
mapped_val = args.get(key, None)
if mapped_val is None:
pass
else:
out_dict[key] = mapped_val
return out_dict | def extract_arguments(args, defaults) | Extract a set of arguments from a large dictionary
Parameters
----------
args : dict
Dictionary with the arguments values to use
defaults : dict
Dictionary with all the argument to extract, and default values for each
Returns
-------
out_dict : dict
A dictionary with only the extracted arguments | 3.484905 | 4.126857 | 0.844445 |
found = []
missing = []
none_count = 0
for fname in filelist:
if fname is None:
none_count += 1
continue
if fname[0] == '@':
fname = fname[1:]
if os.path.exists(fname):
found.append(fname)
continue
if os.path.exists(fname + '.gz'):
found.append(fname)
continue
if file_stage_manager is not None:
fname = file_stage_manager.get_scratch_path(fname)
if os.path.exists(fname):
found.append(fname)
continue
missing.append(fname)
if return_found and return_missing:
return found, missing
elif return_found:
return found
elif return_missing:
return missing
return None | def check_files(filelist,
file_stage_manager=None,
return_found=True,
return_missing=True) | Check that all files in a list exist
Parameters
----------
filelist : list
The list of files we are checking for.
file_stage_manager : `fermipy.jobs.file_archive.FileStageManager`
A object that maps files to scratch space if needed.
return_found : list
A list with the paths of the files that were found.
return_missing : list
A list with the paths of the files that were missing.
Returns
-------
found : list
List of the found files, if requested, otherwise `None`
missing : list
List of the missing files, if requested, otherwise `None` | 1.948656 | 2.060169 | 0.945872 |
default, helpstr, typeinfo = info
if dest == 'args':
parser.add_argument('args', nargs='+', default=None, help=helpstr)
elif typeinfo == list:
parser.add_argument('--%s' % dest, action='append', help=helpstr)
elif typeinfo == bool:
parser.add_argument('--%s' % dest, action='store_true', help=helpstr)
else:
parser.add_argument('--%s' % dest, action='store', type=typeinfo,
default=default, help=helpstr) | def add_argument(parser, dest, info) | Add an argument to an `argparse.ArgumentParser` object
Parameters
----------
parser : `argparse.ArgumentParser`
The parser in question
dest : str
The destination for the argument
info : `tuple`
The information associated with the argument in question. | 2.103787 | 2.593137 | 0.81129 |
ret_dict = {}
for key, value in input_dict.items():
ret_dict[key] = convert_value_to_option_tuple(value)
return ret_dict | def convert_dict_to_option_dict(input_dict) | Convert a simple key-value dictionary to a dictionary of options tuples | 2.343852 | 2.108247 | 1.111754 |
ret_dict = {}
for key, value in option_dict.items():
if is_null(value):
ret_dict[key] = None
elif isinstance(value, tuple):
ret_dict[key] = value[0]
else:
ret_dict[key] = value
return ret_dict | def convert_option_dict_to_dict(option_dict) | Convert a dictionary of options tuples to a simple key-value dictionary | 2.000728 | 1.960733 | 1.020398 |
ret = {}
for key in keys:
ret[key] = orig_dict.get(key, default)
return ret | def reduce_by_keys(orig_dict, keys, default=None) | Reduce a dictionary by selecting a set of keys | 2.354499 | 2.363675 | 0.996118 |
s = "\nParameters\n"
s += "----------\n\n"
for key, opt in options.items():
s += "%s : %s\n %s [%s]\n" % (key, str(opt[2]),
str(opt[1]), str(opt[0]))
return s | def construct_docstring(options) | Construct a docstring for a set of options | 3.534771 | 3.432227 | 1.029877 |
if cls.appname in LinkFactory._class_dict:
return
LinkFactory.register(cls.appname, cls) | def register_class(cls) | Regsiter this class in the `LinkFactory` | 9.655407 | 6.058829 | 1.593609 |
for key, val in self._options.items():
add_argument(parser, key, val) | def _fill_argparser(self, parser) | Fill an `argparser.ArgumentParser` with the options from this chain | 5.455603 | 4.757693 | 1.146691 |
if self._parser is None:
raise ValueError('Link was not given a parser on initialization')
args = self._parser.parse_args(argv)
self.update_args(args.__dict__)
return args | def _run_argparser(self, argv) | Initialize a link with a set of arguments using an `argparser.ArgumentParser` | 5.43572 | 4.042531 | 1.344633 |
self.files.file_dict.clear()
self.files.latch_file_info(self.args) | def _latch_file_info(self) | Internal function to update the dictionaries
keeping track of input and output files | 11.039207 | 8.753431 | 1.261129 |
sub_files.file_dict.clear()
for job_details in self.jobs.values():
if job_details.file_dict is not None:
sub_files.update(job_details.file_dict)
if job_details.sub_file_dict is not None:
sub_files.update(job_details.sub_file_dict) | def _update_sub_file_dict(self, sub_files) | Update a file dict with information from self | 2.569813 | 2.417176 | 1.063147 |
input_missing = self.check_input_files(return_found=False)
if input_missing:
if dry_run:
stream.write("Input files are missing: %s: %i\n" %
(self.linkname, len(input_missing)))
else:
print (self.args)
raise OSError("Input files are missing: %s" % input_missing)
output_found, output_missing = self.check_output_files()
if output_found and not output_missing:
stream.write("All output files for %s already exist: %i %i %i\n" %
(self.linkname, len(output_found),
len(output_missing), len(self.files.output_files)))
if dry_run:
pass
else:
pass
# return False
return True | def _pre_run_checks(self, stream=sys.stdout, dry_run=False) | Do some checks before running this link
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this return False.
Parameters
-----------
stream : `file`
Stream that this function will print to,
Must have 'write' function
dry_run : bool
Print command but do not run it
Returns
-------
status : bool
True if it is ok to proceed with running the link | 3.520297 | 3.315562 | 1.06175 |
self.update_args(job_config)
job_details = JobDetails(jobname=self.full_linkname,
jobkey=key,
appname=self.appname,
logfile=logfile,
job_config=job_config,
timestamp=get_timestamp(),
file_dict=copy.deepcopy(self.files),
sub_file_dict=copy.deepcopy(self.sub_files),
status=status)
return job_details | def _create_job_details(self, key, job_config, logfile, status) | Create a `JobDetails` for a single job
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguements passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
Object with the details about a particular job. | 3.993 | 4.697445 | 0.850036 |
if self._file_stage is None:
return ({}, {})
input_files = file_dict.input_files_to_stage
output_files = file_dict.output_files_to_stage
input_file_mapping = self._file_stage.map_files(input_files)
output_file_mapping = self._file_stage.map_files(output_files)
self._update_file_args(input_file_mapping)
self._update_file_args(output_file_mapping)
return input_file_mapping, output_file_mapping | def _map_scratch_files(self, file_dict) | Build and return the mapping for copying files to and from scratch area | 2.372843 | 2.233504 | 1.062385 |
for key, value in self.args.items():
new_value = file_mapping.get(value, value)
if new_value != value:
self.args[key] = new_value | def _update_file_args(self, file_mapping) | Adjust the arguments to deal with staging files to the scratch area | 2.48436 | 2.325173 | 1.068462 |
# print ("Staging input ", file_mapping)
if self._file_stage is None:
return
self._file_stage.copy_to_scratch(file_mapping, dry_run) | def _stage_input_files(self, file_mapping, dry_run=True) | Stage the input files to the scratch area and adjust the arguments accordingly | 6.824514 | 5.424041 | 1.258197 |
# print ("Staging output ", file_mapping)
if self._file_stage is None:
return
self._file_stage.copy_from_scratch(file_mapping, dry_run) | def _stage_output_files(self, file_mapping, dry_run=True) | Stage the output files to the scratch area and adjust the arguments accordingly | 6.522479 | 5.55356 | 1.174468 |
check_ok = self._pre_run_checks(stream, dry_run)
if not check_ok:
return
if self._file_stage is not None:
input_file_mapping, output_file_mapping = self._map_scratch_files(
self.files)
if stage_files:
self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)
self._file_stage.make_scratch_dirs(
output_file_mapping, dry_run)
self._stage_input_files(input_file_mapping, dry_run)
return_code = self.run_command(stream, dry_run)
print ("return code ", return_code)
if return_code == 0:
status = JobStatus.done
if self._file_stage is not None and stage_files:
self._stage_output_files(output_file_mapping, dry_run)
self._finalize(dry_run)
else:
if resubmit_failed:
print ("Not resubmitting failed link %s"%(self.linkname))
status = JobStatus.failed
if dry_run:
return
self._write_status_to_log(return_code, stream)
self._set_status_self(status=status) | def _run_link(self, stream=sys.stdout, dry_run=False,
stage_files=True, resubmit_failed=False) | Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs. | 3.023846 | 3.073352 | 0.983892 |
job_details = self._create_job_details(
key, job_config, logfile, status)
self.jobs[job_details.fullkey] = job_details
return job_details | def _register_job(self, key, job_config, logfile, status) | Create a `JobDetails` for this link
and add it to the self.jobs dictionary.
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguments passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
Object with the details about this particular job. | 3.493706 | 3.31664 | 1.053387 |
fullkey = JobDetails.make_fullkey(self.full_linkname, key)
if fullkey in self.jobs:
job_details = self.jobs[fullkey]
job_details.status = status
else:
job_details = self._register_job(key, self.args, logfile, status) | def _register_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown) | Runs this link, captures output to logfile,
and records the job in self.jobs | 3.724749 | 3.610202 | 1.031729 |
self._register_self(logfile, key, status)
if self._job_archive is None:
return
self._job_archive.register_jobs(self.get_jobs()) | def _archive_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown) | Write info about a job run by this `Link` to the job archive | 4.884939 | 4.3906 | 1.11259 |
fullkey = JobDetails.make_fullkey(self.full_linkname, key)
if fullkey in self.jobs:
self.jobs[fullkey].status = status
if self._job_archive:
self._job_archive.register_job(self.jobs[fullkey])
else:
self._register_self('dummy.log', key, status) | def _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown) | Set the status of this job, both in self.jobs and
in the `JobArchive` if it is present. | 5.309305 | 4.62196 | 1.148713 |
stream.write("Timestamp: %i\n" % get_timestamp())
if return_code == 0:
stream.write("%s\n" % self._interface.string_successful)
else:
stream.write("%s %i\n" %
(self._interface.string_exited, return_code)) | def _write_status_to_log(self, return_code, stream=sys.stdout) | Write the status of this job to a log stream.
This is used to check on job completion. | 4.504851 | 4.491284 | 1.003021 |
for rmfile in self.files.temp_files:
if dry_run:
print("remove %s" % rmfile)
else:
os.remove(rmfile)
for gzfile in self.files.gzip_files:
if dry_run:
# print ("gzip %s" % gzfile)
pass
else:
os.system('gzip -9 %s' % gzfile) | def _finalize(self, dry_run=False) | Remove / compress files as requested | 3.30222 | 2.81716 | 1.172181 |
self.args = extract_arguments(override_args, self.args)
self._latch_file_info()
scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
self._file_stage = FileStageManager(scratch_dir, '.') | def update_args(self, override_args) | Update the argument used to invoke the application
Note that this will also update the dictionary of input and output files.
Parameters
-----------
override_args : dict
Dictionary of arguments to override the current values | 7.257832 | 8.178479 | 0.88743 |
failed_jobs = {}
for job_key, job_details in self.jobs.items():
if job_details.status == JobStatus.failed:
failed_jobs[job_key] = job_details
elif job_details.status == JobStatus.partial_failed:
failed_jobs[job_key] = job_details
elif fail_running and job_details.status == JobStatus.running:
failed_jobs[job_key] = job_details
elif fail_pending and job_details.status <= JobStatus.pending:
failed_jobs[job_key] = job_details
return failed_jobs | def get_failed_jobs(self, fail_running=False, fail_pending=False) | Return a dictionary with the subset of jobs that are marked as failed
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
failed_jobs : dict
Dictionary mapping from job key to `JobDetails` for the failed jobs. | 1.740408 | 1.740131 | 1.000159 |
if key in self.jobs:
status = self.jobs[key].status
if status in [JobStatus.unknown, JobStatus.ready,
JobStatus.pending, JobStatus.running] or force_check:
status = self._interface.check_job(self.jobs[key])
if status == JobStatus.running and fail_running:
status = JobStatus.failed
if status == JobStatus.pending and fail_pending:
status = JobStatus.failed
self.jobs[key].status = status
if self._job_archive:
self._job_archive.register_job(self.jobs[key])
else:
status = JobStatus.no_job
return status | def check_job_status(self, key=JobDetails.topkey,
fail_running=False,
fail_pending=False,
force_check=False) | Check the status of a particular job
By default this checks the status of the top-level job, but
can by made to drill into the sub-jobs.
Parameters
----------
key : str
Key associated to the job in question
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
force_check : `bool`
Drill into status of individual jobs` instead of using top level job only
Returns
-------
status : `JobStatus`
Job status flag | 2.429678 | 2.56535 | 0.947114 |
n_failed = 0
n_partial = 0
n_passed = 0
n_total = 0
for job_details in self.jobs.values():
n_total += 1
if job_details.status in [JobStatus.failed, JobStatus.partial_failed]:
n_failed += 1
elif fail_running and job_details.status == JobStatus.running:
n_failed += 1
elif fail_pending and job_details.status == JobStatus.pending:
n_failed += 1
elif job_details.status == JobStatus.done:
n_passed += 1
if n_failed > 0:
return JobStatus.failed
elif n_passed == n_total:
return JobStatus.done
elif n_passed > 0:
return JobStatus.running
return JobStatus.pending | def check_jobs_status(self,
fail_running=False,
fail_pending=False) | Check the status of all the jobs run from this link
and return a status flag that summarizes that.
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
status : `JobStatus`
Job status flag that summarizes the status of all the jobs, | 1.831386 | 1.920544 | 0.953577 |
if recursive:
ret_dict = self.jobs.copy()
return ret_dict
return self.jobs | def get_jobs(self, recursive=True) | Return a dictionary with all the jobs
For sub-classes, if recursive is True this will include jobs
from any internal `Link` | 6.31751 | 8.237143 | 0.766954 |
all_input_files = self.files.chain_input_files + self.sub_files.chain_input_files
return check_files(all_input_files, self._file_stage,
return_found, return_missing) | def check_input_files(self,
return_found=True,
return_missing=True) | Check if input files exist.
Parameters
----------
return_found : list
A list with the paths of the files that were found.
return_missing : list
A list with the paths of the files that were missing.
Returns
-------
found : list
List of the found files, if requested, otherwise `None`
missing : list
List of the missing files, if requested, otherwise `None` | 5.450898 | 8.255527 | 0.660273 |
all_output_files = self.files.chain_output_files + \
self.sub_files.chain_output_files
return check_files(all_output_files, self._file_stage,
return_found, return_missing) | def check_output_files(self,
return_found=True,
return_missing=True) | Check if output files exist.
Parameters
----------
return_found : list
A list with the paths of the files that were found.
return_missing : list
A list with the paths of the files that were missing.
Returns
-------
found : list
List of the found files, if requested, otherwise `None`
missing : list
List of the missing files, if requested, otherwise `None` | 5.668375 | 8.523614 | 0.66502 |
missing = self.check_input_files(return_found=False)
ret_dict = {}
for miss_file in missing:
ret_dict[miss_file] = [self.linkname]
return ret_dict | def missing_input_files(self) | Make and return a dictionary of the missing input files.
This returns a dictionary mapping
filepath to list of `Link` that use the file as input. | 6.175916 | 6.106465 | 1.011373 |
missing = self.check_output_files(return_found=False)
ret_dict = {}
for miss_file in missing:
ret_dict[miss_file] = [self.linkname]
return ret_dict | def missing_output_files(self) | Make and return a dictionary of the missing output files.
This returns a dictionary mapping
filepath to list of links that produce the file as output. | 6.051796 | 5.700616 | 1.061604 |
# FIXME, this isn't really great as it force you to have all the arguments
command_template = self.command_template()
format_dict = self.args.copy()
for key, value in format_dict.items():
# protect whitespace
if isinstance(value, list):
outstr = ""
if key == 'args':
outkey = ""
else:
outkey = "--%s "
for lval in value:
outstr += ' '
outstr += outkey
outstr += lval
format_dict[key] = '"%s"' % outstr
elif isinstance(value, str) and value.find(' ') >= 0 and key != 'args':
format_dict[key] = '"%s"' % value
elif value is None:
format_dict[key] = 'none'
command = command_template.format(**format_dict)
return command | def formatted_command(self) | Build and return the formatted command for this `Link`.
This is exactly the command as called from the Unix command line. | 3.867465 | 3.763455 | 1.027637 |
command = self.formatted_command()
if dry_run:
stream.write("%s\n" % command)
stream.flush()
return 0
proc = subprocess.Popen(command.split(),
stderr=stream,
stdout=stream)
proc.communicate()
return proc.returncode | def run_command(self, stream=sys.stdout, dry_run=False) | Runs the command for this link. This method can be overridden by
sub-classes to invoke a different command
Parameters
----------
stream : `file`
Stream that this `Link` will print to,
Must have 'write' function
dry_run : bool
Print command but do not run it
Returns
-------
code : int
Return code from sub-process | 2.771229 | 3.572237 | 0.775769 |
fullkey = JobDetails.make_fullkey(self.full_linkname)
job_details = self.jobs[fullkey]
odir = os.path.dirname(job_details.logfile)
try:
os.makedirs(odir)
except OSError:
pass
ostream = open(job_details.logfile, 'w')
self.run(ostream, dry_run, stage_files, resubmit_failed) | def run_with_log(self, dry_run=False, stage_files=True, resubmit_failed=False) | Runs this link with output sent to a pre-defined logfile
Parameters
-----------
dry_run : bool
Print command but do not run it.
stage_files : bool
Copy files to and from scratch staging area.
resubmit_failed : bool
Flag for sub-classes to resubmit failed jobs. | 4.026789 | 4.031415 | 0.998852 |
com_out = self.appname
arg_string = ""
flag_string = ""
# Loop over the key, value pairs in self.args
for key, val in self.args.items():
# Check if the value is set in self._options
# If so, get the value from there
if val is None:
opt_val = self._options[key][0]
else:
opt_val = val
opt_type = self._options[key][2]
if key == 'args':
# 'args' is special, pull it out and move it to the back
arg_string += ' {%s}' % key
elif opt_type is bool:
if opt_val:
flag_string += ' --%s' % (key)
elif opt_type is list:
if is_null(opt_val):
continue
elif isinstance(opt_val, str):
com_out += ' --%s %s' % (key, opt_val)
elif isinstance(opt_val, list):
for arg_val in opt_val:
com_out += ' --%s %s' % (key, arg_val)
else:
com_out += ' --%s {%s}' % (key, key)
com_out += flag_string
com_out += arg_string
return com_out | def command_template(self) | Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtainted by using
`self.command_template().format(**self.args)` | 2.986135 | 2.938549 | 1.016194 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.