code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
cipher = PKCS1_v1_5.new(private_key) aes_key_str = cipher.decrypt(b64decode(payload.get("aes_key")), sentinel=None) aes_key = json.loads(aes_key_str.decode("utf-8")) key = b64decode(aes_key.get("key")) iv = b64decode(aes_key.get("iv")) encrypted_magic_envelope = b64decode(payload.get("encrypted_magic_envelope")) encrypter = AES.new(key, AES.MODE_CBC, iv) content = encrypter.decrypt(encrypted_magic_envelope) return etree.fromstring(pkcs7_unpad(content))
def decrypt(payload, private_key)
Decrypt an encrypted JSON payload and return the Magic Envelope document inside.
2.450936
2.229134
1.099502
iv, key, encrypter = EncryptedPayload.get_iv_key_encrypter() aes_key_json = EncryptedPayload.get_aes_key_json(iv, key) cipher = PKCS1_v1_5.new(public_key) aes_key = b64encode(cipher.encrypt(aes_key_json)) padded_payload = pkcs7_pad(payload.encode("utf-8"), AES.block_size) encrypted_me = b64encode(encrypter.encrypt(padded_payload)) return { "aes_key": aes_key.decode("utf-8"), "encrypted_magic_envelope": encrypted_me.decode("utf8"), }
def encrypt(payload, public_key)
Encrypt a payload using an encrypted JSON wrapper. See: https://diaspora.github.io/diaspora_federation/federation/encryption.html :param payload: Payload document as a string. :param public_key: Public key of recipient as an RSA object. :return: Encrypted JSON wrapper as dict.
3.435795
3.803284
0.903376
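A round-trip sketch of the two records above, assuming PyCrypto's RSA module and that both helpers are callable as free functions in scope (in the source library they may live on an EncryptedPayload helper):
from Crypto.PublicKey import RSA  # PyCrypto, implied by the PKCS1_v1_5/AES usage above

rsa_key = RSA.generate(2048)
wrapper = encrypt("<me:env>...</me:env>", rsa_key.publickey())  # dict with 'aes_key' and 'encrypted_magic_envelope'
magic_envelope = decrypt(wrapper, rsa_key)  # lxml Element parsed from the decrypted Magic Envelope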
if self._template: return self._template template_json = self.read_template(self.args.tmplname) self._template = loads(template_json) return self._template
def template(self)
Returns the template in JSON form
6.014853
5.197089
1.157351
ct = self.converter(record.created) if datefmt: if datefmt == 'Z': t = time.strftime("%Y-%m-%dT%H:%M:%S", ct) s = "{}.{:03.0f}Z".format(t, record.msecs) else: s = time.strftime(datefmt, ct) else: t = time.strftime(self.default_time_format, ct) s = self.default_msec_format % (t, record.msecs) return s
def formatTime(self, record, datefmt=None)
Return the creation time of the specified LogRecord as formatted text. If ``datefmt`` (a string) is specified, it is used to format the creation time of the record. If ``datefmt`` is 'Z', the creation time of the record is rendered in the Zulu time format. Otherwise, the ISO 8601 format is used.
2.282152
2.258524
1.010462
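A usage sketch of the 'Z' branch; JsonFormatter is a hypothetical name for the formatter class the method above belongs to:
import logging

formatter = JsonFormatter(datefmt='Z')  # hypothetical class name
record = logging.LogRecord('app', logging.INFO, __file__, 1, 'hello', None, None)
formatter.formatTime(record, datefmt='Z')  # e.g. '2024-01-01T12:00:00.123Z'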
if not isinstance(mapping, dict): return for key, val in mapping.items(): if key in self._level_names: self._level_names[key] = val
def override_level_names(self, mapping)
Rename level names. :param mapping: Mapping of level names to new ones :type mapping: dict
2.504289
2.890417
0.866411
if not isinstance(colors, dict): return for key in self._color[True]: if key in colors: self._color[True][key] = colors[key]
def override_colors(self, colors)
Override default colors of elements. :param colors: New color values for the given elements :type colors: dict
3.660509
4.27531
0.856197
message = record.getMessage() if hasattr(record, 'prefix'): message = "{}{}".format((str(record.prefix) + ' ') if record.prefix else '', message) obj = { 'name': record.name, 'asctime': self.formatTime(record, self.datefmt), 'created': record.created, 'msecs': record.msecs, 'relativeCreated': record.relativeCreated, 'levelno': record.levelno, 'levelname': self._level_names[record.levelname], 'thread': record.thread, 'threadName': record.threadName, 'process': record.process, 'pathname': record.pathname, 'filename': record.filename, 'module': record.module, 'lineno': record.lineno, 'funcName': record.funcName, 'message': message, 'exception': record.exc_info[0].__name__ if record.exc_info else None, 'stacktrace': record.exc_text, } if not isinstance(enabled_fields, list): enabled_fields = [str(enabled_fields)] ef = {} for item in enabled_fields: if not isinstance(item, (str, tuple)): continue if not isinstance(item, tuple): ef[item] = item else: ef[item[0]] = item[1] result = {} for key, val in obj.items(): if key in ef: result[ef[key]] = val return result
def __prepare_record(self, record, enabled_fields)
Prepare log record with given fields.
2.123597
2.011866
1.055536
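The enabled_fields argument of the record above mixes two shapes: a plain string keeps the record key under its own name, while a (source, alias) tuple renames it in the output. A sketch of a typical value:
enabled_fields = [
    'asctime',               # emitted as 'asctime'
    ('levelname', 'level'),  # record key 'levelname' emitted as 'level'
    ('message', 'msg'),      # record key 'message' emitted as 'msg'
]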
return json.dumps(obj, indent=self._indent, sort_keys=self._sort_keys)
def __obj2json(self, obj)
Serialize obj to a JSON formatted string. This is useful for pretty printing log records in the console.
3.214065
3.376828
0.9518
cn = None for t, v in cert.get_subject().get_components(): if t == b'CN': cn = v break if cn == name: return True # checking SAN s_name = name.decode('ascii') for i in range(cert.get_extension_count()): ext = cert.get_extension(i) if ext.get_short_name() == b'subjectAltName': s = str(ext) # SANs usually have a form like: DNS:hostname if s.startswith('DNS:') and s[4:] == s_name: return True # TODO handle wildcards return False
def validate_host(cert, name)
Validates host name against certificate @param cert: Certificate returned by host @param name: Actual host name used for connection @return: Returns true if host name matches certificate
3.66299
3.907193
0.937499
enc_conn = tds_sock.conn.sock clear_conn = enc_conn._transport enc_conn.shutdown() tds_sock.conn.sock = clear_conn tds_sock._writer._transport = clear_conn tds_sock._reader._transport = clear_conn
def revert_to_clear(tds_sock)
Reverts connection back to non-encrypted mode. Used when the client sent the ENCRYPT_OFF flag. @param tds_sock: TDS socket whose connection should be reverted @return:
4.009632
4.182024
0.958778
encoded = bytearray(ucs2_codec.encode(password)[0]) for i, ch in enumerate(encoded): encoded[i] = ((ch << 4) & 0xff | (ch >> 4)) ^ 0xA5 return encoded
def tds7_crypt_pass(password)
Mangle password according to TDS rules :param password: Password str :returns: Byte-string with encoded password
4.464116
5.138805
0.868707
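The transform swaps the nibbles of every byte of the UCS-2 encoded password and XORs the result with 0xA5. A standalone sketch of the per-byte step:
def mangle_byte(b):
    # nibble swap, then XOR with 0xA5 -- mirrors the loop body above
    return ((b << 4) & 0xff | (b >> 4)) ^ 0xA5

# 'a' encodes to UCS-2 LE bytes 0x61 0x00:
assert mangle_byte(0x61) == 0xb3  # 0x61 -> 0x16 -> 0xb3
assert mangle_byte(0x00) == 0xa5  # 0x00 -> 0x00 -> 0xa5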
buf, offset = readall_fast(self, struc.size) return struc.unpack_from(buf, offset)
def unpack(self, struc)
Unpacks given structure from stream :param struc: A struct.Struct instance :returns: Result of unpacking
9.548719
9.489656
1.006224
buf = readall(self, num_chars * 2) return ucs2_codec.decode(buf)[0]
def read_ucs2(self, num_chars)
Reads a UCS-2 string of num_chars characters from the stream
6.046493
6.421067
0.941665
buf = readall(self, Collation.wire_size) return Collation.unpack(buf)
def get_collation(self)
Reads :class:`Collation` object from stream
23.436949
12.679263
1.848447
try: pos = 0 while pos < _header.size: received = self._transport.recv_into(self._bufview[pos:], _header.size - pos) if received == 0: raise tds_base.ClosedConnectionError() pos += received except tds_base.TimeoutError: self._session.put_cancel() raise self._pos = _header.size self._type, self._status, self._size, self._session._spid, _ = _header.unpack_from(self._bufview, 0) self._have = pos while pos < self._size: received = self._transport.recv_into(self._bufview[pos:], self._size - pos) if received == 0: raise tds_base.ClosedConnectionError() pos += received self._have += received
def _read_packet(self)
Reads next TDS packet from the underlying transport. If a timeout occurs while reading the packet's header, the current request is cancelled. Can only be called when the transport's read pointer is at the beginning of a packet.
3.536595
3.230216
1.094848
self._read_packet() return readall(self, self._size - _header.size)
def read_whole_packet(self)
Reads a single packet and returns the bytes payload of the packet. Can only be called when the transport's read pointer is at the beginning of a packet.
16.506887
20.439665
0.807591
data_off = 0 while data_off < len(data): left = len(self._buf) - self._pos if left <= 0: self._write_packet(final=False) else: to_write = min(left, len(data) - data_off) self._buf[self._pos:self._pos + to_write] = data[data_off:data_off + to_write] self._pos += to_write data_off += to_write
def write(self, data)
Writes the given bytes buffer into the stream. Returns only when the entire buffer has been written.
2.240858
2.274014
0.985419
for i in range(0, len(s), self.bufsize): chunk = s[i:i + self.bufsize] buf, consumed = codec.encode(chunk) assert consumed == len(chunk) self.write(buf)
def write_string(self, s, codec)
Writes a string into the stream, encoding it with the given codec
2.653374
2.676079
0.991516
status = 1 if final else 0 _header.pack_into(self._buf, 0, self._type, status, self._pos, 0, self._packet_no) self._packet_no = (self._packet_no + 1) % 256 self._transport.sendall(self._buf[:self._pos]) self._pos = 8
def _write_packet(self, final)
Writes single TDS packet into underlying transport. Data for the packet is taken from internal buffer. :param final: True means this is the final packet in substream.
3.835012
4.211649
0.910573
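Per the MS-TDS spec, the 8-byte header written here carries type, status, length, SPID, packet id, and window, all big-endian; that is consistent with the code resetting self._pos to 8 after the header. A sketch of a plausible `_header` struct matching the pack/unpack arity seen in these records (the exact format string is an assumption):
import struct

# Assumed layout: type, status, length, spid, packet id, zero pad where the window field sits.
_header = struct.Struct('>BBHHBx')
buf = bytearray(4096)
_header.pack_into(buf, 0, 1, 1, 8, 0, 1)  # final SQL-batch packet (type 1) with empty payload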
if not self.messages: raise tds_base.Error("Request failed, server didn't send error message") msg = None while True: msg = self.messages[-1] if msg['msgno'] == 3621: # the statement has been terminated self.messages = self.messages[:-1] else: break error_msg = ' '.join(m['message'] for m in self.messages) ex = _create_exception_by_message(msg, error_msg) raise ex
def raise_db_exception(self)
Raises an exception built from the last server message. Messages saying 'The statement has been terminated' (msgno 3621) are skipped.
6.025027
5.008863
1.202873
r = self._reader # User defined data type of the column if tds_base.IS_TDS72_PLUS(self): user_type = r.get_uint() else: user_type = r.get_usmallint() curcol.column_usertype = user_type curcol.flags = r.get_usmallint() # Flags type_id = r.get_byte() serializer_class = self._tds.type_factory.get_type_serializer(type_id) curcol.serializer = serializer_class.from_stream(r)
def get_type_info(self, curcol)
Reads TYPE_INFO structure (http://msdn.microsoft.com/en-us/library/dd358284.aspx) :param curcol: An instance of :class:`Column` that will receive read information
6.157535
6.034568
1.020377
self.log_response_message('got COLMETADATA') r = self._reader # read number of columns and allocate the columns structure num_cols = r.get_smallint() # This can be a DUMMY results token from a cursor fetch if num_cols == -1: return self.param_info = None self.has_status = False self.ret_status = None self.skipped_to_status = False self.rows_affected = tds_base.TDS_NO_COUNT self.more_rows = True self.row = [None] * num_cols self.res_info = info = _Results() # # loop through the columns populating COLINFO struct from # server response # header_tuple = [] for col in range(num_cols): curcol = tds_base.Column() info.columns.append(curcol) self.get_type_info(curcol) curcol.column_name = r.read_ucs2(r.get_byte()) precision = curcol.serializer.precision scale = curcol.serializer.scale size = curcol.serializer.size header_tuple.append( (curcol.column_name, curcol.serializer.get_typeid(), None, size, precision, scale, curcol.flags & tds_base.Column.fNullable)) info.description = tuple(header_tuple) return info
def tds7_process_result(self)
Reads and processes COLMETADATA stream This stream contains a list of returned columns. Stream format link: http://msdn.microsoft.com/en-us/library/dd357363.aspx
6.861932
6.461772
1.061927
self.log_response_message('got RETURNVALUE message') r = self._reader if tds_base.IS_TDS72_PLUS(self): ordinal = r.get_usmallint() else: r.get_usmallint() # ignore size ordinal = self._out_params_indexes[self.return_value_index] name = r.read_ucs2(r.get_byte()) r.get_byte() # 1 - OUTPUT of sp, 2 - result of udf param = tds_base.Column() param.column_name = name self.get_type_info(param) param.value = param.serializer.read(r) self.output_params[ordinal] = param self.return_value_index += 1
def process_param(self)
Reads and processes RETURNVALUE stream. This stream is used to send OUTPUT parameters from RPC to client. Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx
8.065095
6.940246
1.162076
self.log_response_message('got CANCEL message') # silly cases, nothing to do if not self.in_cancel: return while True: token_id = self.get_token_id() self.process_token(token_id) if not self.in_cancel: return
def process_cancel(self)
Process the incoming token stream until it finds an end token DONE with the cancel flag set. At that point the connection should be ready to handle a new query. If no cancel request is pending, this function does nothing.
6.65354
5.488067
1.212365
self.log_response_message('got ERROR/INFO message') r = self._reader r.get_smallint() # size msg = {'marker': marker, 'msgno': r.get_int(), 'state': r.get_byte(), 'severity': r.get_byte(), 'sql_state': None} if marker == tds_base.TDS_INFO_TOKEN: msg['priv_msg_type'] = 0 elif marker == tds_base.TDS_ERROR_TOKEN: msg['priv_msg_type'] = 1 else: logger.error('tds_process_msg() called with unknown marker "{0}"'.format(marker)) msg['message'] = r.read_ucs2(r.get_smallint()) # server name msg['server'] = r.read_ucs2(r.get_byte()) # stored proc name if available msg['proc_name'] = r.read_ucs2(r.get_byte()) msg['line_number'] = r.get_int() if tds_base.IS_TDS72_PLUS(self) else r.get_smallint() # in case extended error data is sent, we just try to discard it # special case self.messages.append(msg)
def process_msg(self, marker)
Reads and processes ERROR/INFO streams Stream formats: - ERROR: http://msdn.microsoft.com/en-us/library/dd304156.aspx - INFO: http://msdn.microsoft.com/en-us/library/dd303398.aspx :param marker: TDS_ERROR_TOKEN or TDS_INFO_TOKEN
5.278393
4.667706
1.130832
self.log_response_message("got ROW message") r = self._reader info = self.res_info info.row_count += 1 for i, curcol in enumerate(info.columns): curcol.value = self.row[i] = curcol.serializer.read(r)
def process_row(self)
Reads and handles ROW stream. This stream contains the list of values of one returned row. Stream format url: http://msdn.microsoft.com/en-us/library/dd357254.aspx
10.600603
8.673519
1.22218
self.log_response_message("got NBCROW message") r = self._reader info = self.res_info if not info: self.bad_stream('got row without info') assert len(info.columns) > 0 info.row_count += 1 # reading bitarray for nulls, 1 represent null values for # corresponding fields nbc = readall(r, (len(info.columns) + 7) // 8) for i, curcol in enumerate(info.columns): if tds_base.my_ord(nbc[i // 8]) & (1 << (i % 8)): value = None else: value = curcol.serializer.read(r) self.row[i] = value
def process_nbcrow(self)
Reads and handles NBCROW stream. This stream contains the list of values of one returned row in a compressed form; introduced in TDS 7.3.B. Stream format url: http://msdn.microsoft.com/en-us/library/dd304783.aspx
7.667022
6.624406
1.15739
code_to_str = { tds_base.TDS_DONE_TOKEN: 'DONE', tds_base.TDS_DONEINPROC_TOKEN: 'DONEINPROC', tds_base.TDS_DONEPROC_TOKEN: 'DONEPROC', } self.end_marker = marker self.more_rows = False r = self._reader status = r.get_usmallint() r.get_usmallint() # cur_cmd more_results = status & tds_base.TDS_DONE_MORE_RESULTS != 0 was_cancelled = status & tds_base.TDS_DONE_CANCELLED != 0 done_count_valid = status & tds_base.TDS_DONE_COUNT != 0 if self.res_info: self.res_info.more_results = more_results rows_affected = r.get_int8() if tds_base.IS_TDS72_PLUS(self) else r.get_int() self.log_response_message("got {} message, more_res={}, cancelled={}, rows_affected={}".format( code_to_str[marker], more_results, was_cancelled, rows_affected)) if was_cancelled or (not more_results and not self.in_cancel): self.in_cancel = False self.set_state(tds_base.TDS_IDLE) if done_count_valid: self.rows_affected = rows_affected else: self.rows_affected = -1 self.done_flags = status if self.done_flags & tds_base.TDS_DONE_ERROR and not was_cancelled and not self.in_cancel: self.raise_db_exception()
def process_end(self, marker)
Reads and processes DONE/DONEINPROC/DONEPROC streams Stream format urls: - DONE: http://msdn.microsoft.com/en-us/library/dd340421.aspx - DONEINPROC: http://msdn.microsoft.com/en-us/library/dd340553.aspx - DONEPROC: http://msdn.microsoft.com/en-us/library/dd340753.aspx :param marker: Can be TDS_DONE_TOKEN or TDS_DONEINPROC_TOKEN or TDS_DONEPROC_TOKEN
3.756241
3.336776
1.12571
r = self._reader w = self._writer pdu_size = r.get_smallint() if not self.authentication: raise tds_base.Error('Got unexpected token') packet = self.authentication.handle_next(readall(r, pdu_size)) if packet: w.write(packet) w.flush()
def process_auth(self)
Reads and processes SSPI stream. Stream info: http://msdn.microsoft.com/en-us/library/dd302844.aspx
8.038357
7.024608
1.144314
prior_state = self.state if state == prior_state: return state if state == tds_base.TDS_PENDING: if prior_state in (tds_base.TDS_READING, tds_base.TDS_QUERYING): self.state = tds_base.TDS_PENDING else: raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'. format(tds_base.state_names[prior_state], tds_base.state_names[state])) elif state == tds_base.TDS_READING: # transitions to READING are valid only from PENDING if self.state != tds_base.TDS_PENDING: raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'. format(tds_base.state_names[prior_state], tds_base.state_names[state])) else: self.state = state elif state == tds_base.TDS_IDLE: if prior_state == tds_base.TDS_DEAD: raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'. format(tds_base.state_names[prior_state], tds_base.state_names[state])) self.state = state elif state == tds_base.TDS_DEAD: self.state = state elif state == tds_base.TDS_QUERYING: if self.state == tds_base.TDS_DEAD: raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'. format(tds_base.state_names[prior_state], tds_base.state_names[state])) elif self.state != tds_base.TDS_IDLE: raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'. format(tds_base.state_names[prior_state], tds_base.state_names[state])) else: self.rows_affected = tds_base.TDS_NO_COUNT self.internal_sp_called = 0 self.state = state else: assert False return self.state
def set_state(self, state)
Switches state of the TDS session. It also checks state transitions. :param state: New state, one of TDS_PENDING/TDS_READING/TDS_IDLE/TDS_DEAD/TDS_QUERYING
1.903878
1.800943
1.057156
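The branches above reduce to a small transition table; a comment-form sketch:
# Allowed transitions checked by set_state (setting the current state again is a no-op):
#   -> TDS_PENDING   only from TDS_READING or TDS_QUERYING
#   -> TDS_READING   only from TDS_PENDING
#   -> TDS_IDLE      from anything except TDS_DEAD
#   -> TDS_DEAD      from anything
#   -> TDS_QUERYING  only from TDS_IDLE (also resets rows_affected and internal_sp_called)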
if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING: raise tds_base.Error("Couldn't switch to state") self._writer.begin_packet(packet_type) try: yield except: if self.state != tds_base.TDS_DEAD: self.set_state(tds_base.TDS_IDLE) raise else: self.set_state(tds_base.TDS_PENDING) self._writer.flush()
def querying_context(self, packet_type)
Context manager for querying. Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block, and on success sets it to TDS_PENDING and flushes the write buffer.
3.641455
2.678098
1.359717
if isinstance(value, tds_base.Column): value.column_name = name return value column = tds_base.Column() column.column_name = name column.flags = 0 if isinstance(value, output): column.flags |= tds_base.fByRefValue if isinstance(value.type, six.string_types): column.type = tds_types.sql_type_by_declaration(value.type) elif value.type: column.type = self.conn.type_inferrer.from_class(value.type) value = value.value if value is default: column.flags |= tds_base.fDefaultValue value = None column.value = value if column.type is None: column.type = self.conn.type_inferrer.from_value(value) return column
def make_param(self, name, value)
Generates instance of :class:`Column` from value and name Value can also be one of several special types: - An instance of :class:`Column`, in which case it is just returned. - An instance of :class:`output`, in which case the parameter will become an output parameter. - The singleton :var:`default`, in which case the default value will be passed into a stored proc. :param name: Name of the parameter, will populate the column_name property of the returned column. :param value: Value of the parameter, also used to guess the type of the parameter. :return: An instance of :class:`Column`
3.646989
3.342788
1.091002
if isinstance(parameters, dict): return [self.make_param(name, value) for name, value in parameters.items()] else: params = [] for parameter in parameters: params.append(self.make_param('', parameter)) return params
def _convert_params(self, parameters)
Converts a dict or list of parameters into a list of :class:`Column` instances. :param parameters: Can be a list of parameter values, or a dict of parameter names to values. :return: A list of :class:`Column` instances.
2.708054
2.827064
0.957903
if self.state == tds_base.TDS_IDLE: return if not self.in_cancel: self.put_cancel() self.process_cancel()
def cancel_if_pending(self)
Cancels current pending request. Does nothing if no request is pending, otherwise sends cancel request, and waits for response.
8.728719
8.921226
0.978421
logger.info('Sending RPC %s flags=%d', rpc_name, flags) self.messages = [] self.output_params = {} self.cancel_if_pending() self.res_info = None w = self._writer with self.querying_context(tds_base.PacketType.RPC): if tds_base.IS_TDS72_PLUS(self): self._start_query() if tds_base.IS_TDS71_PLUS(self) and isinstance(rpc_name, tds_base.InternalProc): w.put_smallint(-1) w.put_smallint(rpc_name.proc_id) else: if isinstance(rpc_name, tds_base.InternalProc): rpc_name = rpc_name.name w.put_smallint(len(rpc_name)) w.write_ucs2(rpc_name) # # TODO support flags # bit 0 (1 as flag) in TDS7/TDS5 is "recompile" # bit 1 (2 as flag) in TDS7+ is "no metadata" bit; this will prevent sending of column infos # w.put_usmallint(flags) self._out_params_indexes = [] for i, param in enumerate(params): if param.flags & tds_base.fByRefValue: self._out_params_indexes.append(i) w.put_byte(len(param.column_name)) w.write_ucs2(param.column_name) # # TODO support other flags (use default null/no metadata) # bit 1 (2 as flag) in TDS7+ is "default value" bit # (what's the meaning of "default value" ?) # w.put_byte(param.flags) # TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx serializer = param.choose_serializer( type_factory=self._tds.type_factory, collation=self._tds.collation or raw_collation ) type_id = serializer.type w.put_byte(type_id) serializer.write_info(w) serializer.write(w, param.value)
def submit_rpc(self, rpc_name, params, flags=0)
Sends an RPC request. This call will transition session into pending state. If some operation is currently pending on the session, it will be cancelled before sending this request. Spec: http://msdn.microsoft.com/en-us/library/dd357576.aspx :param rpc_name: Name of the RPC to call, can be an instance of :class:`InternalProc` :param params: Stored proc parameters, should be a list of :class:`Column` instances. :param flags: See spec for possible flags.
4.980357
4.719921
1.055178
self.messages = [] self.cancel_if_pending() self.res_info = None logger.info("Sending query %s", operation[:100]) w = self._writer with self.querying_context(tds_base.PacketType.QUERY): if tds_base.IS_TDS72_PLUS(self): self._start_query() w.write_ucs2(operation)
def submit_plain_query(self, operation)
Sends a plain query to server. This call will transition session into pending state. If some operation is currently pending on the session, it will be cancelled before sending this request. Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx :param operation: A string representing sql statement.
9.649354
9.273676
1.04051
logger.info('Sending INSERT BULK') num_cols = len(metadata) w = self._writer serializers = [] with self.querying_context(tds_base.PacketType.BULK): w.put_byte(tds_base.TDS7_RESULT_TOKEN) w.put_usmallint(num_cols) for col in metadata: if tds_base.IS_TDS72_PLUS(self): w.put_uint(col.column_usertype) else: w.put_usmallint(col.column_usertype) w.put_usmallint(col.flags) serializer = col.choose_serializer( type_factory=self._tds.type_factory, collation=self._tds.collation, ) type_id = serializer.type w.put_byte(type_id) serializers.append(serializer) serializer.write_info(w) w.put_byte(len(col.column_name)) w.write_ucs2(col.column_name) for row in rows: w.put_byte(tds_base.TDS_ROW_TOKEN) for i, col in enumerate(metadata): serializers[i].write(w, row[i]) # https://msdn.microsoft.com/en-us/library/dd340421.aspx w.put_byte(tds_base.TDS_DONE_TOKEN) w.put_usmallint(tds_base.TDS_DONE_FINAL) w.put_usmallint(0) # curcmd # row count if tds_base.IS_TDS72_PLUS(self): w.put_int8(0) else: w.put_int(0)
def submit_bulk(self, metadata, rows)
Sends insert bulk command. Spec: http://msdn.microsoft.com/en-us/library/dd358082.aspx :param metadata: A list of :class:`Column` instances. :param rows: A collection of rows, each row is a collection of values. :return:
3.611953
3.500909
1.031719
logger.info('Sending CANCEL') self._writer.begin_packet(tds_base.PacketType.CANCEL) self._writer.flush() self.in_cancel = 1
def put_cancel(self)
Sends a cancel request to the server. Switches connection to IN_CANCEL state.
11.980095
9.978637
1.200574
decoder = codec.incrementaldecoder() for chunk in iterable: yield decoder.decode(chunk) yield decoder.decode(b'', True)
def iterdecode(iterable, codec)
Uses an incremental decoder to decode each chunk in iterable. This function is a generator. :param iterable: Iterable object which yields raw data to be decoded :param codec: An instance of codec
3.49664
4.214411
0.829686
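A usage sketch: a multi-byte UTF-8 character split across two chunks decodes cleanly because the incremental decoder buffers the partial byte; the final empty-input yield flushes the decoder.
import codecs

chunks = [b'caf\xc3', b'\xa9']
list(iterdecode(chunks, codecs.lookup('utf-8')))  # -> ['caf', 'é', '']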
res = stm.recv(size) if len(res) == size: return elif len(res) == 0: raise ClosedConnectionError() left = size - len(res) while left: buf = stm.recv(left) if len(buf) == 0: raise ClosedConnectionError() left -= len(buf)
def skipall(stm, size)
Skips exactly size bytes in stm. If EOF is reached before size bytes are skipped, raises :class:`ClosedConnectionError`. :param stm: Stream to skip bytes in; its read method can return fewer bytes than requested. :param size: Number of bytes to skip.
2.738923
2.638376
1.038109
if size == 0: yield b'' return res = stm.recv(size) if len(res) == 0: raise ClosedConnectionError() yield res left = size - len(res) while left: buf = stm.recv(left) if len(buf) == 0: raise ClosedConnectionError() yield buf left -= len(buf)
def read_chunks(stm, size)
Reads exactly size bytes from stm and produces chunks. May call stm.read multiple times until the required number of bytes is read. If EOF is reached before size bytes are read, raises :class:`ClosedConnectionError`. :param stm: Stream to read bytes from; its read method can return fewer bytes than requested. :param size: Number of bytes to read.
2.331095
2.527925
0.922138
buf, offset = stm.read_fast(size) if len(buf) - offset < size: # slow case buf = buf[offset:] buf += stm.recv(size - len(buf)) return buf, 0 return buf, offset
def readall_fast(stm, size)
Slightly faster version of readall; it reads no more than two chunks, meaning it can only be used to read small data that doesn't span more than two packets. :param stm: Stream to read from, should have read method. :param size: Number of bytes to read. :return:
4.407783
5.230947
0.842636
return functools.reduce(lambda acc, val: acc * 256 + tds_base.my_ord(val), reversed(buf), 0)
def _decode_num(buf)
Decodes a little-endian integer from a buffer. The buffer can be of any size.
7.640604
11.192459
0.682656
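A quick worked example: reversed() makes the last byte the most significant, i.e. the buffer is read little-endian.
assert _decode_num(b'\x01\x02') == 0x0201  # 513: 0x02 * 256 + 0x01
assert _decode_num(b'\xff') == 255
assert _decode_num(b'') == 0               # empty buffer decodes to the initial accumulator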
if self.is_null(): return total = 0 while True: chunk_len = self._rdr.get_uint() if chunk_len == 0: if not self.is_unknown_len() and total != self._size: msg = "PLP actual length (%d) doesn't match reported length (%d)" % (total, self._size) self._rdr.session.bad_stream(msg) return total += chunk_len left = chunk_len while left: buf = self._rdr.recv(left) yield buf left -= len(buf)
def chunks(self)
Generates chunks from the stream; each chunk is an instance of bytes.
4.449934
4.220092
1.054464
return cls(days=(datetime.datetime.combine(pydate, datetime.time(0, 0, 0)) - _datetime2_base_date).days)
def from_pydate(cls, pydate)
Creates sql date object from Python date object. @param pydate: Python date @return: sql date
5.397369
6.980377
0.77322
nanoseconds = self._nsec hours = nanoseconds // 1000000000 // 60 // 60 nanoseconds -= hours * 60 * 60 * 1000000000 minutes = nanoseconds // 1000000000 // 60 nanoseconds -= minutes * 60 * 1000000000 seconds = nanoseconds // 1000000000 nanoseconds -= seconds * 1000000000 return datetime.time(hours, minutes, seconds, nanoseconds // 1000)
def to_pytime(self)
Converts sql time object into Python's time object. This will truncate nanoseconds to microseconds. @return: naive time
1.58413
1.643433
0.963915
secs = pytime.hour * 60 * 60 + pytime.minute * 60 + pytime.second nsec = secs * 10 ** 9 + pytime.microsecond * 1000 return cls(nsec=nsec)
def from_pytime(cls, pytime)
Converts Python time object to sql time object ignoring timezone @param pytime: Python time object @return: sql time object
2.598582
3.106772
0.836425
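A round-trip sketch for the two conversions above; the class name Time is an assumption suggested by the cls parameter:
import datetime

t = datetime.time(13, 30, 45, 500000)
# nsec = (13*3600 + 30*60 + 45) * 10**9 + 500000 * 1000
assert Time.from_pytime(t).to_pytime() == t  # microsecond precision survives the round trip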
return datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
def to_pydatetime(self)
Converts datetime2 object into Python's datetime.datetime object @return: naive datetime.datetime
4.336272
5.674429
0.764178
return cls(date=Date.from_pydate(pydatetime.date), time=Time.from_pytime(pydatetime.time))
def from_pydatetime(cls, pydatetime)
Creates sql datetime2 object from Python datetime object ignoring timezone @param pydatetime: Python datetime object @return: sql datetime2 object
3.429144
4.223919
0.811839
dt = datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime()) from .tz import FixedOffsetTimezone return dt.replace(tzinfo=_utc).astimezone(FixedOffsetTimezone(self._offset))
def to_pydatetime(self)
Converts datetimeoffset object into Python's datetime.datetime object @return: time zone aware datetime.datetime
3.999138
4.388336
0.911311
w.write_b_varchar("") # db_name, should be empty w.write_b_varchar(self._table_type.typ_schema) w.write_b_varchar(self._table_type.typ_name)
def write_info(self, w)
Writes TVP_TYPENAME structure spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx @param w: TdsWriter @return:
6.546942
6.170461
1.061014
if val.is_null(): w.put_usmallint(tds_base.TVP_NULL_TOKEN) else: columns = self._table_type.columns w.put_usmallint(len(columns)) for i, column in enumerate(columns): w.put_uint(column.column_usertype) w.put_usmallint(column.flags) # TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx serializer = self._columns_serializers[i] type_id = serializer.type w.put_byte(type_id) serializer.write_info(w) w.write_b_varchar('') # ColName, must be empty in TVP according to spec # here can optionally send TVP_ORDER_UNIQUE and TVP_COLUMN_ORDERING # https://msdn.microsoft.com/en-us/library/dd305261.aspx # terminating optional metadata w.put_byte(tds_base.TVP_END_TOKEN) # now sending rows using TVP_ROW # https://msdn.microsoft.com/en-us/library/dd305261.aspx if val.rows: for row in val.rows: w.put_byte(tds_base.TVP_ROW_TOKEN) for i, col in enumerate(self._table_type.columns): if not col.flags & tds_base.TVP_COLUMN_DEFAULT_FLAG: self._columns_serializers[i].write(w, row[i]) # terminating rows w.put_byte(tds_base.TVP_END_TOKEN)
def write(self, w, val)
Writes the remaining part of the TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA. Specs: https://msdn.microsoft.com/en-us/library/dd302994.aspx https://msdn.microsoft.com/en-us/library/dd305261.aspx https://msdn.microsoft.com/en-us/library/dd303230.aspx @param w: TdsWriter @param val: TableValuedParam or None @return:
4.327621
3.799403
1.139026
declaration = declaration.strip() for regex, constructor in self._compiled: m = regex.match(declaration) if m: return constructor(*m.groups()) raise ValueError('Unable to parse type declaration', declaration)
def parse(self, declaration)
Parse a sql type declaration, e.g. varchar(10), and return an instance of the corresponding type class, e.g. VarCharType(10) @param declaration: Sql declaration to parse, e.g. varchar(10) @return: instance of SqlTypeMetaclass
4.526376
5.862851
0.772043
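A usage sketch following the docstring's own example; the parser instance and VarCharType are assumed from context:
parsed = parser.parse('  varchar(10) ')  # whitespace is stripped before matching
# -> VarCharType(10) per the docstring; an unrecognised declaration raises ValueError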
if value is None: sql_type = NVarCharType(size=1) else: sql_type = self._from_class_value(value, type(value)) return sql_type
def from_value(self, value)
Function infers TDS type from Python value. :param value: value from which to infer TDS type :return: An instance of subclass of :class:`BaseType`
6.047194
6.605358
0.915498
# replace empty column names with indices column_names = [(name or idx) for idx, name in enumerate(column_names)] def row_factory(row): return dict(zip(column_names, row)) return row_factory
def dict_row_strategy(column_names)
Dict row strategy, rows returned as dictionaries
4.035383
4.230455
0.953888
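A usage sketch for dict_row_strategy above; note the list comprehension substitutes the column index for an empty name:
factory = dict_row_strategy(['id', '', 'name'])
factory((1, 'x', 'y'))  # -> {'id': 1, 1: 'x', 'name': 'y'} -- the empty name became key 1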
import collections # replace empty column names with placeholders column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)] row_class = collections.namedtuple('Row', column_names) def row_factory(row): return row_class(*row) return row_factory
def namedtuple_row_strategy(column_names)
Namedtuple row strategy, rows returned as named tuples Column names that are not valid Python identifiers will be replaced with col<number>_
3.721385
3.734102
0.996594
try: from namedlist import namedlist as recordtype # optional dependency except ImportError: from recordtype import recordtype # optional dependency # replace empty column names with placeholders column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)] recordtype_row_class = recordtype('Row', column_names) # custom extension class that supports indexing class Row(recordtype_row_class): def __getitem__(self, index): if isinstance(index, slice): return tuple(getattr(self, x) for x in self.__slots__[index]) return getattr(self, self.__slots__[index]) def __setitem__(self, index, value): setattr(self, self.__slots__[index], value) def row_factory(row): return Row(*row) return row_factory
def recordtype_row_strategy(column_names)
Recordtype row strategy, rows returned as recordtypes Column names that are not valid Python identifiers will be replaced with col<number>_
2.964711
2.905019
1.020548
key = (servers, database) if key not in _servers_deques: _servers_deques[key] = deque(servers) return _servers_deques[key]
def _get_servers_deque(servers, database)
Returns a deque of servers for the given tuple of servers and database name. The deque has the active server at the beginning; if the first server is not accessible at the moment, the deque is rotated: the second server moves to the first position, the third to the second, etc., and the previously first server moves to the last position. This allows the last successful server to be remembered between calls to the connect function.
2.520255
2.715031
0.92826
res = {} for item in connstr.split(';'): item = item.strip() if not item: continue key, value = item.split('=', 1) key = key.strip().lower().replace(' ', '_') value = value.strip() res[key] = value return res
def _parse_connection_string(connstr)
MSSQL style connection string parser Returns normalized dictionary of connection string parameters
1.935047
1.884284
1.02694
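A usage sketch for _parse_connection_string above; keys are lower-cased and internal spaces become underscores:
_parse_connection_string('Server=my host;Initial Catalog=mydb;')
# -> {'server': 'my host', 'initial_catalog': 'mydb'}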
self._assert_open() if self._autocommit: return if not self._conn.tds72_transaction: return self._main_cursor._commit(cont=True, isolation_level=self._isolation_level)
def commit(self)
Commit transaction which is currently in progress.
10.241199
8.294955
1.23463
self._assert_open() if self.mars_enabled: in_tran = self._conn.tds72_transaction if in_tran and self._dirty: try: return _MarsCursor(self, self._conn.create_session(self._tzinfo_factory), self._tzinfo_factory) except (socket.error, OSError) as e: self._conn.close() raise else: try: return _MarsCursor(self, self._conn.create_session(self._tzinfo_factory), self._tzinfo_factory) except (socket.error, OSError) as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET): raise self._conn.close() except ClosedConnectionError: pass self._assert_open() return _MarsCursor(self, self._conn.create_session(self._tzinfo_factory), self._tzinfo_factory) else: return Cursor(self, self._conn.main_session, self._tzinfo_factory)
def cursor(self)
Return cursor object that can be used to make queries and fetch results from the database.
3.124943
3.070472
1.01774
try: if self._autocommit: return if not self._conn or not self._conn.is_connected(): return if not self._conn.tds72_transaction: return self._main_cursor._rollback(cont=True, isolation_level=self._isolation_level) except socket.error as e: if e.errno in (errno.ENETRESET, errno.ECONNRESET, errno.EPIPE): return self._conn.close() raise except ClosedConnectionError: pass
def rollback(self)
Roll back transaction which is currently in progress.
5.119611
4.712289
1.086438
if self._conn: if self._pooling: _connection_pool.add(self._key, (self._conn, self._main_cursor._session)) else: self._conn.close() self._active_cursor = None self._main_cursor = None self._conn = None self._closed = True
def close(self)
Close connection to an MS SQL Server. This function tries to close the connection and free all memory used. It can be called more than once in a row. No exception is raised in this case.
5.078874
5.375737
0.944777
self._session.complete_rpc() results = [None] * len(self._session.output_params) for key, param in self._session.output_params.items(): results[key] = param.value return results
def get_proc_outputs(self)
If the stored procedure has result sets and OUTPUT parameters, use this method after you have processed all result sets to get the values of the OUTPUT parameters. :return: A list of output parameter values.
5.399429
4.480407
1.20512
conn = self._assert_open() conn._try_activate_cursor(self) return self._callproc(procname, parameters)
def callproc(self, procname, parameters=())
Call a stored procedure with the given name. :param procname: The name of the procedure to call :type procname: str :keyword parameters: The optional parameters for the procedure :type parameters: sequence Note: If the stored procedure has OUTPUT parameters and result sets, this method will not return values for the OUTPUT parameters; call get_proc_outputs to get them.
8.415299
12.160617
0.692013
if self._session is None: return None if not self._session.has_status: self._session.find_return_status() return self._session.ret_status if self._session.has_status else None
def get_proc_return_status(self)
Return status of the last executed stored procedure
4.752473
4.504269
1.055104
conn = self._assert_open() conn._try_activate_cursor(self) self._session.cancel_if_pending()
def cancel(self)
Cancel current statement
18.788979
18.313173
1.025982
conn = self._conn if conn is not None: conn = conn() if conn is not None: if self is conn._active_cursor: conn._active_cursor = conn._main_cursor self._session = None self._conn = None
def close(self)
Closes the cursor. The cursor is unusable from this point.
4.855291
4.577763
1.060625
conn = self._assert_open() conn._try_activate_cursor(self) self._execute(operation, params) # for compatibility with pyodbc return self
def execute(self, operation, params=())
Execute the query :param operation: SQL statement :type operation: str
10.995302
13.988864
0.786004
self.execute(query_string, params) row = self.fetchone() if not row: return None return row[0]
def execute_scalar(self, query_string, params=None)
This method sends a query to the MS SQL Server to which this object instance is connected, then returns the first column of the first row from the result. An exception is raised on failure. If there are pending results or rows prior to executing this command, they are silently discarded. This method accepts Python formatting. Please see execute_query() for details. This method is useful if you want just a single value, as in: ``conn.execute_scalar('SELECT COUNT(*) FROM employees')`` This method works in the same way as ``iter(conn).next()[0]``. Remaining rows, if any, can still be iterated after calling this method.
2.484576
3.423287
0.725787
if self._session is None: return None res = self._session.res_info if res: return res.description else: return None
def description(self)
Cursor description, see http://legacy.python.org/dev/peps/pep-0249/#description
6.294755
5.637377
1.116611
if self._session: result = [] for msg in self._session.messages: ex = _create_exception_by_message(msg) result.append((type(ex), ex)) return result else: return None
def messages(self)
Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages
4.756067
4.139813
1.14886
if self._session is None: return None res = self._session.res_info if res: return res.native_descr else: return None
def native_description(self)
todo document
6.811982
5.938025
1.14718
row = self._session.fetchone() if row: return self._row_factory(row)
def fetchone(self)
Fetches next row, or ``None`` if there are no more rows
6.049727
5.474028
1.105169
if self._session is not None: try: self._session.close() self._session = None except socket.error as e: if e.errno != errno.ECONNRESET: raise
def close(self)
Closes the cursor. The cursor is unusable from this point.
2.691159
2.690058
1.000409
self._assert_open() return self._callproc(procname, parameters)
def callproc(self, procname, parameters=())
Call a stored procedure with the given name. :param procname: The name of the procedure to call :type procname: str :keyword parameters: The optional parameters for the procedure :type parameters: sequence
5.825709
7.769036
0.749862
partial_path = deque() while True: if not path or path == '/': break (path, base) = posixpath.split(path) partial_path.appendleft(base) return local_path_module.join(*partial_path)
def __posix_to_local_path(path, local_path_module=os.path)
Converts a posix path (coming from Galaxy) to a local path (be it posix or Windows). >>> import ntpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath) 'dataset_1_files\\\\moo\\\\cow' >>> import posixpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath) 'dataset_1_files/moo/cow'
3.110296
4.580253
0.679067
directory, allow_nested_files = self._directory_for_file_type(input_type) path = get_mapped_file(directory, remote_path, allow_nested_files=allow_nested_files) return path
def calculate_path(self, remote_path, input_type)
Verify remote_path is in directory for input_type inputs and create directory if needed.
5.703136
5.491813
1.03848
setup_args = {"job_id": self.job_id} if tool_id: setup_args["tool_id"] = tool_id if tool_version: setup_args["tool_version"] = tool_version if preserve_galaxy_python_environment: setup_args["preserve_galaxy_python_environment"] = preserve_galaxy_python_environment return self.setup_handler.setup(**setup_args)
def setup(self, tool_id=None, tool_version=None, preserve_galaxy_python_environment=None)
Setup remote Pulsar server to run this job.
1.877692
1.764825
1.063953
launch_params = dict(command_line=command_line, job_id=self.job_id) submit_params_dict = submit_params(self.destination_params) if submit_params_dict: launch_params['params'] = json_dumps(submit_params_dict) if dependencies_description: launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict()) if env: launch_params['env'] = json_dumps(env) if remote_staging: launch_params['remote_staging'] = json_dumps(remote_staging) if job_config and 'touch_outputs' in job_config: # message clients pass the entire job config launch_params['submit_extras'] = json_dumps({'touch_outputs': job_config['touch_outputs']}) if job_config and self.setup_handler.local: # Setup not yet called, job properties were inferred from # destination arguments. Hence, must have Pulsar setup job # before queueing. setup_params = _setup_params_from_job_config(job_config) launch_params['setup_params'] = json_dumps(setup_params) return self._raw_execute("submit", launch_params)
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None)
Queue up the execution of the supplied `command_line` on the remote server. Called launch for historical reasons, should be renamed to enqueue or something like that. **Parameters** command_line : str Command to execute.
4.053294
4.386073
0.924128
if output_type in ['output_workdir', 'output_metadata']: self._populate_output_path(name, path, action_type, output_type) elif output_type == 'output': self._fetch_output(path=path, name=name, action_type=action_type) else: raise Exception("Unknown output_type %s" % output_type)
def fetch_output(self, path, name, working_directory, action_type, output_type)
Fetch (transfer, copy, etc...) an output from the remote Pulsar server. **Parameters** path : str Local path of the dataset. name : str Remote name of file (i.e. path relative to remote staging output or working directory). working_directory : str Local working_directory for the job. action_type : str Where to find file on Pulsar (output_workdir or output). legacy is also an option, in which case Pulsar is asked for the location - this will only be used when targeting an older Pulsar server that didn't return statuses allowing this to be inferred.
3.156185
3.08549
1.022912
proc = Popen(args, stdout=PIPE) out = proc.communicate()[0] if proc.returncode: raise CalledProcessError(proc.returncode, args, output=out) return out
def check_output(args)
Pipe-safe (and 2.6 compatible) version of subprocess.check_output
2.100097
1.945214
1.079623
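A usage sketch mirroring subprocess.check_output:
out = check_output(['echo', 'hello'])  # -> b'hello\n'
# a non-zero exit status raises CalledProcessError with the captured output attached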
return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)
def unstructured_mappers(self)
Return mappers that will map 'unstructured' files (i.e. go beyond mapping inputs, outputs, and config files).
9.596457
7.852435
1.2221
if getattr(action, "inject_url", False): self.__inject_url(action, file_type) if getattr(action, "inject_ssh_properties", False): self.__inject_ssh_properties(action)
def __process_action(self, action, file_type)
Extension point to populate extra action information after an action has been created.
4.338056
3.924347
1.105421
try: parent = Process(pid) for child in parent.children(recursive=True): child.kill() parent.kill() except NoSuchProcess: return
def _psutil_kill_pid(pid)
http://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows
2.939581
2.741693
1.072177
external_id = None for pattern_type, pattern in EXTERNAL_ID_PATTERNS: if type != EXTERNAL_ID_TYPE_ANY and type != pattern_type: continue match = search(pattern, output) if match: external_id = match.group(1) break return external_id
def parse_external_id(output, type=EXTERNAL_ID_TYPE_ANY)
Attempt to parse the output of job submission commands for an external id. >>> parse_external_id("12345.pbsmanager") '12345.pbsmanager' >>> parse_external_id('Submitted batch job 185') '185' >>> parse_external_id('Submitted batch job 185', type='torque') 'Submitted batch job 185' >>> parse_external_id('submitted to cluster 125.') '125' >>> parse_external_id('submitted to cluster 125.', type='slurm') >>>
2.397463
3.436979
0.697549
if value: return value installed_script = which("pulsar-%s" % script_name.replace("_", "-")) if installed_script: return installed_script else: return "scripts/%s.bash" % script_name
def _handle_default(value, script_name)
There are two potential variants of these scripts, the Bash scripts that are meant to be run within PULSAR_ROOT for older-style installs and the binaries created by setup.py as part of a proper pulsar installation. This method first looks for the newer style variant of these scripts and returns the full path to them if needed and falls back to the bash scripts if these cannot be found.
5.788596
4.362903
1.326776
input = open(path, "r", encoding="utf-8") try: return input.read() finally: input.close()
def _read(path)
Utility method to quickly read small files (config files and tool wrappers) into memory as text.
2.960134
3.380413
0.875672
if directory is None: return [] pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep)) return self.find_pattern_references(pattern)
def find_referenced_subfiles(self, directory)
Return list of files below specified `directory` in job inputs. Could use more sophisticated logic (match quotes to handle spaces, handle subdirectories, etc...). **Parameters** directory : str Full path to directory to search.
7.322061
8.335661
0.878402
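A sketch of what the generated pattern matches, with the helper's regex built by hand for directory '/tmp/job' on a POSIX system:
from os.path import sep
from re import escape, findall

pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape('/tmp/job'), escape(sep))
findall(pattern, "cat '/tmp/job/input.dat' > out")  # -> ['/tmp/job/input.dat']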
self.__rewrite_command_line(local_path, remote_path) self.__rewrite_config_files(local_path, remote_path)
def rewrite_paths(self, local_path, remote_path)
Rewrite references to `local_path` with `remote_path` in job inputs.
3.824416
3.411814
1.120933
for local_path, remote_path in self.file_renames.items(): self.job_inputs.rewrite_paths(local_path, remote_path)
def rewrite_input_paths(self)
For each file that has been transferred and renamed, update command_line and configfiles to reflect that rewrite.
5.638778
4.594756
1.22722
# Caller should have the publish_uuid_store lock try: return self.publish_uuid_store[uuid] except Exception as exc: msg = "Failed to load payload from publish store for UUID %s, %s: %s" if uuid in failed: log.error(msg, uuid, "discarding", str(exc)) self.__discard_publish_uuid(uuid, failed) else: log.error(msg, uuid, "will try again", str(exc)) failed.add(uuid) return None
def __get_payload(self, uuid, failed)
Retry reading a message from the publish_uuid_store once, delete on the second failure.
4.850998
3.891319
1.246621
return_code = manager.return_code(job_id) if return_code == PULSAR_UNKNOWN_RETURN_CODE: return_code = None stdout_contents = manager.stdout_contents(job_id).decode("utf-8") stderr_contents = manager.stderr_contents(job_id).decode("utf-8") job_directory = manager.job_directory(job_id) as_dict = dict( job_id=job_id, complete="true", # Is this still used or is it legacy. status=complete_status, returncode=return_code, stdout=stdout_contents, stderr=stderr_contents, working_directory=job_directory.working_directory(), metadata_directory=job_directory.metadata_directory(), working_directory_contents=job_directory.working_directory_contents(), metadata_directory_contents=job_directory.metadata_directory_contents(), outputs_directory_contents=job_directory.outputs_directory_contents(), system_properties=manager.system_properties(), pulsar_version=pulsar_version, ) return as_dict
def __job_complete_dict(complete_status, manager, job_id)
Build final dictionary describing completed job for consumption by Pulsar client.
2.651138
2.518296
1.052751
# job_config is raw dictionary from JSON (from MQ or HTTP endpoint). job_id = job_config.get('job_id') try: command_line = job_config.get('command_line') setup_params = job_config.get('setup_params', {}) force_setup = job_config.get('setup') remote_staging = job_config.get('remote_staging', {}) dependencies_description = job_config.get('dependencies_description', None) env = job_config.get('env', []) submit_params = job_config.get('submit_params', {}) touch_outputs = job_config.get('touch_outputs', []) job_config = None if setup_params or force_setup: input_job_id = setup_params.get("job_id", job_id) tool_id = setup_params.get("tool_id", None) tool_version = setup_params.get("tool_version", None) use_metadata = setup_params.get("use_metadata", False) job_config = setup_job( manager, input_job_id, tool_id, tool_version, use_metadata, ) if job_config is not None: job_directory = job_config["job_directory"] jobs_directory = os.path.abspath(os.path.join(job_directory, os.pardir)) command_line = command_line.replace('__PULSAR_JOBS_DIRECTORY__', jobs_directory) # TODO: Handle __PULSAR_JOB_DIRECTORY__ config files, metadata files, etc... manager.touch_outputs(job_id, touch_outputs) launch_config = { "remote_staging": remote_staging, "command_line": command_line, "dependencies_description": dependencies_description, "submit_params": submit_params, "env": env, "setup_params": setup_params, } manager.preprocess_and_launch(job_id, launch_config) except Exception: manager.handle_failure_before_launch(job_id) raise
def submit_job(manager, job_config)
Launch a new job from the specified config. The job may have been previously 'setup'; in that case 'setup_params' in job_config will be empty.
2.93337
2.829402
1.036746
job_id = manager.setup_job(job_id, tool_id, tool_version) if use_metadata: manager.enable_metadata_directory(job_id) return build_job_config( job_id=job_id, job_directory=manager.job_directory(job_id), system_properties=manager.system_properties(), tool_id=tool_id, tool_version=tool_version )
def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False)
Setup new job from these inputs and return dict summarizing state (used to configure command line).
2.801501
2.935763
0.954267
tool_config_files = conf.get("tool_config_files", None) if not tool_config_files: # For compatibility with Galaxy, allow tool_config_file # option name. tool_config_files = conf.get("tool_config_file", None) toolbox = None if tool_config_files: toolbox = ToolBox(tool_config_files) else: log.info(NOT_WHITELIST_WARNING) self.toolbox = toolbox self.authorizer = get_authorizer(toolbox)
def __setup_tool_config(self, conf)
Sets up the toolbox object and authorization mechanism based on the supplied toolbox_path.
4.227847
3.8327
1.103099
assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE return list(self.managers.values())[0]
def only_manager(self)
Convenience accessor for tests and contexts with a sole manager.
5.610364
4.307002
1.302615
configuration_file = global_conf.get("__file__", None) webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf) return webapp
def app_factory(global_conf, **local_conf)
Returns the Pulsar WSGI application.
6.46147
6.188916
1.044039
while 1: (op, obj) = self.work_queue.get() if op is STOP_SIGNAL: return try: (job_id, command_line) = obj try: os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE)) except Exception: log.exception("Running command but failed to delete - command may rerun on Pulsar boot.") # _run will not do anything if job has been cancelled. self._run(job_id, command_line, background=False) except Exception: log.warn("Uncaught exception running job with job_id %s" % job_id) traceback.print_exc()
def run_next(self)
Run the next item in the queue (a job waiting to run).
6.196301
6.047219
1.024653
print 'Checking dependencies...' if not HAS_VIRTUALENV: print 'Virtual environment not found.' # Try installing it via easy_install... if HAS_EASY_INSTALL: print 'Installing virtualenv via easy_install...', run_command(['easy_install', 'virtualenv'], die_message='easy_install failed to install virtualenv' '\ndevelopment requires virtualenv, please' ' install it using your favorite tool') if not run_command(['which', 'virtualenv']): die('ERROR: virtualenv not found in path.\n\ndevelopment ' ' requires virtualenv, please install it using your' ' favorite package management tool and ensure' ' virtualenv is in your path') print 'virtualenv installation done.' else: die('easy_install not found.\n\nInstall easy_install' ' (python-setuptools in ubuntu) or virtualenv by hand,' ' then rerun.') print 'dependency check done.'
def check_dependencies()
Make sure virtualenv is in the path.
6.445462
5.983135
1.077272