Dataset schema (value ranges as reported by the source dump):

    id_within_dataset     int64    1 – 55.5k
    snippet               string   19 – 14.2k characters
    tokens                list     6 – 1.63k tokens
    nl                    string   6 – 352 characters
    split_within_dataset  string   1 distinct value (train)
    is_duplicated         bool     2 classes
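Each record below pairs a Python snippet with its token sequence, a natural-language summary (nl), its split, and a duplication flag. As a minimal sketch of what one record looks like in code — the field values are illustrative and the count_duplicates helper is hypothetical, not part of the dataset:

def count_duplicates(records):
    # records are plain dicts whose keys match the schema above
    return sum(1 for r in records if r['is_duplicated'])

record = {
    'id_within_dataset': 3767,
    'snippet': 'def _wrap_with_after(action, responder): ...',
    'tokens': ['def', '_wrap_with_after', '(', 'action', ',', 'responder', ')', ':'],
    'nl': 'execute the given action function after a responder method.',
    'split_within_dataset': 'train',
    'is_duplicated': False,
}
print(count_duplicates([record]))  # 0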
3,767
def _wrap_with_after(action, responder):
    if ('resource' in get_argnames(action)):
        shim = action
    else:
        def shim(req, resp, resource):
            action(req, resp)

    @wraps(responder)
    def do_after(self, req, resp, **kwargs):
        responder(self, req, resp, **kwargs)
        shim(req, resp, self)
    return do_after
[ "def", "_wrap_with_after", "(", "action", ",", "responder", ")", ":", "if", "(", "'resource'", "in", "get_argnames", "(", "action", ")", ")", ":", "shim", "=", "action", "else", ":", "def", "shim", "(", "req", ",", "resp", ",", "resource", ")", ":", "action", "(", "req", ",", "resp", ")", "@", "wraps", "(", "responder", ")", "def", "do_after", "(", "self", ",", "req", ",", "resp", ",", "**", "kwargs", ")", ":", "responder", "(", "self", ",", "req", ",", "resp", ",", "**", "kwargs", ")", "shim", "(", "req", ",", "resp", ",", "self", ")", "return", "do_after" ]
execute the given action function after a responder method.
train
false
3,768
def DeconstructTimestampAssetId(id_prefix, asset_id, reverse_ts=True):
    assert IdPrefix.IsValid(id_prefix), id_prefix
    assert (asset_id[0] == id_prefix), asset_id
    byte_str = base64hex.B64HexDecode(asset_id[1:], padding=False)
    (timestamp,) = struct.unpack('>I', byte_str[:4])
    if reverse_ts:
        timestamp = (((1L << 32) - timestamp) - 1)
    (device_id, num_bytes) = util.DecodeVarLengthNumber(byte_str[4:])
    uniquifier = _DecodeUniquifier(byte_str[(4 + num_bytes):])
    return (timestamp, device_id, uniquifier)
[ "def", "DeconstructTimestampAssetId", "(", "id_prefix", ",", "asset_id", ",", "reverse_ts", "=", "True", ")", ":", "assert", "IdPrefix", ".", "IsValid", "(", "id_prefix", ")", ",", "id_prefix", "assert", "(", "asset_id", "[", "0", "]", "==", "id_prefix", ")", ",", "asset_id", "byte_str", "=", "base64hex", ".", "B64HexDecode", "(", "asset_id", "[", "1", ":", "]", ",", "padding", "=", "False", ")", "(", "timestamp", ",", ")", "=", "struct", ".", "unpack", "(", "'>I'", ",", "byte_str", "[", ":", "4", "]", ")", "if", "reverse_ts", ":", "timestamp", "=", "(", "(", "(", "1", "L", "<<", "32", ")", "-", "timestamp", ")", "-", "1", ")", "(", "device_id", ",", "num_bytes", ")", "=", "util", ".", "DecodeVarLengthNumber", "(", "byte_str", "[", "4", ":", "]", ")", "uniquifier", "=", "_DecodeUniquifier", "(", "byte_str", "[", "(", "4", "+", "num_bytes", ")", ":", "]", ")", "return", "(", "timestamp", ",", "device_id", ",", "uniquifier", ")" ]
deconstructs an asset id that was previously constructed according to the rules of "ConstructTimestampAssetId".
train
false
3,769
def modify_atomic_group(id, **data):
    models.AtomicGroup.smart_get(id).update_object(data)
[ "def", "modify_atomic_group", "(", "id", ",", "**", "data", ")", ":", "models", ".", "AtomicGroup", ".", "smart_get", "(", "id", ")", ".", "update_object", "(", "data", ")" ]
modify atomic group.
train
false
3,770
def apply_query(query, entities, _key=None):
    if (not isinstance(query, Query)):
        raise datastore_errors.BadArgumentError(('query argument must be a datastore_query.Query (%r)' % (query,)))
    if (not isinstance(entities, list)):
        raise datastore_errors.BadArgumentError(('entities argument must be a list (%r)' % (entities,)))
    key = (_key or (lambda x: x))
    filtered_results = filter((lambda r: query._key_filter(key(r))), entities)
    if (not query._order):
        if query._filter_predicate:
            return filter((lambda r: query._filter_predicate(key(r))), filtered_results)
        return filtered_results
    names = query._order._get_prop_names()
    if query._filter_predicate:
        names |= query._filter_predicate._get_prop_names()
    exists_filter = _PropertyExistsFilter(names)
    value_maps = []
    for result in filtered_results:
        value_map = _make_key_value_map(key(result), names)
        if (exists_filter._apply(value_map) and ((not query._filter_predicate) or query._filter_predicate._prune(value_map))):
            value_map['__result__'] = result
            value_maps.append(value_map)
    value_maps.sort(query._order._cmp)
    return [value_map['__result__'] for value_map in value_maps]
[ "def", "apply_query", "(", "query", ",", "entities", ",", "_key", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "query", ",", "Query", ")", ")", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'query argument must be a datastore_query.Query (%r)'", "%", "(", "query", ",", ")", ")", ")", "if", "(", "not", "isinstance", "(", "entities", ",", "list", ")", ")", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'entities argument must be a list (%r)'", "%", "(", "entities", ",", ")", ")", ")", "key", "=", "(", "_key", "or", "(", "lambda", "x", ":", "x", ")", ")", "filtered_results", "=", "filter", "(", "(", "lambda", "r", ":", "query", ".", "_key_filter", "(", "key", "(", "r", ")", ")", ")", ",", "entities", ")", "if", "(", "not", "query", ".", "_order", ")", ":", "if", "query", ".", "_filter_predicate", ":", "return", "filter", "(", "(", "lambda", "r", ":", "query", ".", "_filter_predicate", "(", "key", "(", "r", ")", ")", ")", ",", "filtered_results", ")", "return", "filtered_results", "names", "=", "query", ".", "_order", ".", "_get_prop_names", "(", ")", "if", "query", ".", "_filter_predicate", ":", "names", "|=", "query", ".", "_filter_predicate", ".", "_get_prop_names", "(", ")", "exists_filter", "=", "_PropertyExistsFilter", "(", "names", ")", "value_maps", "=", "[", "]", "for", "result", "in", "filtered_results", ":", "value_map", "=", "_make_key_value_map", "(", "key", "(", "result", ")", ",", "names", ")", "if", "(", "exists_filter", ".", "_apply", "(", "value_map", ")", "and", "(", "(", "not", "query", ".", "_filter_predicate", ")", "or", "query", ".", "_filter_predicate", ".", "_prune", "(", "value_map", ")", ")", ")", ":", "value_map", "[", "'__result__'", "]", "=", "result", "value_maps", ".", "append", "(", "value_map", ")", "value_maps", ".", "sort", "(", "query", ".", "_order", ".", "_cmp", ")", "return", "[", "value_map", "[", "'__result__'", "]", "for", "value_map", "in", "value_maps", "]" ]
performs the given query on a set of in-memory entities.
train
false
3,771
def _activities_union_all(*qlist):
    import ckan.model as model
    return model.Session.query(model.Activity).select_entity_from(union_all(*[q.subquery().select() for q in qlist])).distinct(model.Activity.timestamp)
[ "def", "_activities_union_all", "(", "*", "qlist", ")", ":", "import", "ckan", ".", "model", "as", "model", "return", "model", ".", "Session", ".", "query", "(", "model", ".", "Activity", ")", ".", "select_entity_from", "(", "union_all", "(", "*", "[", "q", ".", "subquery", "(", ")", ".", "select", "(", ")", "for", "q", "in", "qlist", "]", ")", ")", ".", "distinct", "(", "model", ".", "Activity", ".", "timestamp", ")" ]
return union of two or more queries sorted by timestamp.
train
false
3,772
def _encode_multipart(vars, content_type):
    f = StringIO()
    w = f.write
    CRLF = '\r\n'
    boundary = _get_multipart_boundary(content_type)
    if (not boundary):
        boundary = os.urandom(10).encode('hex')
        content_type += ('; boundary=%s' % boundary)
    for (name, value) in vars:
        w(('--%s' % boundary))
        w(CRLF)
        assert (name is not None), ('Value associated with no name: %r' % value)
        w(('Content-Disposition: form-data; name="%s"' % name))
        filename = None
        if getattr(value, 'filename', None):
            filename = value.filename
        elif isinstance(value, (list, tuple)):
            (filename, value) = value
            if hasattr(value, 'read'):
                value = value.read()
        if (filename is not None):
            w(('; filename="%s"' % filename))
        w(CRLF)
        if getattr(value, 'type', None):
            w(('Content-type: %s' % value.type))
            if value.type_options:
                for (ct_name, ct_value) in sorted(value.type_options.items()):
                    w(('; %s="%s"' % (ct_name, ct_value)))
            w(CRLF)
        w(CRLF)
        if hasattr(value, 'value'):
            w(value.value)
        else:
            w(value)
        w(CRLF)
    w(('--%s--' % boundary))
    return (content_type, f.getvalue())
[ "def", "_encode_multipart", "(", "vars", ",", "content_type", ")", ":", "f", "=", "StringIO", "(", ")", "w", "=", "f", ".", "write", "CRLF", "=", "'\\r\\n'", "boundary", "=", "_get_multipart_boundary", "(", "content_type", ")", "if", "(", "not", "boundary", ")", ":", "boundary", "=", "os", ".", "urandom", "(", "10", ")", ".", "encode", "(", "'hex'", ")", "content_type", "+=", "(", "'; boundary=%s'", "%", "boundary", ")", "for", "(", "name", ",", "value", ")", "in", "vars", ":", "w", "(", "(", "'--%s'", "%", "boundary", ")", ")", "w", "(", "CRLF", ")", "assert", "(", "name", "is", "not", "None", ")", ",", "(", "'Value associated with no name: %r'", "%", "value", ")", "w", "(", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "name", ")", ")", "filename", "=", "None", "if", "getattr", "(", "value", ",", "'filename'", ",", "None", ")", ":", "filename", "=", "value", ".", "filename", "elif", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "(", "filename", ",", "value", ")", "=", "value", "if", "hasattr", "(", "value", ",", "'read'", ")", ":", "value", "=", "value", ".", "read", "(", ")", "if", "(", "filename", "is", "not", "None", ")", ":", "w", "(", "(", "'; filename=\"%s\"'", "%", "filename", ")", ")", "w", "(", "CRLF", ")", "if", "getattr", "(", "value", ",", "'type'", ",", "None", ")", ":", "w", "(", "(", "'Content-type: %s'", "%", "value", ".", "type", ")", ")", "if", "value", ".", "type_options", ":", "for", "(", "ct_name", ",", "ct_value", ")", "in", "sorted", "(", "value", ".", "type_options", ".", "items", "(", ")", ")", ":", "w", "(", "(", "'; %s=\"%s\"'", "%", "(", "ct_name", ",", "ct_value", ")", ")", ")", "w", "(", "CRLF", ")", "w", "(", "CRLF", ")", "if", "hasattr", "(", "value", ",", "'value'", ")", ":", "w", "(", "value", ".", "value", ")", "else", ":", "w", "(", "value", ")", "w", "(", "CRLF", ")", "w", "(", "(", "'--%s--'", "%", "boundary", ")", ")", "return", "(", "content_type", ",", "f", ".", "getvalue", "(", ")", ")" ]
build a multipart/form-data body with a randomly generated boundary.
train
false
3,773
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
    return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
[ "def", "partial_token_sort_ratio", "(", "s1", ",", "s2", ",", "force_ascii", "=", "True", ",", "full_process", "=", "True", ")", ":", "return", "_token_sort", "(", "s1", ",", "s2", ",", "partial", "=", "True", ",", "force_ascii", "=", "force_ascii", ",", "full_process", "=", "full_process", ")" ]
return the ratio of the most similar substring as a number between 0 and 100, but sorting the tokens before comparing.
train
true
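The snippet above matches fuzzywuzzy's public API; assuming that package is what this row came from, a quick usage sketch:

from fuzzywuzzy import fuzz

# token-sorted partial matching is order-insensitive, so reordered
# words still score a perfect match
print(fuzz.partial_token_sort_ratio('world hello', 'hello world!'))  # 100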
3,774
def _safe_svd(A, **kwargs):
    if kwargs.get('overwrite_a', False):
        raise ValueError('Cannot set overwrite_a=True with this function')
    try:
        return linalg.svd(A, **kwargs)
    except np.linalg.LinAlgError as exp:
        from .utils import warn
        if ('lapack_driver' in _get_args(linalg.svd)):
            warn(('SVD error (%s), attempting to use GESVD instead of GESDD' % (exp,)))
            return linalg.svd(A, lapack_driver='gesvd', **kwargs)
        else:
            raise
[ "def", "_safe_svd", "(", "A", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'overwrite_a'", ",", "False", ")", ":", "raise", "ValueError", "(", "'Cannot set overwrite_a=True with this function'", ")", "try", ":", "return", "linalg", ".", "svd", "(", "A", ",", "**", "kwargs", ")", "except", "np", ".", "linalg", ".", "LinAlgError", "as", "exp", ":", "from", ".", "utils", "import", "warn", "if", "(", "'lapack_driver'", "in", "_get_args", "(", "linalg", ".", "svd", ")", ")", ":", "warn", "(", "(", "'SVD error (%s), attempting to use GESVD instead of GESDD'", "%", "(", "exp", ",", ")", ")", ")", "return", "linalg", ".", "svd", "(", "A", ",", "lapack_driver", "=", "'gesvd'", ",", "**", "kwargs", ")", "else", ":", "raise" ]
wrapper to get around the "SVD did not converge" error of death.
train
false
3,775
def PackDatetime(name, value, pbvalue):
    pbvalue.set_int64value(DatetimeToTimestamp(value))
[ "def", "PackDatetime", "(", "name", ",", "value", ",", "pbvalue", ")", ":", "pbvalue", ".", "set_int64value", "(", "DatetimeToTimestamp", "(", "value", ")", ")" ]
packs a datetime-typed property into an entity_pb.
train
false
3,777
def morphological_laplace(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0):
    tmp1 = grey_dilation(input, size, footprint, structure, None, mode, cval, origin)
    if isinstance(output, numpy.ndarray):
        grey_erosion(input, size, footprint, structure, output, mode, cval, origin)
        numpy.add(tmp1, output, output)
        numpy.subtract(output, input, output)
        return numpy.subtract(output, input, output)
    else:
        tmp2 = grey_erosion(input, size, footprint, structure, None, mode, cval, origin)
        numpy.add(tmp1, tmp2, tmp2)
        numpy.subtract(tmp2, input, tmp2)
        numpy.subtract(tmp2, input, tmp2)
        return tmp2
[ "def", "morphological_laplace", "(", "input", ",", "size", "=", "None", ",", "footprint", "=", "None", ",", "structure", "=", "None", ",", "output", "=", "None", ",", "mode", "=", "'reflect'", ",", "cval", "=", "0.0", ",", "origin", "=", "0", ")", ":", "tmp1", "=", "grey_dilation", "(", "input", ",", "size", ",", "footprint", ",", "structure", ",", "None", ",", "mode", ",", "cval", ",", "origin", ")", "if", "isinstance", "(", "output", ",", "numpy", ".", "ndarray", ")", ":", "grey_erosion", "(", "input", ",", "size", ",", "footprint", ",", "structure", ",", "output", ",", "mode", ",", "cval", ",", "origin", ")", "numpy", ".", "add", "(", "tmp1", ",", "output", ",", "output", ")", "numpy", ".", "subtract", "(", "output", ",", "input", ",", "output", ")", "return", "numpy", ".", "subtract", "(", "output", ",", "input", ",", "output", ")", "else", ":", "tmp2", "=", "grey_erosion", "(", "input", ",", "size", ",", "footprint", ",", "structure", ",", "None", ",", "mode", ",", "cval", ",", "origin", ")", "numpy", ".", "add", "(", "tmp1", ",", "tmp2", ",", "tmp2", ")", "numpy", ".", "subtract", "(", "tmp2", ",", "input", ",", "tmp2", ")", "numpy", ".", "subtract", "(", "tmp2", ",", "input", ",", "tmp2", ")", "return", "tmp2" ]
multi-dimensional morphological laplace.
train
false
3,778
def writeToMongo():
    sys.stderr.write('Saving to db.messages.errors, will not check for duplicates!')
    from pymongo import Connection
    connection = Connection()
    db = connection['messages']
    errorcodes = db['errors']
    for errCode in messages.keys():
        sys.stderr.write('Inserting code: {}\n'.format(errCode))
        result = errorcodes.insert(messages[errCode])
        sys.stderr.write('Result: {}\n'.format(result))
[ "def", "writeToMongo", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Saving to db.messages.errors, will not check for duplicates!'", ")", "from", "pymongo", "import", "Connection", "connection", "=", "Connection", "(", ")", "db", "=", "connection", "[", "'messages'", "]", "errorcodes", "=", "db", "[", "'errors'", "]", "for", "errCode", "in", "messages", ".", "keys", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Inserting code: {}\\n'", ".", "format", "(", "errCode", ")", ")", "result", "=", "errorcodes", ".", "insert", "(", "messages", "[", "errCode", "]", ")", "sys", ".", "stderr", ".", "write", "(", "'Result: {}\\n'", ".", "format", "(", "result", ")", ")" ]
pipe the messages array into mongodb.
train
false
3,780
def pip_install(req_file, constraints_file=None):
    cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
    if constraints_file:
        cmd += ' -c {}'.format(constraints_file)
    if WHEELHOUSE_PATH:
        cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
    return cmd
[ "def", "pip_install", "(", "req_file", ",", "constraints_file", "=", "None", ")", ":", "cmd", "=", "bin_prefix", "(", "'pip install --exists-action w --upgrade -r {} '", ".", "format", "(", "req_file", ")", ")", "if", "constraints_file", ":", "cmd", "+=", "' -c {}'", ".", "format", "(", "constraints_file", ")", "if", "WHEELHOUSE_PATH", ":", "cmd", "+=", "' --no-index --find-links={}'", ".", "format", "(", "WHEELHOUSE_PATH", ")", "return", "cmd" ]
build the pip install command for the given requirements file.
train
false
3,781
def sorted_by_field(issues, field='closed_at', reverse=False):
    return sorted(issues, key=(lambda i: i[field]), reverse=reverse)
[ "def", "sorted_by_field", "(", "issues", ",", "field", "=", "'closed_at'", ",", "reverse", "=", "False", ")", ":", "return", "sorted", "(", "issues", ",", "key", "=", "(", "lambda", "i", ":", "i", "[", "field", "]", ")", ",", "reverse", "=", "reverse", ")" ]
return a list of issues sorted by closing date.
train
true
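A self-contained usage sketch for sorted_by_field as defined above; the sample issue dicts are made up, and ISO-8601 date strings sort correctly as plain strings:

issues = [
    {'title': 'b', 'closed_at': '2016-02-01'},
    {'title': 'a', 'closed_at': '2016-01-15'},
]
print([i['title'] for i in sorted_by_field(issues)])  # ['a', 'b']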
3,782
def SerializeEntries(entries):
    output = []
    for (python_format, wire_format, type_descriptor) in entries:
        if ((wire_format is None) or (python_format and type_descriptor.IsDirty(python_format))):
            wire_format = type_descriptor.ConvertToWireFormat(python_format)
        output.extend(wire_format)
    return ''.join(output)
[ "def", "SerializeEntries", "(", "entries", ")", ":", "output", "=", "[", "]", "for", "(", "python_format", ",", "wire_format", ",", "type_descriptor", ")", "in", "entries", ":", "if", "(", "(", "wire_format", "is", "None", ")", "or", "(", "python_format", "and", "type_descriptor", ".", "IsDirty", "(", "python_format", ")", ")", ")", ":", "wire_format", "=", "type_descriptor", ".", "ConvertToWireFormat", "(", "python_format", ")", "output", ".", "extend", "(", "wire_format", ")", "return", "''", ".", "join", "(", "output", ")" ]
serializes given triplets of python and wire values and a descriptor.
train
false
3,784
def run_CSS(input_path, out_path, output_CSS_statistics):
    if (not output_CSS_statistics):
        command_args = [('-i %s -o %s' % (input_path, out_path))]
    else:
        command_args = [('-i %s -o %s -s %s' % (input_path, out_path, output_CSS_statistics))]
    rsl = RExecutor(TmpDir=get_qiime_temp_dir())
    app_result = rsl(command_args=command_args, script_name='CSS.r')
    return app_result
[ "def", "run_CSS", "(", "input_path", ",", "out_path", ",", "output_CSS_statistics", ")", ":", "if", "(", "not", "output_CSS_statistics", ")", ":", "command_args", "=", "[", "(", "'-i %s -o %s'", "%", "(", "input_path", ",", "out_path", ")", ")", "]", "else", ":", "command_args", "=", "[", "(", "'-i %s -o %s -s %s'", "%", "(", "input_path", ",", "out_path", ",", "output_CSS_statistics", ")", ")", "]", "rsl", "=", "RExecutor", "(", "TmpDir", "=", "get_qiime_temp_dir", "(", ")", ")", "app_result", "=", "rsl", "(", "command_args", "=", "command_args", ",", "script_name", "=", "'CSS.r'", ")", "return", "app_result" ]
run metagenomeSeq's CSS algorithm through Rscript.
train
false
3,785
def sparse_categorical_accuracy(y_true, y_pred):
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(tf.argmax(y_pred, (len(y_pred.get_shape()) - 1)), tf.float32)
    return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
[ "def", "sparse_categorical_accuracy", "(", "y_true", ",", "y_pred", ")", ":", "y_true", "=", "tf", ".", "cast", "(", "y_true", ",", "tf", ".", "float32", ")", "y_pred", "=", "tf", ".", "cast", "(", "tf", ".", "argmax", "(", "y_pred", ",", "(", "len", "(", "y_pred", ".", "get_shape", "(", ")", ")", "-", "1", ")", ")", ",", "tf", ".", "float32", ")", "return", "tf", ".", "reduce_mean", "(", "tf", ".", "cast", "(", "tf", ".", "equal", "(", "y_true", ",", "y_pred", ")", ",", "tf", ".", "float32", ")", ")" ]
multi-class prediction accuracy.
train
false
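A small sanity check of the metric above, assuming an eager TensorFlow environment where these ops evaluate immediately (the snippet itself looks like TF1-era Keras code):

import tensorflow as tf

y_true = tf.constant([1.0, 2.0])
y_pred = tf.constant([[0.1, 0.8, 0.1],
                      [0.2, 0.3, 0.5]])
# argmax over the last axis gives classes [1, 2], matching y_true -> 1.0
print(sparse_categorical_accuracy(y_true, y_pred))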
3,786
def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
    (instances, reservation_id) = orig_func(*args, **kwargs)
    fake_cache = _get_fake_cache()
    for instance in instances:
        instance['info_cache'].network_info = fake_cache
        db.instance_info_cache_update(args[1], instance['uuid'], {'network_info': fake_cache})
    return (instances, reservation_id)
[ "def", "_create_instances_with_cached_ips", "(", "orig_func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "(", "instances", ",", "reservation_id", ")", "=", "orig_func", "(", "*", "args", ",", "**", "kwargs", ")", "fake_cache", "=", "_get_fake_cache", "(", ")", "for", "instance", "in", "instances", ":", "instance", "[", "'info_cache'", "]", ".", "network_info", "=", "fake_cache", "db", ".", "instance_info_cache_update", "(", "args", "[", "1", "]", ",", "instance", "[", "'uuid'", "]", ",", "{", "'network_info'", ":", "fake_cache", "}", ")", "return", "(", "instances", ",", "reservation_id", ")" ]
kludge the above kludge so that the database doesn't get out of sync with the actual instance.
train
false
3,787
def _matplotlib_list(interval_list):
    xlist = []
    ylist = []
    if len(interval_list):
        for intervals in interval_list:
            intervalx = intervals[0]
            intervaly = intervals[1]
            xlist.extend([intervalx.start, intervalx.start, intervalx.end, intervalx.end, None])
            ylist.extend([intervaly.start, intervaly.end, intervaly.end, intervaly.start, None])
    else:
        xlist.extend([None, None, None, None])
        ylist.extend([None, None, None, None])
    return (xlist, ylist)
[ "def", "_matplotlib_list", "(", "interval_list", ")", ":", "xlist", "=", "[", "]", "ylist", "=", "[", "]", "if", "len", "(", "interval_list", ")", ":", "for", "intervals", "in", "interval_list", ":", "intervalx", "=", "intervals", "[", "0", "]", "intervaly", "=", "intervals", "[", "1", "]", "xlist", ".", "extend", "(", "[", "intervalx", ".", "start", ",", "intervalx", ".", "start", ",", "intervalx", ".", "end", ",", "intervalx", ".", "end", ",", "None", "]", ")", "ylist", ".", "extend", "(", "[", "intervaly", ".", "start", ",", "intervaly", ".", "end", ",", "intervaly", ".", "end", ",", "intervaly", ".", "start", ",", "None", "]", ")", "else", ":", "xlist", ".", "extend", "(", "[", "None", ",", "None", ",", "None", ",", "None", "]", ")", "ylist", ".", "extend", "(", "[", "None", ",", "None", ",", "None", ",", "None", "]", ")", "return", "(", "xlist", ",", "ylist", ")" ]
returns lists for matplotlib fill command from a list of bounding rectangular intervals.
train
false
3,788
def versions_from_parentdir(parentdir_prefix, root, verbose):
    dirname = os.path.basename(root)
    if (not dirname.startswith(parentdir_prefix)):
        if verbose:
            print(("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix)))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {'version': dirname[len(parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None}
[ "def", "versions_from_parentdir", "(", "parentdir_prefix", ",", "root", ",", "verbose", ")", ":", "dirname", "=", "os", ".", "path", ".", "basename", "(", "root", ")", "if", "(", "not", "dirname", ".", "startswith", "(", "parentdir_prefix", ")", ")", ":", "if", "verbose", ":", "print", "(", "(", "\"guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'\"", "%", "(", "root", ",", "dirname", ",", "parentdir_prefix", ")", ")", ")", "raise", "NotThisMethod", "(", "\"rootdir doesn't start with parentdir_prefix\"", ")", "return", "{", "'version'", ":", "dirname", "[", "len", "(", "parentdir_prefix", ")", ":", "]", ",", "'full-revisionid'", ":", "None", ",", "'dirty'", ":", "False", ",", "'error'", ":", "None", "}" ]
try to determine the version from the parent directory name.
train
true
3,789
def truncated_cube_graph(create_using=None):
    description = ['adjacencylist', 'Truncated Cube Graph', 24,
                   [[2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9],
                    [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23],
                    [16], [20, 24], [18, 19], [21], [20], [24], [22], [23],
                    [24], []]]
    G = make_small_undirected_graph(description, create_using)
    return G
[ "def", "truncated_cube_graph", "(", "create_using", "=", "None", ")", ":", "description", "=", "[", "'adjacencylist'", ",", "'Truncated Cube Graph'", ",", "24", ",", "[", "[", "2", ",", "3", ",", "5", "]", ",", "[", "12", ",", "15", "]", ",", "[", "4", ",", "5", "]", ",", "[", "7", ",", "9", "]", ",", "[", "6", "]", ",", "[", "17", ",", "19", "]", ",", "[", "8", ",", "9", "]", ",", "[", "11", ",", "13", "]", ",", "[", "10", "]", ",", "[", "18", ",", "21", "]", ",", "[", "12", ",", "13", "]", ",", "[", "15", "]", ",", "[", "14", "]", ",", "[", "22", ",", "23", "]", ",", "[", "16", "]", ",", "[", "20", ",", "24", "]", ",", "[", "18", ",", "19", "]", ",", "[", "21", "]", ",", "[", "20", "]", ",", "[", "24", "]", ",", "[", "22", "]", ",", "[", "23", "]", ",", "[", "24", "]", ",", "[", "]", "]", "]", "G", "=", "make_small_undirected_graph", "(", "description", ",", "create_using", ")", "return", "G" ]
return the skeleton of the truncated cube.
train
false
3,791
def colorscale_to_scale(colorscale):
    scale_list = []
    for item in colorscale:
        scale_list.append(item[0])
    return scale_list
[ "def", "colorscale_to_scale", "(", "colorscale", ")", ":", "scale_list", "=", "[", "]", "for", "item", "in", "colorscale", ":", "scale_list", ".", "append", "(", "item", "[", "0", "]", ")", "return", "scale_list" ]
extracts the interpolation scale values from colorscale as a list.
train
false
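Usage sketch with a Plotly-style colorscale, where each entry is a [scale_value, color] pair (the sample values are made up):

colorscale = [[0.0, 'rgb(0,0,255)'], [0.5, 'rgb(255,255,255)'], [1.0, 'rgb(255,0,0)']]
print(colorscale_to_scale(colorscale))  # [0.0, 0.5, 1.0]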
3,792
def get_offset_name(offset):
    msg = 'get_offset_name(offset) is deprecated. Use offset.freqstr instead'
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return offset.freqstr
[ "def", "get_offset_name", "(", "offset", ")", ":", "msg", "=", "'get_offset_name(offset) is deprecated. Use offset.freqstr instead'", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "offset", ".", "freqstr" ]
return the rule name associated with a DateOffset object; example: get_offset_name(BMonthEnd(1)) --> "EOM".
train
false
3,793
def closure(source, pointerType, accumulator=None):
    if isinstance(source, Word):
        return reduce(union, map((lambda s, t=pointerType: tree(s, t)), source.getSenses()))
    _requireSource(source)
    if (accumulator is None):
        accumulator = []
    if (source not in accumulator):
        accumulator.append(source)
        for target in source.pointerTargets(pointerType):
            closure(target, pointerType, accumulator)
    return accumulator
[ "def", "closure", "(", "source", ",", "pointerType", ",", "accumulator", "=", "None", ")", ":", "if", "isinstance", "(", "source", ",", "Word", ")", ":", "return", "reduce", "(", "union", ",", "map", "(", "(", "lambda", "s", ",", "t", "=", "pointerType", ":", "tree", "(", "s", ",", "t", ")", ")", ",", "source", ".", "getSenses", "(", ")", ")", ")", "_requireSource", "(", "source", ")", "if", "(", "accumulator", "is", "None", ")", ":", "accumulator", "=", "[", "]", "if", "(", "source", "not", "in", "accumulator", ")", ":", "accumulator", ".", "append", "(", "source", ")", "for", "target", "in", "source", ".", "pointerTargets", "(", "pointerType", ")", ":", "closure", "(", "target", ",", "pointerType", ",", "accumulator", ")", "return", "accumulator" ]
return the transitive closure of source under the pointerType relationship.
train
false
3,794
def edge_dfs(G, source=None, orientation='original'):
    nodes = list(G.nbunch_iter(source))
    if (not nodes):
        raise StopIteration
    kwds = {'data': False}
    if G.is_multigraph():
        kwds['keys'] = True
    (out_edges, key, tailhead) = helper_funcs(G, orientation)
    visited_edges = set()
    visited_nodes = set()
    edges = {}
    for start_node in nodes:
        stack = [start_node]
        while stack:
            current_node = stack[(-1)]
            if (current_node not in visited_nodes):
                edges[current_node] = out_edges(current_node, **kwds)
                visited_nodes.add(current_node)
            try:
                edge = next(edges[current_node])
            except StopIteration:
                stack.pop()
            else:
                edge_key = key(edge)
                if (edge_key not in visited_edges):
                    visited_edges.add(edge_key)
                    stack.append(tailhead(edge)[1])
                    (yield edge)
[ "def", "edge_dfs", "(", "G", ",", "source", "=", "None", ",", "orientation", "=", "'original'", ")", ":", "nodes", "=", "list", "(", "G", ".", "nbunch_iter", "(", "source", ")", ")", "if", "(", "not", "nodes", ")", ":", "raise", "StopIteration", "kwds", "=", "{", "'data'", ":", "False", "}", "if", "G", ".", "is_multigraph", "(", ")", ":", "kwds", "[", "'keys'", "]", "=", "True", "(", "out_edges", ",", "key", ",", "tailhead", ")", "=", "helper_funcs", "(", "G", ",", "orientation", ")", "visited_edges", "=", "set", "(", ")", "visited_nodes", "=", "set", "(", ")", "edges", "=", "{", "}", "for", "start_node", "in", "nodes", ":", "stack", "=", "[", "start_node", "]", "while", "stack", ":", "current_node", "=", "stack", "[", "(", "-", "1", ")", "]", "if", "(", "current_node", "not", "in", "visited_nodes", ")", ":", "edges", "[", "current_node", "]", "=", "out_edges", "(", "current_node", ",", "**", "kwds", ")", "visited_nodes", ".", "add", "(", "current_node", ")", "try", ":", "edge", "=", "next", "(", "edges", "[", "current_node", "]", ")", "except", "StopIteration", ":", "stack", ".", "pop", "(", ")", "else", ":", "edge_key", "=", "key", "(", "edge", ")", "if", "(", "edge_key", "not", "in", "visited_edges", ")", ":", "visited_edges", ".", "add", "(", "edge_key", ")", "stack", ".", "append", "(", "tailhead", "(", "edge", ")", "[", "1", "]", ")", "(", "yield", "edge", ")" ]
a directed, depth-first traversal of edges in G, beginning at the source nodes.
train
false
3,795
def dumb_property_dict(style):
    return dict([(x.strip(), y.strip()) for (x, y) in [z.split(':', 1) for z in style.split(';') if (':' in z)]])
[ "def", "dumb_property_dict", "(", "style", ")", ":", "return", "dict", "(", "[", "(", "x", ".", "strip", "(", ")", ",", "y", ".", "strip", "(", ")", ")", "for", "(", "x", ",", "y", ")", "in", "[", "z", ".", "split", "(", "':'", ",", "1", ")", "for", "z", "in", "style", ".", "split", "(", "';'", ")", "if", "(", "':'", "in", "z", ")", "]", "]", ")" ]
returns a hash of css attributes.
train
true
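A quick example of the css parser above; entries without a colon are silently dropped:

style = 'color: red; font-weight : bold; invalid-entry'
print(dumb_property_dict(style))
# {'color': 'red', 'font-weight': 'bold'}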
3,796
def rm(pattern, directory=False):

    def safe_remove(path):
        try:
            os.remove(path)
        except OSError as err:
            if (err.errno != errno.ENOENT):
                raise
        else:
            print('rm %s' % path)

    def safe_rmtree(path):

        def onerror(fun, path, excinfo):
            exc = excinfo[1]
            if (exc.errno != errno.ENOENT):
                raise

        existed = os.path.isdir(path)
        shutil.rmtree(path, onerror=onerror)
        if existed:
            print('rmdir -f %s' % path)

    if ('*' not in pattern):
        if directory:
            safe_rmtree(pattern)
        else:
            safe_remove(pattern)
        return

    for (root, subdirs, subfiles) in os.walk('.'):
        root = os.path.normpath(root)
        if root.startswith('.git/'):
            continue
        found = fnmatch.filter((subdirs if directory else subfiles), pattern)
        for name in found:
            path = os.path.join(root, name)
            if directory:
                print('rmdir -f %s' % path)
                safe_rmtree(path)
            else:
                print('rm %s' % path)
                safe_remove(path)
[ "def", "rm", "(", "pattern", ",", "directory", "=", "False", ")", ":", "def", "safe_remove", "(", "path", ")", ":", "try", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", "as", "err", ":", "if", "(", "err", ".", "errno", "!=", "errno", ".", "ENOENT", ")", ":", "raise", "else", ":", "print", "(", "'rm %s'", "%", "path", ")", "def", "safe_rmtree", "(", "path", ")", ":", "def", "onerror", "(", "fun", ",", "path", ",", "excinfo", ")", ":", "exc", "=", "excinfo", "[", "1", "]", "if", "(", "exc", ".", "errno", "!=", "errno", ".", "ENOENT", ")", ":", "raise", "existed", "=", "os", ".", "path", ".", "isdir", "(", "path", ")", "shutil", ".", "rmtree", "(", "path", ",", "onerror", "=", "onerror", ")", "if", "existed", ":", "print", "(", "'rmdir -f %s'", "%", "path", ")", "if", "(", "'*'", "not", "in", "pattern", ")", ":", "if", "directory", ":", "safe_rmtree", "(", "pattern", ")", "else", ":", "safe_remove", "(", "pattern", ")", "return", "for", "(", "root", ",", "subdirs", ",", "subfiles", ")", "in", "os", ".", "walk", "(", "'.'", ")", ":", "root", "=", "os", ".", "path", ".", "normpath", "(", "root", ")", "if", "root", ".", "startswith", "(", "'.git/'", ")", ":", "continue", "found", "=", "fnmatch", ".", "filter", "(", "(", "subdirs", "if", "directory", "else", "subfiles", ")", ",", "pattern", ")", "for", "name", "in", "found", ":", "path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", "if", "directory", ":", "print", "(", "'rmdir -f %s'", "%", "path", ")", "safe_rmtree", "(", "path", ")", "else", ":", "print", "(", "'rm %s'", "%", "path", ")", "safe_remove", "(", "path", ")" ]
remove files (or directories, with directory=True) matching the given pattern, ignoring paths that no longer exist.
train
false
3,797
def test_solve_polynomial_cv_1a():
    assert (solve((sqrt(x) - 1), x) == [1])
    assert (solve((sqrt(x) - 2), x) == [4])
    assert (solve(((x ** Rational(1, 4)) - 2), x) == [16])
    assert (solve(((x ** Rational(1, 3)) - 3), x) == [27])
    assert (solve(((sqrt(x) + (x ** Rational(1, 3))) + (x ** Rational(1, 4))), x) == [0])
[ "def", "test_solve_polynomial_cv_1a", "(", ")", ":", "assert", "(", "solve", "(", "(", "sqrt", "(", "x", ")", "-", "1", ")", ",", "x", ")", "==", "[", "1", "]", ")", "assert", "(", "solve", "(", "(", "sqrt", "(", "x", ")", "-", "2", ")", ",", "x", ")", "==", "[", "4", "]", ")", "assert", "(", "solve", "(", "(", "(", "x", "**", "Rational", "(", "1", ",", "4", ")", ")", "-", "2", ")", ",", "x", ")", "==", "[", "16", "]", ")", "assert", "(", "solve", "(", "(", "(", "x", "**", "Rational", "(", "1", ",", "3", ")", ")", "-", "3", ")", ",", "x", ")", "==", "[", "27", "]", ")", "assert", "(", "solve", "(", "(", "(", "sqrt", "(", "x", ")", "+", "(", "x", "**", "Rational", "(", "1", ",", "3", ")", ")", ")", "+", "(", "x", "**", "Rational", "(", "1", ",", "4", ")", ")", ")", ",", "x", ")", "==", "[", "0", "]", ")" ]
test for solving equations that can be converted to a polynomial equation using the change of variable y -> x**Rational.
train
false
3,799
def setup_module(module):
    import os
    import numpy as np
    import random
    _random_seed = os.environ.get('SKLEARN_SEED', None)
    if (_random_seed is None):
        _random_seed = (np.random.uniform() * ((2 ** 31) - 1))
    _random_seed = int(_random_seed)
    print('I: Seeding RNGs with %r' % _random_seed)
    np.random.seed(_random_seed)
    random.seed(_random_seed)
[ "def", "setup_module", "(", "module", ")", ":", "import", "os", "import", "numpy", "as", "np", "import", "random", "_random_seed", "=", "os", ".", "environ", ".", "get", "(", "'SKLEARN_SEED'", ",", "None", ")", "if", "(", "_random_seed", "is", "None", ")", ":", "_random_seed", "=", "(", "np", ".", "random", ".", "uniform", "(", ")", "*", "(", "(", "2", "**", "31", ")", "-", "1", ")", ")", "_random_seed", "=", "int", "(", "_random_seed", ")", "print", "(", "'I: Seeding RNGs with %r'", "%", "_random_seed", ")", "np", ".", "random", ".", "seed", "(", "_random_seed", ")", "random", ".", "seed", "(", "_random_seed", ")" ]
set up test fixtures.
train
false
3,800
def fileobj_name(f):
    if isinstance(f, string_types):
        return f
    elif isinstance(f, gzip.GzipFile):
        return fileobj_name(f.fileobj)
    elif hasattr(f, 'name'):
        return f.name
    elif hasattr(f, 'filename'):
        return f.filename
    elif hasattr(f, '__class__'):
        return str(f.__class__)
    else:
        return str(type(f))
[ "def", "fileobj_name", "(", "f", ")", ":", "if", "isinstance", "(", "f", ",", "string_types", ")", ":", "return", "f", "elif", "isinstance", "(", "f", ",", "gzip", ".", "GzipFile", ")", ":", "return", "fileobj_name", "(", "f", ".", "fileobj", ")", "elif", "hasattr", "(", "f", ",", "'name'", ")", ":", "return", "f", ".", "name", "elif", "hasattr", "(", "f", ",", "'filename'", ")", ":", "return", "f", ".", "filename", "elif", "hasattr", "(", "f", ",", "'__class__'", ")", ":", "return", "str", "(", "f", ".", "__class__", ")", "else", ":", "return", "str", "(", "type", "(", "f", ")", ")" ]
returns the name of the file-like object f.
train
false
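Assuming fileobj_name and its string_types dependency (from six) are importable, the fallback chain can be exercised with a few common file-like objects:

import gzip
import io
import os
import tempfile

fd, path = tempfile.mkstemp(suffix='.gz')
os.close(fd)
with gzip.open(path, 'wb') as gz:
    print(fileobj_name(gz))        # recurses to the underlying file's path
print(fileobj_name(path))          # plain strings pass through unchanged
print(fileobj_name(io.BytesIO()))  # no name/filename -> falls back to the class repr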
3,801
def finite_diff(expression, variable, increment=1):
    expression = expression.expand()
    expression2 = expression.subs(variable, (variable + increment))
    expression2 = expression2.expand()
    return (expression2 - expression)
[ "def", "finite_diff", "(", "expression", ",", "variable", ",", "increment", "=", "1", ")", ":", "expression", "=", "expression", ".", "expand", "(", ")", "expression2", "=", "expression", ".", "subs", "(", "variable", ",", "(", "variable", "+", "increment", ")", ")", "expression2", "=", "expression2", ".", "expand", "(", ")", "return", "(", "expression2", "-", "expression", ")" ]
takes a polynomial expression and the variable used to construct it, and returns the difference between the function's value when the input is incremented by 1 and the original function value.
train
false
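With SymPy, the forward difference above reproduces the familiar identity (x + 1)**2 - x**2 = 2*x + 1:

from sympy import symbols

x = symbols('x')
print(finite_diff(x**2, x))      # 2*x + 1
print(finite_diff(x**3, x, 2))   # 6*x**2 + 12*x + 8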
3,802
def walk_skip_hidden(top, onerror=None, followlinks=False):
    for (root, dirs, files) in os.walk(top, topdown=True, onerror=onerror, followlinks=followlinks):
        dirs[:] = [d for d in dirs if (not is_path_hidden(d))]
        files[:] = [f for f in files if (not is_path_hidden(f))]
        (yield (root, dirs, files))
[ "def", "walk_skip_hidden", "(", "top", ",", "onerror", "=", "None", ",", "followlinks", "=", "False", ")", ":", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "top", ",", "topdown", "=", "True", ",", "onerror", "=", "onerror", ",", "followlinks", "=", "followlinks", ")", ":", "dirs", "[", ":", "]", "=", "[", "d", "for", "d", "in", "dirs", "if", "(", "not", "is_path_hidden", "(", "d", ")", ")", "]", "files", "[", ":", "]", "=", "[", "f", "for", "f", "in", "files", "if", "(", "not", "is_path_hidden", "(", "f", ")", ")", "]", "(", "yield", "(", "root", ",", "dirs", ",", "files", ")", ")" ]
a wrapper for os.walk that skips hidden files and directories.
train
true
3,804
@pick_context_manager_writer
def flavor_access_remove(context, flavor_id, project_id):
    instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
    count = _flavor_access_query(context).filter_by(instance_type_id=instance_type_id).filter_by(project_id=project_id).soft_delete(synchronize_session=False)
    if (count == 0):
        raise exception.FlavorAccessNotFound(flavor_id=flavor_id, project_id=project_id)
[ "@", "pick_context_manager_writer", "def", "flavor_access_remove", "(", "context", ",", "flavor_id", ",", "project_id", ")", ":", "instance_type_id", "=", "_flavor_get_id_from_flavor", "(", "context", ",", "flavor_id", ")", "count", "=", "_flavor_access_query", "(", "context", ")", ".", "filter_by", "(", "instance_type_id", "=", "instance_type_id", ")", ".", "filter_by", "(", "project_id", "=", "project_id", ")", ".", "soft_delete", "(", "synchronize_session", "=", "False", ")", "if", "(", "count", "==", "0", ")", ":", "raise", "exception", ".", "FlavorAccessNotFound", "(", "flavor_id", "=", "flavor_id", ",", "project_id", "=", "project_id", ")" ]
remove flavor access for project.
train
false
3,805
def touch(name, atime=None, mtime=None):
    name = os.path.expanduser(name)
    if (atime and atime.isdigit()):
        atime = int(atime)
    if (mtime and mtime.isdigit()):
        mtime = int(mtime)
    try:
        if (not os.path.exists(name)):
            with salt.utils.fopen(name, 'a') as fhw:
                fhw.write('')
        if ((not atime) and (not mtime)):
            times = None
        elif ((not mtime) and atime):
            times = (atime, time.time())
        elif ((not atime) and mtime):
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)
    except TypeError:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
    return os.path.exists(name)
[ "def", "touch", "(", "name", ",", "atime", "=", "None", ",", "mtime", "=", "None", ")", ":", "name", "=", "os", ".", "path", ".", "expanduser", "(", "name", ")", "if", "(", "atime", "and", "atime", ".", "isdigit", "(", ")", ")", ":", "atime", "=", "int", "(", "atime", ")", "if", "(", "mtime", "and", "mtime", ".", "isdigit", "(", ")", ")", ":", "mtime", "=", "int", "(", "mtime", ")", "try", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "name", ")", ")", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "name", ",", "'a'", ")", "as", "fhw", ":", "fhw", ".", "write", "(", "''", ")", "if", "(", "(", "not", "atime", ")", "and", "(", "not", "mtime", ")", ")", ":", "times", "=", "None", "elif", "(", "(", "not", "mtime", ")", "and", "atime", ")", ":", "times", "=", "(", "atime", ",", "time", ".", "time", "(", ")", ")", "elif", "(", "(", "not", "atime", ")", "and", "mtime", ")", ":", "times", "=", "(", "time", ".", "time", "(", ")", ",", "mtime", ")", "else", ":", "times", "=", "(", "atime", ",", "mtime", ")", "os", ".", "utime", "(", "name", ",", "times", ")", "except", "TypeError", ":", "raise", "SaltInvocationError", "(", "'atime and mtime must be integers'", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "raise", "CommandExecutionError", "(", "exc", ".", "strerror", ")", "return", "os", ".", "path", ".", "exists", "(", "name", ")" ]
just like the touch command: create a file if it does not exist, or update its atime and mtime if it does.
train
true
3,806
def is_eui64_address(ip_address):
    ip = netaddr.IPAddress(ip_address)
    # EUI-64 interface identifiers embed the 0xfffe marker in the middle of
    # the expanded MAC address; the magic numbers are the mask 0xffff000000
    # and the value 0xfffe000000, so bits 24-39 must equal 0xfffe.
    return ((ip.version == 6) and (not ((ip & 1099494850560) ^ 1099478073344)))
[ "def", "is_eui64_address", "(", "ip_address", ")", ":", "ip", "=", "netaddr", ".", "IPAddress", "(", "ip_address", ")", "return", "(", "(", "ip", ".", "version", "==", "6", ")", "and", "(", "not", "(", "(", "ip", "&", "1099494850560", ")", "^", "1099478073344", ")", ")", ")" ]
check if ip address is eui64.
train
false
3,807
def _find_all_structured_arrays(handle):
    import h5py
    structured_arrays = []

    def append_structured_arrays(name, obj):
        if (isinstance(obj, h5py.Dataset) and (obj.dtype.kind == u'V')):
            structured_arrays.append(name)

    handle.visititems(append_structured_arrays)
    return structured_arrays
[ "def", "_find_all_structured_arrays", "(", "handle", ")", ":", "import", "h5py", "structured_arrays", "=", "[", "]", "def", "append_structured_arrays", "(", "name", ",", "obj", ")", ":", "if", "(", "isinstance", "(", "obj", ",", "h5py", ".", "Dataset", ")", "and", "(", "obj", ".", "dtype", ".", "kind", "==", "u'V'", ")", ")", ":", "structured_arrays", ".", "append", "(", "name", ")", "handle", ".", "visititems", "(", "append_structured_arrays", ")", "return", "structured_arrays" ]
find all structured arrays in an hdf5 file.
train
false
3,808
def name_to_batch(name, batch_size, num_steps):
    data = np.zeros(((batch_size * num_steps) + 1))
    data_index = 0
    for letter in (map(_letter_to_number, name) + [_EON]):
        data[data_index] = letter
        data_index += 1
    x = data[:(batch_size * num_steps)].reshape((batch_size, num_steps))
    y = data[1:((batch_size * num_steps) + 1)].reshape((batch_size, num_steps))
    return (x, y)
[ "def", "name_to_batch", "(", "name", ",", "batch_size", ",", "num_steps", ")", ":", "data", "=", "np", ".", "zeros", "(", "(", "(", "batch_size", "*", "num_steps", ")", "+", "1", ")", ")", "data_index", "=", "0", "for", "letter", "in", "(", "map", "(", "_letter_to_number", ",", "name", ")", "+", "[", "_EON", "]", ")", ":", "data", "[", "data_index", "]", "=", "letter", "data_index", "+=", "1", "x", "=", "data", "[", ":", "(", "batch_size", "*", "num_steps", ")", "]", ".", "reshape", "(", "(", "batch_size", ",", "num_steps", ")", ")", "y", "=", "data", "[", "1", ":", "(", "(", "batch_size", "*", "num_steps", ")", "+", "1", ")", "]", ".", "reshape", "(", "(", "batch_size", ",", "num_steps", ")", ")", "return", "(", "x", ",", "y", ")" ]
takes a single name and fills a batch with it. args: name (lowercase, composed of 26 characters), batch_size (int), num_steps (int). returns: (x, y).
train
false
3,810
def sys_info(fname=None, overwrite=False):
    if ((fname is not None) and op.isfile(fname) and (not overwrite)):
        raise IOError('file exists, use overwrite=True to overwrite')
    out = ''
    try:
        from ..app import use_app, Canvas
        from ..app.backends import BACKEND_NAMES
        from ..gloo import gl
        from ..testing import has_backend
        with use_log_level('warning'):
            app = use_app(call_reuse=False)
        out += ('Platform: %s\n' % platform.platform())
        out += ('Python: %s\n' % str(sys.version).replace('\n', ' '))
        out += ('Backend: %s\n' % app.backend_name)
        for backend in BACKEND_NAMES:
            if backend.startswith('ipynb_'):
                continue
            with use_log_level('warning', print_msg=False):
                which = has_backend(backend, out=['which'])[1]
            out += '{0:<9} {1}\n'.format((backend + ':'), which)
        out += '\n'
        canvas = Canvas('Test', (10, 10), show=False, app=app)
        canvas._backend._vispy_set_current()
        out += ('GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),))
        x_ = gl.GL_MAX_TEXTURE_SIZE
        out += ('MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),))
        out += ('Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),))
        canvas.close()
    except Exception:
        out += ('\nInfo-gathering error:\n%s' % traceback.format_exc())
        pass
    if (fname is not None):
        with open(fname, 'w') as fid:
            fid.write(out)
    return out
[ "def", "sys_info", "(", "fname", "=", "None", ",", "overwrite", "=", "False", ")", ":", "if", "(", "(", "fname", "is", "not", "None", ")", "and", "op", ".", "isfile", "(", "fname", ")", "and", "(", "not", "overwrite", ")", ")", ":", "raise", "IOError", "(", "'file exists, use overwrite=True to overwrite'", ")", "out", "=", "''", "try", ":", "from", ".", ".", "app", "import", "use_app", ",", "Canvas", "from", ".", ".", "app", ".", "backends", "import", "BACKEND_NAMES", "from", ".", ".", "gloo", "import", "gl", "from", ".", ".", "testing", "import", "has_backend", "with", "use_log_level", "(", "'warning'", ")", ":", "app", "=", "use_app", "(", "call_reuse", "=", "False", ")", "out", "+=", "(", "'Platform: %s\\n'", "%", "platform", ".", "platform", "(", ")", ")", "out", "+=", "(", "'Python: %s\\n'", "%", "str", "(", "sys", ".", "version", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ")", "out", "+=", "(", "'Backend: %s\\n'", "%", "app", ".", "backend_name", ")", "for", "backend", "in", "BACKEND_NAMES", ":", "if", "backend", ".", "startswith", "(", "'ipynb_'", ")", ":", "continue", "with", "use_log_level", "(", "'warning'", ",", "print_msg", "=", "False", ")", ":", "which", "=", "has_backend", "(", "backend", ",", "out", "=", "[", "'which'", "]", ")", "[", "1", "]", "out", "+=", "'{0:<9} {1}\\n'", ".", "format", "(", "(", "backend", "+", "':'", ")", ",", "which", ")", "out", "+=", "'\\n'", "canvas", "=", "Canvas", "(", "'Test'", ",", "(", "10", ",", "10", ")", ",", "show", "=", "False", ",", "app", "=", "app", ")", "canvas", ".", "_backend", ".", "_vispy_set_current", "(", ")", "out", "+=", "(", "'GL version: %r\\n'", "%", "(", "gl", ".", "glGetParameter", "(", "gl", ".", "GL_VERSION", ")", ",", ")", ")", "x_", "=", "gl", ".", "GL_MAX_TEXTURE_SIZE", "out", "+=", "(", "'MAX_TEXTURE_SIZE: %r\\n'", "%", "(", "gl", ".", "glGetParameter", "(", "x_", ")", ",", ")", ")", "out", "+=", "(", "'Extensions: %r\\n'", "%", "(", "gl", ".", "glGetParameter", "(", "gl", ".", "GL_EXTENSIONS", ")", ",", ")", ")", "canvas", ".", "close", "(", ")", "except", "Exception", ":", "out", "+=", "(", "'\\nInfo-gathering error:\\n%s'", "%", "traceback", ".", "format_exc", "(", ")", ")", "pass", "if", "(", "fname", "is", "not", "None", ")", ":", "with", "open", "(", "fname", ",", "'w'", ")", "as", "fid", ":", "fid", ".", "write", "(", "out", ")", "return", "out" ]
return useful information about the system and the available graphics backends.
train
true
3,812
def process_token_or_pass(func):

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        encoded_token = request.args.get('token')
        if encoded_token:
            handler = TokenHandler.from_string(encoded_token)
            try:
                res = handler.to_response()
            except TokenHandlerNotFound as e:
                raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Invalid Token', 'message_long': 'No token handler for action: {} found'.format(e.action)})
            if res:
                return res
        return func(*args, **kwargs)

    return wrapper
[ "def", "process_token_or_pass", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "encoded_token", "=", "request", ".", "args", ".", "get", "(", "'token'", ")", "if", "encoded_token", ":", "handler", "=", "TokenHandler", ".", "from_string", "(", "encoded_token", ")", "try", ":", "res", "=", "handler", ".", "to_response", "(", ")", "except", "TokenHandlerNotFound", "as", "e", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "{", "'message_short'", ":", "'Invalid Token'", ",", "'message_long'", ":", "'No token handler for action: {} found'", ".", "format", "(", "e", ".", "action", ")", "}", ")", "if", "res", ":", "return", "res", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
parse encoded token and run attached handlers.
train
false
3,813
def fileobj_mode(f):
    if (hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode')):
        fileobj = f.fileobj
    elif hasattr(f, 'fileobj_mode'):
        return f.fileobj_mode
    elif (hasattr(f, 'fp') and hasattr(f.fp, 'mode')):
        fileobj = f.fp
    elif hasattr(f, 'mode'):
        fileobj = f
    else:
        return None
    return _fileobj_normalize_mode(fileobj)
[ "def", "fileobj_mode", "(", "f", ")", ":", "if", "(", "hasattr", "(", "f", ",", "'fileobj'", ")", "and", "hasattr", "(", "f", ".", "fileobj", ",", "'mode'", ")", ")", ":", "fileobj", "=", "f", ".", "fileobj", "elif", "hasattr", "(", "f", ",", "'fileobj_mode'", ")", ":", "return", "f", ".", "fileobj_mode", "elif", "(", "hasattr", "(", "f", ",", "'fp'", ")", "and", "hasattr", "(", "f", ".", "fp", ",", "'mode'", ")", ")", ":", "fileobj", "=", "f", ".", "fp", "elif", "hasattr", "(", "f", ",", "'mode'", ")", ":", "fileobj", "=", "f", "else", ":", "return", "None", "return", "_fileobj_normalize_mode", "(", "fileobj", ")" ]
returns the mode string of a file-like object if such a thing exists.
train
false
3,814
def getDefaultFetcher():
    global _default_fetcher
    if (_default_fetcher is None):
        setDefaultFetcher(createHTTPFetcher())
    return _default_fetcher
[ "def", "getDefaultFetcher", "(", ")", ":", "global", "_default_fetcher", "if", "(", "_default_fetcher", "is", "None", ")", ":", "setDefaultFetcher", "(", "createHTTPFetcher", "(", ")", ")", "return", "_default_fetcher" ]
return the default fetcher instance, creating one if no fetcher has been set.
train
false
3,815
def dyld_find(name, executable_path=None, env=None):
    name = ensure_utf8(name)
    executable_path = ensure_utf8(executable_path)
    for path in dyld_image_suffix_search(chain(dyld_override_search(name, env), dyld_executable_path_search(name, executable_path), dyld_default_search(name, env)), env):
        if os.path.isfile(path):
            return path
    raise ValueError(('dylib %s could not be found' % (name,)))
[ "def", "dyld_find", "(", "name", ",", "executable_path", "=", "None", ",", "env", "=", "None", ")", ":", "name", "=", "ensure_utf8", "(", "name", ")", "executable_path", "=", "ensure_utf8", "(", "executable_path", ")", "for", "path", "in", "dyld_image_suffix_search", "(", "chain", "(", "dyld_override_search", "(", "name", ",", "env", ")", ",", "dyld_executable_path_search", "(", "name", ",", "executable_path", ")", ",", "dyld_default_search", "(", "name", ",", "env", ")", ")", ",", "env", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "path", "raise", "ValueError", "(", "(", "'dylib %s could not be found'", "%", "(", "name", ",", ")", ")", ")" ]
find a library or framework using dyld semantics.
train
true
3,817
@library.global_function
def stringify_groups(groups):
    return u','.join([group.name for group in groups])
[ "@", "library", ".", "global_function", "def", "stringify_groups", "(", "groups", ")", ":", "return", "u','", ".", "join", "(", "[", "group", ".", "name", "for", "group", "in", "groups", "]", ")" ]
change a list of group objects into a comma-delimited string of group names.
train
false
3,818
def argmax_list(seq, func):
    return argmin_list(seq, (lambda x: (- func(x))))
[ "def", "argmax_list", "(", "seq", ",", "func", ")", ":", "return", "argmin_list", "(", "seq", ",", "(", "lambda", "x", ":", "(", "-", "func", "(", "x", ")", ")", ")", ")" ]
return a list of elements of seq with the highest func scores.
train
false
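argmin_list is not shown in this dump; a minimal self-contained sketch with an assumed definition (all elements attaining the minimum score):

def argmin_list(seq, func):
    # assumed counterpart: every element whose score equals the minimum
    best = min(func(x) for x in seq)
    return [x for x in seq if func(x) == best]

def argmax_list(seq, func):
    return argmin_list(seq, (lambda x: (- func(x))))

print(argmax_list(['one', 'three', 'seven'], len))  # ['three', 'seven']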
3,819
def CDLGRAVESTONEDOJI(barDs, count):
    return call_talib_with_ohlc(barDs, count, talib.CDLGRAVESTONEDOJI)
[ "def", "CDLGRAVESTONEDOJI", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLGRAVESTONEDOJI", ")" ]
gravestone doji.
train
false
3,821
@handle_response_format
@treeio_login_required
def settings_edit(request, response_format='html'):
    if (not request.user.profile.is_admin('treeio.projects')):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    form = None
    if request.POST:
        if ('cancel' not in request.POST):
            form = SettingsForm(request.user.profile, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(reverse('projects_settings_view'))
        else:
            return HttpResponseRedirect(reverse('projects_settings_view'))
    else:
        form = SettingsForm(request.user.profile)
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/settings_edit', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "settings_edit", "(", "request", ",", "response_format", "=", "'html'", ")", ":", "if", "(", "not", "request", ".", "user", ".", "profile", ".", "is_admin", "(", "'treeio.projects'", ")", ")", ":", "return", "user_denied", "(", "request", ",", "message", "=", "\"You don't have administrator access to the Projects module\"", ")", "form", "=", "None", "if", "request", ".", "POST", ":", "if", "(", "'cancel'", "not", "in", "request", ".", "POST", ")", ":", "form", "=", "SettingsForm", "(", "request", ".", "user", ".", "profile", ",", "request", ".", "POST", ")", "if", "form", ".", "is_valid", "(", ")", ":", "form", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'projects_settings_view'", ")", ")", "else", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'projects_settings_view'", ")", ")", "else", ":", "form", "=", "SettingsForm", "(", "request", ".", "user", ".", "profile", ")", "context", "=", "_get_default_context", "(", "request", ")", "context", ".", "update", "(", "{", "'form'", ":", "form", "}", ")", "return", "render_to_response", "(", "'projects/settings_edit'", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
settings edit.
train
false
3,822
def get_namespaces(start=None, end=None):
    q = Namespace.all()
    if (start is not None):
        q.filter('__key__ >=', Namespace.key_for_namespace(start))
    if (end is not None):
        q.filter('__key__ <', Namespace.key_for_namespace(end))
    return [x.namespace_name for x in q.run()]
[ "def", "get_namespaces", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Namespace", ".", "all", "(", ")", "if", "(", "start", "is", "not", "None", ")", ":", "q", ".", "filter", "(", "'__key__ >='", ",", "Namespace", ".", "key_for_namespace", "(", "start", ")", ")", "if", "(", "end", "is", "not", "None", ")", ":", "q", ".", "filter", "(", "'__key__ <'", ",", "Namespace", ".", "key_for_namespace", "(", "end", ")", ")", "return", "[", "x", ".", "namespace_name", "for", "x", "in", "q", ".", "run", "(", ")", "]" ]
return all namespaces in the specified range.
train
false
3,825
def get_reader_session():
    return context_manager.reader.get_sessionmaker()()
[ "def", "get_reader_session", "(", ")", ":", "return", "context_manager", ".", "reader", ".", "get_sessionmaker", "(", ")", "(", ")" ]
helper to get reader session.
train
false
3,826
def have_qstring():
    return (not ((sys.version_info.major >= 3) or QT_VERSION_STR.startswith('5.')))
[ "def", "have_qstring", "(", ")", ":", "return", "(", "not", "(", "(", "sys", ".", "version_info", ".", "major", ">=", "3", ")", "or", "QT_VERSION_STR", ".", "startswith", "(", "'5.'", ")", ")", ")" ]
py3/qt5: get rid of the QString wrapper, as py3 has a native unicode str type.
train
false
3,827
def makeKickstartFloppy():
    kickstart = 'ks.cfg'
    with open(kickstart, 'w') as f:
        f.write(KickstartText)
    preseed = 'ks.preseed'
    with open(preseed, 'w') as f:
        f.write(PreseedText)
    floppy = 'ksfloppy.img'
    run(('qemu-img create %s 1440k' % floppy))
    run(('mkfs -t msdos ' + floppy))
    run(('mcopy -i %s %s ::/' % (floppy, kickstart)))
    run(('mcopy -i %s %s ::/' % (floppy, preseed)))
    return (floppy, kickstart, preseed)
[ "def", "makeKickstartFloppy", "(", ")", ":", "kickstart", "=", "'ks.cfg'", "with", "open", "(", "kickstart", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "KickstartText", ")", "preseed", "=", "'ks.preseed'", "with", "open", "(", "preseed", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "PreseedText", ")", "floppy", "=", "'ksfloppy.img'", "run", "(", "(", "'qemu-img create %s 1440k'", "%", "floppy", ")", ")", "run", "(", "(", "'mkfs -t msdos '", "+", "floppy", ")", ")", "run", "(", "(", "'mcopy -i %s %s ::/'", "%", "(", "floppy", ",", "kickstart", ")", ")", ")", "run", "(", "(", "'mcopy -i %s %s ::/'", "%", "(", "floppy", ",", "preseed", ")", ")", ")", "return", "(", "floppy", ",", "kickstart", ",", "preseed", ")" ]
create and return kickstart floppy.
train
false
3,828
def read_config_file(option, opt, value, parser):
    try:
        new_settings = parser.get_config_file_settings(value)
    except ValueError as error:
        parser.error(error)
    parser.values.update(new_settings, parser)
[ "def", "read_config_file", "(", "option", ",", "opt", ",", "value", ",", "parser", ")", ":", "try", ":", "new_settings", "=", "parser", ".", "get_config_file_settings", "(", "value", ")", "except", "ValueError", "as", "error", ":", "parser", ".", "error", "(", "error", ")", "parser", ".", "values", ".", "update", "(", "new_settings", ",", "parser", ")" ]
read settings from the given configuration file and update the option parser values .
train
false
3,829
def efetch(db, **keywords): cgi = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi' variables = {'db': db} variables.update(keywords) post = False try: ids = variables['id'] except KeyError: pass else: if isinstance(ids, list): ids = ','.join(ids) variables['id'] = ids elif isinstance(ids, int): ids = str(ids) variables['id'] = ids if (ids.count(',') >= 200): post = True return _open(cgi, variables, post=post)
[ "def", "efetch", "(", "db", ",", "**", "keywords", ")", ":", "cgi", "=", "'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'", "variables", "=", "{", "'db'", ":", "db", "}", "variables", ".", "update", "(", "keywords", ")", "post", "=", "False", "try", ":", "ids", "=", "variables", "[", "'id'", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "isinstance", "(", "ids", ",", "list", ")", ":", "ids", "=", "','", ".", "join", "(", "ids", ")", "variables", "[", "'id'", "]", "=", "ids", "elif", "isinstance", "(", "ids", ",", "int", ")", ":", "ids", "=", "str", "(", "ids", ")", "variables", "[", "'id'", "]", "=", "ids", "if", "(", "ids", ".", "count", "(", "','", ")", ">=", "200", ")", ":", "post", "=", "True", "return", "_open", "(", "cgi", ",", "variables", ",", "post", "=", "post", ")" ]
fetches entrez results which are returned as a handle .
train
false
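This is Biopython's Entrez.efetch; a typical call under that assumption (the accession and fields below are illustrative) returns a file-like handle:

from Bio import Entrez

Entrez.email = 'you@example.org'  # NCBI requires a contact address; placeholder
handle = Entrez.efetch(db='nucleotide', id='EU490707', rettype='gb', retmode='text')
print(handle.read()[:200])  # first lines of the GenBank record
handle.close()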
3,830
def edge_detect(pin, event_callback, bounce): import Adafruit_BBIO.GPIO as GPIO GPIO.add_event_detect(pin, GPIO.BOTH, callback=event_callback, bouncetime=bounce)
[ "def", "edge_detect", "(", "pin", ",", "event_callback", ",", "bounce", ")", ":", "import", "Adafruit_BBIO", ".", "GPIO", "as", "GPIO", "GPIO", ".", "add_event_detect", "(", "pin", ",", "GPIO", ".", "BOTH", ",", "callback", "=", "event_callback", ",", "bouncetime", "=", "bounce", ")" ]
add detection for rising and falling events .
train
false
3,831
def dipy_version(): if no_dipy(): return None return dipy.__version__
[ "def", "dipy_version", "(", ")", ":", "if", "no_dipy", "(", ")", ":", "return", "None", "return", "dipy", ".", "__version__" ]
return the dipy version, or none if dipy is not installed .
train
false
3,833
def RemoveELBInstance(region, instance_id, node_type): balancers = GetLoadBalancers(region, node_types=[node_type]) assert balancers, ('No %s load balancer in region %s' % (node_type, region)) assert (len(balancers) == 1) b = balancers[0] balancer_instances = set([i.id for i in b.instances]) if (instance_id not in balancer_instances): print ('Instance %s not found in %s load balancer in regions %s' % (instance_id, node_type, region)) return b.deregister_instances([instance_id]) print ('Removed instance %s from %s load balancer in region %s' % (instance_id, node_type, region))
[ "def", "RemoveELBInstance", "(", "region", ",", "instance_id", ",", "node_type", ")", ":", "balancers", "=", "GetLoadBalancers", "(", "region", ",", "node_types", "=", "[", "node_type", "]", ")", "assert", "balancers", ",", "(", "'No %s load balancer in region %s'", "%", "(", "node_type", ",", "region", ")", ")", "assert", "(", "len", "(", "balancers", ")", "==", "1", ")", "b", "=", "balancers", "[", "0", "]", "balancer_instances", "=", "set", "(", "[", "i", ".", "id", "for", "i", "in", "b", ".", "instances", "]", ")", "if", "(", "instance_id", "not", "in", "balancer_instances", ")", ":", "print", "(", "'Instance %s not found in %s load balancer in regions %s'", "%", "(", "instance_id", ",", "node_type", ",", "region", ")", ")", "return", "b", ".", "deregister_instances", "(", "[", "instance_id", "]", ")", "print", "(", "'Removed instance %s from %s load balancer in region %s'", "%", "(", "instance_id", ",", "node_type", ",", "region", ")", ")" ]
remove an instance from the load balancer in region .
train
false
3,834
def memory_usage(): return _GetSystemStats().memory()
[ "def", "memory_usage", "(", ")", ":", "return", "_GetSystemStats", "(", ")", ".", "memory", "(", ")" ]
return current system memory usage .
train
false
3,835
@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_read_html_unicode(): table_in = [u'<table>', u'<tr><td>&#x0394;</td></tr>', u'<tr><td>\u0394</td></tr>', u'</table>'] dat = Table.read(table_in, format='ascii.html') assert np.all((dat['col1'] == [u'\u0394', u'\u0394']))
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'not HAS_BEAUTIFUL_SOUP'", ")", "def", "test_read_html_unicode", "(", ")", ":", "table_in", "=", "[", "u'<table>'", ",", "u'<tr><td>&#x0394;</td></tr>'", ",", "u'<tr><td>\\u0394</td></tr>'", ",", "u'</table>'", "]", "dat", "=", "Table", ".", "read", "(", "table_in", ",", "format", "=", "'ascii.html'", ")", "assert", "np", ".", "all", "(", "(", "dat", "[", "'col1'", "]", "==", "[", "u'\\u0394'", ",", "u'\\u0394'", "]", ")", ")" ]
test reading an html table with unicode values .
train
false
3,836
def CloseBuffersForFilename(filename): buffer_number = GetBufferNumberForFilename(filename, False) while (buffer_number != (-1)): vim.command(u'silent! bwipeout! {0}'.format(buffer_number)) new_buffer_number = GetBufferNumberForFilename(filename, False) if (buffer_number == new_buffer_number): raise RuntimeError(u"Buffer {0} for filename '{1}' should already be wiped out.".format(buffer_number, filename)) buffer_number = new_buffer_number
[ "def", "CloseBuffersForFilename", "(", "filename", ")", ":", "buffer_number", "=", "GetBufferNumberForFilename", "(", "filename", ",", "False", ")", "while", "(", "buffer_number", "!=", "(", "-", "1", ")", ")", ":", "vim", ".", "command", "(", "u'silent! bwipeout! {0}'", ".", "format", "(", "buffer_number", ")", ")", "new_buffer_number", "=", "GetBufferNumberForFilename", "(", "filename", ",", "False", ")", "if", "(", "buffer_number", "==", "new_buffer_number", ")", ":", "raise", "RuntimeError", "(", "u\"Buffer {0} for filename '{1}' should already be wiped out.\"", ".", "format", "(", "buffer_number", ",", "filename", ")", ")", "buffer_number", "=", "new_buffer_number" ]
close all buffers for a specific file .
train
false
3,837
def nearest_unequal_elements(dts, dt): if (not dts.is_unique): raise ValueError('dts must be unique') if (not dts.is_monotonic_increasing): raise ValueError('dts must be sorted in increasing order') if (not len(dts)): return (None, None) sortpos = dts.searchsorted(dt, side='left') try: sortval = dts[sortpos] except IndexError: return (dts[(-1)], None) if (dt < sortval): lower_ix = (sortpos - 1) upper_ix = sortpos elif (dt == sortval): lower_ix = (sortpos - 1) upper_ix = (sortpos + 1) else: lower_ix = sortpos upper_ix = (sortpos + 1) lower_value = (dts[lower_ix] if (lower_ix >= 0) else None) upper_value = (dts[upper_ix] if (upper_ix < len(dts)) else None) return (lower_value, upper_value)
[ "def", "nearest_unequal_elements", "(", "dts", ",", "dt", ")", ":", "if", "(", "not", "dts", ".", "is_unique", ")", ":", "raise", "ValueError", "(", "'dts must be unique'", ")", "if", "(", "not", "dts", ".", "is_monotonic_increasing", ")", ":", "raise", "ValueError", "(", "'dts must be sorted in increasing order'", ")", "if", "(", "not", "len", "(", "dts", ")", ")", ":", "return", "(", "None", ",", "None", ")", "sortpos", "=", "dts", ".", "searchsorted", "(", "dt", ",", "side", "=", "'left'", ")", "try", ":", "sortval", "=", "dts", "[", "sortpos", "]", "except", "IndexError", ":", "return", "(", "dts", "[", "(", "-", "1", ")", "]", ",", "None", ")", "if", "(", "dt", "<", "sortval", ")", ":", "lower_ix", "=", "(", "sortpos", "-", "1", ")", "upper_ix", "=", "sortpos", "elif", "(", "dt", "==", "sortval", ")", ":", "lower_ix", "=", "(", "sortpos", "-", "1", ")", "upper_ix", "=", "(", "sortpos", "+", "1", ")", "else", ":", "lower_ix", "=", "sortpos", "upper_ix", "=", "(", "sortpos", "+", "1", ")", "lower_value", "=", "(", "dts", "[", "lower_ix", "]", "if", "(", "lower_ix", ">=", "0", ")", "else", "None", ")", "upper_value", "=", "(", "dts", "[", "upper_ix", "]", "if", "(", "upper_ix", "<", "len", "(", "dts", ")", ")", "else", "None", ")", "return", "(", "lower_value", ",", "upper_value", ")" ]
find values in dts closest but not equal to dt .
train
true
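A usage sketch, assuming the nearest_unequal_elements above is in scope; a sorted, unique pandas DatetimeIndex satisfies its preconditions:

import pandas as pd

dts = pd.DatetimeIndex(['2014-01-01', '2014-01-05', '2014-01-09'])
lower, upper = nearest_unequal_elements(dts, pd.Timestamp('2014-01-05'))
# dt matches an element exactly, so both strict neighbors are returned:
# lower == Timestamp('2014-01-01'), upper == Timestamp('2014-01-09')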
3,838
def transfer_create(context, values): return IMPL.transfer_create(context, values)
[ "def", "transfer_create", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "transfer_create", "(", "context", ",", "values", ")" ]
create an entry in the transfers table .
train
false
3,839
def ExpectedFailure(reason, *exception_matchers): def decorator(test): @functools.wraps(test) def Wrapper(*args, **kwargs): try: test(*args, **kwargs) except Exception as test_exception: test_exception_message = ToUnicode(test_exception) try: for matcher in exception_matchers: assert_that(test_exception_message, matcher) except AssertionError: import traceback print((u'Test failed for the wrong reason: ' + traceback.format_exc())) raise test_exception raise nose.SkipTest(reason) else: raise AssertionError(u'Test was expected to fail: {0}'.format(reason)) return Wrapper return decorator
[ "def", "ExpectedFailure", "(", "reason", ",", "*", "exception_matchers", ")", ":", "def", "decorator", "(", "test", ")", ":", "@", "functools", ".", "wraps", "(", "test", ")", "def", "Wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "test", "(", "*", "args", ",", "**", "kwargs", ")", "except", "Exception", "as", "test_exception", ":", "test_exception_message", "=", "ToUnicode", "(", "test_exception", ")", "try", ":", "for", "matcher", "in", "exception_matchers", ":", "assert_that", "(", "test_exception_message", ",", "matcher", ")", "except", "AssertionError", ":", "import", "traceback", "print", "(", "(", "u'Test failed for the wrong reason: '", "+", "traceback", ".", "format_exc", "(", ")", ")", ")", "raise", "test_exception", "raise", "nose", ".", "SkipTest", "(", "reason", ")", "else", ":", "raise", "AssertionError", "(", "u'Test was expected to fail: {0}'", ".", "format", "(", "reason", ")", ")", "return", "Wrapper", "return", "decorator" ]
defines a decorator that marks a test as expected to fail for the given reason .
train
false
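The decorator body uses hamcrest-style assert_that, so the matchers are presumably PyHamcrest objects; a hypothetical usage sketch under that assumption:

from hamcrest import contains_string

@ExpectedFailure('completer does not support this filetype yet',
                 contains_string('no completions'))
def CompletionTest():
    # any failure whose message contains 'no completions' turns into a skip;
    # passing unexpectedly raises an AssertionError instead
    raise RuntimeError('no completions found')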
3,840
def _mark_cookie_for_deletion(request): request.need_to_delete_cookie = True
[ "def", "_mark_cookie_for_deletion", "(", "request", ")", ":", "request", ".", "need_to_delete_cookie", "=", "True" ]
updates the given request object to designate that the session cookie should be deleted .
train
false
3,841
@snippet def topic_iam_policy(client, to_delete): TOPIC_NAME = ('topic_iam_policy-%d' % (_millis(),)) topic = client.topic(TOPIC_NAME) topic.create() to_delete.append(topic) policy = topic.get_iam_policy() assert (len(policy.viewers) == 0) assert (len(policy.editors) == 0) assert (len(policy.owners) == 0) ALL_USERS = policy.all_users() policy.viewers.add(ALL_USERS) LOGS_GROUP = policy.group('cloud-logs@google.com') policy.editors.add(LOGS_GROUP) new_policy = topic.set_iam_policy(policy) assert (ALL_USERS in new_policy.viewers) assert (LOGS_GROUP in new_policy.editors)
[ "@", "snippet", "def", "topic_iam_policy", "(", "client", ",", "to_delete", ")", ":", "TOPIC_NAME", "=", "(", "'topic_iam_policy-%d'", "%", "(", "_millis", "(", ")", ",", ")", ")", "topic", "=", "client", ".", "topic", "(", "TOPIC_NAME", ")", "topic", ".", "create", "(", ")", "to_delete", ".", "append", "(", "topic", ")", "policy", "=", "topic", ".", "get_iam_policy", "(", ")", "assert", "(", "len", "(", "policy", ".", "viewers", ")", "==", "0", ")", "assert", "(", "len", "(", "policy", ".", "editors", ")", "==", "0", ")", "assert", "(", "len", "(", "policy", ".", "owners", ")", "==", "0", ")", "ALL_USERS", "=", "policy", ".", "all_users", "(", ")", "policy", ".", "viewers", ".", "add", "(", "ALL_USERS", ")", "LOGS_GROUP", "=", "policy", ".", "group", "(", "'cloud-logs@google.com'", ")", "policy", ".", "editors", ".", "add", "(", "LOGS_GROUP", ")", "new_policy", "=", "topic", ".", "set_iam_policy", "(", "policy", ")", "assert", "(", "ALL_USERS", "in", "new_policy", ".", "viewers", ")", "assert", "(", "LOGS_GROUP", "in", "new_policy", ".", "editors", ")" ]
fetch / set a topic's iam policy .
train
false
3,842
def acquire_lock(): pass
[ "def", "acquire_lock", "(", ")", ":", "pass" ]
acquiring the lock is a no-op since no threading is supported .
train
false
3,844
def unescape_all(url): if isinstance(url, bytes): func2use = _unescape_bytes keys2use = _bytes_keys else: func2use = _unescape_str keys2use = _str_keys clean_url = func2use(url) not_done = [(clean_url.count(key) > 0) for key in keys2use] if (True in not_done): return unescape_all(clean_url) else: return clean_url
[ "def", "unescape_all", "(", "url", ")", ":", "if", "isinstance", "(", "url", ",", "bytes", ")", ":", "func2use", "=", "_unescape_bytes", "keys2use", "=", "_bytes_keys", "else", ":", "func2use", "=", "_unescape_str", "keys2use", "=", "_str_keys", "clean_url", "=", "func2use", "(", "url", ")", "not_done", "=", "[", "(", "clean_url", ".", "count", "(", "key", ")", ">", "0", ")", "for", "key", "in", "keys2use", "]", "if", "(", "True", "in", "not_done", ")", ":", "return", "unescape_all", "(", "clean_url", ")", "else", ":", "return", "clean_url" ]
recursively unescape a given url .
train
false
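The private helpers (_unescape_str and friends) are not shown; an illustrative analog of the same keep-decoding-until-stable recursion, using urllib.parse.unquote instead (an assumption, not the module's actual helpers):

from urllib.parse import unquote

def unescape_repeatedly(url):
    decoded = unquote(url)
    # recurse until another pass changes nothing
    return url if decoded == url else unescape_repeatedly(decoded)

print(unescape_repeatedly('http%253A%252F%252Fexample.com'))  # -> http://example.com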
3,845
def visstd(a, s=0.1): return ((((a - a.mean()) / max(a.std(), 0.0001)) * s) + 0.5)
[ "def", "visstd", "(", "a", ",", "s", "=", "0.1", ")", ":", "return", "(", "(", "(", "(", "a", "-", "a", ".", "mean", "(", ")", ")", "/", "max", "(", "a", ".", "std", "(", ")", ",", "0.0001", ")", ")", "*", "s", ")", "+", "0.5", ")" ]
normalize the image range for visualization .
train
false
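A quick check, assuming the visstd above is in scope: the output is centered at 0.5 with standard deviation s, convenient for displaying activations as an image:

import numpy as np

a = np.random.randn(4, 4)
img = visstd(a, s=0.1)
print(round(img.mean(), 3), round(img.std(), 3))  # ~0.5 and ~0.1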
3,847
def product_upper_triangle(values, include_diagonal=False): return all_pairs_matching_predicate(values, (operator.le if include_diagonal else operator.lt))
[ "def", "product_upper_triangle", "(", "values", ",", "include_diagonal", "=", "False", ")", ":", "return", "all_pairs_matching_predicate", "(", "values", ",", "(", "operator", ".", "le", "if", "include_diagonal", "else", "operator", ".", "lt", ")", ")" ]
return an iterator over pairs (a, b) from values with a < b, or a <= b when include_diagonal is set .
train
false
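Without the diagonal, the pairs match itertools.combinations of size 2 — a cross-check assuming the function above and its all_pairs_matching_predicate helper are in scope:

import itertools

values = [1, 2, 3]
pairs = list(product_upper_triangle(values))
assert sorted(pairs) == list(itertools.combinations(values, 2))
# -> [(1, 2), (1, 3), (2, 3)]; include_diagonal=True would add (1, 1), (2, 2), (3, 3)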
3,849
def image_preprocessing(image_buffer, bbox, train, thread_id=0): if (bbox is None): raise ValueError('Please supply a bounding box.') image = decode_jpeg(image_buffer) height = FLAGS.image_size width = FLAGS.image_size if train: image = distort_image(image, height, width, bbox, thread_id) else: image = eval_image(image, height, width) image = tf.sub(image, 0.5) image = tf.mul(image, 2.0) return image
[ "def", "image_preprocessing", "(", "image_buffer", ",", "bbox", ",", "train", ",", "thread_id", "=", "0", ")", ":", "if", "(", "bbox", "is", "None", ")", ":", "raise", "ValueError", "(", "'Please supply a bounding box.'", ")", "image", "=", "decode_jpeg", "(", "image_buffer", ")", "height", "=", "FLAGS", ".", "image_size", "width", "=", "FLAGS", ".", "image_size", "if", "train", ":", "image", "=", "distort_image", "(", "image", ",", "height", ",", "width", ",", "bbox", ",", "thread_id", ")", "else", ":", "image", "=", "eval_image", "(", "image", ",", "height", ",", "width", ")", "image", "=", "tf", ".", "sub", "(", "image", ",", "0.5", ")", "image", "=", "tf", ".", "mul", "(", "image", ",", "2.0", ")", "return", "image" ]
decode and preprocess one image for evaluation or training .
train
false
3,851
def _int_arith_flags(rettype): if rettype.signed: return ['nsw'] else: return []
[ "def", "_int_arith_flags", "(", "rettype", ")", ":", "if", "rettype", ".", "signed", ":", "return", "[", "'nsw'", "]", "else", ":", "return", "[", "]" ]
return the modifier flags for integer arithmetic .
train
false
3,853
def make_csv_output(res, dt): import frappe from cStringIO import StringIO import csv f = StringIO() writer = csv.writer(f) for r in res: row = [] for v in r: if isinstance(v, basestring): v = v.encode(u'utf-8') row.append(v) writer.writerow(row) f.seek(0) frappe.response[u'result'] = unicode(f.read(), u'utf-8') frappe.response[u'type'] = u'csv' frappe.response[u'doctype'] = dt.replace(u' ', u'')
[ "def", "make_csv_output", "(", "res", ",", "dt", ")", ":", "import", "frappe", "from", "cStringIO", "import", "StringIO", "import", "csv", "f", "=", "StringIO", "(", ")", "writer", "=", "csv", ".", "writer", "(", "f", ")", "for", "r", "in", "res", ":", "row", "=", "[", "]", "for", "v", "in", "r", ":", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "v", "=", "v", ".", "encode", "(", "u'utf-8'", ")", "row", ".", "append", "(", "v", ")", "writer", ".", "writerow", "(", "row", ")", "f", ".", "seek", "(", "0", ")", "frappe", ".", "response", "[", "u'result'", "]", "=", "unicode", "(", "f", ".", "read", "(", ")", ",", "u'utf-8'", ")", "frappe", ".", "response", "[", "u'type'", "]", "=", "u'csv'", "frappe", ".", "response", "[", "u'doctype'", "]", "=", "dt", ".", "replace", "(", "u' '", ",", "u''", ")" ]
send method response as downloadable csv file .
train
false
3,854
@pick_context_manager_writer def flavor_access_add(context, flavor_id, project_id): instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) access_ref = models.InstanceTypeProjects() access_ref.update({'instance_type_id': instance_type_id, 'project_id': project_id}) try: access_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.FlavorAccessExists(flavor_id=flavor_id, project_id=project_id) return access_ref
[ "@", "pick_context_manager_writer", "def", "flavor_access_add", "(", "context", ",", "flavor_id", ",", "project_id", ")", ":", "instance_type_id", "=", "_flavor_get_id_from_flavor", "(", "context", ",", "flavor_id", ")", "access_ref", "=", "models", ".", "InstanceTypeProjects", "(", ")", "access_ref", ".", "update", "(", "{", "'instance_type_id'", ":", "instance_type_id", ",", "'project_id'", ":", "project_id", "}", ")", "try", ":", "access_ref", ".", "save", "(", "context", ".", "session", ")", "except", "db_exc", ".", "DBDuplicateEntry", ":", "raise", "exception", ".", "FlavorAccessExists", "(", "flavor_id", "=", "flavor_id", ",", "project_id", "=", "project_id", ")", "return", "access_ref" ]
add flavor access for project .
train
false
3,855
def lastmodified(date_obj): web.header('Last-Modified', net.httpdate(date_obj))
[ "def", "lastmodified", "(", "date_obj", ")", ":", "web", ".", "header", "(", "'Last-Modified'", ",", "net", ".", "httpdate", "(", "date_obj", ")", ")" ]
outputs a last-modified header for datetime .
train
false
3,857
def re_render_content_for_management_command(message): assert Message.need_to_render_content(message.rendered_content, message.rendered_content_version, bugdown.version) rendered_content = render_markdown(message, message.content) message.rendered_content = rendered_content message.rendered_content_version = bugdown.version message.save_rendered_content()
[ "def", "re_render_content_for_management_command", "(", "message", ")", ":", "assert", "Message", ".", "need_to_render_content", "(", "message", ".", "rendered_content", ",", "message", ".", "rendered_content_version", ",", "bugdown", ".", "version", ")", "rendered_content", "=", "render_markdown", "(", "message", ",", "message", ".", "content", ")", "message", ".", "rendered_content", "=", "rendered_content", "message", ".", "rendered_content_version", "=", "bugdown", ".", "version", "message", ".", "save_rendered_content", "(", ")" ]
re-render a message's content and save it; please avoid using this function outside management commands .
train
false
3,859
def trigrams(sequence, **kwargs): for item in ngrams(sequence, 3, **kwargs): (yield item)
[ "def", "trigrams", "(", "sequence", ",", "**", "kwargs", ")", ":", "for", "item", "in", "ngrams", "(", "sequence", ",", "3", ",", "**", "kwargs", ")", ":", "(", "yield", "item", ")" ]
return the trigrams generated from a sequence of items .
train
false
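This simply fixes n=3 on an ngrams helper (nltk-style, assumed to be in scope); for example:

tokens = ['the', 'quick', 'brown', 'fox']
print(list(trigrams(tokens)))
# -> [('the', 'quick', 'brown'), ('quick', 'brown', 'fox')]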
3,860
def heap_sort(unsorted): n = len(unsorted) for i in range(((n // 2) - 1), (-1), (-1)): heapify(unsorted, i, n) for i in range((n - 1), 0, (-1)): (unsorted[0], unsorted[i]) = (unsorted[i], unsorted[0]) heapify(unsorted, 0, i) return unsorted
[ "def", "heap_sort", "(", "unsorted", ")", ":", "n", "=", "len", "(", "unsorted", ")", "for", "i", "in", "range", "(", "(", "(", "n", "//", "2", ")", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "heapify", "(", "unsorted", ",", "i", ",", "n", ")", "for", "i", "in", "range", "(", "(", "n", "-", "1", ")", ",", "0", ",", "(", "-", "1", ")", ")", ":", "(", "unsorted", "[", "0", "]", ",", "unsorted", "[", "i", "]", ")", "=", "(", "unsorted", "[", "i", "]", ",", "unsorted", "[", "0", "]", ")", "heapify", "(", "unsorted", ",", "0", ",", "i", ")", "return", "unsorted" ]
pure implementation of the heap sort algorithm in python .
train
false
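heap_sort relies on a heapify helper the record does not show; a standard sift-down version (an assumption, though it matches the call signature heapify(arr, index, heap_size)) makes it runnable:

def heapify(unsorted, index, heap_size):
    # sift unsorted[index] down so the subtree rooted at index is a max-heap
    largest = index
    left, right = 2 * index + 1, 2 * index + 2
    if left < heap_size and unsorted[left] > unsorted[largest]:
        largest = left
    if right < heap_size and unsorted[right] > unsorted[largest]:
        largest = right
    if largest != index:
        unsorted[index], unsorted[largest] = unsorted[largest], unsorted[index]
        heapify(unsorted, largest, heap_size)

print(heap_sort([3, 1, 4, 1, 5, 9, 2, 6]))  # -> [1, 1, 2, 3, 4, 5, 6, 9]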
3,861
def cmServicePrompt(): a = TpPd(pd=5) b = MessageType(mesType=37) c = PdAndSapi() packet = ((a / b) / c) return packet
[ "def", "cmServicePrompt", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "5", ")", "b", "=", "MessageType", "(", "mesType", "=", "37", ")", "c", "=", "PdAndSapi", "(", ")", "packet", "=", "(", "(", "a", "/", "b", ")", "/", "c", ")", "return", "packet" ]
cm service prompt section 9 .
train
true
3,862
def print_column_headers(results): print('Column Headers:') headers = results.get('columnHeaders') for header in headers: print((' DCTB %s name: = %s' % (header.get('columnType').title(), header.get('name')))) print((' DCTB Column Type = %s' % header.get('columnType'))) print((' DCTB Data Type = %s' % header.get('dataType'))) print()
[ "def", "print_column_headers", "(", "results", ")", ":", "print", "(", "'Column Headers:'", ")", "headers", "=", "results", ".", "get", "(", "'columnHeaders'", ")", "for", "header", "in", "headers", ":", "print", "(", "(", "' DCTB %s name: = %s'", "%", "(", "header", ".", "get", "(", "'columnType'", ")", ".", "title", "(", ")", ",", "header", ".", "get", "(", "'name'", ")", ")", ")", ")", "print", "(", "(", "' DCTB Column Type = %s'", "%", "header", ".", "get", "(", "'columnType'", ")", ")", ")", "print", "(", "(", "' DCTB Data Type = %s'", "%", "header", ".", "get", "(", "'dataType'", ")", ")", ")", "print", "(", ")" ]
prints the information for each column .
train
false
3,863
def get_start_end(sequence, skiplist=('-', '?')): length = len(sequence) if (length == 0): return (None, None) end = (length - 1) while ((end >= 0) and (sequence[end] in skiplist)): end -= 1 start = 0 while ((start < length) and (sequence[start] in skiplist)): start += 1 if ((start == length) and (end == (-1))): return ((-1), (-1)) else: return (start, end)
[ "def", "get_start_end", "(", "sequence", ",", "skiplist", "=", "(", "'-'", ",", "'?'", ")", ")", ":", "length", "=", "len", "(", "sequence", ")", "if", "(", "length", "==", "0", ")", ":", "return", "(", "None", ",", "None", ")", "end", "=", "(", "length", "-", "1", ")", "while", "(", "(", "end", ">=", "0", ")", "and", "(", "sequence", "[", "end", "]", "in", "skiplist", ")", ")", ":", "end", "-=", "1", "start", "=", "0", "while", "(", "(", "start", "<", "length", ")", "and", "(", "sequence", "[", "start", "]", "in", "skiplist", ")", ")", ":", "start", "+=", "1", "if", "(", "(", "start", "==", "length", ")", "and", "(", "end", "==", "(", "-", "1", ")", ")", ")", ":", "return", "(", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", "else", ":", "return", "(", "start", ",", "end", ")" ]
return position of first and last character which is not in skiplist .
train
false
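A worked call: gap and ambiguity characters from skiplist are trimmed from both ends, and the indices of the first and last real characters are returned:

print(get_start_end('--AC-GT-?'))  # -> (2, 6): 'A' at index 2, 'T' at index 6
print(get_start_end('----'))       # -> (-1, -1): nothing but skip characters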
3,864
def osc_ostlist(directory, fs): ostlist = [] for ost in os.listdir(directory): if (fs in ost): fir = ost.find('-') sec = ost.find('-', (fir + 1)) thrd = ost.find('-', (sec + 1)) ost_name = ost[(fir + 1):sec] if (ost_name not in ostlist): ostlist.append(ost_name) (yield ost_name)
[ "def", "osc_ostlist", "(", "directory", ",", "fs", ")", ":", "ostlist", "=", "[", "]", "for", "ost", "in", "os", ".", "listdir", "(", "directory", ")", ":", "if", "(", "fs", "in", "ost", ")", ":", "fir", "=", "ost", ".", "find", "(", "'-'", ")", "sec", "=", "ost", ".", "find", "(", "'-'", ",", "(", "fir", "+", "1", ")", ")", "thrd", "=", "ost", ".", "find", "(", "'-'", ",", "(", "sec", "+", "1", ")", ")", "ost_name", "=", "ost", "[", "(", "fir", "+", "1", ")", ":", "sec", "]", "if", "(", "ost_name", "not", "in", "ostlist", ")", ":", "ostlist", ".", "append", "(", "ost_name", ")", "(", "yield", "ost_name", ")" ]
return ost names based on folder names in osc directory .
train
false
3,866
def pickleDumpDict(name, d): try: f = open((name + '.pickle'), 'w') pickle.dump(d, f) f.close() return True except Exception as e: print(('Error writing into', name, ':', str(e))) return False
[ "def", "pickleDumpDict", "(", "name", ",", "d", ")", ":", "try", ":", "f", "=", "open", "(", "(", "name", "+", "'.pickle'", ")", ",", "'w'", ")", "pickle", ".", "dump", "(", "d", ",", "f", ")", "f", ".", "close", "(", ")", "return", "True", "except", "Exception", "as", "e", ":", "print", "(", "(", "'Error writing into'", ",", "name", ",", "':'", ",", "str", "(", "e", ")", ")", ")", "return", "False" ]
pickle-dump a variable into a file .
train
false
3,870
def getMinimumYByPath(path): minimumYByPath = path[0].y for point in path: minimumYByPath = min(minimumYByPath, point.y) return minimumYByPath
[ "def", "getMinimumYByPath", "(", "path", ")", ":", "minimumYByPath", "=", "path", "[", "0", "]", ".", "y", "for", "point", "in", "path", ":", "minimumYByPath", "=", "min", "(", "minimumYByPath", ",", "point", ".", "y", ")", "return", "minimumYByPath" ]
get the minimum y coordinate of the points along a path .
train
false
3,871
def volunteer_award(): return s3_rest_controller()
[ "def", "volunteer_award", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller for volunteer awards .
train
false
3,875
def get_device_by_name_or_pk(name): if re.match(DEVICE_BY_PK_RE, name): pk = name.strip('{}') device = Device.objects.get(pk=pk) else: device = Device.objects.get(name=name) return device
[ "def", "get_device_by_name_or_pk", "(", "name", ")", ":", "if", "re", ".", "match", "(", "DEVICE_BY_PK_RE", ",", "name", ")", ":", "pk", "=", "name", ".", "strip", "(", "'{}'", ")", "device", "=", "Device", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", "else", ":", "device", "=", "Device", ".", "objects", ".", "get", "(", "name", "=", "name", ")", "return", "device" ]
attempt to retrieve a device by either its name or primary key .
train
false
3,876
def export_module_json(doc, is_standard, module): if ((not frappe.flags.in_import) and getattr(frappe.get_conf(), u'developer_mode', 0) and is_standard): from frappe.modules.export_file import export_to_files from frappe.modules import get_module_path export_to_files(record_list=[[doc.doctype, doc.name]], record_module=module) path = os.path.join(get_module_path(module), scrub(doc.doctype), scrub(doc.name), scrub(doc.name)) return path
[ "def", "export_module_json", "(", "doc", ",", "is_standard", ",", "module", ")", ":", "if", "(", "(", "not", "frappe", ".", "flags", ".", "in_import", ")", "and", "getattr", "(", "frappe", ".", "get_conf", "(", ")", ",", "u'developer_mode'", ",", "0", ")", "and", "is_standard", ")", ":", "from", "frappe", ".", "modules", ".", "export_file", "import", "export_to_files", "from", "frappe", ".", "modules", "import", "get_module_path", "export_to_files", "(", "record_list", "=", "[", "[", "doc", ".", "doctype", ",", "doc", ".", "name", "]", "]", ",", "record_module", "=", "module", ")", "path", "=", "os", ".", "path", ".", "join", "(", "get_module_path", "(", "module", ")", ",", "scrub", "(", "doc", ".", "doctype", ")", ",", "scrub", "(", "doc", ".", "name", ")", ",", "scrub", "(", "doc", ".", "name", ")", ")", "return", "path" ]
make a folder for the given doc and add its json file .
train
false
3,877
@task() @timeit def index_chunk_task(write_index, batch_id, rec_id, chunk): (cls_path, id_list) = chunk cls = from_class_path(cls_path) rec = None from kitsune.search.models import Record try: pin_this_thread() rec = Record.objects.get(pk=rec_id) rec.start_time = datetime.datetime.now() rec.message = (u'Reindexing into %s' % write_index) rec.status = Record.STATUS_IN_PROGRESS rec.save() index_chunk(cls, id_list, reraise=True) rec.mark_success() except Exception: if (rec is not None): rec.mark_fail((u'Errored out %s %s' % (sys.exc_type, sys.exc_value))) log.exception('Error while indexing a chunk') raise IndexingTaskError() finally: unpin_this_thread()
[ "@", "task", "(", ")", "@", "timeit", "def", "index_chunk_task", "(", "write_index", ",", "batch_id", ",", "rec_id", ",", "chunk", ")", ":", "(", "cls_path", ",", "id_list", ")", "=", "chunk", "cls", "=", "from_class_path", "(", "cls_path", ")", "rec", "=", "None", "from", "kitsune", ".", "search", ".", "models", "import", "Record", "try", ":", "pin_this_thread", "(", ")", "rec", "=", "Record", ".", "objects", ".", "get", "(", "pk", "=", "rec_id", ")", "rec", ".", "start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "rec", ".", "message", "=", "(", "u'Reindexing into %s'", "%", "write_index", ")", "rec", ".", "status", "=", "Record", ".", "STATUS_IN_PROGRESS", "rec", ".", "save", "(", ")", "index_chunk", "(", "cls", ",", "id_list", ",", "reraise", "=", "True", ")", "rec", ".", "mark_success", "(", ")", "except", "Exception", ":", "if", "(", "rec", "is", "not", "None", ")", ":", "rec", ".", "mark_fail", "(", "(", "u'Errored out %s %s'", "%", "(", "sys", ".", "exc_type", ",", "sys", ".", "exc_value", ")", ")", ")", "log", ".", "exception", "(", "'Error while indexing a chunk'", ")", "raise", "IndexingTaskError", "(", ")", "finally", ":", "unpin_this_thread", "(", ")" ]
index a chunk of things .
train
false
3,878
@env.catch_exceptions def enable_virtualenv(): path = env.var('g:pymode_virtualenv_path') path = os.path.abspath(path) enabled = env.var('g:pymode_virtualenv_enabled') if (path == enabled): env.message(('Virtualenv %s already enabled.' % path)) return env.stop() activate_this = os.path.join(os.path.join(path, 'bin'), 'activate_this.py') if (not os.path.exists(activate_this)): activate_this = os.path.join(os.path.join(path, 'Scripts'), 'activate_this.py') try: with open(activate_this) as f: source = f.read() exec compile(source, activate_this, 'exec') in dict(__file__=activate_this) except IOError: _activate_env_from_path(path) env.message(('Activate virtualenv: ' + path)) env.let('g:pymode_virtualenv_enabled', path) return True
[ "@", "env", ".", "catch_exceptions", "def", "enable_virtualenv", "(", ")", ":", "path", "=", "env", ".", "var", "(", "'g:pymode_virtualenv_path'", ")", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "enabled", "=", "env", ".", "var", "(", "'g:pymode_virtualenv_enabled'", ")", "if", "(", "path", "==", "enabled", ")", ":", "env", ".", "message", "(", "(", "'Virtualenv %s already enabled.'", "%", "path", ")", ")", "return", "env", ".", "stop", "(", ")", "activate_this", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'bin'", ")", ",", "'activate_this.py'", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "activate_this", ")", ")", ":", "activate_this", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'Scripts'", ")", ",", "'activate_this.py'", ")", "try", ":", "with", "open", "(", "activate_this", ")", "as", "f", ":", "source", "=", "f", ".", "read", "(", ")", "exec", "compile", "(", "source", ",", "activate_this", ",", "'exec'", ")", "in", "dict", "(", "__file__", "=", "activate_this", ")", "except", "IOError", ":", "_activate_env_from_path", "(", "path", ")", "env", ".", "message", "(", "(", "'Activate virtualenv: '", "+", "path", ")", ")", "env", ".", "let", "(", "'g:pymode_virtualenv_enabled'", ",", "path", ")", "return", "True" ]
enable virtualenv for vim .
train
false
3,879
@receiver(thread_edited) @receiver(thread_deleted) @receiver(comment_edited) @receiver(comment_deleted) def post_edit_delete_handler(sender, **kwargs): post = kwargs['post'] handle_activity(kwargs['user'], post, long(post.user_id))
[ "@", "receiver", "(", "thread_edited", ")", "@", "receiver", "(", "thread_deleted", ")", "@", "receiver", "(", "comment_edited", ")", "@", "receiver", "(", "comment_deleted", ")", "def", "post_edit_delete_handler", "(", "sender", ",", "**", "kwargs", ")", ":", "post", "=", "kwargs", "[", "'post'", "]", "handle_activity", "(", "kwargs", "[", "'user'", "]", ",", "post", ",", "long", "(", "post", ".", "user_id", ")", ")" ]
update the user's last activity date upon editing or deleting a post .
train
false
3,880
def is_conemu_ansi(): return (is_windows() and (os.environ.get(u'ConEmuANSI', u'OFF') == u'ON'))
[ "def", "is_conemu_ansi", "(", ")", ":", "return", "(", "is_windows", "(", ")", "and", "(", "os", ".", "environ", ".", "get", "(", "u'ConEmuANSI'", ",", "u'OFF'", ")", "==", "u'ON'", ")", ")" ]
true when the conemu windows console is used .
train
false
3,881
def ck_browse(): table = s3db.doc_ckeditor set = db((table.id > 0)) rows = set.select(orderby=table.title) return dict(rows=rows, cknum=request.vars.CKEditorFuncNum)
[ "def", "ck_browse", "(", ")", ":", "table", "=", "s3db", ".", "doc_ckeditor", "set", "=", "db", "(", "(", "table", ".", "id", ">", "0", ")", ")", "rows", "=", "set", ".", "select", "(", "orderby", "=", "table", ".", "title", ")", "return", "dict", "(", "rows", "=", "rows", ",", "cknum", "=", "request", ".", "vars", ".", "CKEditorFuncNum", ")" ]
controller to let ckeditor browse previously uploaded files .
train
false
3,882
def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight', dangling=None): import numpy as np if (len(G) == 0): return {} M = google_matrix(G, alpha, personalization=personalization, weight=weight, dangling=dangling) (eigenvalues, eigenvectors) = np.linalg.eig(M.T) ind = np.argmax(eigenvalues) largest = np.array(eigenvectors[:, ind]).flatten().real norm = float(largest.sum()) return dict(zip(G, map(float, (largest / norm))))
[ "def", "pagerank_numpy", "(", "G", ",", "alpha", "=", "0.85", ",", "personalization", "=", "None", ",", "weight", "=", "'weight'", ",", "dangling", "=", "None", ")", ":", "import", "numpy", "as", "np", "if", "(", "len", "(", "G", ")", "==", "0", ")", ":", "return", "{", "}", "M", "=", "google_matrix", "(", "G", ",", "alpha", ",", "personalization", "=", "personalization", ",", "weight", "=", "weight", ",", "dangling", "=", "dangling", ")", "(", "eigenvalues", ",", "eigenvectors", ")", "=", "np", ".", "linalg", ".", "eig", "(", "M", ".", "T", ")", "ind", "=", "np", ".", "argmax", "(", "eigenvalues", ")", "largest", "=", "np", ".", "array", "(", "eigenvectors", "[", ":", ",", "ind", "]", ")", ".", "flatten", "(", ")", ".", "real", "norm", "=", "float", "(", "largest", ".", "sum", "(", ")", ")", "return", "dict", "(", "zip", "(", "G", ",", "map", "(", "float", ",", "(", "largest", "/", "norm", ")", ")", ")", ")" ]
return the pagerank of the nodes in the graph .
train
false
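This is NetworkX's dense PageRank (it imports google_matrix from the same package); a small example under that assumption:

import networkx as nx

G = nx.DiGraph(nx.path_graph(4))
pr = pagerank_numpy(G, alpha=0.9)
# pr maps node -> score; the scores are normalized to sum to 1
print(sum(pr.values()))  # -> 1.0 (up to floating point)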
3,883
def _make_api_request_no_retry(http, http_request, redirections=_REDIRECTIONS): connection_type = None if getattr(http, 'connections', None): url_scheme = parse.urlsplit(http_request.url).scheme if (url_scheme and (url_scheme in http.connections)): connection_type = http.connections[url_scheme] new_debuglevel = (4 if (httplib2.debuglevel == 4) else 0) with _httplib2_debug_level(http_request, new_debuglevel, http=http): (info, content) = http.request(str(http_request.url), method=str(http_request.http_method), body=http_request.body, headers=http_request.headers, redirections=redirections, connection_type=connection_type) if (info is None): raise RequestError() response = Response(info, content, http_request.url) _check_response(response) return response
[ "def", "_make_api_request_no_retry", "(", "http", ",", "http_request", ",", "redirections", "=", "_REDIRECTIONS", ")", ":", "connection_type", "=", "None", "if", "getattr", "(", "http", ",", "'connections'", ",", "None", ")", ":", "url_scheme", "=", "parse", ".", "urlsplit", "(", "http_request", ".", "url", ")", ".", "scheme", "if", "(", "url_scheme", "and", "(", "url_scheme", "in", "http", ".", "connections", ")", ")", ":", "connection_type", "=", "http", ".", "connections", "[", "url_scheme", "]", "new_debuglevel", "=", "(", "4", "if", "(", "httplib2", ".", "debuglevel", "==", "4", ")", "else", "0", ")", "with", "_httplib2_debug_level", "(", "http_request", ",", "new_debuglevel", ",", "http", "=", "http", ")", ":", "(", "info", ",", "content", ")", "=", "http", ".", "request", "(", "str", "(", "http_request", ".", "url", ")", ",", "method", "=", "str", "(", "http_request", ".", "http_method", ")", ",", "body", "=", "http_request", ".", "body", ",", "headers", "=", "http_request", ".", "headers", ",", "redirections", "=", "redirections", ",", "connection_type", "=", "connection_type", ")", "if", "(", "info", "is", "None", ")", ":", "raise", "RequestError", "(", ")", "response", "=", "Response", "(", "info", ",", "content", ",", "http_request", ".", "url", ")", "_check_response", "(", "response", ")", "return", "response" ]
send an http request via the given http instance .
train
false
3,884
def get_path_names(): return _SCHEME_KEYS
[ "def", "get_path_names", "(", ")", ":", "return", "_SCHEME_KEYS" ]
return a tuple containing the path names .
train
false
3,885
def parse_qiime_config_file(qiime_config_file): result = {} for line in qiime_config_file: line = line.strip() if ((not line) or line.startswith('#')): continue fields = line.split() param_id = fields[0] param_value = (expandvars(' '.join(fields[1:])) or None) result[param_id] = param_value return result
[ "def", "parse_qiime_config_file", "(", "qiime_config_file", ")", ":", "result", "=", "{", "}", "for", "line", "in", "qiime_config_file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "(", "(", "not", "line", ")", "or", "line", ".", "startswith", "(", "'#'", ")", ")", ":", "continue", "fields", "=", "line", ".", "split", "(", ")", "param_id", "=", "fields", "[", "0", "]", "param_value", "=", "(", "expandvars", "(", "' '", ".", "join", "(", "fields", "[", "1", ":", "]", ")", ")", "or", "None", ")", "result", "[", "param_id", "]", "=", "param_value", "return", "result" ]
parse lines in a qiime_config file .
train
false
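Each non-comment line is split into a key and a value with environment variables expanded; any iterable of lines works, e.g.:

lines = [
    '# qiime_config example',
    '',
    'qiime_scripts_dir  /opt/qiime/bin',
    'cluster_jobs_fp',
]
print(parse_qiime_config_file(lines))
# -> {'qiime_scripts_dir': '/opt/qiime/bin', 'cluster_jobs_fp': None}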
3,888
def dup_gcdex(f, g, K): (s, h) = dup_half_gcdex(f, g, K) F = dup_sub_mul(h, s, f, K) t = dup_quo(F, g, K) return (s, t, h)
[ "def", "dup_gcdex", "(", "f", ",", "g", ",", "K", ")", ":", "(", "s", ",", "h", ")", "=", "dup_half_gcdex", "(", "f", ",", "g", ",", "K", ")", "F", "=", "dup_sub_mul", "(", "h", ",", "s", ",", "f", ",", "K", ")", "t", "=", "dup_quo", "(", "F", ",", "g", ",", "K", ")", "return", "(", "s", ",", "t", ",", "h", ")" ]
extended euclidean algorithm in f[x] .
train
false
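The returned triple satisfies the Bezout identity s*f + t*g = h with h = gcd(f, g) in F[x]; the integer analog below illustrates the same identity (a sketch, not SymPy's dense-polynomial code):

def gcdex(a, b):
    # returns (s, t, g) with s*a + t*b == g == gcd(a, b)
    if b == 0:
        return 1, 0, a
    s, t, g = gcdex(b, a % b)
    return t, s - (a // b) * t, g

print(gcdex(240, 46))  # -> (-9, 47, 2), since -9*240 + 47*46 == 2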
3,890
def merge_inner(clsdict): samelist = False done = {} while (not samelist): samelist = True classlist = clsdict.keys() for classname in classlist: parts_name = classname.rsplit('$', 1) if (len(parts_name) > 1): (mainclass, innerclass) = parts_name innerclass = innerclass[:(-1)] mainclass += ';' if (mainclass in clsdict): clsdict[mainclass].add_subclass(innerclass, clsdict[classname]) clsdict[classname].name = innerclass done[classname] = clsdict[classname] del clsdict[classname] samelist = False elif (mainclass in done): cls = done[mainclass] cls.add_subclass(innerclass, clsdict[classname]) clsdict[classname].name = innerclass done[classname] = done[mainclass] del clsdict[classname] samelist = False
[ "def", "merge_inner", "(", "clsdict", ")", ":", "samelist", "=", "False", "done", "=", "{", "}", "while", "(", "not", "samelist", ")", ":", "samelist", "=", "True", "classlist", "=", "clsdict", ".", "keys", "(", ")", "for", "classname", "in", "classlist", ":", "parts_name", "=", "classname", ".", "rsplit", "(", "'$'", ",", "1", ")", "if", "(", "len", "(", "parts_name", ")", ">", "1", ")", ":", "(", "mainclass", ",", "innerclass", ")", "=", "parts_name", "innerclass", "=", "innerclass", "[", ":", "(", "-", "1", ")", "]", "mainclass", "+=", "';'", "if", "(", "mainclass", "in", "clsdict", ")", ":", "clsdict", "[", "mainclass", "]", ".", "add_subclass", "(", "innerclass", ",", "clsdict", "[", "classname", "]", ")", "clsdict", "[", "classname", "]", ".", "name", "=", "innerclass", "done", "[", "classname", "]", "=", "clsdict", "[", "classname", "]", "del", "clsdict", "[", "classname", "]", "samelist", "=", "False", "elif", "(", "mainclass", "in", "done", ")", ":", "cls", "=", "done", "[", "mainclass", "]", "cls", ".", "add_subclass", "(", "innerclass", ",", "clsdict", "[", "classname", "]", ")", "clsdict", "[", "classname", "]", ".", "name", "=", "innerclass", "done", "[", "classname", "]", "=", "done", "[", "mainclass", "]", "del", "clsdict", "[", "classname", "]", "samelist", "=", "False" ]
merge the inner classes of a class into their outer class .
train
true
3,891
def make_tensor(dim): raise NotImplementedError('TODO: implement this function.')
[ "def", "make_tensor", "(", "dim", ")", ":", "raise", "NotImplementedError", "(", "'TODO: implement this function.'", ")" ]
returns a new theano tensor with no broadcastable dimensions .
train
false
3,892
def get_signed_purchase_params(cart, callback_url=None, extra_data=None): return sign(get_purchase_params(cart, callback_url=callback_url, extra_data=extra_data))
[ "def", "get_signed_purchase_params", "(", "cart", ",", "callback_url", "=", "None", ",", "extra_data", "=", "None", ")", ":", "return", "sign", "(", "get_purchase_params", "(", "cart", ",", "callback_url", "=", "callback_url", ",", "extra_data", "=", "extra_data", ")", ")" ]
return the parameters to send to the current payment processor .
train
false
3,893
def getSliceDictionary(xmlElement): for metadataElement in xmlElement.getChildrenWithClassName('metadata'): for child in metadataElement.children: if (child.className.lower() == 'slice:layers'): return child.attributeDictionary return {}
[ "def", "getSliceDictionary", "(", "xmlElement", ")", ":", "for", "metadataElement", "in", "xmlElement", ".", "getChildrenWithClassName", "(", "'metadata'", ")", ":", "for", "child", "in", "metadataElement", ".", "children", ":", "if", "(", "child", ".", "className", ".", "lower", "(", ")", "==", "'slice:layers'", ")", ":", "return", "child", ".", "attributeDictionary", "return", "{", "}" ]
get the metadata slice attribute dictionary .
train
false
3,894
def set_default_app(app): global default_app default_app = app
[ "def", "set_default_app", "(", "app", ")", ":", "global", "default_app", "default_app", "=", "app" ]
set default app .
train
false
3,895
def after_nearest_workday(dt): return next_workday(nearest_workday(dt))
[ "def", "after_nearest_workday", "(", "dt", ")", ":", "return", "next_workday", "(", "nearest_workday", "(", "dt", ")", ")" ]
returns the next workday after the nearest workday; needed for boxing day or for multiple holidays in a series .
train
false
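next_workday and nearest_workday look like the helpers from pandas.tseries.holiday; under that assumption:

from datetime import datetime
from pandas.tseries.holiday import nearest_workday, next_workday

# 2021-12-25 is a Saturday: nearest_workday gives Friday the 24th,
# and next_workday then advances to Monday the 27th
print(after_nearest_workday(datetime(2021, 12, 25)))  # -> 2021-12-27 00:00:00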
3,897
def add_time_units(time, unit, amount): args = {} if (unit == 'hour'): args['hours'] = amount elif (unit == 'day'): args['days'] = amount elif (unit == 'week'): args['days'] = (amount * 7) elif (unit == 'month'): args['months'] = amount elif (unit == 'quarter'): args['months'] = (amount * 3) elif (unit == 'year'): args['years'] = amount else: raise ArgumentError('Unknown unit %s for subtraction.') return (time + relativedelta(**args))
[ "def", "add_time_units", "(", "time", ",", "unit", ",", "amount", ")", ":", "args", "=", "{", "}", "if", "(", "unit", "==", "'hour'", ")", ":", "args", "[", "'hours'", "]", "=", "amount", "elif", "(", "unit", "==", "'day'", ")", ":", "args", "[", "'days'", "]", "=", "amount", "elif", "(", "unit", "==", "'week'", ")", ":", "args", "[", "'days'", "]", "=", "(", "amount", "*", "7", ")", "elif", "(", "unit", "==", "'month'", ")", ":", "args", "[", "'months'", "]", "=", "amount", "elif", "(", "unit", "==", "'quarter'", ")", ":", "args", "[", "'months'", "]", "=", "(", "amount", "*", "3", ")", "elif", "(", "unit", "==", "'year'", ")", ":", "args", "[", "'years'", "]", "=", "amount", "else", ":", "raise", "ArgumentError", "(", "'Unknown unit %s for subtraction.'", ")", "return", "(", "time", "+", "relativedelta", "(", "**", "args", ")", ")" ]
add amount number of units to the datetime object time .
train
false
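A couple of worked calls, assuming dateutil's relativedelta is imported as in the snippet; relativedelta clamps to valid month ends:

from datetime import datetime

print(add_time_units(datetime(2020, 1, 31), 'month', 1))   # -> 2020-02-29 00:00:00 (leap-year clamp)
print(add_time_units(datetime(2020, 1, 1), 'quarter', 2))  # -> 2020-07-01 00:00:00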