id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
24,438
def localised_number(number): return numbers.format_number(number, locale=i18n.get_lang())
[ "def", "localised_number", "(", "number", ")", ":", "return", "numbers", ".", "format_number", "(", "number", ",", "locale", "=", "i18n", ".", "get_lang", "(", ")", ")" ]
returns a localised unicode representation of number .
train
false
24,439
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
create a new blowfish cipher .
train
false
24,442
def process_get_tags_by_filter(http_resp, tags, yield_text, processes, hash_string, debug): pid = multiprocessing.current_process().pid processes[hash_string] = pid document_parser = DocumentParser(http_resp) if (not hasattr(document_parser, 'get_tags_by_filter')): return [] filtered_tags = [] for tag in document_parser.get_tags_by_filter(tags, yield_text=yield_text): filtered_tags.append(tag) return filtered_tags
[ "def", "process_get_tags_by_filter", "(", "http_resp", ",", "tags", ",", "yield_text", ",", "processes", ",", "hash_string", ",", "debug", ")", ":", "pid", "=", "multiprocessing", ".", "current_process", "(", ")", ".", "pid", "processes", "[", "hash_string", "]", "=", "pid", "document_parser", "=", "DocumentParser", "(", "http_resp", ")", "if", "(", "not", "hasattr", "(", "document_parser", ",", "'get_tags_by_filter'", ")", ")", ":", "return", "[", "]", "filtered_tags", "=", "[", "]", "for", "tag", "in", "document_parser", ".", "get_tags_by_filter", "(", "tags", ",", "yield_text", "=", "yield_text", ")", ":", "filtered_tags", ".", "append", "(", "tag", ")", "return", "filtered_tags" ]
simple wrapper to get the current process id and store it in a shared object so we can kill the process if needed .
train
false
24,443
def cellnameabs(rowx, colx): return ('$%s$%d' % (colname(colx), (rowx + 1)))
[ "def", "cellnameabs", "(", "rowx", ",", "colx", ")", ":", "return", "(", "'$%s$%d'", "%", "(", "colname", "(", "colx", ")", ",", "(", "rowx", "+", "1", ")", ")", ")" ]
utility function: => $h$6 .
train
false
24,444
def gauss_legendre(n, n_digits): x = Dummy('x') p = legendre_poly(n, x, polys=True) pd = p.diff(x) xi = [] w = [] for r in p.real_roots(): if isinstance(r, RootOf): r = r.eval_rational((S(1) / (10 ** (n_digits + 2)))) xi.append(r.n(n_digits)) w.append((2 / ((1 - (r ** 2)) * (pd.subs(x, r) ** 2))).n(n_digits)) return (xi, w)
[ "def", "gauss_legendre", "(", "n", ",", "n_digits", ")", ":", "x", "=", "Dummy", "(", "'x'", ")", "p", "=", "legendre_poly", "(", "n", ",", "x", ",", "polys", "=", "True", ")", "pd", "=", "p", ".", "diff", "(", "x", ")", "xi", "=", "[", "]", "w", "=", "[", "]", "for", "r", "in", "p", ".", "real_roots", "(", ")", ":", "if", "isinstance", "(", "r", ",", "RootOf", ")", ":", "r", "=", "r", ".", "eval_rational", "(", "(", "S", "(", "1", ")", "/", "(", "10", "**", "(", "n_digits", "+", "2", ")", ")", ")", ")", "xi", ".", "append", "(", "r", ".", "n", "(", "n_digits", ")", ")", "w", ".", "append", "(", "(", "2", "/", "(", "(", "1", "-", "(", "r", "**", "2", ")", ")", "*", "(", "pd", ".", "subs", "(", "x", ",", "r", ")", "**", "2", ")", ")", ")", ".", "n", "(", "n_digits", ")", ")", "return", "(", "xi", ",", "w", ")" ]
computes the gauss-legendre quadrature [1]_ points and weights .
train
false
24,445
def reduce_to_map(records, key_field, value_field): map_fields = {key_field, value_field} result_map = {} first_record = None for record in records: r = record.serialize() if (first_record is None): first_record = record first_record_items = set(r.items()) continue diff = dict(first_record_items.difference(r.items())) different_keys = set(diff.keys()).difference(map_fields) if different_keys: raise ValueError('Unexpected related record found. \nReference Record: {}, \nDifferent Record: {}, \nDifferent Keys: {}'.format(first_record, record, different_keys)) key = r[key_field] value = r[value_field] assert (key not in result_map) result_map[key] = value return result_map
[ "def", "reduce_to_map", "(", "records", ",", "key_field", ",", "value_field", ")", ":", "map_fields", "=", "{", "key_field", ",", "value_field", "}", "result_map", "=", "{", "}", "first_record", "=", "None", "for", "record", "in", "records", ":", "r", "=", "record", ".", "serialize", "(", ")", "if", "(", "first_record", "is", "None", ")", ":", "first_record", "=", "record", "first_record_items", "=", "set", "(", "r", ".", "items", "(", ")", ")", "continue", "diff", "=", "dict", "(", "first_record_items", ".", "difference", "(", "r", ".", "items", "(", ")", ")", ")", "different_keys", "=", "set", "(", "diff", ".", "keys", "(", ")", ")", ".", "difference", "(", "map_fields", ")", "if", "different_keys", ":", "raise", "ValueError", "(", "'Unexpected related record found. \\nReference Record: {}, \\nDifferent Record: {}, \\nDifferent Keys: {}'", ".", "format", "(", "first_record", ",", "record", ",", "different_keys", ")", ")", "key", "=", "r", "[", "key_field", "]", "value", "=", "r", "[", "value_field", "]", "assert", "(", "key", "not", "in", "result_map", ")", "result_map", "[", "key", "]", "=", "value", "return", "result_map" ]
reduce the records to a dict of key_field: value_field extracted from each record .
train
false
24,446
def get_folder_size(path): if os.path.exists(path): return flt(subprocess.check_output([u'du', u'-ms', path]).split()[0], 2)
[ "def", "get_folder_size", "(", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "flt", "(", "subprocess", ".", "check_output", "(", "[", "u'du'", ",", "u'-ms'", ",", "path", "]", ")", ".", "split", "(", ")", "[", "0", "]", ",", "2", ")" ]
returns folder size in mb if it exists .
train
false
24,447
def _OSPFv3_LSAGuessPayloadClass(p, **kargs): cls = conf.raw_layer if (len(p) >= 6): typ = struct.unpack('!H', p[2:4])[0] clsname = _OSPFv3_LSclasses.get(typ, 'Raw') cls = globals()[clsname] return cls(p, **kargs)
[ "def", "_OSPFv3_LSAGuessPayloadClass", "(", "p", ",", "**", "kargs", ")", ":", "cls", "=", "conf", ".", "raw_layer", "if", "(", "len", "(", "p", ")", ">=", "6", ")", ":", "typ", "=", "struct", ".", "unpack", "(", "'!H'", ",", "p", "[", "2", ":", "4", "]", ")", "[", "0", "]", "clsname", "=", "_OSPFv3_LSclasses", ".", "get", "(", "typ", ",", "'Raw'", ")", "cls", "=", "globals", "(", ")", "[", "clsname", "]", "return", "cls", "(", "p", ",", "**", "kargs", ")" ]
guess the correct ospfv3 lsa class for a given payload .
train
true
24,448
def rs_cosh(p, x, prec): if rs_is_puiseux(p, x): return rs_puiseux(rs_cosh, p, x, prec) t = rs_exp(p, x, prec) t1 = rs_series_inversion(t, x, prec) return ((t + t1) / 2)
[ "def", "rs_cosh", "(", "p", ",", "x", ",", "prec", ")", ":", "if", "rs_is_puiseux", "(", "p", ",", "x", ")", ":", "return", "rs_puiseux", "(", "rs_cosh", ",", "p", ",", "x", ",", "prec", ")", "t", "=", "rs_exp", "(", "p", ",", "x", ",", "prec", ")", "t1", "=", "rs_series_inversion", "(", "t", ",", "x", ",", "prec", ")", "return", "(", "(", "t", "+", "t1", ")", "/", "2", ")" ]
hyperbolic cosine of a series return the series expansion of the cosh of p .
train
false
24,450
def check_file_content(path, expected_content): with open(path) as input: return (expected_content == input.read())
[ "def", "check_file_content", "(", "path", ",", "expected_content", ")", ":", "with", "open", "(", "path", ")", "as", "input", ":", "return", "(", "expected_content", "==", "input", ".", "read", "(", ")", ")" ]
check file has expected content .
train
false
24,453
def p_type_qualifier_list_2(t): pass
[ "def", "p_type_qualifier_list_2", "(", "t", ")", ":", "pass" ]
type_qualifier_list : type_qualifier_list type_qualifier .
train
false
24,454
def image_absent(name): ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if (name not in __salt__['imgadm.list']()): ret['result'] = True ret['comment'] = 'image {0} is absent'.format(name) elif (name in __salt__['vmadm.list'](order='image_uuid')): ret['result'] = False ret['comment'] = 'image {0} currently in use by a vm'.format(name) else: if __opts__['test']: ret['result'] = True else: __salt__['imgadm.delete'](name) ret['result'] = (name not in __salt__['imgadm.list']()) ret['comment'] = 'image {0} deleted'.format(name) ret['changes'][name] = None return ret
[ "def", "image_absent", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "(", "name", "not", "in", "__salt__", "[", "'imgadm.list'", "]", "(", ")", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'image {0} is absent'", ".", "format", "(", "name", ")", "elif", "(", "name", "in", "__salt__", "[", "'vmadm.list'", "]", "(", "order", "=", "'image_uuid'", ")", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'image {0} currently in use by a vm'", ".", "format", "(", "name", ")", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "True", "else", ":", "__salt__", "[", "'imgadm.delete'", "]", "(", "name", ")", "ret", "[", "'result'", "]", "=", "(", "name", "not", "in", "__salt__", "[", "'imgadm.list'", "]", "(", ")", ")", "ret", "[", "'comment'", "]", "=", "'image {0} deleted'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "None", "return", "ret" ]
ensure image is absent on the computenode name : string uuid of image .
train
false
24,455
def safe_write(filename, contents): dirname = os.path.dirname(filename) if (not os.path.exists(dirname)): os.makedirs(dirname) with open(filename, 'w') as fh: fh.write(contents.encode('utf-8', 'ignore')) fh.close()
[ "def", "safe_write", "(", "filename", ",", "contents", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ")", ":", "os", ".", "makedirs", "(", "dirname", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "contents", ".", "encode", "(", "'utf-8'", ",", "'ignore'", ")", ")", "fh", ".", "close", "(", ")" ]
normalize and write to filename write contents to the given filename .
train
false
24,456
def get_datacenter(conn): datacenter_id = get_datacenter_id() for item in conn.list_datacenters()['items']: if (item['id'] == datacenter_id): return item raise SaltCloudNotFound("The specified datacenter '{0}' could not be found.".format(datacenter_id))
[ "def", "get_datacenter", "(", "conn", ")", ":", "datacenter_id", "=", "get_datacenter_id", "(", ")", "for", "item", "in", "conn", ".", "list_datacenters", "(", ")", "[", "'items'", "]", ":", "if", "(", "item", "[", "'id'", "]", "==", "datacenter_id", ")", ":", "return", "item", "raise", "SaltCloudNotFound", "(", "\"The specified datacenter '{0}' could not be found.\"", ".", "format", "(", "datacenter_id", ")", ")" ]
return the datacenter from the config provider datacenter id .
train
true
24,457
def test_raw_static_check(): path = '"/static/foo.png?raw"' assert_equals(path, replace_static_urls(path, DATA_DIRECTORY)) text = 'text <tag a="/static/js/capa/protex/protex.nocache.js?raw"/><div class="' assert_equals(path, replace_static_urls(path, text))
[ "def", "test_raw_static_check", "(", ")", ":", "path", "=", "'\"/static/foo.png?raw\"'", "assert_equals", "(", "path", ",", "replace_static_urls", "(", "path", ",", "DATA_DIRECTORY", ")", ")", "text", "=", "'text <tag a=\"/static/js/capa/protex/protex.nocache.js?raw\"/><div class=\"'", "assert_equals", "(", "path", ",", "replace_static_urls", "(", "path", ",", "text", ")", ")" ]
make sure replace_static_urls leaves alone things that end in .
train
false
24,458
def _repr_pairs(dump, tag, sequence, flow_style=None): import yaml value = [] node = yaml.SequenceNode(tag, value, flow_style=flow_style) if (dump.alias_key is not None): dump.represented_objects[dump.alias_key] = node best_style = True for (key, val) in sequence: item = dump.represent_data({key: val}) if (not (isinstance(item, yaml.ScalarNode) and (not item.style))): best_style = False value.append(item) if (flow_style is None): if (dump.default_flow_style is not None): node.flow_style = dump.default_flow_style else: node.flow_style = best_style return node
[ "def", "_repr_pairs", "(", "dump", ",", "tag", ",", "sequence", ",", "flow_style", "=", "None", ")", ":", "import", "yaml", "value", "=", "[", "]", "node", "=", "yaml", ".", "SequenceNode", "(", "tag", ",", "value", ",", "flow_style", "=", "flow_style", ")", "if", "(", "dump", ".", "alias_key", "is", "not", "None", ")", ":", "dump", ".", "represented_objects", "[", "dump", ".", "alias_key", "]", "=", "node", "best_style", "=", "True", "for", "(", "key", ",", "val", ")", "in", "sequence", ":", "item", "=", "dump", ".", "represent_data", "(", "{", "key", ":", "val", "}", ")", "if", "(", "not", "(", "isinstance", "(", "item", ",", "yaml", ".", "ScalarNode", ")", "and", "(", "not", "item", ".", "style", ")", ")", ")", ":", "best_style", "=", "False", "value", ".", "append", "(", "item", ")", "if", "(", "flow_style", "is", "None", ")", ":", "if", "(", "dump", ".", "default_flow_style", "is", "not", "None", ")", ":", "node", ".", "flow_style", "=", "dump", ".", "default_flow_style", "else", ":", "node", ".", "flow_style", "=", "best_style", "return", "node" ]
this is the same code as baserepresenter .
train
false
24,459
def _get_moto_version(): try: return LooseVersion(moto.__version__) except AttributeError: try: return LooseVersion(pkg_resources.get_distribution('moto').version) except DistributionNotFound: return False
[ "def", "_get_moto_version", "(", ")", ":", "try", ":", "return", "LooseVersion", "(", "moto", ".", "__version__", ")", "except", "AttributeError", ":", "try", ":", "return", "LooseVersion", "(", "pkg_resources", ".", "get_distribution", "(", "'moto'", ")", ".", "version", ")", "except", "DistributionNotFound", ":", "return", "False" ]
returns the moto version .
train
false
24,460
def get_activity(request, n_entries=30, cutoff_hours=10): cutoff_dt = (now() - datetime.timedelta(hours=cutoff_hours)) activities = [] for module in get_modules(): for activity in islice(module.get_activity(request, cutoff=cutoff_dt), n_entries): heappush(activities, ((- time.mktime(activity.datetime.timetuple())), activity)) out = [] while (activities and (len(out) < n_entries)): out.append(heappop(activities)[1]) return out
[ "def", "get_activity", "(", "request", ",", "n_entries", "=", "30", ",", "cutoff_hours", "=", "10", ")", ":", "cutoff_dt", "=", "(", "now", "(", ")", "-", "datetime", ".", "timedelta", "(", "hours", "=", "cutoff_hours", ")", ")", "activities", "=", "[", "]", "for", "module", "in", "get_modules", "(", ")", ":", "for", "activity", "in", "islice", "(", "module", ".", "get_activity", "(", "request", ",", "cutoff", "=", "cutoff_dt", ")", ",", "n_entries", ")", ":", "heappush", "(", "activities", ",", "(", "(", "-", "time", ".", "mktime", "(", "activity", ".", "datetime", ".", "timetuple", "(", ")", ")", ")", ",", "activity", ")", ")", "out", "=", "[", "]", "while", "(", "activities", "and", "(", "len", "(", "out", ")", "<", "n_entries", ")", ")", ":", "out", ".", "append", "(", "heappop", "(", "activities", ")", "[", "1", "]", ")", "return", "out" ]
get activity objects from all modules as a list in latest-first order .
train
false
24,462
def git(registry, xml_parent, data): mappings = [('push-merge', 'pushMerge', False), ('push-only-if-success', 'pushOnlyIfSuccess', True), ('force-push', 'forcePush', False)] tag_mappings = [('remote', 'targetRepoName', 'origin'), ('name', 'tagName', None), ('message', 'tagMessage', ''), ('create-tag', 'createTag', False), ('update-tag', 'updateTag', False)] branch_mappings = [('remote', 'targetRepoName', 'origin'), ('name', 'branchName', None)] note_mappings = [('remote', 'targetRepoName', 'origin'), ('message', 'noteMsg', None), ('namespace', 'noteNamespace', 'master'), ('replace-note', 'noteReplace', False)] top = XML.SubElement(xml_parent, 'hudson.plugins.git.GitPublisher') XML.SubElement(top, 'configVersion').text = '2' helpers.convert_mapping_to_xml(top, data, mappings, fail_required=True) tags = data.get('tags', []) if tags: xml_tags = XML.SubElement(top, 'tagsToPush') for tag in tags: xml_tag = XML.SubElement(xml_tags, 'hudson.plugins.git.GitPublisher_-TagToPush') helpers.convert_mapping_to_xml(xml_tag, tag['tag'], tag_mappings, fail_required=True) branches = data.get('branches', []) if branches: xml_branches = XML.SubElement(top, 'branchesToPush') for branch in branches: xml_branch = XML.SubElement(xml_branches, 'hudson.plugins.git.GitPublisher_-BranchToPush') helpers.convert_mapping_to_xml(xml_branch, branch['branch'], branch_mappings, fail_required=True) notes = data.get('notes', []) if notes: xml_notes = XML.SubElement(top, 'notesToPush') for note in notes: xml_note = XML.SubElement(xml_notes, 'hudson.plugins.git.GitPublisher_-NoteToPush') helpers.convert_mapping_to_xml(xml_note, note['note'], note_mappings, fail_required=True)
[ "def", "git", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "mappings", "=", "[", "(", "'push-merge'", ",", "'pushMerge'", ",", "False", ")", ",", "(", "'push-only-if-success'", ",", "'pushOnlyIfSuccess'", ",", "True", ")", ",", "(", "'force-push'", ",", "'forcePush'", ",", "False", ")", "]", "tag_mappings", "=", "[", "(", "'remote'", ",", "'targetRepoName'", ",", "'origin'", ")", ",", "(", "'name'", ",", "'tagName'", ",", "None", ")", ",", "(", "'message'", ",", "'tagMessage'", ",", "''", ")", ",", "(", "'create-tag'", ",", "'createTag'", ",", "False", ")", ",", "(", "'update-tag'", ",", "'updateTag'", ",", "False", ")", "]", "branch_mappings", "=", "[", "(", "'remote'", ",", "'targetRepoName'", ",", "'origin'", ")", ",", "(", "'name'", ",", "'branchName'", ",", "None", ")", "]", "note_mappings", "=", "[", "(", "'remote'", ",", "'targetRepoName'", ",", "'origin'", ")", ",", "(", "'message'", ",", "'noteMsg'", ",", "None", ")", ",", "(", "'namespace'", ",", "'noteNamespace'", ",", "'master'", ")", ",", "(", "'replace-note'", ",", "'noteReplace'", ",", "False", ")", "]", "top", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.git.GitPublisher'", ")", "XML", ".", "SubElement", "(", "top", ",", "'configVersion'", ")", ".", "text", "=", "'2'", "helpers", ".", "convert_mapping_to_xml", "(", "top", ",", "data", ",", "mappings", ",", "fail_required", "=", "True", ")", "tags", "=", "data", ".", "get", "(", "'tags'", ",", "[", "]", ")", "if", "tags", ":", "xml_tags", "=", "XML", ".", "SubElement", "(", "top", ",", "'tagsToPush'", ")", "for", "tag", "in", "tags", ":", "xml_tag", "=", "XML", ".", "SubElement", "(", "xml_tags", ",", "'hudson.plugins.git.GitPublisher_-TagToPush'", ")", "helpers", ".", "convert_mapping_to_xml", "(", "xml_tag", ",", "tag", "[", "'tag'", "]", ",", "tag_mappings", ",", "fail_required", "=", "True", ")", "branches", "=", "data", ".", "get", "(", "'branches'", ",", "[", "]", ")", "if", "branches", ":", 
"xml_branches", "=", "XML", ".", "SubElement", "(", "top", ",", "'branchesToPush'", ")", "for", "branch", "in", "branches", ":", "xml_branch", "=", "XML", ".", "SubElement", "(", "xml_branches", ",", "'hudson.plugins.git.GitPublisher_-BranchToPush'", ")", "helpers", ".", "convert_mapping_to_xml", "(", "xml_branch", ",", "branch", "[", "'branch'", "]", ",", "branch_mappings", ",", "fail_required", "=", "True", ")", "notes", "=", "data", ".", "get", "(", "'notes'", ",", "[", "]", ")", "if", "notes", ":", "xml_notes", "=", "XML", ".", "SubElement", "(", "top", ",", "'notesToPush'", ")", "for", "note", "in", "notes", ":", "xml_note", "=", "XML", ".", "SubElement", "(", "xml_notes", ",", "'hudson.plugins.git.GitPublisher_-NoteToPush'", ")", "helpers", ".", "convert_mapping_to_xml", "(", "xml_note", ",", "note", "[", "'note'", "]", ",", "note_mappings", ",", "fail_required", "=", "True", ")" ]
most git commands play nicer without a tty .
train
false
24,463
def test_dot22scalar_cast(): A = T.dmatrix() for scalar_int_type in T.int_dtypes: y = T.scalar(dtype=scalar_int_type) f = theano.function([A, y], (T.dot(A, A) * y), mode=mode_blas_opt) assert (_dot22scalar in [x.op for x in f.maker.fgraph.toposort()]) A = T.fmatrix() for scalar_int_type in T.int_dtypes: y = T.scalar(dtype=scalar_int_type) f = theano.function([A, y], (T.dot(A, A) * y), mode=mode_blas_opt) if (scalar_int_type in ['int32', 'int64']): assert (_dot22 in [x.op for x in f.maker.fgraph.toposort()]) else: assert (_dot22scalar in [x.op for x in f.maker.fgraph.toposort()])
[ "def", "test_dot22scalar_cast", "(", ")", ":", "A", "=", "T", ".", "dmatrix", "(", ")", "for", "scalar_int_type", "in", "T", ".", "int_dtypes", ":", "y", "=", "T", ".", "scalar", "(", "dtype", "=", "scalar_int_type", ")", "f", "=", "theano", ".", "function", "(", "[", "A", ",", "y", "]", ",", "(", "T", ".", "dot", "(", "A", ",", "A", ")", "*", "y", ")", ",", "mode", "=", "mode_blas_opt", ")", "assert", "(", "_dot22scalar", "in", "[", "x", ".", "op", "for", "x", "in", "f", ".", "maker", ".", "fgraph", ".", "toposort", "(", ")", "]", ")", "A", "=", "T", ".", "fmatrix", "(", ")", "for", "scalar_int_type", "in", "T", ".", "int_dtypes", ":", "y", "=", "T", ".", "scalar", "(", "dtype", "=", "scalar_int_type", ")", "f", "=", "theano", ".", "function", "(", "[", "A", ",", "y", "]", ",", "(", "T", ".", "dot", "(", "A", ",", "A", ")", "*", "y", ")", ",", "mode", "=", "mode_blas_opt", ")", "if", "(", "scalar_int_type", "in", "[", "'int32'", ",", "'int64'", "]", ")", ":", "assert", "(", "_dot22", "in", "[", "x", ".", "op", "for", "x", "in", "f", ".", "maker", ".", "fgraph", ".", "toposort", "(", ")", "]", ")", "else", ":", "assert", "(", "_dot22scalar", "in", "[", "x", ".", "op", "for", "x", "in", "f", ".", "maker", ".", "fgraph", ".", "toposort", "(", ")", "]", ")" ]
test that in dot22_to_dot22scalar we properly cast integers to floats .
train
false
24,465
def guess_scheme(url): parts = urlparse(url) if parts.scheme: return url if re.match('^ # start with...\n (\n \\. # ...a single dot,\n (\n \\. | [^/\\.]+ # optionally followed by\n )? # either a second dot or some characters\n )? # optional match of ".", ".." or ".blabla"\n / # at least one "/" for a file path,\n . # and something after the "/"\n ', parts.path, flags=re.VERBOSE): return any_to_uri(url) else: return add_http_if_no_scheme(url)
[ "def", "guess_scheme", "(", "url", ")", ":", "parts", "=", "urlparse", "(", "url", ")", "if", "parts", ".", "scheme", ":", "return", "url", "if", "re", ".", "match", "(", "'^ # start with...\\n (\\n \\\\. # ...a single dot,\\n (\\n \\\\. | [^/\\\\.]+ # optionally followed by\\n )? # either a second dot or some characters\\n )? # optional match of \".\", \"..\" or \".blabla\"\\n / # at least one \"/\" for a file path,\\n . # and something after the \"/\"\\n '", ",", "parts", ".", "path", ",", "flags", "=", "re", ".", "VERBOSE", ")", ":", "return", "any_to_uri", "(", "url", ")", "else", ":", "return", "add_http_if_no_scheme", "(", "url", ")" ]
return a guess for whether wsgi .
train
false
24,466
@depends(element_container=const(None)) def get_element(obj, element_container=None): if isinstance(obj, basestring): if (element_container is None): raise RuntimeError('Control elements can only be accessed by name, if an element container is available') match = _element_list_access_expr.match(obj) if match: name = match.group(1) index = int(match.group(2)) obj = getattr(element_container, name)[index] else: obj = getattr(element_container, obj) return obj
[ "@", "depends", "(", "element_container", "=", "const", "(", "None", ")", ")", "def", "get_element", "(", "obj", ",", "element_container", "=", "None", ")", ":", "if", "isinstance", "(", "obj", ",", "basestring", ")", ":", "if", "(", "element_container", "is", "None", ")", ":", "raise", "RuntimeError", "(", "'Control elements can only be accessed by name, if an element container is available'", ")", "match", "=", "_element_list_access_expr", ".", "match", "(", "obj", ")", "if", "match", ":", "name", "=", "match", ".", "group", "(", "1", ")", "index", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "obj", "=", "getattr", "(", "element_container", ",", "name", ")", "[", "index", "]", "else", ":", "obj", "=", "getattr", "(", "element_container", ",", "obj", ")", "return", "obj" ]
function for receiving a control element by name .
train
false
24,467
def diff_prettyHtml(self, diffs): html = [] ct = 1 for (op, data) in diffs: text = data.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\n', '<br>') if (op == self.DIFF_INSERT): html.append(('<ins style="background:#e6ffe6;">%s</ins>' % text)) elif (op == self.DIFF_DELETE): html.append(('<del style="background:#ffe6e6;">%s</del>' % text)) elif (op == self.DIFF_EQUAL): html.append(('<span>%s</span>' % text)) return ''.join(html)
[ "def", "diff_prettyHtml", "(", "self", ",", "diffs", ")", ":", "html", "=", "[", "]", "ct", "=", "1", "for", "(", "op", ",", "data", ")", "in", "diffs", ":", "text", "=", "data", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", ".", "replace", "(", "'\\n'", ",", "'<br>'", ")", "if", "(", "op", "==", "self", ".", "DIFF_INSERT", ")", ":", "html", ".", "append", "(", "(", "'<ins style=\"background:#e6ffe6;\">%s</ins>'", "%", "text", ")", ")", "elif", "(", "op", "==", "self", ".", "DIFF_DELETE", ")", ":", "html", ".", "append", "(", "(", "'<del style=\"background:#ffe6e6;\">%s</del>'", "%", "text", ")", ")", "elif", "(", "op", "==", "self", ".", "DIFF_EQUAL", ")", ":", "html", ".", "append", "(", "(", "'<span>%s</span>'", "%", "text", ")", ")", "return", "''", ".", "join", "(", "html", ")" ]
convert a diff array into a pretty html report .
train
true
24,468
def random_unif(rng, dim, low=1, high=10): return rng.uniform(low, high, size=dim).astype(theano.config.floatX)
[ "def", "random_unif", "(", "rng", ",", "dim", ",", "low", "=", "1", ",", "high", "=", "10", ")", ":", "return", "rng", ".", "uniform", "(", "low", ",", "high", ",", "size", "=", "dim", ")", ".", "astype", "(", "theano", ".", "config", ".", "floatX", ")" ]
generate some floatx uniform random numbers .
train
false
24,469
def canonicClassString(x): if isinstance(x, object): return repr(x.__class__).split("'")[1] else: return repr(x.__class__)
[ "def", "canonicClassString", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "object", ")", ":", "return", "repr", "(", "x", ".", "__class__", ")", ".", "split", "(", "\"'\"", ")", "[", "1", "]", "else", ":", "return", "repr", "(", "x", ".", "__class__", ")" ]
the __class__ attribute changed from old-style to new-style classes .
train
false
24,470
def map_input(data, tables=None): result = [] for char in data: replacement = None for mapping in tables: replacement = mapping(char) if (replacement is not None): break if (replacement is None): replacement = char result.append(replacement) return u''.join(result)
[ "def", "map_input", "(", "data", ",", "tables", "=", "None", ")", ":", "result", "=", "[", "]", "for", "char", "in", "data", ":", "replacement", "=", "None", "for", "mapping", "in", "tables", ":", "replacement", "=", "mapping", "(", "char", ")", "if", "(", "replacement", "is", "not", "None", ")", ":", "break", "if", "(", "replacement", "is", "None", ")", ":", "replacement", "=", "char", "result", ".", "append", "(", "replacement", ")", "return", "u''", ".", "join", "(", "result", ")" ]
each character in the input stream must be checked against a mapping table .
train
false
24,471
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) @utils.arg('attachment_id', metavar='<volume>', help=_('ID of the volume to detach.')) def do_volume_detach(cs, args): cs.volumes.delete_server_volume(_find_server(cs, args.server).id, args.attachment_id)
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "@", "utils", ".", "arg", "(", "'attachment_id'", ",", "metavar", "=", "'<volume>'", ",", "help", "=", "_", "(", "'ID of the volume to detach.'", ")", ")", "def", "do_volume_detach", "(", "cs", ",", "args", ")", ":", "cs", ".", "volumes", ".", "delete_server_volume", "(", "_find_server", "(", "cs", ",", "args", ".", "server", ")", ".", "id", ",", "args", ".", "attachment_id", ")" ]
detach a volume from a server .
train
false
24,472
def AddArch(output, arch): (output, extension) = os.path.splitext(output) return ('%s.%s%s' % (output, arch, extension))
[ "def", "AddArch", "(", "output", ",", "arch", ")", ":", "(", "output", ",", "extension", ")", "=", "os", ".", "path", ".", "splitext", "(", "output", ")", "return", "(", "'%s.%s%s'", "%", "(", "output", ",", "arch", ",", "extension", ")", ")" ]
adds an arch string to an output path .
train
false
24,473
def _ads_email(body, from_name, kind): Email.handler.add_to_queue(None, g.ads_email, from_name, g.ads_email, kind, body=body)
[ "def", "_ads_email", "(", "body", ",", "from_name", ",", "kind", ")", ":", "Email", ".", "handler", ".", "add_to_queue", "(", "None", ",", "g", ".", "ads_email", ",", "from_name", ",", "g", ".", "ads_email", ",", "kind", ",", "body", "=", "body", ")" ]
for sending email to ads .
train
false
24,474
def convert_to_string(msg): module_object = msg.module if msg.obj: module_object += ('.%s' % msg.obj) return ('(%s) %s [%d]: %s' % (msg.C, module_object, msg.line, msg.msg))
[ "def", "convert_to_string", "(", "msg", ")", ":", "module_object", "=", "msg", ".", "module", "if", "msg", ".", "obj", ":", "module_object", "+=", "(", "'.%s'", "%", "msg", ".", "obj", ")", "return", "(", "'(%s) %s [%d]: %s'", "%", "(", "msg", ".", "C", ",", "module_object", ",", "msg", ".", "line", ",", "msg", ".", "msg", ")", ")" ]
make a string representation of a message .
train
false
24,476
def astar_path_length(G, source, target, heuristic=None, weight='weight'): if ((source not in G) or (target not in G)): msg = 'Either source {} or target {} is not in G' raise nx.NodeNotFound(msg.format(source, target)) path = astar_path(G, source, target, heuristic, weight) return sum((G[u][v].get(weight, 1) for (u, v) in zip(path[:(-1)], path[1:])))
[ "def", "astar_path_length", "(", "G", ",", "source", ",", "target", ",", "heuristic", "=", "None", ",", "weight", "=", "'weight'", ")", ":", "if", "(", "(", "source", "not", "in", "G", ")", "or", "(", "target", "not", "in", "G", ")", ")", ":", "msg", "=", "'Either source {} or target {} is not in G'", "raise", "nx", ".", "NodeNotFound", "(", "msg", ".", "format", "(", "source", ",", "target", ")", ")", "path", "=", "astar_path", "(", "G", ",", "source", ",", "target", ",", "heuristic", ",", "weight", ")", "return", "sum", "(", "(", "G", "[", "u", "]", "[", "v", "]", ".", "get", "(", "weight", ",", "1", ")", "for", "(", "u", ",", "v", ")", "in", "zip", "(", "path", "[", ":", "(", "-", "1", ")", "]", ",", "path", "[", "1", ":", "]", ")", ")", ")" ]
return the length of the shortest path between source and target using the a* algorithm .
train
false
24,478
def MinimalQualParser(infile, value_cast_f=int, full_header=False): for rec in FastaFinder(infile): curr_id = rec[0][1:] curr_qual = ' '.join(rec[1:]) try: parts = asarray(curr_qual.split(), dtype=value_cast_f) except ValueError: raise QiimeParseError('Invalid qual file. Check the format of the qual files.') if full_header: curr_pid = curr_id else: curr_pid = curr_id.split()[0] (yield (curr_pid, parts))
[ "def", "MinimalQualParser", "(", "infile", ",", "value_cast_f", "=", "int", ",", "full_header", "=", "False", ")", ":", "for", "rec", "in", "FastaFinder", "(", "infile", ")", ":", "curr_id", "=", "rec", "[", "0", "]", "[", "1", ":", "]", "curr_qual", "=", "' '", ".", "join", "(", "rec", "[", "1", ":", "]", ")", "try", ":", "parts", "=", "asarray", "(", "curr_qual", ".", "split", "(", ")", ",", "dtype", "=", "value_cast_f", ")", "except", "ValueError", ":", "raise", "QiimeParseError", "(", "'Invalid qual file. Check the format of the qual files.'", ")", "if", "full_header", ":", "curr_pid", "=", "curr_id", "else", ":", "curr_pid", "=", "curr_id", ".", "split", "(", ")", "[", "0", "]", "(", "yield", "(", "curr_pid", ",", "parts", ")", ")" ]
yield quality scores .
train
false
24,479
def create_keyspace_simple(name, replication_factor, durable_writes=True, connections=None): _create_keyspace(name, durable_writes, 'SimpleStrategy', {'replication_factor': replication_factor}, connections=connections)
[ "def", "create_keyspace_simple", "(", "name", ",", "replication_factor", ",", "durable_writes", "=", "True", ",", "connections", "=", "None", ")", ":", "_create_keyspace", "(", "name", ",", "durable_writes", ",", "'SimpleStrategy'", ",", "{", "'replication_factor'", ":", "replication_factor", "}", ",", "connections", "=", "connections", ")" ]
creates a keyspace with simplestrategy for replica placement if the keyspace already exists .
train
true
24,481
def parse_fuzzy_compare(source): if source.match('<='): return True elif source.match('<'): return False else: return None
[ "def", "parse_fuzzy_compare", "(", "source", ")", ":", "if", "source", ".", "match", "(", "'<='", ")", ":", "return", "True", "elif", "source", ".", "match", "(", "'<'", ")", ":", "return", "False", "else", ":", "return", "None" ]
parses a cost comparator .
train
false
24,482
def _format_is_iso(f): iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format excluded_formats = ['%Y%m%d', '%Y%m', '%Y'] for date_sep in [' ', '/', '\\', '-', '.', '']: for time_sep in [' ', 'T']: if (iso_template(date_sep=date_sep, time_sep=time_sep).startswith(f) and (f not in excluded_formats)): return True return False
[ "def", "_format_is_iso", "(", "f", ")", ":", "iso_template", "=", "'%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'", ".", "format", "excluded_formats", "=", "[", "'%Y%m%d'", ",", "'%Y%m'", ",", "'%Y'", "]", "for", "date_sep", "in", "[", "' '", ",", "'/'", ",", "'\\\\'", ",", "'-'", ",", "'.'", ",", "''", "]", ":", "for", "time_sep", "in", "[", "' '", ",", "'T'", "]", ":", "if", "(", "iso_template", "(", "date_sep", "=", "date_sep", ",", "time_sep", "=", "time_sep", ")", ".", "startswith", "(", "f", ")", "and", "(", "f", "not", "in", "excluded_formats", ")", ")", ":", "return", "True", "return", "False" ]
does format match the iso8601 set that can be handled by the c parser? generally of form yyyy-mm-ddthh:mm:ss - date separator can be different but must be consistent .
train
false
24,483
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise module .
train
false
24,484
def analyze_name(name, canonical=None): original_n = name name = name.strip() res = {} imdbIndex = '' opi = name.rfind('(') cpi = name.rfind(')') if ((opi not in ((-1), 0)) and (cpi > opi)): if re_index.match(name[opi:(cpi + 1)]): imdbIndex = name[(opi + 1):cpi] name = name[:opi].rstrip() else: name = re_parentheses.sub('', name).strip() if (not name): raise IMDbParserError(('invalid name: "%s"' % original_n)) if (canonical is not None): if canonical: name = canonicalName(name) else: name = normalizeName(name) res['name'] = name if imdbIndex: res['imdbIndex'] = imdbIndex return res
[ "def", "analyze_name", "(", "name", ",", "canonical", "=", "None", ")", ":", "original_n", "=", "name", "name", "=", "name", ".", "strip", "(", ")", "res", "=", "{", "}", "imdbIndex", "=", "''", "opi", "=", "name", ".", "rfind", "(", "'('", ")", "cpi", "=", "name", ".", "rfind", "(", "')'", ")", "if", "(", "(", "opi", "not", "in", "(", "(", "-", "1", ")", ",", "0", ")", ")", "and", "(", "cpi", ">", "opi", ")", ")", ":", "if", "re_index", ".", "match", "(", "name", "[", "opi", ":", "(", "cpi", "+", "1", ")", "]", ")", ":", "imdbIndex", "=", "name", "[", "(", "opi", "+", "1", ")", ":", "cpi", "]", "name", "=", "name", "[", ":", "opi", "]", ".", "rstrip", "(", ")", "else", ":", "name", "=", "re_parentheses", ".", "sub", "(", "''", ",", "name", ")", ".", "strip", "(", ")", "if", "(", "not", "name", ")", ":", "raise", "IMDbParserError", "(", "(", "'invalid name: \"%s\"'", "%", "original_n", ")", ")", "if", "(", "canonical", "is", "not", "None", ")", ":", "if", "canonical", ":", "name", "=", "canonicalName", "(", "name", ")", "else", ":", "name", "=", "normalizeName", "(", "name", ")", "res", "[", "'name'", "]", "=", "name", "if", "imdbIndex", ":", "res", "[", "'imdbIndex'", "]", "=", "imdbIndex", "return", "res" ]
return a dictionary with the name and the optional imdbindex keys .
train
false
24,485
def pyopenssl_load_certificate(data): openssl_errors = [] for file_type in (OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1): try: return (OpenSSL.crypto.load_certificate(file_type, data), file_type) except OpenSSL.crypto.Error as error: openssl_errors.append(error) raise errors.Error('Unable to load: {0}'.format(','.join((str(error) for error in openssl_errors))))
[ "def", "pyopenssl_load_certificate", "(", "data", ")", ":", "openssl_errors", "=", "[", "]", "for", "file_type", "in", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "OpenSSL", ".", "crypto", ".", "FILETYPE_ASN1", ")", ":", "try", ":", "return", "(", "OpenSSL", ".", "crypto", ".", "load_certificate", "(", "file_type", ",", "data", ")", ",", "file_type", ")", "except", "OpenSSL", ".", "crypto", ".", "Error", "as", "error", ":", "openssl_errors", ".", "append", "(", "error", ")", "raise", "errors", ".", "Error", "(", "'Unable to load: {0}'", ".", "format", "(", "','", ".", "join", "(", "(", "str", "(", "error", ")", "for", "error", "in", "openssl_errors", ")", ")", ")", ")" ]
load pem/der certificate .
train
false
24,486
def _GetEntityGroup(ref): entity_group = entity_pb.Reference() entity_group.CopyFrom(ref) assert (entity_group.path().element_list()[0].has_id() or entity_group.path().element_list()[0].has_name()) del entity_group.path().element_list()[1:] return entity_group
[ "def", "_GetEntityGroup", "(", "ref", ")", ":", "entity_group", "=", "entity_pb", ".", "Reference", "(", ")", "entity_group", ".", "CopyFrom", "(", "ref", ")", "assert", "(", "entity_group", ".", "path", "(", ")", ".", "element_list", "(", ")", "[", "0", "]", ".", "has_id", "(", ")", "or", "entity_group", ".", "path", "(", ")", ".", "element_list", "(", ")", "[", "0", "]", ".", "has_name", "(", ")", ")", "del", "entity_group", ".", "path", "(", ")", ".", "element_list", "(", ")", "[", "1", ":", "]", "return", "entity_group" ]
returns the entity group key for the given reference .
train
false
24,487
def is_formatter_valid(formatter_name): try: formatter_class = select_formatter_class(formatter_name) return issubclass(formatter_class, Formatter) except (LookupError, ImportError, ValueError): return False
[ "def", "is_formatter_valid", "(", "formatter_name", ")", ":", "try", ":", "formatter_class", "=", "select_formatter_class", "(", "formatter_name", ")", "return", "issubclass", "(", "formatter_class", ",", "Formatter", ")", "except", "(", "LookupError", ",", "ImportError", ",", "ValueError", ")", ":", "return", "False" ]
checks if the formatter is known or loadable .
train
false
24,488
def rollback(): connection._rollback() set_clean()
[ "def", "rollback", "(", ")", ":", "connection", ".", "_rollback", "(", ")", "set_clean", "(", ")" ]
reverts project state to the last deploy .
train
false
24,490
@pytest.fixture(scope='module') def static_file_path(static_file_directory): return os.path.join(static_file_directory, 'test.file')
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'module'", ")", "def", "static_file_path", "(", "static_file_directory", ")", ":", "return", "os", ".", "path", ".", "join", "(", "static_file_directory", ",", "'test.file'", ")" ]
the path to the static file that we want to serve .
train
false
24,491
def mount_share(mount_path, export_path, export_type, options=None): utils.execute('mkdir', '-p', mount_path) mount_cmd = ['mount', '-t', export_type] if (options is not None): mount_cmd.extend(options) mount_cmd.extend([export_path, mount_path]) try: utils.execute(run_as_root=True, *mount_cmd) except processutils.ProcessExecutionError as exc: if ('Device or resource busy' in six.text_type(exc)): LOG.warning(_LW('%s is already mounted'), export_path) else: raise
[ "def", "mount_share", "(", "mount_path", ",", "export_path", ",", "export_type", ",", "options", "=", "None", ")", ":", "utils", ".", "execute", "(", "'mkdir'", ",", "'-p'", ",", "mount_path", ")", "mount_cmd", "=", "[", "'mount'", ",", "'-t'", ",", "export_type", "]", "if", "(", "options", "is", "not", "None", ")", ":", "mount_cmd", ".", "extend", "(", "options", ")", "mount_cmd", ".", "extend", "(", "[", "export_path", ",", "mount_path", "]", ")", "try", ":", "utils", ".", "execute", "(", "run_as_root", "=", "True", ",", "*", "mount_cmd", ")", "except", "processutils", ".", "ProcessExecutionError", "as", "exc", ":", "if", "(", "'Device or resource busy'", "in", "six", ".", "text_type", "(", "exc", ")", ")", ":", "LOG", ".", "warning", "(", "_LW", "(", "'%s is already mounted'", ")", ",", "export_path", ")", "else", ":", "raise" ]
mount a remote export to mount_path .
train
false
24,493
def track_description_list(head): r = [] if head: item = head while item: item = item.contents r.append((item.id, item.name)) item = item.next try: libvlc_track_description_release(head) except NameError: libvlc_track_description_list_release(head) return r
[ "def", "track_description_list", "(", "head", ")", ":", "r", "=", "[", "]", "if", "head", ":", "item", "=", "head", "while", "item", ":", "item", "=", "item", ".", "contents", "r", ".", "append", "(", "(", "item", ".", "id", ",", "item", ".", "name", ")", ")", "item", "=", "item", ".", "next", "try", ":", "libvlc_track_description_release", "(", "head", ")", "except", "NameError", ":", "libvlc_track_description_list_release", "(", "head", ")", "return", "r" ]
convert a trackdescription linked list to a python list .
train
true
24,495
def port_usage(port): global PORT_USES if (PORT_USES is None): config = conf.Config() config_path = os.path.join(os.path.dirname(__file__), 'ports.cfg') try: config.load(config_path) port_uses = {} for (key, value) in config.get('port', {}).items(): if key.isdigit(): port_uses[int(key)] = value elif ('-' in key): (min_port, max_port) = key.split('-', 1) for port_entry in range(int(min_port), (int(max_port) + 1)): port_uses[port_entry] = value else: raise ValueError(("'%s' is an invalid key" % key)) PORT_USES = port_uses except Exception as exc: log.warn(("BUG: stem failed to load its internal port descriptions from '%s': %s" % (config_path, exc))) if (not PORT_USES): return None if (isinstance(port, str) and port.isdigit()): port = int(port) return PORT_USES.get(port)
[ "def", "port_usage", "(", "port", ")", ":", "global", "PORT_USES", "if", "(", "PORT_USES", "is", "None", ")", ":", "config", "=", "conf", ".", "Config", "(", ")", "config_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'ports.cfg'", ")", "try", ":", "config", ".", "load", "(", "config_path", ")", "port_uses", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "config", ".", "get", "(", "'port'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "key", ".", "isdigit", "(", ")", ":", "port_uses", "[", "int", "(", "key", ")", "]", "=", "value", "elif", "(", "'-'", "in", "key", ")", ":", "(", "min_port", ",", "max_port", ")", "=", "key", ".", "split", "(", "'-'", ",", "1", ")", "for", "port_entry", "in", "range", "(", "int", "(", "min_port", ")", ",", "(", "int", "(", "max_port", ")", "+", "1", ")", ")", ":", "port_uses", "[", "port_entry", "]", "=", "value", "else", ":", "raise", "ValueError", "(", "(", "\"'%s' is an invalid key\"", "%", "key", ")", ")", "PORT_USES", "=", "port_uses", "except", "Exception", "as", "exc", ":", "log", ".", "warn", "(", "(", "\"BUG: stem failed to load its internal port descriptions from '%s': %s\"", "%", "(", "config_path", ",", "exc", ")", ")", ")", "if", "(", "not", "PORT_USES", ")", ":", "return", "None", "if", "(", "isinstance", "(", "port", ",", "str", ")", "and", "port", ".", "isdigit", "(", ")", ")", ":", "port", "=", "int", "(", "port", ")", "return", "PORT_USES", ".", "get", "(", "port", ")" ]
provides the common use of a given port .
train
false
24,496
@requires_auth('resource') def schema_item_endpoint(resource): resource_config = app.config['DOMAIN'].get(resource) if ((not resource_config) or (resource_config.get('internal_resource') is True)): return abort(404) return send_response(None, (resource_config['schema'],))
[ "@", "requires_auth", "(", "'resource'", ")", "def", "schema_item_endpoint", "(", "resource", ")", ":", "resource_config", "=", "app", ".", "config", "[", "'DOMAIN'", "]", ".", "get", "(", "resource", ")", "if", "(", "(", "not", "resource_config", ")", "or", "(", "resource_config", ".", "get", "(", "'internal_resource'", ")", "is", "True", ")", ")", ":", "return", "abort", "(", "404", ")", "return", "send_response", "(", "None", ",", "(", "resource_config", "[", "'schema'", "]", ",", ")", ")" ]
this endpoint is active when schema_endpoint != none .
train
false
24,497
def test_dotted_list(): entry = tokenize('(a b c . (d . e))')[0] assert (entry == HyCons(HySymbol('a'), HyCons(HySymbol('b'), HyCons(HySymbol('c'), HyCons(HySymbol('d'), HySymbol('e'))))))
[ "def", "test_dotted_list", "(", ")", ":", "entry", "=", "tokenize", "(", "'(a b c . (d . e))'", ")", "[", "0", "]", "assert", "(", "entry", "==", "HyCons", "(", "HySymbol", "(", "'a'", ")", ",", "HyCons", "(", "HySymbol", "(", "'b'", ")", ",", "HyCons", "(", "HySymbol", "(", "'c'", ")", ",", "HyCons", "(", "HySymbol", "(", "'d'", ")", ",", "HySymbol", "(", "'e'", ")", ")", ")", ")", ")", ")" ]
check that dotted lists get tokenized correctly .
train
false
24,500
def _get_cmd_completions(include_hidden, include_aliases, prefix=''): assert cmdutils.cmd_dict cmdlist = [] cmd_to_keys = objreg.get('key-config').get_reverse_bindings_for('normal') for obj in set(cmdutils.cmd_dict.values()): hide_debug = (obj.debug and (not objreg.get('args').debug)) hide_hidden = (obj.hide and (not include_hidden)) if (not (hide_debug or hide_hidden or obj.deprecated)): bindings = ', '.join(cmd_to_keys.get(obj.name, [])) cmdlist.append(((prefix + obj.name), obj.desc, bindings)) if include_aliases: for (name, cmd) in config.section('aliases').items(): bindings = ', '.join(cmd_to_keys.get(name, [])) cmdlist.append((name, "Alias for '{}'".format(cmd), bindings)) return cmdlist
[ "def", "_get_cmd_completions", "(", "include_hidden", ",", "include_aliases", ",", "prefix", "=", "''", ")", ":", "assert", "cmdutils", ".", "cmd_dict", "cmdlist", "=", "[", "]", "cmd_to_keys", "=", "objreg", ".", "get", "(", "'key-config'", ")", ".", "get_reverse_bindings_for", "(", "'normal'", ")", "for", "obj", "in", "set", "(", "cmdutils", ".", "cmd_dict", ".", "values", "(", ")", ")", ":", "hide_debug", "=", "(", "obj", ".", "debug", "and", "(", "not", "objreg", ".", "get", "(", "'args'", ")", ".", "debug", ")", ")", "hide_hidden", "=", "(", "obj", ".", "hide", "and", "(", "not", "include_hidden", ")", ")", "if", "(", "not", "(", "hide_debug", "or", "hide_hidden", "or", "obj", ".", "deprecated", ")", ")", ":", "bindings", "=", "', '", ".", "join", "(", "cmd_to_keys", ".", "get", "(", "obj", ".", "name", ",", "[", "]", ")", ")", "cmdlist", ".", "append", "(", "(", "(", "prefix", "+", "obj", ".", "name", ")", ",", "obj", ".", "desc", ",", "bindings", ")", ")", "if", "include_aliases", ":", "for", "(", "name", ",", "cmd", ")", "in", "config", ".", "section", "(", "'aliases'", ")", ".", "items", "(", ")", ":", "bindings", "=", "', '", ".", "join", "(", "cmd_to_keys", ".", "get", "(", "name", ",", "[", "]", ")", ")", "cmdlist", ".", "append", "(", "(", "name", ",", "\"Alias for '{}'\"", ".", "format", "(", "cmd", ")", ",", "bindings", ")", ")", "return", "cmdlist" ]
get a list of completions info for commands .
train
false
24,501
def merge_transforms(exp): if isinstance(exp, (Var, Removed)): return exp elif isinstance(exp, (Reduced, Sorted, Transformed)): prev = merge_transforms(exp.sub) if isinstance(prev, (Reduced, Sorted, Transformed)): B = exp.var.compute_value assert isinstance(B, Lookup) A = B.variable.compute_value assert isinstance(A, Lookup) new_var = DiscreteVariable(exp.var.name, values=exp.var.values, ordered=exp.var.ordered, compute_value=merge_lookup(A, B)) assert isinstance(prev.sub, Var) return Transformed(prev.sub, new_var) else: assert (prev is exp.sub) return exp else: raise TypeError
[ "def", "merge_transforms", "(", "exp", ")", ":", "if", "isinstance", "(", "exp", ",", "(", "Var", ",", "Removed", ")", ")", ":", "return", "exp", "elif", "isinstance", "(", "exp", ",", "(", "Reduced", ",", "Sorted", ",", "Transformed", ")", ")", ":", "prev", "=", "merge_transforms", "(", "exp", ".", "sub", ")", "if", "isinstance", "(", "prev", ",", "(", "Reduced", ",", "Sorted", ",", "Transformed", ")", ")", ":", "B", "=", "exp", ".", "var", ".", "compute_value", "assert", "isinstance", "(", "B", ",", "Lookup", ")", "A", "=", "B", ".", "variable", ".", "compute_value", "assert", "isinstance", "(", "A", ",", "Lookup", ")", "new_var", "=", "DiscreteVariable", "(", "exp", ".", "var", ".", "name", ",", "values", "=", "exp", ".", "var", ".", "values", ",", "ordered", "=", "exp", ".", "var", ".", "ordered", ",", "compute_value", "=", "merge_lookup", "(", "A", ",", "B", ")", ")", "assert", "isinstance", "(", "prev", ".", "sub", ",", "Var", ")", "return", "Transformed", "(", "prev", ".", "sub", ",", "new_var", ")", "else", ":", "assert", "(", "prev", "is", "exp", ".", "sub", ")", "return", "exp", "else", ":", "raise", "TypeError" ]
merge consecutive removed .
train
false
24,503
def write_vocab_and_sums(vocab, sums, vocab_filename, sums_filename): with open(os.path.join(FLAGS.output_dir, vocab_filename), 'w') as vocab_out: with open(os.path.join(FLAGS.output_dir, sums_filename), 'w') as sums_out: for (tok, cnt) in itertools.izip(vocab, sums): print >>vocab_out, tok print >>sums_out, cnt
[ "def", "write_vocab_and_sums", "(", "vocab", ",", "sums", ",", "vocab_filename", ",", "sums_filename", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "output_dir", ",", "vocab_filename", ")", ",", "'w'", ")", "as", "vocab_out", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "output_dir", ",", "sums_filename", ")", ",", "'w'", ")", "as", "sums_out", ":", "for", "(", "tok", ",", "cnt", ")", "in", "itertools", ".", "izip", "(", "vocab", ",", "sums", ")", ":", "print", ">>", "vocab_out", ",", "tok", "print", ">>", "sums_out", ",", "cnt" ]
writes vocabulary and marginal sum files .
train
false
24,504
@pytest.mark.skipif(u'not HAS_SCIPY') def test_subpixel_gauss_2D(): gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1) values = discretize_model(gauss_2D, ((-1), 2), ((-1), 2), mode=u'integrate', factor=100) assert_allclose(values.sum(), ((2 * np.pi) * 0.01), atol=1e-05)
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "u'not HAS_SCIPY'", ")", "def", "test_subpixel_gauss_2D", "(", ")", ":", "gauss_2D", "=", "Gaussian2D", "(", "1", ",", "0", ",", "0", ",", "0.1", ",", "0.1", ")", "values", "=", "discretize_model", "(", "gauss_2D", ",", "(", "(", "-", "1", ")", ",", "2", ")", ",", "(", "(", "-", "1", ")", ",", "2", ")", ",", "mode", "=", "u'integrate'", ",", "factor", "=", "100", ")", "assert_allclose", "(", "values", ".", "sum", "(", ")", ",", "(", "(", "2", "*", "np", ".", "pi", ")", "*", "0.01", ")", ",", "atol", "=", "1e-05", ")" ]
test subpixel accuracy of the oversample mode with gaussian 2d model .
train
false
24,505
def test_comphdu_bscale(tmpdir): filename1 = tmpdir.join('3hdus.fits').strpath filename2 = tmpdir.join('3hdus_comp.fits').strpath x = (np.random.random((100, 100)) * 100) x0 = fits.PrimaryHDU() x1 = fits.ImageHDU(np.array((x - 50), dtype=int), uint=True) x1.header['BZERO'] = 20331 x1.header['BSCALE'] = 2.3 hdus = fits.HDUList([x0, x1]) hdus.writeto(filename1) hdus = fits.open(filename1) hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32), header=hdus[1].header) hdus.writeto(filename2) hdus = fits.open(filename2) hdus[1].verify('exception')
[ "def", "test_comphdu_bscale", "(", "tmpdir", ")", ":", "filename1", "=", "tmpdir", ".", "join", "(", "'3hdus.fits'", ")", ".", "strpath", "filename2", "=", "tmpdir", ".", "join", "(", "'3hdus_comp.fits'", ")", ".", "strpath", "x", "=", "(", "np", ".", "random", ".", "random", "(", "(", "100", ",", "100", ")", ")", "*", "100", ")", "x0", "=", "fits", ".", "PrimaryHDU", "(", ")", "x1", "=", "fits", ".", "ImageHDU", "(", "np", ".", "array", "(", "(", "x", "-", "50", ")", ",", "dtype", "=", "int", ")", ",", "uint", "=", "True", ")", "x1", ".", "header", "[", "'BZERO'", "]", "=", "20331", "x1", ".", "header", "[", "'BSCALE'", "]", "=", "2.3", "hdus", "=", "fits", ".", "HDUList", "(", "[", "x0", ",", "x1", "]", ")", "hdus", ".", "writeto", "(", "filename1", ")", "hdus", "=", "fits", ".", "open", "(", "filename1", ")", "hdus", "[", "1", "]", "=", "fits", ".", "CompImageHDU", "(", "data", "=", "hdus", "[", "1", "]", ".", "data", ".", "astype", "(", "np", ".", "uint32", ")", ",", "header", "=", "hdus", "[", "1", "]", ".", "header", ")", "hdus", ".", "writeto", "(", "filename2", ")", "hdus", "=", "fits", ".", "open", "(", "filename2", ")", "hdus", "[", "1", "]", ".", "verify", "(", "'exception'", ")" ]
regression test for a bug that caused extensions that used bzero and bscale that got turned into compimagehdu to end up with bzero/bscale before the tfields .
train
false
24,507
def test_non_EarthLocation(): class EarthLocation2(EarthLocation, ): pass EarthLocation2._get_site_registry(force_builtin=True) el2 = EarthLocation2.of_site(u'greenwich') assert (type(el2) is EarthLocation2) assert (el2.info.name == u'Royal Observatory Greenwich')
[ "def", "test_non_EarthLocation", "(", ")", ":", "class", "EarthLocation2", "(", "EarthLocation", ",", ")", ":", "pass", "EarthLocation2", ".", "_get_site_registry", "(", "force_builtin", "=", "True", ")", "el2", "=", "EarthLocation2", ".", "of_site", "(", "u'greenwich'", ")", "assert", "(", "type", "(", "el2", ")", "is", "EarthLocation2", ")", "assert", "(", "el2", ".", "info", ".", "name", "==", "u'Royal Observatory Greenwich'", ")" ]
a regression test for a typo bug pointed out at the bottom of URL .
train
false
24,508
def check_file_contents_for_email_alerts(app): sa_session = app.model.context.current admin_users = app.config.get('admin_users', '').split(',') for repository in sa_session.query(app.model.Repository).filter((app.model.Repository.table.c.email_alerts != null())): email_alerts = json.loads(repository.email_alerts) for user_email in email_alerts: if (user_email in admin_users): return True return False
[ "def", "check_file_contents_for_email_alerts", "(", "app", ")", ":", "sa_session", "=", "app", ".", "model", ".", "context", ".", "current", "admin_users", "=", "app", ".", "config", ".", "get", "(", "'admin_users'", ",", "''", ")", ".", "split", "(", "','", ")", "for", "repository", "in", "sa_session", ".", "query", "(", "app", ".", "model", ".", "Repository", ")", ".", "filter", "(", "(", "app", ".", "model", ".", "Repository", ".", "table", ".", "c", ".", "email_alerts", "!=", "null", "(", ")", ")", ")", ":", "email_alerts", "=", "json", ".", "loads", "(", "repository", ".", "email_alerts", ")", "for", "user_email", "in", "email_alerts", ":", "if", "(", "user_email", "in", "admin_users", ")", ":", "return", "True", "return", "False" ]
see if any admin users have chosen to receive email alerts when a repository is updated .
train
false
24,509
def initialize_permissions(course_key, user_who_created_course): seed_permissions_roles(course_key) CourseEnrollment.enroll(user_who_created_course, course_key) assign_default_role(course_key, user_who_created_course)
[ "def", "initialize_permissions", "(", "course_key", ",", "user_who_created_course", ")", ":", "seed_permissions_roles", "(", "course_key", ")", "CourseEnrollment", ".", "enroll", "(", "user_who_created_course", ",", "course_key", ")", "assign_default_role", "(", "course_key", ",", "user_who_created_course", ")" ]
initializes a new course by enrolling the course creator as a student .
train
false
24,510
def mptt_result_list(cl): return {u'cl': cl, u'result_hidden_fields': list(result_hidden_fields(cl)), u'result_headers': list(result_headers(cl)), u'results': list(mptt_results(cl))}
[ "def", "mptt_result_list", "(", "cl", ")", ":", "return", "{", "u'cl'", ":", "cl", ",", "u'result_hidden_fields'", ":", "list", "(", "result_hidden_fields", "(", "cl", ")", ")", ",", "u'result_headers'", ":", "list", "(", "result_headers", "(", "cl", ")", ")", ",", "u'results'", ":", "list", "(", "mptt_results", "(", "cl", ")", ")", "}" ]
displays the headers and data list together .
train
false
24,511
def cached(method): @gen.coroutine def cached_method(self, *args, **kwargs): uri = self.request.path short_url = self.truncate(uri) if self.get_argument('flush_cache', False): app_log.info('flushing cache %s', short_url) (yield method(self, *args, **kwargs)) return pending_future = self.pending.get(uri, None) loop = IOLoop.current() if pending_future: app_log.info('Waiting for concurrent request at %s', short_url) tic = loop.time() (yield pending_future) toc = loop.time() app_log.info('Waited %.3fs for concurrent request at %s', (toc - tic), short_url) try: with time_block(('cache get %s' % short_url)): cached_pickle = (yield self.cache.get(self.cache_key)) if (cached_pickle is not None): cached = pickle.loads(cached_pickle) else: cached = None except Exception as e: app_log.error('Exception getting %s from cache', short_url, exc_info=True) cached = None if (cached is not None): app_log.debug('cache hit %s', short_url) for (key, value) in cached['headers'].items(): self.set_header(key, value) self.write(cached['body']) else: app_log.debug('cache miss %s', short_url) future = self.pending[uri] = Future() try: (yield method(self, *args, **kwargs)) finally: self.pending.pop(uri, None) future.set_result(None) return cached_method
[ "def", "cached", "(", "method", ")", ":", "@", "gen", ".", "coroutine", "def", "cached_method", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "uri", "=", "self", ".", "request", ".", "path", "short_url", "=", "self", ".", "truncate", "(", "uri", ")", "if", "self", ".", "get_argument", "(", "'flush_cache'", ",", "False", ")", ":", "app_log", ".", "info", "(", "'flushing cache %s'", ",", "short_url", ")", "(", "yield", "method", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ")", "return", "pending_future", "=", "self", ".", "pending", ".", "get", "(", "uri", ",", "None", ")", "loop", "=", "IOLoop", ".", "current", "(", ")", "if", "pending_future", ":", "app_log", ".", "info", "(", "'Waiting for concurrent request at %s'", ",", "short_url", ")", "tic", "=", "loop", ".", "time", "(", ")", "(", "yield", "pending_future", ")", "toc", "=", "loop", ".", "time", "(", ")", "app_log", ".", "info", "(", "'Waited %.3fs for concurrent request at %s'", ",", "(", "toc", "-", "tic", ")", ",", "short_url", ")", "try", ":", "with", "time_block", "(", "(", "'cache get %s'", "%", "short_url", ")", ")", ":", "cached_pickle", "=", "(", "yield", "self", ".", "cache", ".", "get", "(", "self", ".", "cache_key", ")", ")", "if", "(", "cached_pickle", "is", "not", "None", ")", ":", "cached", "=", "pickle", ".", "loads", "(", "cached_pickle", ")", "else", ":", "cached", "=", "None", "except", "Exception", "as", "e", ":", "app_log", ".", "error", "(", "'Exception getting %s from cache'", ",", "short_url", ",", "exc_info", "=", "True", ")", "cached", "=", "None", "if", "(", "cached", "is", "not", "None", ")", ":", "app_log", ".", "debug", "(", "'cache hit %s'", ",", "short_url", ")", "for", "(", "key", ",", "value", ")", "in", "cached", "[", "'headers'", "]", ".", "items", "(", ")", ":", "self", ".", "set_header", "(", "key", ",", "value", ")", "self", ".", "write", "(", "cached", "[", "'body'", "]", ")", "else", ":", "app_log", ".", "debug", "(", "'cache miss %s'", 
",", "short_url", ")", "future", "=", "self", ".", "pending", "[", "uri", "]", "=", "Future", "(", ")", "try", ":", "(", "yield", "method", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ")", "finally", ":", "self", ".", "pending", ".", "pop", "(", "uri", ",", "None", ")", "future", ".", "set_result", "(", "None", ")", "return", "cached_method" ]
a caching decorator based on parameter objects .
train
false
24,512
def change_HTTPS_CERT(https_cert): if (https_cert == ''): sickbeard.HTTPS_CERT = '' return True if (ek(os.path.normpath, sickbeard.HTTPS_CERT) != ek(os.path.normpath, https_cert)): if helpers.makeDir(ek(os.path.dirname, ek(os.path.abspath, https_cert))): sickbeard.HTTPS_CERT = ek(os.path.normpath, https_cert) logger.log((u'Changed https cert path to ' + https_cert)) else: return False return True
[ "def", "change_HTTPS_CERT", "(", "https_cert", ")", ":", "if", "(", "https_cert", "==", "''", ")", ":", "sickbeard", ".", "HTTPS_CERT", "=", "''", "return", "True", "if", "(", "ek", "(", "os", ".", "path", ".", "normpath", ",", "sickbeard", ".", "HTTPS_CERT", ")", "!=", "ek", "(", "os", ".", "path", ".", "normpath", ",", "https_cert", ")", ")", ":", "if", "helpers", ".", "makeDir", "(", "ek", "(", "os", ".", "path", ".", "dirname", ",", "ek", "(", "os", ".", "path", ".", "abspath", ",", "https_cert", ")", ")", ")", ":", "sickbeard", ".", "HTTPS_CERT", "=", "ek", "(", "os", ".", "path", ".", "normpath", ",", "https_cert", ")", "logger", ".", "log", "(", "(", "u'Changed https cert path to '", "+", "https_cert", ")", ")", "else", ":", "return", "False", "return", "True" ]
replace https certificate file path .
train
false
24,515
def iter_resolve_and_parse_specs(rel_path, specs): for spec in specs: (spec_path, target_name) = parse_spec(spec, rel_path) (yield SingleAddress(spec_path, target_name))
[ "def", "iter_resolve_and_parse_specs", "(", "rel_path", ",", "specs", ")", ":", "for", "spec", "in", "specs", ":", "(", "spec_path", ",", "target_name", ")", "=", "parse_spec", "(", "spec", ",", "rel_path", ")", "(", "yield", "SingleAddress", "(", "spec_path", ",", "target_name", ")", ")" ]
given a relative path and set of input specs .
train
false
24,517
@core_helper def time_ago_from_timestamp(timestamp): datetime_ = _datestamp_to_datetime(timestamp) if (not datetime_): return _('Unknown') return formatters.localised_nice_date(datetime_, show_date=False)
[ "@", "core_helper", "def", "time_ago_from_timestamp", "(", "timestamp", ")", ":", "datetime_", "=", "_datestamp_to_datetime", "(", "timestamp", ")", "if", "(", "not", "datetime_", ")", ":", "return", "_", "(", "'Unknown'", ")", "return", "formatters", ".", "localised_nice_date", "(", "datetime_", ",", "show_date", "=", "False", ")" ]
returns a string like 5 months ago for a datetime relative to now .
train
false
24,518
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
    """NaN-aware argsort, intended as a drop-in replacement for np.argsort.

    Null entries are grouped at the position given by *na_position*
    ('first' or 'last'); all other entries are sorted with *kind*.
    """
    # Categoricals implement their own NaN-aware argsort.
    if is_categorical_dtype(items):
        return items.argsort(ascending=ascending)

    values = np.asanyarray(items)
    positions = np.arange(len(values))
    null_mask = isnull(values)

    good = values[~null_mask]
    good_pos = positions[~null_mask]
    null_pos = np.nonzero(null_mask)[0]

    # Reverse before sorting so ties keep the original relative semantics.
    if not ascending:
        good = good[::-1]
        good_pos = good_pos[::-1]

    order = good_pos[good.argsort(kind=kind)]
    if not ascending:
        order = order[::-1]

    if na_position == 'last':
        return np.concatenate([order, null_pos])
    if na_position == 'first':
        return np.concatenate([null_pos, order])
    raise ValueError('invalid na_position: {!r}'.format(na_position))
[ "def", "_nargsort", "(", "items", ",", "kind", "=", "'quicksort'", ",", "ascending", "=", "True", ",", "na_position", "=", "'last'", ")", ":", "if", "is_categorical_dtype", "(", "items", ")", ":", "return", "items", ".", "argsort", "(", "ascending", "=", "ascending", ")", "items", "=", "np", ".", "asanyarray", "(", "items", ")", "idx", "=", "np", ".", "arange", "(", "len", "(", "items", ")", ")", "mask", "=", "isnull", "(", "items", ")", "non_nans", "=", "items", "[", "(", "~", "mask", ")", "]", "non_nan_idx", "=", "idx", "[", "(", "~", "mask", ")", "]", "nan_idx", "=", "np", ".", "nonzero", "(", "mask", ")", "[", "0", "]", "if", "(", "not", "ascending", ")", ":", "non_nans", "=", "non_nans", "[", ":", ":", "(", "-", "1", ")", "]", "non_nan_idx", "=", "non_nan_idx", "[", ":", ":", "(", "-", "1", ")", "]", "indexer", "=", "non_nan_idx", "[", "non_nans", ".", "argsort", "(", "kind", "=", "kind", ")", "]", "if", "(", "not", "ascending", ")", ":", "indexer", "=", "indexer", "[", ":", ":", "(", "-", "1", ")", "]", "if", "(", "na_position", "==", "'last'", ")", ":", "indexer", "=", "np", ".", "concatenate", "(", "[", "indexer", ",", "nan_idx", "]", ")", "elif", "(", "na_position", "==", "'first'", ")", ":", "indexer", "=", "np", ".", "concatenate", "(", "[", "nan_idx", ",", "indexer", "]", ")", "else", ":", "raise", "ValueError", "(", "'invalid na_position: {!r}'", ".", "format", "(", "na_position", ")", ")", "return", "indexer" ]
this is intended to be a drop-in replacement for np.argsort that handles nans .
train
false
24,521
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False, check_finite=True):
    """Compute the QR factorization of a matrix via LAPACK.

    Modes: 'full' (Q is M x M), 'economic' (reduced Q/R), 'r' (R only),
    'raw' (the LAPACK (qr, tau) output plus R).  With ``pivoting=True``
    a column permutation is computed (geqp3) and returned alongside R.
    """
    if (mode not in ['full', 'qr', 'r', 'economic', 'raw']):
        raise ValueError("Mode argument should be one of ['full', 'r', 'economic', 'raw']")
    # asarray_chkfinite additionally rejects NaN/Inf input.
    if check_finite:
        a1 = numpy.asarray_chkfinite(a)
    else:
        a1 = numpy.asarray(a)
    if (len(a1.shape) != 2):
        raise ValueError('expected 2D array')
    (M, N) = a1.shape
    # Safe to overwrite when allowed by the caller or when a copy was made anyway.
    overwrite_a = (overwrite_a or _datacopied(a1, a))
    if pivoting:
        # geqp3: QR with column pivoting.
        (geqp3,) = get_lapack_funcs(('geqp3',), (a1,))
        (qr, jpvt, tau) = safecall(geqp3, 'geqp3', a1, overwrite_a=overwrite_a)
        jpvt -= 1  # LAPACK pivot indices are 1-based; convert to 0-based.
    else:
        (geqrf,) = get_lapack_funcs(('geqrf',), (a1,))
        (qr, tau) = safecall(geqrf, 'geqrf', a1, lwork=lwork, overwrite_a=overwrite_a)
    # R is the upper triangle of the LAPACK output (truncated for economic/raw).
    if ((mode not in ['economic', 'raw']) or (M < N)):
        R = numpy.triu(qr)
    else:
        R = numpy.triu(qr[:N, :])
    # Rj collects everything returned after Q (R, and the pivots if any).
    if pivoting:
        Rj = (R, jpvt)
    else:
        Rj = (R,)
    if (mode == 'r'):
        return Rj
    elif (mode == 'raw'):
        return (((qr, tau),) + Rj)
    # orgqr/ungqr reconstructs Q from the Householder reflectors in qr/tau.
    (gor_un_gqr,) = get_lapack_funcs(('orgqr',), (qr,))
    if (M < N):
        (Q,) = safecall(gor_un_gqr, 'gorgqr/gungqr', qr[:, :M], tau, lwork=lwork, overwrite_a=1)
    elif (mode == 'economic'):
        (Q,) = safecall(gor_un_gqr, 'gorgqr/gungqr', qr, tau, lwork=lwork, overwrite_a=1)
    else:
        # Full mode: embed the M x N factor into an M x M work array first.
        t = qr.dtype.char
        qqr = numpy.empty((M, M), dtype=t)
        qqr[:, :N] = qr
        (Q,) = safecall(gor_un_gqr, 'gorgqr/gungqr', qqr, tau, lwork=lwork, overwrite_a=1)
    return ((Q,) + Rj)
[ "def", "qr", "(", "a", ",", "overwrite_a", "=", "False", ",", "lwork", "=", "None", ",", "mode", "=", "'full'", ",", "pivoting", "=", "False", ",", "check_finite", "=", "True", ")", ":", "if", "(", "mode", "not", "in", "[", "'full'", ",", "'qr'", ",", "'r'", ",", "'economic'", ",", "'raw'", "]", ")", ":", "raise", "ValueError", "(", "\"Mode argument should be one of ['full', 'r', 'economic', 'raw']\"", ")", "if", "check_finite", ":", "a1", "=", "numpy", ".", "asarray_chkfinite", "(", "a", ")", "else", ":", "a1", "=", "numpy", ".", "asarray", "(", "a", ")", "if", "(", "len", "(", "a1", ".", "shape", ")", "!=", "2", ")", ":", "raise", "ValueError", "(", "'expected 2D array'", ")", "(", "M", ",", "N", ")", "=", "a1", ".", "shape", "overwrite_a", "=", "(", "overwrite_a", "or", "_datacopied", "(", "a1", ",", "a", ")", ")", "if", "pivoting", ":", "(", "geqp3", ",", ")", "=", "get_lapack_funcs", "(", "(", "'geqp3'", ",", ")", ",", "(", "a1", ",", ")", ")", "(", "qr", ",", "jpvt", ",", "tau", ")", "=", "safecall", "(", "geqp3", ",", "'geqp3'", ",", "a1", ",", "overwrite_a", "=", "overwrite_a", ")", "jpvt", "-=", "1", "else", ":", "(", "geqrf", ",", ")", "=", "get_lapack_funcs", "(", "(", "'geqrf'", ",", ")", ",", "(", "a1", ",", ")", ")", "(", "qr", ",", "tau", ")", "=", "safecall", "(", "geqrf", ",", "'geqrf'", ",", "a1", ",", "lwork", "=", "lwork", ",", "overwrite_a", "=", "overwrite_a", ")", "if", "(", "(", "mode", "not", "in", "[", "'economic'", ",", "'raw'", "]", ")", "or", "(", "M", "<", "N", ")", ")", ":", "R", "=", "numpy", ".", "triu", "(", "qr", ")", "else", ":", "R", "=", "numpy", ".", "triu", "(", "qr", "[", ":", "N", ",", ":", "]", ")", "if", "pivoting", ":", "Rj", "=", "(", "R", ",", "jpvt", ")", "else", ":", "Rj", "=", "(", "R", ",", ")", "if", "(", "mode", "==", "'r'", ")", ":", "return", "Rj", "elif", "(", "mode", "==", "'raw'", ")", ":", "return", "(", "(", "(", "qr", ",", "tau", ")", ",", ")", "+", "Rj", ")", "(", "gor_un_gqr", ",", ")", "=", 
"get_lapack_funcs", "(", "(", "'orgqr'", ",", ")", ",", "(", "qr", ",", ")", ")", "if", "(", "M", "<", "N", ")", ":", "(", "Q", ",", ")", "=", "safecall", "(", "gor_un_gqr", ",", "'gorgqr/gungqr'", ",", "qr", "[", ":", ",", ":", "M", "]", ",", "tau", ",", "lwork", "=", "lwork", ",", "overwrite_a", "=", "1", ")", "elif", "(", "mode", "==", "'economic'", ")", ":", "(", "Q", ",", ")", "=", "safecall", "(", "gor_un_gqr", ",", "'gorgqr/gungqr'", ",", "qr", ",", "tau", ",", "lwork", "=", "lwork", ",", "overwrite_a", "=", "1", ")", "else", ":", "t", "=", "qr", ".", "dtype", ".", "char", "qqr", "=", "numpy", ".", "empty", "(", "(", "M", ",", "M", ")", ",", "dtype", "=", "t", ")", "qqr", "[", ":", ",", ":", "N", "]", "=", "qr", "(", "Q", ",", ")", "=", "safecall", "(", "gor_un_gqr", ",", "'gorgqr/gungqr'", ",", "qqr", ",", "tau", ",", "lwork", "=", "lwork", ",", "overwrite_a", "=", "1", ")", "return", "(", "(", "Q", ",", ")", "+", "Rj", ")" ]
compute the qr factorization of a matrix .
train
false
24,522
def _sync_flush(f): f.flush() if hasattr(os, 'fsync'): os.fsync(f.fileno())
[ "def", "_sync_flush", "(", "f", ")", ":", "f", ".", "flush", "(", ")", "if", "hasattr", "(", "os", ",", "'fsync'", ")", ":", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")" ]
ensure changes to file f are physically on disk .
train
false
24,523
def dup_root_lower_bound(f, K):
    """LMQ lower bound for the positive roots of ``f``.

    Computed as the reciprocal of the LMQ upper bound of the reversed
    polynomial; returns None when no bound exists.
    """
    upper = dup_root_upper_bound(dup_reverse(f), K)
    if upper is None:
        return None
    return 1 / upper
[ "def", "dup_root_lower_bound", "(", "f", ",", "K", ")", ":", "bound", "=", "dup_root_upper_bound", "(", "dup_reverse", "(", "f", ")", ",", "K", ")", "if", "(", "bound", "is", "not", "None", ")", ":", "return", "(", "1", "/", "bound", ")", "else", ":", "return", "None" ]
compute the lmq lower bound for the positive roots of f; lmq was developed by akritas-strzebonski-vigklas .
train
false
24,524
def to_nice_json(a, indent=4, *args, **kw):
    """Serialize *a* to human-readable JSON: indented and key-sorted.

    On Python < 2.7 prefer simplejson >= 2 when available (stdlib json
    there lacks usable indent support); otherwise use json with the
    AnsibleJSONEncoder, degrading to plain to_json() on failure.
    """
    if sys.version_info < (2, 7):
        try:
            import simplejson
        except ImportError:
            pass
        else:
            try:
                major = int(simplejson.__version__.split('.')[0])
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only version-parsing errors
            # should fall through to stdlib json.
            except (AttributeError, ValueError, IndexError):
                pass
            else:
                if major >= 2:
                    return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
    try:
        return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
    # Fix: narrow the second bare `except:` as well; any serialization
    # error degrades to the compact serializer.
    except Exception:
        return to_json(a, *args, **kw)
[ "def", "to_nice_json", "(", "a", ",", "indent", "=", "4", ",", "*", "args", ",", "**", "kw", ")", ":", "if", "(", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ")", ":", "try", ":", "import", "simplejson", "except", "ImportError", ":", "pass", "else", ":", "try", ":", "major", "=", "int", "(", "simplejson", ".", "__version__", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "except", ":", "pass", "else", ":", "if", "(", "major", ">=", "2", ")", ":", "return", "simplejson", ".", "dumps", "(", "a", ",", "indent", "=", "indent", ",", "sort_keys", "=", "True", ",", "*", "args", ",", "**", "kw", ")", "try", ":", "return", "json", ".", "dumps", "(", "a", ",", "indent", "=", "indent", ",", "sort_keys", "=", "True", ",", "cls", "=", "AnsibleJSONEncoder", ",", "*", "args", ",", "**", "kw", ")", "except", ":", "return", "to_json", "(", "a", ",", "*", "args", ",", "**", "kw", ")" ]
serialize to verbose , human-readable json ( indented and key-sorted ) .
train
false
24,526
def start_tribler_core(base_path):
    """Entry point (invoked via multiprocessing) that boots a Tribler session.

    Runs a Twisted reactor, enables the HTTP API on port 8085 and wires
    SIGTERM to a clean session shutdown that stops the reactor.
    """
    from twisted.internet import reactor

    def _stop_reactor(_):
        reactor.stop()

    def shutdown(session, *_):
        logging.info('Stopping Tribler core')
        session.shutdown().addCallback(_stop_reactor)

    sys.path.insert(0, base_path)

    def _boot():
        config = SessionStartupConfig().load()
        config.set_http_api_port(8085)
        config.set_http_api_enabled(True)

        # Bail out when another Tribler instance is already active.
        if ProcessChecker().already_running:
            return

        session = Session(config)
        signal.signal(signal.SIGTERM,
                      lambda signum, stack: shutdown(session, signum, stack))
        session.start()

    reactor.callWhenRunning(_boot)
    reactor.run()
[ "def", "start_tribler_core", "(", "base_path", ")", ":", "from", "twisted", ".", "internet", "import", "reactor", "def", "on_tribler_shutdown", "(", "_", ")", ":", "reactor", ".", "stop", "(", ")", "def", "shutdown", "(", "session", ",", "*", "_", ")", ":", "logging", ".", "info", "(", "'Stopping Tribler core'", ")", "session", ".", "shutdown", "(", ")", ".", "addCallback", "(", "on_tribler_shutdown", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "base_path", ")", "def", "start_tribler", "(", ")", ":", "config", "=", "SessionStartupConfig", "(", ")", ".", "load", "(", ")", "config", ".", "set_http_api_port", "(", "8085", ")", "config", ".", "set_http_api_enabled", "(", "True", ")", "process_checker", "=", "ProcessChecker", "(", ")", "if", "process_checker", ".", "already_running", ":", "return", "session", "=", "Session", "(", "config", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "(", "lambda", "signum", ",", "stack", ":", "shutdown", "(", "session", ",", "signum", ",", "stack", ")", ")", ")", "session", ".", "start", "(", ")", "reactor", ".", "callWhenRunning", "(", "start_tribler", ")", "reactor", ".", "run", "(", ")" ]
this method is invoked by multiprocessing when the tribler core is started and will start a tribler session .
train
false
24,528
def floyd_warshall(G, weight='weight'):
    """All-pairs shortest path lengths via the Floyd-Warshall algorithm.

    Thin wrapper that discards the predecessor map and returns only the
    distance dictionary.
    """
    result = floyd_warshall_predecessor_and_distance(G, weight=weight)
    return result[1]
[ "def", "floyd_warshall", "(", "G", ",", "weight", "=", "'weight'", ")", ":", "return", "floyd_warshall_predecessor_and_distance", "(", "G", ",", "weight", "=", "weight", ")", "[", "1", "]" ]
find all-pairs shortest path lengths using floyds algorithm .
train
false
24,529
def _get_inst_data(inst):
    """Return the data array backing an MNE object, loading it when lazy.

    Raw/Epochs are preloaded first and expose ``_data``; Evoked and TFR
    objects expose ``data`` directly.  Anything else raises TypeError.
    """
    from .io.base import BaseRaw
    from .epochs import BaseEpochs
    from . import Evoked
    from .time_frequency.tfr import _BaseTFR

    if isinstance(inst, (BaseRaw, BaseEpochs)):
        # These types may be memory-mapped; force the data into memory.
        if not inst.preload:
            inst.load_data()
        return inst._data
    if isinstance(inst, (Evoked, _BaseTFR)):
        return inst.data
    raise TypeError('The argument must be an instance of Raw, Epochs, Evoked, '
                    'EpochsTFR or AverageTFR, got {0}.'.format(type(inst)))
[ "def", "_get_inst_data", "(", "inst", ")", ":", "from", ".", "io", ".", "base", "import", "BaseRaw", "from", ".", "epochs", "import", "BaseEpochs", "from", ".", "import", "Evoked", "from", ".", "time_frequency", ".", "tfr", "import", "_BaseTFR", "if", "isinstance", "(", "inst", ",", "(", "BaseRaw", ",", "BaseEpochs", ")", ")", ":", "if", "(", "not", "inst", ".", "preload", ")", ":", "inst", ".", "load_data", "(", ")", "return", "inst", ".", "_data", "elif", "isinstance", "(", "inst", ",", "(", "Evoked", ",", "_BaseTFR", ")", ")", ":", "return", "inst", ".", "data", "else", ":", "raise", "TypeError", "(", "'The argument must be an instance of Raw, Epochs, Evoked, EpochsTFR or AverageTFR, got {0}.'", ".", "format", "(", "type", "(", "inst", ")", ")", ")" ]
get data view from mne object instance like raw .
train
false
24,532
def is_connection_dropped(conn):
    """Return True if the connection is dropped and should be closed.

    A socket that is readable while no response is expected indicates
    EOF/reset.  Uses poll() when present, falling back to select().
    """
    sock = getattr(conn, 'sock', False)
    if not sock:
        return False

    if not poll:
        if not select:
            # Neither mechanism is available; assume the connection is fine.
            return False
        try:
            return select([sock], [], [], 0.0)[0]
        except SocketError:
            return True

    poller = poll()
    poller.register(sock, POLLIN)
    for fd, _event in poller.poll(0.0):
        if fd == sock.fileno():
            return True
[ "def", "is_connection_dropped", "(", "conn", ")", ":", "sock", "=", "getattr", "(", "conn", ",", "'sock'", ",", "False", ")", "if", "(", "not", "sock", ")", ":", "return", "False", "if", "(", "not", "poll", ")", ":", "if", "(", "not", "select", ")", ":", "return", "False", "try", ":", "return", "select", "(", "[", "sock", "]", ",", "[", "]", ",", "[", "]", ",", "0.0", ")", "[", "0", "]", "except", "SocketError", ":", "return", "True", "p", "=", "poll", "(", ")", "p", ".", "register", "(", "sock", ",", "POLLIN", ")", "for", "(", "fno", ",", "ev", ")", "in", "p", ".", "poll", "(", "0.0", ")", ":", "if", "(", "fno", "==", "sock", ".", "fileno", "(", ")", ")", ":", "return", "True" ]
returns true if the connection is dropped and should be closed .
train
true
24,533
def call_subprocess_Popen(command, **params):
    """Run *command* via subprocess_Popen with all output discarded.

    Returns the child's exit code.  stdout/stderr keyword arguments are
    rejected because both streams are redirected to os.devnull.
    """
    if ('stdout' in params) or ('stderr' in params):
        raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
    with open(os.devnull, 'wb') as null:
        # Default stdin to devnull too, unless the caller supplied one.
        params.setdefault('stdin', null)
        params['stdout'] = null
        params['stderr'] = null
        proc = subprocess_Popen(command, **params)
        rc = proc.wait()
    return rc
[ "def", "call_subprocess_Popen", "(", "command", ",", "**", "params", ")", ":", "if", "(", "(", "'stdout'", "in", "params", ")", "or", "(", "'stderr'", "in", "params", ")", ")", ":", "raise", "TypeError", "(", "\"don't use stderr or stdout with call_subprocess_Popen\"", ")", "with", "open", "(", "os", ".", "devnull", ",", "'wb'", ")", "as", "null", ":", "params", ".", "setdefault", "(", "'stdin'", ",", "null", ")", "params", "[", "'stdout'", "]", "=", "null", "params", "[", "'stderr'", "]", "=", "null", "p", "=", "subprocess_Popen", "(", "command", ",", "**", "params", ")", "returncode", "=", "p", ".", "wait", "(", ")", "return", "returncode" ]
calls subprocess_popen and discards the output .
train
false
24,534
def get_repository_url(distribution, flocker_version):
    """Return the package-repository URL for *distribution*.

    RPM-based distributions point at a release RPM; Ubuntu releases point
    at an apt repository keyed by the flocker version's package suffix.
    Raises UnsupportedDistribution for unknown distributions.
    """
    rpm_template = ('https://{archive_bucket}.s3.amazonaws.com/{key}/'
                    'clusterhq-release$(rpm -E %dist).noarch.rpm')
    deb_template = ('https://{archive_bucket}.s3.amazonaws.com/{key}/'
                    '$(lsb_release --release --short)/\\$(ARCH)')
    ubuntu_key = 'ubuntu' + get_package_key_suffix(flocker_version)

    distribution_to_url = {
        'centos-7': rpm_template.format(archive_bucket=ARCHIVE_BUCKET, key='centos'),
        'rhel-7': rpm_template.format(archive_bucket=ARCHIVE_BUCKET, key='rhel'),
        'ubuntu-14.04': deb_template.format(archive_bucket=ARCHIVE_BUCKET, key=ubuntu_key),
        'ubuntu-16.04': deb_template.format(archive_bucket=ARCHIVE_BUCKET, key=ubuntu_key),
    }
    try:
        return distribution_to_url[distribution]
    except KeyError:
        raise UnsupportedDistribution()
[ "def", "get_repository_url", "(", "distribution", ",", "flocker_version", ")", ":", "distribution_to_url", "=", "{", "'centos-7'", ":", "'https://{archive_bucket}.s3.amazonaws.com/{key}/clusterhq-release$(rpm -E %dist).noarch.rpm'", ".", "format", "(", "archive_bucket", "=", "ARCHIVE_BUCKET", ",", "key", "=", "'centos'", ")", ",", "'rhel-7'", ":", "'https://{archive_bucket}.s3.amazonaws.com/{key}/clusterhq-release$(rpm -E %dist).noarch.rpm'", ".", "format", "(", "archive_bucket", "=", "ARCHIVE_BUCKET", ",", "key", "=", "'rhel'", ")", ",", "'ubuntu-14.04'", ":", "'https://{archive_bucket}.s3.amazonaws.com/{key}/$(lsb_release --release --short)/\\\\$(ARCH)'", ".", "format", "(", "archive_bucket", "=", "ARCHIVE_BUCKET", ",", "key", "=", "(", "'ubuntu'", "+", "get_package_key_suffix", "(", "flocker_version", ")", ")", ")", ",", "'ubuntu-16.04'", ":", "'https://{archive_bucket}.s3.amazonaws.com/{key}/$(lsb_release --release --short)/\\\\$(ARCH)'", ".", "format", "(", "archive_bucket", "=", "ARCHIVE_BUCKET", ",", "key", "=", "(", "'ubuntu'", "+", "get_package_key_suffix", "(", "flocker_version", ")", ")", ")", "}", "try", ":", "return", "distribution_to_url", "[", "distribution", "]", "except", "KeyError", ":", "raise", "UnsupportedDistribution", "(", ")" ]
return the url for the repository of a given distribution .
train
false
24,535
def p_enumerator_1(t):
    """enumerator : ID"""
    # PLY/yacc reads the grammar production from the docstring; without it
    # this rule function is invisible to the parser generator.  The bare
    # identifier case needs no semantic action.
    pass
[ "def", "p_enumerator_1", "(", "t", ")", ":", "pass" ]
enumerator : id .
train
false
24,536
def _create(filesystem, content_type=RAW, filename=None, params=None):
    """Issue a file-service Create RPC and return the new file's name.

    Arguments are validated up front; filename and params are optional.
    Raises InvalidArgumentError on any malformed argument.
    """
    if not filesystem:
        raise InvalidArgumentError('Filesystem is empty')
    if not isinstance(filesystem, basestring):
        raise InvalidArgumentError('Filesystem should be a string')
    if content_type != RAW:
        raise InvalidArgumentError('Invalid content type')

    request = file_service_pb.CreateRequest()
    response = file_service_pb.CreateResponse()
    request.set_filesystem(filesystem)
    request.set_content_type(content_type)

    if filename:
        if not isinstance(filename, basestring):
            raise InvalidArgumentError('Filename should be a string')
        request.set_filename(filename)

    if params:
        if not isinstance(params, dict):
            raise InvalidArgumentError('Parameters should be a dictionary')
        for name, value in params.items():
            parameter = request.add_parameters()
            parameter.set_name(name)
            parameter.set_value(value)

    _make_call('Create', request, response)
    return response.filename()
[ "def", "_create", "(", "filesystem", ",", "content_type", "=", "RAW", ",", "filename", "=", "None", ",", "params", "=", "None", ")", ":", "if", "(", "not", "filesystem", ")", ":", "raise", "InvalidArgumentError", "(", "'Filesystem is empty'", ")", "if", "(", "not", "isinstance", "(", "filesystem", ",", "basestring", ")", ")", ":", "raise", "InvalidArgumentError", "(", "'Filesystem should be a string'", ")", "if", "(", "content_type", "!=", "RAW", ")", ":", "raise", "InvalidArgumentError", "(", "'Invalid content type'", ")", "request", "=", "file_service_pb", ".", "CreateRequest", "(", ")", "response", "=", "file_service_pb", ".", "CreateResponse", "(", ")", "request", ".", "set_filesystem", "(", "filesystem", ")", "request", ".", "set_content_type", "(", "content_type", ")", "if", "filename", ":", "if", "(", "not", "isinstance", "(", "filename", ",", "basestring", ")", ")", ":", "raise", "InvalidArgumentError", "(", "'Filename should be a string'", ")", "request", ".", "set_filename", "(", "filename", ")", "if", "params", ":", "if", "(", "not", "isinstance", "(", "params", ",", "dict", ")", ")", ":", "raise", "InvalidArgumentError", "(", "'Parameters should be a dictionary'", ")", "for", "(", "k", ",", "v", ")", "in", "params", ".", "items", "(", ")", ":", "param", "=", "request", ".", "add_parameters", "(", ")", "param", ".", "set_name", "(", "k", ")", "param", ".", "set_value", "(", "v", ")", "_make_call", "(", "'Create'", ",", "request", ",", "response", ")", "return", "response", ".", "filename", "(", ")" ]
create a file .
train
false
24,537
def test_get_words_css():
    """Filename- and content-based extraction yield the same CSS words."""
    expected = sorted(['DeepSkyBlue', 'nombre-valido', 'text', 'css', 'h',
                       'color', 'Hello', 'world', 'type', 'style'])
    assert expected == sorted(get_words_by_filename('example.css'))
    assert expected == sorted(get_words_by_content('example.css'))
[ "def", "test_get_words_css", "(", ")", ":", "expected_words", "=", "[", "'DeepSkyBlue'", ",", "'nombre-valido'", ",", "'text'", ",", "'css'", ",", "'h'", ",", "'color'", ",", "'Hello'", ",", "'world'", ",", "'type'", ",", "'style'", "]", "assert", "(", "sorted", "(", "expected_words", ")", "==", "sorted", "(", "get_words_by_filename", "(", "'example.css'", ")", ")", ")", "assert", "(", "sorted", "(", "expected_words", ")", "==", "sorted", "(", "get_words_by_content", "(", "'example.css'", ")", ")", ")" ]
test extracting words from css file content and filename .
train
false
24,538
def dump_address_pair(pair):
    """Render a (realname, address) pair canonically.

    A non-empty realname yields '"Real Name" <addr>'; otherwise just the
    bare address is returned.
    """
    realname, address = pair[0], pair[1]
    if not realname:
        return address
    return ''.join(['"', realname, '" <', address, '>'])
[ "def", "dump_address_pair", "(", "pair", ")", ":", "if", "pair", "[", "0", "]", ":", "return", "(", "(", "(", "(", "'\"'", "+", "pair", "[", "0", "]", ")", "+", "'\" <'", ")", "+", "pair", "[", "1", "]", ")", "+", "'>'", ")", "else", ":", "return", "pair", "[", "1", "]" ]
dump a pair in a canonicalized form .
train
false
24,539
def _rereconstituter(name):
    """Attribute declaration preserving mutability on URLPath.

    Returns a property backed by the underscore-prefixed slot; the setter
    stores bytes (encoding text via 'charmap' when needed) and then
    refreshes the cached serialized form.
    """
    privateName = nativeString('_') + name

    def _get(self):
        return getattr(self, privateName)

    def _set(self, value):
        if not isinstance(value, bytes):
            value = value.encode('charmap')
        setattr(self, privateName, value)
        self._reconstitute()

    return property(_get, _set)
[ "def", "_rereconstituter", "(", "name", ")", ":", "privateName", "=", "(", "nativeString", "(", "'_'", ")", "+", "name", ")", "return", "property", "(", "(", "lambda", "self", ":", "getattr", "(", "self", ",", "privateName", ")", ")", ",", "(", "lambda", "self", ",", "value", ":", "(", "setattr", "(", "self", ",", "privateName", ",", "(", "value", "if", "isinstance", "(", "value", ",", "bytes", ")", "else", "value", ".", "encode", "(", "'charmap'", ")", ")", ")", "or", "self", ".", "_reconstitute", "(", ")", ")", ")", ")" ]
attribute declaration to preserve mutability on l{urlpath} .
train
false
24,540
@decorator
def close_first(fn, *args, **kw):
    """Decorator that closes all pooled test connections before *fn* runs.

    The wrapped call's return value is not propagated.
    """
    testing_reaper.close_all()
    fn(*args, **kw)
[ "@", "decorator", "def", "close_first", "(", "fn", ",", "*", "args", ",", "**", "kw", ")", ":", "testing_reaper", ".", "close_all", "(", ")", "fn", "(", "*", "args", ",", "**", "kw", ")" ]
decorator that closes all connections before fn execution .
train
false
24,542
def clean_str_sst(string):
    """Tokenization/string cleaning for the SST dataset.

    Characters outside [A-Za-z0-9(),!?'`] become spaces, whitespace runs
    collapse to a single space, and the result is stripped and lower-cased.
    """
    cleaned = re.sub(r"[^A-Za-z0-9(),!?\'\`]", ' ', string)
    cleaned = re.sub(r'\s{2,}', ' ', cleaned)
    return cleaned.strip().lower()
[ "def", "clean_str_sst", "(", "string", ")", ":", "string", "=", "re", ".", "sub", "(", "\"[^A-Za-z0-9(),!?\\\\'\\\\`]\"", ",", "' '", ",", "string", ")", "string", "=", "re", ".", "sub", "(", "'\\\\s{2,}'", ",", "' '", ",", "string", ")", "return", "string", ".", "strip", "(", ")", ".", "lower", "(", ")" ]
tokenization/string cleaning for the sst dataset .
train
false
24,544
def refspec_arg(local_branch, remote_branch, pull, push):
    """Return the refspec argument for a fetch, pull, or push command.

    A full "remote:local" refspec is built only when both branches are
    given and we are not pulling; otherwise whichever single branch name
    is available (or None) is used.
    """
    if local_branch and remote_branch and not pull:
        return refspec(remote_branch, local_branch, push=push)
    return local_branch or remote_branch or None
[ "def", "refspec_arg", "(", "local_branch", ",", "remote_branch", ",", "pull", ",", "push", ")", ":", "if", "(", "(", "not", "pull", ")", "and", "local_branch", "and", "remote_branch", ")", ":", "what", "=", "refspec", "(", "remote_branch", ",", "local_branch", ",", "push", "=", "push", ")", "else", ":", "what", "=", "(", "local_branch", "or", "remote_branch", "or", "None", ")", "return", "what" ]
return the refspec for a fetch or pull command .
train
false
24,545
def _check_weights(weights, n_components):
    """Validate mixture weights: shape (n_components,), in [0, 1], summing to 1.

    Returns the checked weights array; raises ValueError on any violation.
    """
    weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False)
    _check_shape(weights, (n_components,), 'weights')

    # Each weight must lie in [0, 1].
    if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):
        # Bug fix: the message labels the max first, but the original code
        # passed (np.min, np.max), printing the values under swapped labels.
        raise ValueError(("The parameter 'weights' should be in the range [0, 1], but got max value %.5f, min value %.5f" % (np.max(weights), np.min(weights))))

    # The weights must form a probability distribution (sum to 1).
    if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0):
        raise ValueError(("The parameter 'weights' should be normalized, but got sum(weights) = %.5f" % np.sum(weights)))
    return weights
[ "def", "_check_weights", "(", "weights", ",", "n_components", ")", ":", "weights", "=", "check_array", "(", "weights", ",", "dtype", "=", "[", "np", ".", "float64", ",", "np", ".", "float32", "]", ",", "ensure_2d", "=", "False", ")", "_check_shape", "(", "weights", ",", "(", "n_components", ",", ")", ",", "'weights'", ")", "if", "(", "any", "(", "np", ".", "less", "(", "weights", ",", "0.0", ")", ")", "or", "any", "(", "np", ".", "greater", "(", "weights", ",", "1.0", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "\"The parameter 'weights' should be in the range [0, 1], but got max value %.5f, min value %.5f\"", "%", "(", "np", ".", "min", "(", "weights", ")", ",", "np", ".", "max", "(", "weights", ")", ")", ")", ")", "if", "(", "not", "np", ".", "allclose", "(", "np", ".", "abs", "(", "(", "1.0", "-", "np", ".", "sum", "(", "weights", ")", ")", ")", ",", "0.0", ")", ")", ":", "raise", "ValueError", "(", "(", "\"The parameter 'weights' should be normalized, but got sum(weights) = %.5f\"", "%", "np", ".", "sum", "(", "weights", ")", ")", ")", "return", "weights" ]
check to make sure weights are valid .
train
false
24,547
def find_mod(module_name):
    """Locate dotted *module_name* on sys.path and return its file path.

    Each dotted segment is resolved inside the previous one; a package
    directory resolves to its __init__ file via get_init().
    """
    segments = module_name.split('.')
    path = find_module(segments[0])
    for segment in segments[1:]:
        path = find_module(segment, [path])
    if path and os.path.isdir(path):
        path = get_init(path)
    return path
[ "def", "find_mod", "(", "module_name", ")", ":", "parts", "=", "module_name", ".", "split", "(", "'.'", ")", "basepath", "=", "find_module", "(", "parts", "[", "0", "]", ")", "for", "submodname", "in", "parts", "[", "1", ":", "]", ":", "basepath", "=", "find_module", "(", "submodname", ",", "[", "basepath", "]", ")", "if", "(", "basepath", "and", "os", ".", "path", ".", "isdir", "(", "basepath", ")", ")", ":", "basepath", "=", "get_init", "(", "basepath", ")", "return", "basepath" ]
find module module_name on sys.path .
train
true
24,548
@pytest.fixture
def host_blocker_stub(stubs):
    """Fixture registering a fake host-blocker object for the test's duration."""
    fake = stubs.HostBlockerStub()
    objreg.register('host-blocker', fake)
    yield fake
    objreg.delete('host-blocker')
[ "@", "pytest", ".", "fixture", "def", "host_blocker_stub", "(", "stubs", ")", ":", "stub", "=", "stubs", ".", "HostBlockerStub", "(", ")", "objreg", ".", "register", "(", "'host-blocker'", ",", "stub", ")", "(", "yield", "stub", ")", "objreg", ".", "delete", "(", "'host-blocker'", ")" ]
fixture which provides a fake host blocker object .
train
false
24,549
def checkFile(filename):
    """Check that *filename* exists and is readable.

    Raises PocsuiteSystemException otherwise; returns None on success.
    """
    valid = filename is not None and os.path.isfile(filename)
    if valid:
        try:
            with open(filename, 'rb'):
                pass
        # Bug fix: was a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit; only I/O errors from open()
        # should mark the file unreadable.
        except (IOError, OSError):
            valid = False
    if not valid:
        raise PocsuiteSystemException("unable to read file '%s'" % filename)
[ "def", "checkFile", "(", "filename", ")", ":", "valid", "=", "True", "if", "(", "(", "filename", "is", "None", ")", "or", "(", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ")", ")", ":", "valid", "=", "False", "if", "valid", ":", "try", ":", "with", "open", "(", "filename", ",", "'rb'", ")", ":", "pass", "except", ":", "valid", "=", "False", "if", "(", "not", "valid", ")", ":", "raise", "PocsuiteSystemException", "(", "(", "\"unable to read file '%s'\"", "%", "filename", ")", ")" ]
checks for file existence and readability .
train
false
24,550
def get_trace_component_for_action_execution(action_execution_db, liveaction_db):
    """Build the trace-component dict for an ActionExecution.

    'caused_by' records either the parent execution (for chained/workflow
    actions) or the rule + trigger-instance pair that spawned this
    execution; it stays empty when neither applies.
    """
    if not action_execution_db:
        raise ValueError('action_execution_db expected.')

    component = {
        'id': str(action_execution_db.id),
        'ref': str(action_execution_db.action.get('ref', '')),
    }

    caused_by = {}
    parent_context = executions.get_parent_context(liveaction_db=liveaction_db)
    if liveaction_db and parent_context:
        caused_by['type'] = 'action_execution'
        caused_by['id'] = liveaction_db.context['parent'].get('execution_id', None)
    elif action_execution_db.rule and action_execution_db.trigger_instance:
        caused_by['type'] = 'rule'
        caused_by['id'] = '%s:%s' % (action_execution_db.rule['id'],
                                     action_execution_db.trigger_instance['id'])

    component['caused_by'] = caused_by
    return component
[ "def", "get_trace_component_for_action_execution", "(", "action_execution_db", ",", "liveaction_db", ")", ":", "if", "(", "not", "action_execution_db", ")", ":", "raise", "ValueError", "(", "'action_execution_db expected.'", ")", "trace_component", "=", "{", "'id'", ":", "str", "(", "action_execution_db", ".", "id", ")", ",", "'ref'", ":", "str", "(", "action_execution_db", ".", "action", ".", "get", "(", "'ref'", ",", "''", ")", ")", "}", "caused_by", "=", "{", "}", "parent_context", "=", "executions", ".", "get_parent_context", "(", "liveaction_db", "=", "liveaction_db", ")", "if", "(", "liveaction_db", "and", "parent_context", ")", ":", "caused_by", "[", "'type'", "]", "=", "'action_execution'", "caused_by", "[", "'id'", "]", "=", "liveaction_db", ".", "context", "[", "'parent'", "]", ".", "get", "(", "'execution_id'", ",", "None", ")", "elif", "(", "action_execution_db", ".", "rule", "and", "action_execution_db", ".", "trigger_instance", ")", ":", "caused_by", "[", "'type'", "]", "=", "'rule'", "caused_by", "[", "'id'", "]", "=", "(", "'%s:%s'", "%", "(", "action_execution_db", ".", "rule", "[", "'id'", "]", ",", "action_execution_db", ".", "trigger_instance", "[", "'id'", "]", ")", ")", "trace_component", "[", "'caused_by'", "]", "=", "caused_by", "return", "trace_component" ]
returns the trace_component compatible dict representation of an actionexecution .
train
false
24,551
def _auto_draw_if_interactive(fig, val):
    """Schedule an idle redraw of *fig* when interactive mode warrants it.

    Internal helper so auto-redrawing behaves correctly in the plain
    Python REPL; does nothing while the canvas is saving.
    """
    if not val:
        return
    if matplotlib.is_interactive() and not fig.canvas.is_saving():
        fig.canvas.draw_idle()
[ "def", "_auto_draw_if_interactive", "(", "fig", ",", "val", ")", ":", "if", "(", "val", "and", "matplotlib", ".", "is_interactive", "(", ")", "and", "(", "not", "fig", ".", "canvas", ".", "is_saving", "(", ")", ")", ")", ":", "fig", ".", "canvas", ".", "draw_idle", "(", ")" ]
this is an internal helper function for making sure that auto-redrawing works as intended in the plain python repl .
train
false
24,553
def get_suffixes():
    """Return (suffix, mode, type) triples describing what find_module() accepts.

    Order matches the classic imp behavior: C extensions, then Python
    source, then bytecode files.
    """
    triples = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
    triples += [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
    triples += [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
    return triples
[ "def", "get_suffixes", "(", ")", ":", "extensions", "=", "[", "(", "s", ",", "'rb'", ",", "C_EXTENSION", ")", "for", "s", "in", "machinery", ".", "EXTENSION_SUFFIXES", "]", "source", "=", "[", "(", "s", ",", "'r'", ",", "PY_SOURCE", ")", "for", "s", "in", "machinery", ".", "SOURCE_SUFFIXES", "]", "bytecode", "=", "[", "(", "s", ",", "'rb'", ",", "PY_COMPILED", ")", "for", "s", "in", "machinery", ".", "BYTECODE_SUFFIXES", "]", "return", "(", "(", "extensions", "+", "source", ")", "+", "bytecode", ")" ]
return a list that describes the files that find_module() looks for .
train
false
24,557
def commit():
    """Perform the commit on the shared connection and reset the dirty flag."""
    connection._commit()
    set_clean()
[ "def", "commit", "(", ")", ":", "connection", ".", "_commit", "(", ")", "set_clean", "(", ")" ]
does the commit itself and resets the dirty flag .
train
false
24,558
def make_refnode(builder, fromdocname, todocname, targetid, child, title=None): node = nodes.reference('', '', internal=True) if (fromdocname == todocname): node['refid'] = targetid else: node['refuri'] = ((builder.get_relative_uri(fromdocname, todocname) + '#') + targetid) if title: node['reftitle'] = title node.append(child) return node
[ "def", "make_refnode", "(", "builder", ",", "fromdocname", ",", "todocname", ",", "targetid", ",", "child", ",", "title", "=", "None", ")", ":", "node", "=", "nodes", ".", "reference", "(", "''", ",", "''", ",", "internal", "=", "True", ")", "if", "(", "fromdocname", "==", "todocname", ")", ":", "node", "[", "'refid'", "]", "=", "targetid", "else", ":", "node", "[", "'refuri'", "]", "=", "(", "(", "builder", ".", "get_relative_uri", "(", "fromdocname", ",", "todocname", ")", "+", "'#'", ")", "+", "targetid", ")", "if", "title", ":", "node", "[", "'reftitle'", "]", "=", "title", "node", ".", "append", "(", "child", ")", "return", "node" ]
shortcut to create a reference node .
train
false
24,559
def prepare(image, size=(224, 224)): if (not available): raise ImportError(('PIL cannot be loaded. Install Pillow!\nThe actual import error is as follows:\n' + str(_import_error))) if isinstance(image, numpy.ndarray): if (image.ndim == 3): if (image.shape[0] == 1): image = image[0, :, :] elif (image.shape[0] == 3): image = image.transpose((1, 2, 0)) image = Image.fromarray(image.astype(numpy.uint8)) image = image.convert('RGB') if size: image = image.resize(size) image = numpy.asarray(image, dtype=numpy.float32) image = image[:, :, ::(-1)] image -= numpy.array([103.939, 116.779, 123.68], dtype=numpy.float32) image = image.transpose((2, 0, 1)) return image
[ "def", "prepare", "(", "image", ",", "size", "=", "(", "224", ",", "224", ")", ")", ":", "if", "(", "not", "available", ")", ":", "raise", "ImportError", "(", "(", "'PIL cannot be loaded. Install Pillow!\\nThe actual import error is as follows:\\n'", "+", "str", "(", "_import_error", ")", ")", ")", "if", "isinstance", "(", "image", ",", "numpy", ".", "ndarray", ")", ":", "if", "(", "image", ".", "ndim", "==", "3", ")", ":", "if", "(", "image", ".", "shape", "[", "0", "]", "==", "1", ")", ":", "image", "=", "image", "[", "0", ",", ":", ",", ":", "]", "elif", "(", "image", ".", "shape", "[", "0", "]", "==", "3", ")", ":", "image", "=", "image", ".", "transpose", "(", "(", "1", ",", "2", ",", "0", ")", ")", "image", "=", "Image", ".", "fromarray", "(", "image", ".", "astype", "(", "numpy", ".", "uint8", ")", ")", "image", "=", "image", ".", "convert", "(", "'RGB'", ")", "if", "size", ":", "image", "=", "image", ".", "resize", "(", "size", ")", "image", "=", "numpy", ".", "asarray", "(", "image", ",", "dtype", "=", "numpy", ".", "float32", ")", "image", "=", "image", "[", ":", ",", ":", ",", ":", ":", "(", "-", "1", ")", "]", "image", "-=", "numpy", ".", "array", "(", "[", "103.939", ",", "116.779", ",", "123.68", "]", ",", "dtype", "=", "numpy", ".", "float32", ")", "image", "=", "image", ".", "transpose", "(", "(", "2", ",", "0", ",", "1", ")", ")", "return", "image" ]
try to get current process ready to unpickle process object .
train
false
24,560
def overlapsForUnrelatedAreas(n, w, radius, repetitions=100, verbose=False): return overlapsForRelativeAreas(n, w, np.array([0, 0]), radius, dPosition=np.array([0, (radius * 10)]), num=repetitions, verbose=verbose)
[ "def", "overlapsForUnrelatedAreas", "(", "n", ",", "w", ",", "radius", ",", "repetitions", "=", "100", ",", "verbose", "=", "False", ")", ":", "return", "overlapsForRelativeAreas", "(", "n", ",", "w", ",", "np", ".", "array", "(", "[", "0", ",", "0", "]", ")", ",", "radius", ",", "dPosition", "=", "np", ".", "array", "(", "[", "0", ",", "(", "radius", "*", "10", ")", "]", ")", ",", "num", "=", "repetitions", ",", "verbose", "=", "verbose", ")" ]
return overlaps between an encoding and other .
train
false
24,561
def get_gmond_format(val): tp = type(val).__name__ if (tp == 'int'): return ('uint', '%u') elif (tp == 'float'): return ('float', '%.4f') elif (tp == 'string'): return ('string', '%u') else: return ('string', '%u')
[ "def", "get_gmond_format", "(", "val", ")", ":", "tp", "=", "type", "(", "val", ")", ".", "__name__", "if", "(", "tp", "==", "'int'", ")", ":", "return", "(", "'uint'", ",", "'%u'", ")", "elif", "(", "tp", "==", "'float'", ")", ":", "return", "(", "'float'", ",", "'%.4f'", ")", "elif", "(", "tp", "==", "'string'", ")", ":", "return", "(", "'string'", ",", "'%u'", ")", "else", ":", "return", "(", "'string'", ",", "'%u'", ")" ]
return the formatting and value_type values to use with gmond .
train
false
24,562
def bump_product_signal_handler(sender, instance, **kwargs): bump_cache_for_product(instance)
[ "def", "bump_product_signal_handler", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "bump_cache_for_product", "(", "instance", ")" ]
signal handler for clearing product cache .
train
false
24,564
def ParseNolintSuppressions(filename, raw_line, linenum, error): matched = Search('\\bNOLINT(NEXTLINE)?\\b(\\([^)]+\\))?', raw_line) if matched: if matched.group(1): suppressed_line = (linenum + 1) else: suppressed_line = linenum category = matched.group(2) if (category in (None, '(*)')): _error_suppressions.setdefault(None, set()).add(suppressed_line) elif (category.startswith('(') and category.endswith(')')): category = category[1:(-1)] if (category in _ERROR_CATEGORIES): _error_suppressions.setdefault(category, set()).add(suppressed_line) elif (category not in _LEGACY_ERROR_CATEGORIES): error(filename, linenum, 'readability/nolint', 5, ('Unknown NOLINT error category: %s' % category))
[ "def", "ParseNolintSuppressions", "(", "filename", ",", "raw_line", ",", "linenum", ",", "error", ")", ":", "matched", "=", "Search", "(", "'\\\\bNOLINT(NEXTLINE)?\\\\b(\\\\([^)]+\\\\))?'", ",", "raw_line", ")", "if", "matched", ":", "if", "matched", ".", "group", "(", "1", ")", ":", "suppressed_line", "=", "(", "linenum", "+", "1", ")", "else", ":", "suppressed_line", "=", "linenum", "category", "=", "matched", ".", "group", "(", "2", ")", "if", "(", "category", "in", "(", "None", ",", "'(*)'", ")", ")", ":", "_error_suppressions", ".", "setdefault", "(", "None", ",", "set", "(", ")", ")", ".", "add", "(", "suppressed_line", ")", "elif", "(", "category", ".", "startswith", "(", "'('", ")", "and", "category", ".", "endswith", "(", "')'", ")", ")", ":", "category", "=", "category", "[", "1", ":", "(", "-", "1", ")", "]", "if", "(", "category", "in", "_ERROR_CATEGORIES", ")", ":", "_error_suppressions", ".", "setdefault", "(", "category", ",", "set", "(", ")", ")", ".", "add", "(", "suppressed_line", ")", "elif", "(", "category", "not", "in", "_LEGACY_ERROR_CATEGORIES", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/nolint'", ",", "5", ",", "(", "'Unknown NOLINT error category: %s'", "%", "category", ")", ")" ]
updates the global list of line error-suppressions .
train
true
24,567
def network_get_all_by_uuids(context, network_uuids, project_only='allow_none'): return IMPL.network_get_all_by_uuids(context, network_uuids, project_only=project_only)
[ "def", "network_get_all_by_uuids", "(", "context", ",", "network_uuids", ",", "project_only", "=", "'allow_none'", ")", ":", "return", "IMPL", ".", "network_get_all_by_uuids", "(", "context", ",", "network_uuids", ",", "project_only", "=", "project_only", ")" ]
return networks by ids .
train
false
24,568
def is_in_section_title(node): from sphinx.util.nodes import traverse_parent for ancestor in traverse_parent(node): if (isinstance(ancestor, nodes.title) and isinstance(ancestor.parent, nodes.section)): return True return False
[ "def", "is_in_section_title", "(", "node", ")", ":", "from", "sphinx", ".", "util", ".", "nodes", "import", "traverse_parent", "for", "ancestor", "in", "traverse_parent", "(", "node", ")", ":", "if", "(", "isinstance", "(", "ancestor", ",", "nodes", ".", "title", ")", "and", "isinstance", "(", "ancestor", ".", "parent", ",", "nodes", ".", "section", ")", ")", ":", "return", "True", "return", "False" ]
determine whether the node is in a section title .
train
false
24,569
def get_inner_html_from_xpath(xpath_node): html = etree.tostring(xpath_node).strip() inner_html = re.sub(('(?ms)<%s[^>]*>(.*)</%s>' % (xpath_node.tag, xpath_node.tag)), '\\1', html) return inner_html.strip()
[ "def", "get_inner_html_from_xpath", "(", "xpath_node", ")", ":", "html", "=", "etree", ".", "tostring", "(", "xpath_node", ")", ".", "strip", "(", ")", "inner_html", "=", "re", ".", "sub", "(", "(", "'(?ms)<%s[^>]*>(.*)</%s>'", "%", "(", "xpath_node", ".", "tag", ",", "xpath_node", ".", "tag", ")", ")", ",", "'\\\\1'", ",", "html", ")", "return", "inner_html", ".", "strip", "(", ")" ]
returns inner html as string from xpath node .
train
false
24,570
def not_found(*args, **kwargs): raise falcon.HTTPNotFound()
[ "def", "not_found", "(", "*", "args", ",", "**", "kwargs", ")", ":", "raise", "falcon", ".", "HTTPNotFound", "(", ")" ]
called if no url matches .
train
false
24,571
def getSheetThickness(elementNode): return getCascadeFloatWithoutSelf(3.0, elementNode, 'sheetThickness')
[ "def", "getSheetThickness", "(", "elementNode", ")", ":", "return", "getCascadeFloatWithoutSelf", "(", "3.0", ",", "elementNode", ",", "'sheetThickness'", ")" ]
get the sheet thickness .
train
false
24,572
def _orbits(degree, generators): seen = set() orbs = [] sorted_I = list(range(degree)) I = set(sorted_I) while I: i = sorted_I[0] orb = _orbit(degree, generators, i) orbs.append(orb) I -= orb sorted_I = [i for i in sorted_I if (i not in orb)] return orbs
[ "def", "_orbits", "(", "degree", ",", "generators", ")", ":", "seen", "=", "set", "(", ")", "orbs", "=", "[", "]", "sorted_I", "=", "list", "(", "range", "(", "degree", ")", ")", "I", "=", "set", "(", "sorted_I", ")", "while", "I", ":", "i", "=", "sorted_I", "[", "0", "]", "orb", "=", "_orbit", "(", "degree", ",", "generators", ",", "i", ")", "orbs", ".", "append", "(", "orb", ")", "I", "-=", "orb", "sorted_I", "=", "[", "i", "for", "i", "in", "sorted_I", "if", "(", "i", "not", "in", "orb", ")", "]", "return", "orbs" ]
compute the orbits of g .
train
false
24,573
def escape_silent(s): if (s is None): return Markup() return escape(s)
[ "def", "escape_silent", "(", "s", ")", ":", "if", "(", "s", "is", "None", ")", ":", "return", "Markup", "(", ")", "return", "escape", "(", "s", ")" ]
like :func:escape but converts none into an empty markup string .
train
false