id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
28,915
@task(base=BaseInstructorTask) def exec_summary_report_csv(entry_id, xmodule_instance_args): action_name = 'generating_exec_summary_report' task_fn = partial(upload_exec_summary_report, xmodule_instance_args) return run_main_task(entry_id, task_fn, action_name)
[ "@", "task", "(", "base", "=", "BaseInstructorTask", ")", "def", "exec_summary_report_csv", "(", "entry_id", ",", "xmodule_instance_args", ")", ":", "action_name", "=", "'generating_exec_summary_report'", "task_fn", "=", "partial", "(", "upload_exec_summary_report", ","...
compute executive summary report for a course and upload the html generated report to an s3 bucket for download .
train
false
28,916
def _bytes_to_unicode(value): result = (value.decode('utf-8') if isinstance(value, six.binary_type) else value) if isinstance(result, six.text_type): return result else: raise ValueError(('%r could not be converted to unicode' % (value,)))
[ "def", "_bytes_to_unicode", "(", "value", ")", ":", "result", "=", "(", "value", ".", "decode", "(", "'utf-8'", ")", "if", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", "else", "value", ")", "if", "isinstance", "(", "result", ",", "s...
converts bytes to a unicode value .
train
true
28,917
def _log10_lb(c, correction={'1': 100, '2': 70, '3': 53, '4': 40, '5': 31, '6': 23, '7': 16, '8': 10, '9': 5}): if (c <= 0): raise ValueError('The argument to _log10_lb should be nonnegative.') str_c = str(c) return ((100 * len(str_c)) - correction[str_c[0]])
[ "def", "_log10_lb", "(", "c", ",", "correction", "=", "{", "'1'", ":", "100", ",", "'2'", ":", "70", ",", "'3'", ":", "53", ",", "'4'", ":", "40", ",", "'5'", ":", "31", ",", "'6'", ":", "23", ",", "'7'", ":", "16", ",", "'8'", ":", "10", ...
compute a lower bound for 100*log10(c) for a positive integer c .
train
false
28,918
def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0): srctext = srctext.split() trgtext = trgtext.split() srclen = len(srctext) trglen = len(trgtext) f_aligned = [j for (_, j) in alignment] max_phrase_length = (max_phrase_length or max(srclen, trglen)) bp = set() for e_start in range(srclen): max_idx = min(srclen, (e_start + max_phrase_length)) for e_end in range(e_start, max_idx): (f_start, f_end) = ((trglen - 1), (-1)) for (e, f) in alignment: if (e_start <= e <= e_end): f_start = min(f, f_start) f_end = max(f, f_end) phrases = extract(f_start, f_end, e_start, e_end, alignment, f_aligned, srctext, trgtext, srclen, trglen, max_phrase_length) if phrases: bp.update(phrases) return bp
[ "def", "phrase_extraction", "(", "srctext", ",", "trgtext", ",", "alignment", ",", "max_phrase_length", "=", "0", ")", ":", "srctext", "=", "srctext", ".", "split", "(", ")", "trgtext", "=", "trgtext", ".", "split", "(", ")", "srclen", "=", "len", "(", ...
phrase extraction algorithm extracts all consistent phrase pairs from a word-aligned sentence pair .
train
false
28,919
def print_containers_per_group(inventory): required_list = ['groups', 'container_name'] table = prettytable.PrettyTable(required_list) for group_name in inventory.keys(): containers = get_containers_for_group(inventory, group_name) if ((containers is None) or (len(containers) < 1)): continue if ((len(containers) == 1) and ('_' not in containers[0])): continue row = [group_name, '\n'.join(containers)] table.add_row(row) for tbl in table.align.keys(): table.align[tbl] = 'l' return table
[ "def", "print_containers_per_group", "(", "inventory", ")", ":", "required_list", "=", "[", "'groups'", ",", "'container_name'", "]", "table", "=", "prettytable", ".", "PrettyTable", "(", "required_list", ")", "for", "group_name", "in", "inventory", ".", "keys", ...
return a table of groups and the containers in each group .
train
false
28,922
def languages(): print_available_languages()
[ "def", "languages", "(", ")", ":", "print_available_languages", "(", ")" ]
returns a list of language codes for the given region code .
train
false
28,923
def zmq_pub_mon(endpoints, counter): ctx = zmq.Context.instance() poller = zmq.Poller() for (name, uri) in endpoints.iteritems(): logging.info('Registering %s (%s).', name, uri) sock = ctx.socket(zmq.SUB) sock.setsockopt(zmq.IDENTITY, name) sock.connect(uri) sock.setsockopt(zmq.SUBSCRIBE, '') poller.register(sock, zmq.POLLIN) while 1: try: for (socket, _) in poller.poll(): socket.recv(zmq.NOBLOCK) name = socket.getsockopt(zmq.IDENTITY) counter[name] += 1 except zmq.ZMQError as e: if (e.errno == errno.EINTR): continue raise
[ "def", "zmq_pub_mon", "(", "endpoints", ",", "counter", ")", ":", "ctx", "=", "zmq", ".", "Context", ".", "instance", "(", ")", "poller", "=", "zmq", ".", "Poller", "(", ")", "for", "(", "name", ",", "uri", ")", "in", "endpoints", ".", "iteritems", ...
measure throughput of zeromq publishers .
train
false
28,925
def dlimport_workdir(basedir): return tempfile.mkdtemp(dir=basedir)
[ "def", "dlimport_workdir", "(", "basedir", ")", ":", "return", "tempfile", ".", "mkdtemp", "(", "dir", "=", "basedir", ")" ]
return a directory where you should put your .
train
false
28,926
def get_xem_numbering_for_show(indexer_id, indexer): if (indexer_id is None): return {} indexer_id = int(indexer_id) indexer = int(indexer) xem_refresh(indexer_id, indexer) result = {} for dbData in [x[u'doc'] for x in sickrage.srCore.mainDB.db.get_many(u'tv_episodes', indexer_id, with_doc=True)]: season = int((dbData[u'season'] or 0)) episode = int((dbData[u'episode'] or 0)) scene_season = int((dbData[u'scene_season'] or 0)) scene_episode = int((dbData[u'scene_episode'] or 0)) if ((int(dbData[u'indexer']) != indexer) or ((scene_season or scene_episode) == 0)): continue result[(season, episode)] = (scene_season, scene_episode) return result
[ "def", "get_xem_numbering_for_show", "(", "indexer_id", ",", "indexer", ")", ":", "if", "(", "indexer_id", "is", "None", ")", ":", "return", "{", "}", "indexer_id", "=", "int", "(", "indexer_id", ")", "indexer", "=", "int", "(", "indexer", ")", "xem_refres...
returns a dict of : mappings for an entire show .
train
false
28,927
def fill_queue(queue_fill, any_list): for elem in any_list: queue_fill.put(elem)
[ "def", "fill_queue", "(", "queue_fill", ",", "any_list", ")", ":", "for", "elem", "in", "any_list", ":", "queue_fill", ".", "put", "(", "elem", ")" ]
takes element from a list and populates a queue with those elements .
train
false
28,928
def bisect_right(a, x, lo=0, hi=None): if (lo < 0): raise ValueError('lo must be non-negative') if (hi is None): hi = len(a) while (lo < hi): mid = ((lo + hi) // 2) if (x < a[mid]): hi = mid else: lo = (mid + 1) return lo
[ "def", "bisect_right", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "(", "lo", "<", "0", ")", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "(", "hi", "is", "None", ")", ":", "hi", ...
return the index where to insert item x in list a .
train
true
28,929
def quaternion_conjugate(quaternion): q = numpy.array(quaternion, dtype=numpy.float64, copy=True) numpy.negative(q[1:], q[1:]) return q
[ "def", "quaternion_conjugate", "(", "quaternion", ")", ":", "q", "=", "numpy", ".", "array", "(", "quaternion", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "True", ")", "numpy", ".", "negative", "(", "q", "[", "1", ":", "]", ",", ...
return conjugate of quaternion .
train
true
28,930
def set_master(service, conn_type, private='y', unpriv='y', chroot='y', wakeup='n', maxproc='100', command='', write_conf=True, path=MASTER_CF): (conf_dict, conf_list) = _parse_master(path) new_conf = [] dict_key = '{0} {1}'.format(service, conn_type) new_line = _format_master(service, conn_type, private, unpriv, chroot, wakeup, maxproc, command) for line in conf_list: if isinstance(line, dict): if ((line['service'] == service) and (line['conn_type'] == conn_type)): new_conf.append(new_line) else: new_conf.append(_format_master(**line)) else: new_conf.append(line) if (dict_key not in conf_dict): new_conf.append(new_line) if write_conf: _write_conf(new_conf, path) return '\n'.join(new_conf)
[ "def", "set_master", "(", "service", ",", "conn_type", ",", "private", "=", "'y'", ",", "unpriv", "=", "'y'", ",", "chroot", "=", "'y'", ",", "wakeup", "=", "'n'", ",", "maxproc", "=", "'100'", ",", "command", "=", "''", ",", "write_conf", "=", "True...
set a single config value in the master .
train
true
28,931
def _called_in_methods(func, klass, methods): if (not isinstance(func, astroid.Function)): return False for method in methods: try: infered = klass.getattr(method) except astroid.NotFoundError: continue for infer_method in infered: for callfunc in infer_method.nodes_of_class(astroid.CallFunc): try: bound = next(callfunc.func.infer()) except (astroid.InferenceError, StopIteration): continue if (not isinstance(bound, astroid.BoundMethod)): continue func_obj = bound._proxied if isinstance(func_obj, astroid.UnboundMethod): func_obj = func_obj._proxied if (func_obj.name == func.name): return True return False
[ "def", "_called_in_methods", "(", "func", ",", "klass", ",", "methods", ")", ":", "if", "(", "not", "isinstance", "(", "func", ",", "astroid", ".", "Function", ")", ")", ":", "return", "False", "for", "method", "in", "methods", ":", "try", ":", "infere...
check if the func was called in any of the given methods .
train
true
28,933
def new_unfollow(self, user_id, user_name): url_unfollow = (self.url_unfollow % user_id) try: unfollow = self.s.post(url_unfollow) if (unfollow.status_code == 200): self.unfollow_counter += 1 log_string = ('Unfollow: %s #%i.' % (user_name, self.unfollow_counter)) self.write_log(log_string) return unfollow except: self.write_log('Exept on unfollow!') return False
[ "def", "new_unfollow", "(", "self", ",", "user_id", ",", "user_name", ")", ":", "url_unfollow", "=", "(", "self", ".", "url_unfollow", "%", "user_id", ")", "try", ":", "unfollow", "=", "self", ".", "s", ".", "post", "(", "url_unfollow", ")", "if", "(",...
send http request to unfollow .
train
false
28,934
def _create_atom_id(resource_path, authority_name=None, date_string=None): if (authority_name is None): authority_name = config.get('ckan.feeds.authority_name', '').strip() if (not authority_name): site_url = config.get('ckan.site_url', '').strip() authority_name = urlparse.urlparse(site_url).netloc if (not authority_name): log.warning('No authority_name available for feed generation. Generated feed will be invalid.') if (date_string is None): date_string = config.get('ckan.feeds.date', '') if (not date_string): log.warning('No date_string available for feed generation. Please set the "ckan.feeds.date" config value.') site_url = config.get('ckan.site_url', '') return '/'.join([site_url, resource_path]) tagging_entity = ','.join([authority_name, date_string]) return ':'.join(['tag', tagging_entity, resource_path])
[ "def", "_create_atom_id", "(", "resource_path", ",", "authority_name", "=", "None", ",", "date_string", "=", "None", ")", ":", "if", "(", "authority_name", "is", "None", ")", ":", "authority_name", "=", "config", ".", "get", "(", "'ckan.feeds.authority_name'", ...
helper method that creates an atom id for a feed or entry .
train
false
28,936
def test_estimate_rank(): data = np.eye(10) assert_array_equal(estimate_rank(data, return_singular=True)[1], np.ones(10)) data[(0, 0)] = 0 assert_equal(estimate_rank(data), 9) assert_raises(ValueError, estimate_rank, data, 'foo')
[ "def", "test_estimate_rank", "(", ")", ":", "data", "=", "np", ".", "eye", "(", "10", ")", "assert_array_equal", "(", "estimate_rank", "(", "data", ",", "return_singular", "=", "True", ")", "[", "1", "]", ",", "np", ".", "ones", "(", "10", ")", ")", ...
test rank estimation .
train
false
28,937
def parse_soap_enveloped_saml_thingy(text, expected_tags): envelope = ElementTree.fromstring(text) assert (envelope.tag == ('{%s}Envelope' % soapenv.NAMESPACE)) assert (len(envelope) >= 1) body = None for part in envelope: if (part.tag == ('{%s}Body' % soapenv.NAMESPACE)): assert (len(part) == 1) body = part break if (body is None): return '' saml_part = body[0] if (saml_part.tag in expected_tags): return ElementTree.tostring(saml_part, encoding='UTF-8') else: raise WrongMessageType(("Was '%s' expected one of %s" % (saml_part.tag, expected_tags)))
[ "def", "parse_soap_enveloped_saml_thingy", "(", "text", ",", "expected_tags", ")", ":", "envelope", "=", "ElementTree", ".", "fromstring", "(", "text", ")", "assert", "(", "envelope", ".", "tag", "==", "(", "'{%s}Envelope'", "%", "soapenv", ".", "NAMESPACE", "...
parses a soap enveloped saml thing and returns the thing as a string .
train
true
28,938
def sublist_swap(lst, a, b, m): for i in xrange(m): (lst[(a + i)], lst[(b + i)]) = (lst[(b + i)], lst[(a + i)])
[ "def", "sublist_swap", "(", "lst", ",", "a", ",", "b", ",", "m", ")", ":", "for", "i", "in", "xrange", "(", "m", ")", ":", "(", "lst", "[", "(", "a", "+", "i", ")", "]", ",", "lst", "[", "(", "b", "+", "i", ")", "]", ")", "=", "(", "l...
swaps the elements: lst[a:a+m) with lst[b:b+m) without using extra space .
train
false
28,940
def trigger(registry, xml_parent, data): tconfig = XML.SubElement(xml_parent, 'hudson.tasks.BuildTrigger') childProjects = XML.SubElement(tconfig, 'childProjects') childProjects.text = data['project'] tthreshold = XML.SubElement(tconfig, 'threshold') threshold = data.get('threshold', 'SUCCESS') supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE'] if (threshold not in supported_thresholds): raise JenkinsJobsException(('threshold must be one of %s' % ', '.join(supported_thresholds))) tname = XML.SubElement(tthreshold, 'name') tname.text = hudson_model.THRESHOLDS[threshold]['name'] tordinal = XML.SubElement(tthreshold, 'ordinal') tordinal.text = hudson_model.THRESHOLDS[threshold]['ordinal'] tcolor = XML.SubElement(tthreshold, 'color') tcolor.text = hudson_model.THRESHOLDS[threshold]['color']
[ "def", "trigger", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "tconfig", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.tasks.BuildTrigger'", ")", "childProjects", "=", "XML", ".", "SubElement", "(", "tconfig", ",", "'childProje...
trigger a maker ifttt recipe .
train
false
28,941
def lookupNull(name, timeout=None): return getResolver().lookupNull(name, timeout)
[ "def", "lookupNull", "(", "name", ",", "timeout", "=", "None", ")", ":", "return", "getResolver", "(", ")", ".", "lookupNull", "(", "name", ",", "timeout", ")" ]
perform a null record lookup .
train
false
28,944
def strip_raw_ansi(string, parser=ANSI_PARSER): return parser.strip_raw_codes(string)
[ "def", "strip_raw_ansi", "(", "string", ",", "parser", "=", "ANSI_PARSER", ")", ":", "return", "parser", ".", "strip_raw_codes", "(", "string", ")" ]
remove raw ansi codes from string .
train
false
28,945
def setup_bridge(bridge, add_devices_callback): lights = {} @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS) def update_lights(): 'Update the lights objects with latest info from bridge.' bridge.update_all_light_status() new_lights = [] for (light_id, light) in bridge.lights().items(): if (light_id not in lights): osram_light = OsramLightifyLight(light_id, light, update_lights) lights[light_id] = osram_light new_lights.append(osram_light) else: lights[light_id].light = light if new_lights: add_devices_callback(new_lights) update_lights()
[ "def", "setup_bridge", "(", "bridge", ",", "add_devices_callback", ")", ":", "lights", "=", "{", "}", "@", "util", ".", "Throttle", "(", "MIN_TIME_BETWEEN_SCANS", ",", "MIN_TIME_BETWEEN_FORCED_SCANS", ")", "def", "update_lights", "(", ")", ":", "bridge", ".", ...
setup a wemo link .
train
false
28,946
def _strip_listing_to_done(output_list): return [line for line in output_list if _safe_output(line)]
[ "def", "_strip_listing_to_done", "(", "output_list", ")", ":", "return", "[", "line", "for", "line", "in", "output_list", "if", "_safe_output", "(", "line", ")", "]" ]
conditionally remove non-relevant first and last line .
train
false
28,948
def normal_order(expr, recursive_limit=10, _recursive_depth=0): if (_recursive_depth > recursive_limit): warnings.warn('Too many recursions, aborting') return expr if isinstance(expr, Add): return _normal_order_terms(expr, recursive_limit=recursive_limit, _recursive_depth=_recursive_depth) elif isinstance(expr, Mul): return _normal_order_factor(expr, recursive_limit=recursive_limit, _recursive_depth=_recursive_depth) else: return expr
[ "def", "normal_order", "(", "expr", ",", "recursive_limit", "=", "10", ",", "_recursive_depth", "=", "0", ")", ":", "if", "(", "_recursive_depth", ">", "recursive_limit", ")", ":", "warnings", ".", "warn", "(", "'Too many recursions, aborting'", ")", "return", ...
normal order an expression with bosonic or fermionic operators .
train
false
28,949
def patch_os(): patch_module('os')
[ "def", "patch_os", "(", ")", ":", "patch_module", "(", "'os'", ")" ]
replace :func:os .
train
false
28,950
def print_results(distributions, list_all_files): results_printed = False for dist in distributions: results_printed = True logger.info('---') logger.info('Metadata-Version: %s', dist.get('metadata-version')) logger.info('Name: %s', dist['name']) logger.info('Version: %s', dist['version']) logger.info('Summary: %s', dist.get('summary')) logger.info('Home-page: %s', dist.get('home-page')) logger.info('Author: %s', dist.get('author')) logger.info('Author-email: %s', dist.get('author-email')) if (dist['installer'] is not None): logger.info('Installer: %s', dist['installer']) logger.info('License: %s', dist.get('license')) logger.info('Location: %s', dist['location']) logger.info('Requires: %s', ', '.join(dist['requires'])) logger.info('Classifiers:') for classifier in dist['classifiers']: logger.info(' %s', classifier) if list_all_files: logger.info('Files:') if ('files' in dist): for line in dist['files']: logger.info(' %s', line.strip()) else: logger.info('Cannot locate installed-files.txt') if ('entry_points' in dist): logger.info('Entry-points:') for line in dist['entry_points']: logger.info(' %s', line.strip()) return results_printed
[ "def", "print_results", "(", "distributions", ",", "list_all_files", ")", ":", "results_printed", "=", "False", "for", "dist", "in", "distributions", ":", "results_printed", "=", "True", "logger", ".", "info", "(", "'---'", ")", "logger", ".", "info", "(", "...
print the query results by requesting a page at a time .
train
true
28,952
def assert_warnings(fn, warning_msgs, regex=False): with assertions._expect_warnings(sa_exc.SAWarning, warning_msgs, regex=regex): return fn()
[ "def", "assert_warnings", "(", "fn", ",", "warning_msgs", ",", "regex", "=", "False", ")", ":", "with", "assertions", ".", "_expect_warnings", "(", "sa_exc", ".", "SAWarning", ",", "warning_msgs", ",", "regex", "=", "regex", ")", ":", "return", "fn", "(", ...
assert that each of the given warnings are emitted by fn .
train
false
28,953
def has_permission(permission, context, request): return request.has_permission(permission, context)
[ "def", "has_permission", "(", "permission", ",", "context", ",", "request", ")", ":", "return", "request", ".", "has_permission", "(", "permission", ",", "context", ")" ]
check if the object has a permission args: obj_name : the name of or path to the object .
train
false
28,954
def cgsnapshot_creating_from_src(): return IMPL.cgsnapshot_creating_from_src()
[ "def", "cgsnapshot_creating_from_src", "(", ")", ":", "return", "IMPL", ".", "cgsnapshot_creating_from_src", "(", ")" ]
get a filter that checks if a cgsnapshot is being created from a cg .
train
false
28,956
def touched(dst): import warnings warnings.warn('macostools.touched() has been deprecated', DeprecationWarning, 2)
[ "def", "touched", "(", "dst", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "'macostools.touched() has been deprecated'", ",", "DeprecationWarning", ",", "2", ")" ]
tell the finder a file has changed .
train
false
28,957
def _llvm_jit_code(args, expr, signature, callback_type): if (callback_type is None): jit = LLVMJitCode(signature) else: jit = LLVMJitCodeCallback(signature) jit._create_args(args) jit._create_function_base() jit._create_param_dict(args) strmod = jit._create_function(expr) if False: print('LLVM IR') print(strmod) fptr = jit._compile_function(strmod) return fptr
[ "def", "_llvm_jit_code", "(", "args", ",", "expr", ",", "signature", ",", "callback_type", ")", ":", "if", "(", "callback_type", "is", "None", ")", ":", "jit", "=", "LLVMJitCode", "(", "signature", ")", "else", ":", "jit", "=", "LLVMJitCodeCallback", "(", ...
create a native code function from a sympy expression .
train
false
28,958
def _write_incron_lines(user, lines): if (user == 'system'): ret = {} ret['retcode'] = _write_file(_INCRON_SYSTEM_TAB, 'salt', ''.join(lines)) return ret else: path = salt.utils.files.mkstemp() with salt.utils.fopen(path, 'w+') as fp_: fp_.writelines(lines) if ((__grains__['os_family'] == 'Solaris') and (user != 'root')): __salt__['cmd.run']('chown {0} {1}'.format(user, path), python_shell=False) ret = __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False) os.remove(path) return ret
[ "def", "_write_incron_lines", "(", "user", ",", "lines", ")", ":", "if", "(", "user", "==", "'system'", ")", ":", "ret", "=", "{", "}", "ret", "[", "'retcode'", "]", "=", "_write_file", "(", "_INCRON_SYSTEM_TAB", ",", "'salt'", ",", "''", ".", "join", ...
takes a list of lines to be committed to a users incrontab and writes it .
train
true
28,961
def reset(): _runtime.reset()
[ "def", "reset", "(", ")", ":", "_runtime", ".", "reset", "(", ")" ]
to reset a vm using its name cli example: .
train
false
28,963
def unicode_argv(): args = [] for arg in sys.argv: if isinstance(arg, bytes): arg = arg.decode(sys.getfilesystemencoding()) args.append(arg) return args
[ "def", "unicode_argv", "(", ")", ":", "args", "=", "[", "]", "for", "arg", "in", "sys", ".", "argv", ":", "if", "isinstance", "(", "arg", ",", "bytes", ")", ":", "arg", "=", "arg", ".", "decode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ...
like sys .
train
false
28,964
def mesh_dist(tris, vert): edges = mesh_edges(tris).tocoo() dist = np.sqrt(np.sum(((vert[edges.row, :] - vert[edges.col, :]) ** 2), axis=1)) dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape) return dist_matrix
[ "def", "mesh_dist", "(", "tris", ",", "vert", ")", ":", "edges", "=", "mesh_edges", "(", "tris", ")", ".", "tocoo", "(", ")", "dist", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "(", "vert", "[", "edges", ".", "row", ",", ":", "]...
compute adjacency matrix weighted by distances .
train
false
28,965
def update_limits(limits_dict): limits = get_limits() limits.update(limits_dict) update_site_config(u'limits', limits, validate=False) disable_users(limits) frappe.local.conf.limits = limits
[ "def", "update_limits", "(", "limits_dict", ")", ":", "limits", "=", "get_limits", "(", ")", "limits", ".", "update", "(", "limits_dict", ")", "update_site_config", "(", "u'limits'", ",", "limits", ",", "validate", "=", "False", ")", "disable_users", "(", "l...
add/update limit in site_config .
train
false
28,966
def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend): _lib = backend._lib _ffi = backend._ffi group = _lib.EC_KEY_get0_group(ec_key_cdata) with backend._tmp_bn_ctx() as bn_ctx: order = _lib.BN_CTX_get(bn_ctx) assert (order != _ffi.NULL) res = _lib.EC_GROUP_get_order(group, order, bn_ctx) assert (res == 1) order_bits = _lib.BN_num_bits(order) return _truncate_digest(digest, order_bits)
[ "def", "_truncate_digest_for_ecdsa", "(", "ec_key_cdata", ",", "digest", ",", "backend", ")", ":", "_lib", "=", "backend", ".", "_lib", "_ffi", "=", "backend", ".", "_ffi", "group", "=", "_lib", ".", "EC_KEY_get0_group", "(", "ec_key_cdata", ")", "with", "ba...
this function truncates digests that are longer than a given elliptic curve keys length so they can be signed .
train
false
28,967
def course_grading_policy(course_key): course = _retrieve_course(course_key) return GradingPolicySerializer(course.raw_grader, many=True).data
[ "def", "course_grading_policy", "(", "course_key", ")", ":", "course", "=", "_retrieve_course", "(", "course_key", ")", "return", "GradingPolicySerializer", "(", "course", ".", "raw_grader", ",", "many", "=", "True", ")", ".", "data" ]
retrieves the course grading policy .
train
false
28,968
def coordinator_setup(): url = cfg.CONF.coordination.url lock_timeout = cfg.CONF.coordination.lock_timeout proc_info = system_info.get_process_info() member_id = ('%s_%d' % (proc_info['hostname'], proc_info['pid'])) if url: coordinator = coordination.get_coordinator(url, member_id, lock_timeout=lock_timeout) else: coordinator = NoOpDriver() coordinator.start() return coordinator
[ "def", "coordinator_setup", "(", ")", ":", "url", "=", "cfg", ".", "CONF", ".", "coordination", ".", "url", "lock_timeout", "=", "cfg", ".", "CONF", ".", "coordination", ".", "lock_timeout", "proc_info", "=", "system_info", ".", "get_process_info", "(", ")",...
sets up the client for the coordination service .
train
false
28,969
def display_results(image_paths, probs): with open('imagenet-classes.txt', 'rb') as infile: class_labels = map(str.strip, infile.readlines()) class_indices = np.argmax(probs, axis=1) print '\n{:20} {:30} {}'.format('Image', 'Classified As', 'Confidence') print ('-' * 70) for (img_idx, image_path) in enumerate(image_paths): img_name = osp.basename(image_path) class_name = class_labels[class_indices[img_idx]] confidence = round((probs[(img_idx, class_indices[img_idx])] * 100), 2) print '{:20} {:30} {} %'.format(img_name, class_name, confidence)
[ "def", "display_results", "(", "image_paths", ",", "probs", ")", ":", "with", "open", "(", "'imagenet-classes.txt'", ",", "'rb'", ")", "as", "infile", ":", "class_labels", "=", "map", "(", "str", ".", "strip", ",", "infile", ".", "readlines", "(", ")", "...
displays the classification results given the class probability for each image .
train
false
28,970
def filename_to_list(filename): if isinstance(filename, (str, bytes)): return [filename] elif isinstance(filename, list): return filename elif is_container(filename): return [x for x in filename] else: return None
[ "def", "filename_to_list", "(", "filename", ")", ":", "if", "isinstance", "(", "filename", ",", "(", "str", ",", "bytes", ")", ")", ":", "return", "[", "filename", "]", "elif", "isinstance", "(", "filename", ",", "list", ")", ":", "return", "filename", ...
returns a list given either a string or a list .
train
false
28,971
def addSpacedPortionDirection(portionDirection, spacedPortionDirections): lastSpacedPortionDirection = spacedPortionDirections[(-1)] if ((portionDirection.portion - lastSpacedPortionDirection.portion) > 0.003): spacedPortionDirections.append(portionDirection) return if (portionDirection.directionReversed > lastSpacedPortionDirection.directionReversed): spacedPortionDirections.append(portionDirection)
[ "def", "addSpacedPortionDirection", "(", "portionDirection", ",", "spacedPortionDirections", ")", ":", "lastSpacedPortionDirection", "=", "spacedPortionDirections", "[", "(", "-", "1", ")", "]", "if", "(", "(", "portionDirection", ".", "portion", "-", "lastSpacedPorti...
add spaced portion directions .
train
false
28,974
def test_table_filter(): def all_positive(table, key_colnames): colnames = [name for name in table.colnames if (name not in key_colnames)] for colname in colnames: if np.any((table[colname] < 0)): return False return True t = Table.read([' a c d', ' -2 7.0 0', ' -2 5.0 1', ' 0 0.0 4', ' 1 3.0 5', ' 1 2.0 -6', ' 1 1.0 7', ' 3 3.0 5', ' 3 -2.0 6', ' 3 1.0 7'], format='ascii') tg = t.group_by('a') t2 = tg.groups.filter(all_positive) assert (t2.groups[0].pformat() == [' a c d ', '--- --- ---', ' -2 7.0 0', ' -2 5.0 1']) assert (t2.groups[1].pformat() == [' a c d ', '--- --- ---', ' 0 0.0 4'])
[ "def", "test_table_filter", "(", ")", ":", "def", "all_positive", "(", "table", ",", "key_colnames", ")", ":", "colnames", "=", "[", "name", "for", "name", "in", "table", ".", "colnames", "if", "(", "name", "not", "in", "key_colnames", ")", "]", "for", ...
table groups filtering .
train
false
28,975
def src(filename): if (filename is None): return filename if (sys.platform.startswith('java') and filename.endswith('$py.class')): return '.'.join((filename[:(-9)], 'py')) (base, ext) = os.path.splitext(filename) if (ext in ('.pyc', '.pyo', '.py')): return '.'.join((base, 'py')) return filename
[ "def", "src", "(", "filename", ")", ":", "if", "(", "filename", "is", "None", ")", ":", "return", "filename", "if", "(", "sys", ".", "platform", ".", "startswith", "(", "'java'", ")", "and", "filename", ".", "endswith", "(", "'$py.class'", ")", ")", ...
find the python source file for a .
train
true
28,977
def getunpackers(): path = __path__ prefix = (__name__ + '.') unpackers = [] interface = ['unpack', 'detect', 'PRIORITY'] for (_importer, modname, _ispkg) in pkgutil.iter_modules(path, prefix): if (('tests' not in modname) and (modname not in BLACKLIST)): try: module = __import__(modname, fromlist=interface) except ImportError: raise UnpackingError(('Bad unpacker: %s' % modname)) else: unpackers.append(module) return sorted(unpackers, key=(lambda mod: mod.PRIORITY))
[ "def", "getunpackers", "(", ")", ":", "path", "=", "__path__", "prefix", "=", "(", "__name__", "+", "'.'", ")", "unpackers", "=", "[", "]", "interface", "=", "[", "'unpack'", ",", "'detect'", ",", "'PRIORITY'", "]", "for", "(", "_importer", ",", "modna...
scans the unpackers dir .
train
false
28,978
def getAreaVector3LoopAbsolute(loop): return getAreaLoopAbsolute(getComplexPath(loop))
[ "def", "getAreaVector3LoopAbsolute", "(", "loop", ")", ":", "return", "getAreaLoopAbsolute", "(", "getComplexPath", "(", "loop", ")", ")" ]
get the absolute area of a vector3 polygon .
train
false
28,979
@csrf_exempt @add_p3p_header def lti_launch(request, course_id, usage_id): if (not settings.FEATURES['ENABLE_LTI_PROVIDER']): return HttpResponseForbidden() params = get_required_parameters(request.POST) if (not params): return HttpResponseBadRequest() params.update(get_optional_parameters(request.POST)) try: lti_consumer = LtiConsumer.get_or_supplement(params.get('tool_consumer_instance_guid', None), params['oauth_consumer_key']) except LtiConsumer.DoesNotExist: return HttpResponseForbidden() if (not SignatureValidator(lti_consumer).verify(request)): return HttpResponseForbidden() try: (course_key, usage_key) = parse_course_and_usage_keys(course_id, usage_id) except InvalidKeyError: log.error('Invalid course key %s or usage key %s from request %s', course_id, usage_id, request) raise Http404() params['course_key'] = course_key params['usage_key'] = usage_key authenticate_lti_user(request, params['user_id'], lti_consumer) store_outcome_parameters(params, request.user, lti_consumer) return render_courseware(request, params['usage_key'])
[ "@", "csrf_exempt", "@", "add_p3p_header", "def", "lti_launch", "(", "request", ",", "course_id", ",", "usage_id", ")", ":", "if", "(", "not", "settings", ".", "FEATURES", "[", "'ENABLE_LTI_PROVIDER'", "]", ")", ":", "return", "HttpResponseForbidden", "(", ")"...
endpoint for all requests to embed edx content via the lti protocol .
train
false
28,980
@command(usage='export task download urls as aria2 format') @command_line_parser() @with_parser(parse_login) def export_aria2(args): print export_aria2_conf(args)
[ "@", "command", "(", "usage", "=", "'export task download urls as aria2 format'", ")", "@", "command_line_parser", "(", ")", "@", "with_parser", "(", "parse_login", ")", "def", "export_aria2", "(", "args", ")", ":", "print", "export_aria2_conf", "(", "args", ")" ]
usage: lx export-aria2 [id|name] .
train
false
28,981
def create_nodegraph(args, ksize=None, multiplier=1.0, fp_rate=0.01): args = _check_fp_rate(args, fp_rate) if hasattr(args, u'force'): if (args.n_tables > 20): if (not args.force): print_error(u'\n** ERROR: khmer only supports number of tables <= 20.\n') sys.exit(1) else: log_warn(u'\n*** Warning: Maximum recommended number of tables is 20, discarded by force nonetheless!\n') if (ksize is None): ksize = args.ksize if (ksize > 32): print_error(u'\n** ERROR: khmer only supports k-mer sizes <= 32.\n') sys.exit(1) tablesize = calculate_graphsize(args, u'nodegraph', multiplier) return khmer.Nodegraph(ksize, tablesize, args.n_tables)
[ "def", "create_nodegraph", "(", "args", ",", "ksize", "=", "None", ",", "multiplier", "=", "1.0", ",", "fp_rate", "=", "0.01", ")", ":", "args", "=", "_check_fp_rate", "(", "args", ",", "fp_rate", ")", "if", "hasattr", "(", "args", ",", "u'force'", ")"...
create and return a nodegraph .
train
false
28,983
def exists_or_mkdir(path, verbose=True): if (not os.path.exists(path)): if verbose: print ('[!] Create %s ...' % path) os.makedirs(path) return False else: if verbose: print ('[*] %s exists ...' % path) return True
[ "def", "exists_or_mkdir", "(", "path", ",", "verbose", "=", "True", ")", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ")", ":", "if", "verbose", ":", "print", "(", "'[!] Create %s ...'", "%", "path", ")", "os", ".", "m...
check a directory .
train
false
28,987
def parse_directive_value(name, value, relaxed_bool=False): type = directive_types.get(name) if (not type): return None orig_value = value if (type is bool): value = str(value) if (value == 'True'): return True if (value == 'False'): return False if relaxed_bool: value = value.lower() if (value in ('true', 'yes')): return True elif (value in ('false', 'no')): return False raise ValueError(("%s directive must be set to True or False, got '%s'" % (name, orig_value))) elif (type is int): try: return int(value) except ValueError: raise ValueError(("%s directive must be set to an integer, got '%s'" % (name, orig_value))) elif (type is str): return str(value) elif callable(type): return type(name, value) else: assert False
[ "def", "parse_directive_value", "(", "name", ",", "value", ",", "relaxed_bool", "=", "False", ")", ":", "type", "=", "directive_types", ".", "get", "(", "name", ")", "if", "(", "not", "type", ")", ":", "return", "None", "orig_value", "=", "value", "if", ...
parses value as an option value for the given name and returns the interpreted value .
train
false
28,988
def _httpResponseToMessage(response, server_url): response_message = Message.fromKVForm(response.body) if (response.status == 400): raise ServerError.fromMessage(response_message) elif (response.status not in (200, 206)): fmt = 'bad status code from server %s: %s' error_message = (fmt % (server_url, response.status)) raise fetchers.HTTPFetchingError(error_message) return response_message
[ "def", "_httpResponseToMessage", "(", "response", ",", "server_url", ")", ":", "response_message", "=", "Message", ".", "fromKVForm", "(", "response", ".", "body", ")", "if", "(", "response", ".", "status", "==", "400", ")", ":", "raise", "ServerError", ".",...
adapt a post response to a message .
train
true
28,990
def test_unicode_labels_python2(Chart): if (sys.version_info[0] == 3): return chart = Chart() chart.add(u('S\xc3\xa9rie1'), [{'value': 1, 'xlink': 'http://1/', 'label': eval("u'{\\}\xc3\x82\xc2\xb0\xc4\xb3\xc3\xa6\xc3\xb0\xc2\xa9&\xc3\x97&<\xe2\x80\x94\xc3\x97\xe2\x82\xac\xc2\xbf_\xe2\x80\xa6\\{_\xe2\x80\xa6'")}, {'value': 2, 'xlink': {'href': 'http://6.example.com/'}, 'label': eval("u'\xc3\xa6\xc3\x82\xc2\xb0\xe2\x82\xac\xe2\x89\xa0|\xe2\x82\xac\xc3\xa6\xc3\x82\xc2\xb0\xe2\x82\xac\xc9\x99\xc3\xa6'")}, {'value': 3, 'label': eval("'unicode <3'")}]) if (not chart._dual): chart.x_labels = eval("[u'&\xc5\x93', u'\xc2\xbf?', u'\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0', 'unicode <3']") chart.render_pyquery()
[ "def", "test_unicode_labels_python2", "(", "Chart", ")", ":", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", ":", "return", "chart", "=", "Chart", "(", ")", "chart", ".", "add", "(", "u", "(", "'S\\xc3\\xa9rie1'", ")", ",", "["...
test unicode labels in python 2 .
train
false
28,991
def test_compiler_check_cache(): cp = compilerop.CachingCompiler() cp.cache('x=1', 99) linecache.checkcache() for k in linecache.cache: if k.startswith('<ipython-input-99'): break else: raise AssertionError('Entry for input-99 missing from linecache')
[ "def", "test_compiler_check_cache", "(", ")", ":", "cp", "=", "compilerop", ".", "CachingCompiler", "(", ")", "cp", ".", "cache", "(", "'x=1'", ",", "99", ")", "linecache", ".", "checkcache", "(", ")", "for", "k", "in", "linecache", ".", "cache", ":", ...
test the compiler properly manages the cache .
train
false
28,994
def switch_version(path, version=None, use_sudo=False, user=None): cmd = ['bzr', 'update'] if version: cmd.extend(['-r', version]) cmd.append(path) cmd = ' '.join(cmd) _run(cmd, use_sudo=use_sudo, user=user)
[ "def", "switch_version", "(", "path", ",", "version", "=", "None", ",", "use_sudo", "=", "False", ",", "user", "=", "None", ")", ":", "cmd", "=", "[", "'bzr'", ",", "'update'", "]", "if", "version", ":", "cmd", ".", "extend", "(", "[", "'-r'", ",",...
switch working tree to specified revno/revid .
train
false
28,995
def _check_nfft(n, n_fft, n_overlap): n_fft = (n if (n_fft > n) else n_fft) n_overlap = ((n_fft - 1) if (n_overlap >= n_fft) else n_overlap) return (n_fft, n_overlap)
[ "def", "_check_nfft", "(", "n", ",", "n_fft", ",", "n_overlap", ")", ":", "n_fft", "=", "(", "n", "if", "(", "n_fft", ">", "n", ")", "else", "n_fft", ")", "n_overlap", "=", "(", "(", "n_fft", "-", "1", ")", "if", "(", "n_overlap", ">=", "n_fft", ...
helper to make sure n_fft and n_overlap make sense .
train
false
28,997
def relative_days(current_wday, wday, dir): if (current_wday == wday): return (7 * dir) if (dir == 1): return (((wday + 7) - current_wday) % 7) else: return ((((current_wday + 7) - wday) % 7) * (-1))
[ "def", "relative_days", "(", "current_wday", ",", "wday", ",", "dir", ")", ":", "if", "(", "current_wday", "==", "wday", ")", ":", "return", "(", "7", "*", "dir", ")", "if", "(", "dir", "==", "1", ")", ":", "return", "(", "(", "(", "wday", "+", ...
returns the number of days to the "next" or "last" of a certain weekday .
train
false
28,998
@with_open_mode('w') @with_sizes('large') def write_large_chunks(f, source): for i in xrange(0, len(source), 1000000): f.write(source[i:(i + 1000000)])
[ "@", "with_open_mode", "(", "'w'", ")", "@", "with_sizes", "(", "'large'", ")", "def", "write_large_chunks", "(", "f", ",", "source", ")", ":", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "source", ")", ",", "1000000", ")", ":", "f", "."...
write 1e6 units at a time .
train
false
28,999
def csv_format(data): csv = [] for d in data: if (d in [None, False]): csv.append(u'') elif (type(d) not in (str, unicode)): csv.append(u'{}'.format(d)) elif (u',' in d): csv.append(u'"{}"'.format(d)) else: csv.append(d) return u','.join(csv)
[ "def", "csv_format", "(", "data", ")", ":", "csv", "=", "[", "]", "for", "d", "in", "data", ":", "if", "(", "d", "in", "[", "None", ",", "False", "]", ")", ":", "csv", ".", "append", "(", "u''", ")", "elif", "(", "type", "(", "d", ")", "not...
encapsulate any data which contains a comma within double quotes .
train
false
29,001
def connect_to_cloudfiles(region=None, public=None): if (public is None): is_public = (not bool(get_setting('use_servicenet'))) else: is_public = public ret = _create_client(ep_name='object_store', region=region, public=is_public) if ret: region = _safe_region(region) ret.cdn_management_url = _get_service_endpoint(None, 'object_cdn', region, public=is_public) return ret
[ "def", "connect_to_cloudfiles", "(", "region", "=", "None", ",", "public", "=", "None", ")", ":", "if", "(", "public", "is", "None", ")", ":", "is_public", "=", "(", "not", "bool", "(", "get_setting", "(", "'use_servicenet'", ")", ")", ")", "else", ":"...
creates a client for working with cloudfiles/swift .
train
true
29,002
def EpochTime(date): if isinstance(date, datetime.datetime): td = (date - BASE_DATE) else: td = (date - BASE_DATE.date()) milliseconds_since_epoch = long(((td.microseconds + ((td.seconds + ((td.days * 24) * 3600)) * (10 ** 6))) / (10 ** 3))) return milliseconds_since_epoch
[ "def", "EpochTime", "(", "date", ")", ":", "if", "isinstance", "(", "date", ",", "datetime", ".", "datetime", ")", ":", "td", "=", "(", "date", "-", "BASE_DATE", ")", "else", ":", "td", "=", "(", "date", "-", "BASE_DATE", ".", "date", "(", ")", "...
returns millisecond epoch time for a date or datetime .
train
false
29,003
def find_le(a, x): try: return a[bisect_right(a, x)] except IndexError: return a[(-1)]
[ "def", "find_le", "(", "a", ",", "x", ")", ":", "try", ":", "return", "a", "[", "bisect_right", "(", "a", ",", "x", ")", "]", "except", "IndexError", ":", "return", "a", "[", "(", "-", "1", ")", "]" ]
find rightmost value in a less than or equal to x .
train
false
29,004
@njit def _repeat_1d(x, K, out): N = x.shape[0] L = (out.shape[0] // (K * N)) for n in range(N): val = x[n] for k in range(K): for l in range(L): ind = ((((k * N) * L) + (n * L)) + l) out[ind] = val
[ "@", "njit", "def", "_repeat_1d", "(", "x", ",", "K", ",", "out", ")", ":", "N", "=", "x", ".", "shape", "[", "0", "]", "L", "=", "(", "out", ".", "shape", "[", "0", "]", "//", "(", "K", "*", "N", ")", ")", "for", "n", "in", "range", "(...
repeats each element of a vector many times and repeats the whole result many times parameters x: vector to be repeated k: number of times each element of x is repeated out: placeholder for the result returns none .
train
true
29,005
@conf.commands.register def nmap_fp(target, oport=80, cport=81): sigs = nmap_sig(target, oport, cport) return nmap_search(sigs)
[ "@", "conf", ".", "commands", ".", "register", "def", "nmap_fp", "(", "target", ",", "oport", "=", "80", ",", "cport", "=", "81", ")", ":", "sigs", "=", "nmap_sig", "(", "target", ",", "oport", ",", "cport", ")", "return", "nmap_search", "(", "sigs",...
nmap fingerprinting nmap_fp -> list of best guesses with accuracy .
train
false
29,007
def interpret(marker, execution_context=None): return Evaluator(execution_context).evaluate(marker.strip())
[ "def", "interpret", "(", "marker", ",", "execution_context", "=", "None", ")", ":", "return", "Evaluator", "(", "execution_context", ")", ".", "evaluate", "(", "marker", ".", "strip", "(", ")", ")" ]
interpret a marker and return a result depending on environment .
train
false
29,008
def create_monitor(hostname, username, password, monitor_type, name, **kwargs): bigip_session = _build_session(username, password) payload = {} payload['name'] = name for (key, value) in six.iteritems(kwargs): if (not key.startswith('__')): if (key not in ['hostname', 'username', 'password', 'type']): key = key.replace('_', '-') payload[key] = value try: response = bigip_session.post((BIG_IP_URL_BASE.format(host=hostname) + '/ltm/monitor/{type}'.format(type=monitor_type)), data=json.dumps(payload)) except requests.exceptions.ConnectionError as e: return _load_connection_error(hostname, e) return _load_response(response)
[ "def", "create_monitor", "(", "hostname", ",", "username", ",", "password", ",", "monitor_type", ",", "name", ",", "**", "kwargs", ")", ":", "bigip_session", "=", "_build_session", "(", "username", ",", "password", ")", "payload", "=", "{", "}", "payload", ...
a function to connect to a bigip device and create a monitor .
train
false
29,009
def getLoopWithoutCloseSequentialPoints(close, loop): if (len(loop) < 2): return loop lastPoint = loop[(-1)] loopWithoutCloseSequentialPoints = [] for point in loop: if (abs((point - lastPoint)) > close): loopWithoutCloseSequentialPoints.append(point) lastPoint = point return loopWithoutCloseSequentialPoints
[ "def", "getLoopWithoutCloseSequentialPoints", "(", "close", ",", "loop", ")", ":", "if", "(", "len", "(", "loop", ")", "<", "2", ")", ":", "return", "loop", "lastPoint", "=", "loop", "[", "(", "-", "1", ")", "]", "loopWithoutCloseSequentialPoints", "=", ...
get loop without close sequential points .
train
false
29,010
def get_team(state): redirect = state[u'website'].redirect request = state[u'request'] user = state[u'user'] slug = request.line.uri.path[u'team'] qs = request.line.uri.querystring from gratipay.models.team import Team team = Team.from_slug(slug) if (team is None): from gratipay.models.participant import Participant participant = Participant.from_username(slug) if (participant is not None): qs = ((u'?' + request.qs.raw) if request.qs.raw else u'') redirect(((u'/~' + request.path.raw[1:]) + qs)) raise Response(404) canonicalize(redirect, request.line.uri.path.raw, u'/', team.slug, slug, qs) if (team.is_closed and (not user.ADMIN)): raise Response(410) return team
[ "def", "get_team", "(", "state", ")", ":", "redirect", "=", "state", "[", "u'website'", "]", ".", "redirect", "request", "=", "state", "[", "u'request'", "]", "user", "=", "state", "[", "u'user'", "]", "slug", "=", "request", ".", "line", ".", "uri", ...
returns the team that the commentable_id belongs to if it exists .
train
false
29,012
def is_module_patched(modname): return (modname in saved)
[ "def", "is_module_patched", "(", "modname", ")", ":", "return", "(", "modname", "in", "saved", ")" ]
check if a module has been replaced with a cooperative version .
train
false
29,013
def geometric_mean(image, selem, out=None, mask=None, shift_x=False, shift_y=False): return _apply_scalar_per_pixel(generic_cy._geometric_mean, image, selem, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
[ "def", "geometric_mean", "(", "image", ",", "selem", ",", "out", "=", "None", ",", "mask", "=", "None", ",", "shift_x", "=", "False", ",", "shift_y", "=", "False", ")", ":", "return", "_apply_scalar_per_pixel", "(", "generic_cy", ".", "_geometric_mean", ",...
return local geometric mean of an image .
train
false
29,014
def urlquote(val): if (val is None): return '' if (not isinstance(val, unicode)): val = str(val) else: val = val.encode('utf-8') return urllib.quote(val)
[ "def", "urlquote", "(", "val", ")", ":", "if", "(", "val", "is", "None", ")", ":", "return", "''", "if", "(", "not", "isinstance", "(", "val", ",", "unicode", ")", ")", ":", "val", "=", "str", "(", "val", ")", "else", ":", "val", "=", "val", ...
quotes a string for use in a url .
train
true
29,016
def db_list(user=None, password=None, host=None, port=None): client = _client(user=user, password=password, host=host, port=port) return client.get_list_database()
[ "def", "db_list", "(", "user", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "client", "=", "_client", "(", "user", "=", "user", ",", "password", "=", "password", ",", "host", "=", "host", ...
return the databse list created on a ms sql server .
train
true
29,017
def load_modules_from_path(path): if (path[(-1):] != '/'): path += '/' if (not os.path.exists(path)): raise OSError(('Directory does not exist: %s' % path)) sys.path.append(path) for f in os.listdir(path): if ((len(f) > 3) and (f[(-3):] == '.py')): modname = f[:(-3)] __import__(modname, globals(), locals(), ['*'])
[ "def", "load_modules_from_path", "(", "path", ")", ":", "if", "(", "path", "[", "(", "-", "1", ")", ":", "]", "!=", "'/'", ")", ":", "path", "+=", "'/'", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ")", ":", "raise", ...
import all modules from the given directory .
train
true
29,018
def _api_key_patchops(op, pvlist): return [{'op': op, 'path': p, 'value': v} for (p, v) in pvlist]
[ "def", "_api_key_patchops", "(", "op", ",", "pvlist", ")", ":", "return", "[", "{", "'op'", ":", "op", ",", "'path'", ":", "p", ",", "'value'", ":", "v", "}", "for", "(", "p", ",", "v", ")", "in", "pvlist", "]" ]
helper function to return patchoperations object .
train
false
29,020
def gen_hdf(fn, lshape, N, mean=None, autoencode=False, multidimout=False): (C, H, W) = lshape dat = np.arange((((N * C) * H) * W)).reshape((N, ((C * H) * W))) h5f = h5py.File(fn, 'w') inp = h5f.create_dataset('input', dat.shape) inp[:] = dat.copy() inp.attrs['lshape'] = (C, H, W) if (mean is not None): mean_ = h5f.create_dataset('mean', mean.shape) mean_[:] = mean if (not autoencode): if multidimout: out = h5f.create_dataset('output', dat.shape) out[:] = dat[:, ::(-1)].copy() else: out = h5f.create_dataset('output', (N, 1), dtype='i8') out[:, 0] = np.arange(N) out.attrs['nclass'] = N h5f.close() return
[ "def", "gen_hdf", "(", "fn", ",", "lshape", ",", "N", ",", "mean", "=", "None", ",", "autoencode", "=", "False", ",", "multidimout", "=", "False", ")", ":", "(", "C", ",", "H", ",", "W", ")", "=", "lshape", "dat", "=", "np", ".", "arange", "(",...
generates the hdf file with the data for testing arguments: fn : filename lshape : shape of the input data n : number of data points mean : mean values .
train
false
29,021
def draw_trees(*trees): TreeView(*trees).mainloop() return
[ "def", "draw_trees", "(", "*", "trees", ")", ":", "TreeView", "(", "*", "trees", ")", ".", "mainloop", "(", ")", "return" ]
open a new window containing a graphical diagram of the given trees .
train
false
29,023
def _initializeTkVariantTests(root): global _tk_type if (sys.platform == 'darwin'): ws = root.tk.call('tk', 'windowingsystem') if ('x11' in ws): _tk_type = 'xquartz' elif ('aqua' not in ws): _tk_type = 'other' elif ('AppKit' in root.tk.call('winfo', 'server', '.')): _tk_type = 'cocoa' else: _tk_type = 'carbon' else: _tk_type = 'other'
[ "def", "_initializeTkVariantTests", "(", "root", ")", ":", "global", "_tk_type", "if", "(", "sys", ".", "platform", "==", "'darwin'", ")", ":", "ws", "=", "root", ".", "tk", ".", "call", "(", "'tk'", ",", "'windowingsystem'", ")", "if", "(", "'x11'", "...
initializes os x tk variant values for isaquatk() .
train
false
29,026
@db_api.retry_if_session_inactive() def is_object_blocked(context, object_id, object_type): standard_attr_id = _get_standard_attr_id(context, object_id, object_type) if (not standard_attr_id): return False return bool(context.session.query(pb_model.ProvisioningBlock).filter_by(standard_attr_id=standard_attr_id).count())
[ "@", "db_api", ".", "retry_if_session_inactive", "(", ")", "def", "is_object_blocked", "(", "context", ",", "object_id", ",", "object_type", ")", ":", "standard_attr_id", "=", "_get_standard_attr_id", "(", "context", ",", "object_id", ",", "object_type", ")", "if"...
return boolean indicating if object has a provisioning block .
train
false
29,027
def validate_draft_recipients(draft): if (not any((draft.to_addr, draft.bcc_addr, draft.cc_addr))): raise InputError('No recipients specified') for field in (draft.to_addr, draft.bcc_addr, draft.cc_addr): if (field is not None): for (_, email_address) in field: parsed = address.parse(email_address, addr_spec_only=True) if (not isinstance(parsed, address.EmailAddress)): raise InputError(u'Invalid recipient address {}'.format(email_address))
[ "def", "validate_draft_recipients", "(", "draft", ")", ":", "if", "(", "not", "any", "(", "(", "draft", ".", "to_addr", ",", "draft", ".", "bcc_addr", ",", "draft", ".", "cc_addr", ")", ")", ")", ":", "raise", "InputError", "(", "'No recipients specified'"...
check that a draft has at least one recipient .
train
false
29,028
def amax(a, axis=None, out=None, keepdims=False, dtype=None): return a.max(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
[ "def", "amax", "(", "a", ",", "axis", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ",", "dtype", "=", "None", ")", ":", "return", "a", ".", "max", "(", "axis", "=", "axis", ",", "dtype", "=", "dtype", ",", "out", "=", ...
returns the maximum of an array or the maximum along an axis .
train
false
29,029
def swapcase(s): return s.swapcase()
[ "def", "swapcase", "(", "s", ")", ":", "return", "s", ".", "swapcase", "(", ")" ]
swapcase(s) -> string return a copy of the string s with upper case characters converted to lowercase and vice versa .
train
false
29,030
def IndentLevel(by=1): global _Level if ((_Level + by) < 0): raise Error, 'indentation underflow (internal error)' _Level = (_Level + by)
[ "def", "IndentLevel", "(", "by", "=", "1", ")", ":", "global", "_Level", "if", "(", "(", "_Level", "+", "by", ")", "<", "0", ")", ":", "raise", "Error", ",", "'indentation underflow (internal error)'", "_Level", "=", "(", "_Level", "+", "by", ")" ]
increment the indentation level by one .
train
false
29,031
def get_restart_mode(restart_file): if os.path.exists(restart_file): flag = open(restart_file, 'r').read() return (flag == 'True') return False
[ "def", "get_restart_mode", "(", "restart_file", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "restart_file", ")", ":", "flag", "=", "open", "(", "restart_file", ",", "'r'", ")", ".", "read", "(", ")", "return", "(", "flag", "==", "'True'", ...
parse the server/portal restart status .
train
false
29,032
def log_query_and_profile_data_for_queryset(queryset): if (not ENABLE_PROFILING): return queryset if (not isinstance(queryset, QuerySet)): return queryset query = getattr(queryset, '_query', None) mongo_query = getattr(queryset, '_mongo_query', query) ordering = getattr(queryset, '_ordering', None) limit = getattr(queryset, '_limit', None) collection = getattr(queryset, '_collection', None) collection_name = getattr(collection, 'name', None) cloned_queryset = queryset.clone() explain_info = cloned_queryset.explain(format=True) if ((mongo_query is not None) and (collection_name is not None)): mongo_shell_query = construct_mongo_shell_query(mongo_query=mongo_query, collection_name=collection_name, ordering=ordering, limit=limit) extra = {'mongo_query': mongo_query, 'mongo_shell_query': mongo_shell_query} LOG.debug(('MongoDB query: %s' % mongo_shell_query), extra=extra) LOG.debug(('MongoDB explain data: %s' % explain_info)) return queryset
[ "def", "log_query_and_profile_data_for_queryset", "(", "queryset", ")", ":", "if", "(", "not", "ENABLE_PROFILING", ")", ":", "return", "queryset", "if", "(", "not", "isinstance", "(", "queryset", ",", "QuerySet", ")", ")", ":", "return", "queryset", "query", "...
function which logs mongodb query and profile data for the provided mongoengine queryset object .
train
false
29,033
def mkchannel(ctype=Channel, wtype=ChannelWriter, rtype=ChannelReader): c = ctype() wc = wtype(c) rc = rtype(c) return (wc, rc)
[ "def", "mkchannel", "(", "ctype", "=", "Channel", ",", "wtype", "=", "ChannelWriter", ",", "rtype", "=", "ChannelReader", ")", ":", "c", "=", "ctype", "(", ")", "wc", "=", "wtype", "(", "c", ")", "rc", "=", "rtype", "(", "c", ")", "return", "(", ...
create a channel .
train
false
29,034
def _make_regex(pem_type): return re.compile('\\s*(?P<pem_header>-----BEGIN {0}-----)\\s+(?:(?P<proc_type>Proc-Type: 4,ENCRYPTED)\\s*)?(?:(?P<dek_info>DEK-Info: (?:DES-[3A-Z\\-]+,[0-9A-F]{{16}}|[0-9A-Z\\-]+,[0-9A-F]{{32}}))\\s*)?(?P<pem_body>.+?)\\s+(?P<pem_footer>-----END {1}-----)\\s*'.format(pem_type, pem_type), re.DOTALL)
[ "def", "_make_regex", "(", "pem_type", ")", ":", "return", "re", ".", "compile", "(", "'\\\\s*(?P<pem_header>-----BEGIN {0}-----)\\\\s+(?:(?P<proc_type>Proc-Type: 4,ENCRYPTED)\\\\s*)?(?:(?P<dek_info>DEK-Info: (?:DES-[3A-Z\\\\-]+,[0-9A-F]{{16}}|[0-9A-Z\\\\-]+,[0-9A-F]{{32}}))\\\\s*)?(?P<pem_bod...
dynamically generate a regex to match pem_type .
train
false
29,036
def to_commit(obj): if (obj.type == 'tag'): obj = deref_tag(obj) if (obj.type != 'commit'): raise ValueError(('Cannot convert object %r to type commit' % obj)) return obj
[ "def", "to_commit", "(", "obj", ")", ":", "if", "(", "obj", ".", "type", "==", "'tag'", ")", ":", "obj", "=", "deref_tag", "(", "obj", ")", "if", "(", "obj", ".", "type", "!=", "'commit'", ")", ":", "raise", "ValueError", "(", "(", "'Cannot convert...
convert the given object to a commit if possible and return it .
train
true
29,037
def _between_impl(expr, op, cleft, cright, **kw): return BinaryExpression(expr, ClauseList(_check_literal(expr, operators.and_, cleft), _check_literal(expr, operators.and_, cright), operator=operators.and_, group=False, group_contents=False), op, negate=(operators.notbetween_op if (op is operators.between_op) else operators.between_op), modifiers=kw)
[ "def", "_between_impl", "(", "expr", ",", "op", ",", "cleft", ",", "cright", ",", "**", "kw", ")", ":", "return", "BinaryExpression", "(", "expr", ",", "ClauseList", "(", "_check_literal", "(", "expr", ",", "operators", ".", "and_", ",", "cleft", ")", ...
see :meth: .
train
false
29,038
def sort_dict(od, d): if isinstance(od, dict): ret = Struct() for k in od.keys(): v = d.get(k) if (v is not None): if isinstance(v, dict): v = sort_dict(od[k], v) elif isinstance(v, list): v = [sort_dict(od[k][0], v1) for v1 in v] ret[k] = v if hasattr(od, u'namespaces'): ret.namespaces.update(od.namespaces) ret.references.update(od.references) ret.qualified = od.qualified return ret else: return d
[ "def", "sort_dict", "(", "od", ",", "d", ")", ":", "if", "isinstance", "(", "od", ",", "dict", ")", ":", "ret", "=", "Struct", "(", ")", "for", "k", "in", "od", ".", "keys", "(", ")", ":", "v", "=", "d", ".", "get", "(", "k", ")", "if", "...
sort parameters .
train
false
29,039
def bigrams(sequence, **kwargs): for item in ngrams(sequence, 2, **kwargs): (yield item)
[ "def", "bigrams", "(", "sequence", ",", "**", "kwargs", ")", ":", "for", "item", "in", "ngrams", "(", "sequence", ",", "2", ",", "**", "kwargs", ")", ":", "(", "yield", "item", ")" ]
return the bigrams generated from a sequence of items .
train
false
29,040
def matchkeyword(colitem, keywordexpr): mapped_names = set() import pytest for item in colitem.listchain(): if (not isinstance(item, pytest.Instance)): mapped_names.add(item.name) for name in colitem.listextrakeywords(): mapped_names.add(name) if hasattr(colitem, 'function'): for name in colitem.function.__dict__: mapped_names.add(name) mapping = KeywordMapping(mapped_names) if (' ' not in keywordexpr): return mapping[keywordexpr] elif (keywordexpr.startswith('not ') and (' ' not in keywordexpr[4:])): return (not mapping[keywordexpr[4:]]) return eval(keywordexpr, {}, mapping)
[ "def", "matchkeyword", "(", "colitem", ",", "keywordexpr", ")", ":", "mapped_names", "=", "set", "(", ")", "import", "pytest", "for", "item", "in", "colitem", ".", "listchain", "(", ")", ":", "if", "(", "not", "isinstance", "(", "item", ",", "pytest", ...
tries to match given keyword expression to given collector item .
train
false
29,041
def inject_rename_contenttypes_operations(plan=None, apps=global_apps, using=DEFAULT_DB_ALIAS, **kwargs): if (plan is None): return try: ContentType = apps.get_model('contenttypes', 'ContentType') except LookupError: available = False else: if (not router.allow_migrate_model(using, ContentType)): return available = True for (migration, backward) in plan: if ((migration.app_label, migration.name) == ('contenttypes', '0001_initial')): if backward: break else: available = True continue if (not available): continue inserts = [] for (index, operation) in enumerate(migration.operations): if isinstance(operation, migrations.RenameModel): operation = RenameContentType(migration.app_label, operation.old_name_lower, operation.new_name_lower) inserts.append(((index + 1), operation)) for (inserted, (index, operation)) in enumerate(inserts): migration.operations.insert((inserted + index), operation)
[ "def", "inject_rename_contenttypes_operations", "(", "plan", "=", "None", ",", "apps", "=", "global_apps", ",", "using", "=", "DEFAULT_DB_ALIAS", ",", "**", "kwargs", ")", ":", "if", "(", "plan", "is", "None", ")", ":", "return", "try", ":", "ContentType", ...
insert a renamecontenttype operation after every planned renamemodel operation .
train
false
29,042
@app.route('/delete', methods=('DELETE',)) def view_delete(): return jsonify(get_dict('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
[ "@", "app", ".", "route", "(", "'/delete'", ",", "methods", "=", "(", "'DELETE'", ",", ")", ")", "def", "view_delete", "(", ")", ":", "return", "jsonify", "(", "get_dict", "(", "'url'", ",", "'args'", ",", "'form'", ",", "'data'", ",", "'origin'", ",...
returns delete data .
train
false
29,043
def make_help_cmd(cmd, docstring): def help_cmd(message=docstring, cmd=cmd): print ('=' * 15) print ('\nHelp for command %s:\n' % (cmd,)) print message.strip() print '' print ('=' * 15) print '' return help_cmd
[ "def", "make_help_cmd", "(", "cmd", ",", "docstring", ")", ":", "def", "help_cmd", "(", "message", "=", "docstring", ",", "cmd", "=", "cmd", ")", ":", "print", "(", "'='", "*", "15", ")", "print", "(", "'\\nHelp for command %s:\\n'", "%", "(", "cmd", "...
dynamically define a twill shell help function for the given command/docstring .
train
false
29,044
def docker_accessible(): try: client = DockerClient() client._client.ping() except Exception as e: return str(e) return None
[ "def", "docker_accessible", "(", ")", ":", "try", ":", "client", "=", "DockerClient", "(", ")", "client", ".", "_client", ".", "ping", "(", ")", "except", "Exception", "as", "e", ":", "return", "str", "(", "e", ")", "return", "None" ]
attempt to connect to the docker control socket .
train
false
29,045
def _request_deferred(request): request_callback = request.callback request_errback = request.errback def _restore_callbacks(result): request.callback = request_callback request.errback = request_errback return result d = defer.Deferred() d.addBoth(_restore_callbacks) if request.callback: d.addCallbacks(request.callback, request.errback) (request.callback, request.errback) = (d.callback, d.errback) return d
[ "def", "_request_deferred", "(", "request", ")", ":", "request_callback", "=", "request", ".", "callback", "request_errback", "=", "request", ".", "errback", "def", "_restore_callbacks", "(", "result", ")", ":", "request", ".", "callback", "=", "request_callback",...
wrap a request inside a deferred .
train
false
29,046
def variation(a, axis=0): (a, axis) = _chk_asarray(a, axis) return (a.std(axis) / a.mean(axis))
[ "def", "variation", "(", "a", ",", "axis", "=", "0", ")", ":", "(", "a", ",", "axis", ")", "=", "_chk_asarray", "(", "a", ",", "axis", ")", "return", "(", "a", ".", "std", "(", "axis", ")", "/", "a", ".", "mean", "(", "axis", ")", ")" ]
computes the coefficient of variation .
train
false
29,047
def reset_server(): from evennia.server.sessionhandler import SESSIONS logger.log_info(' Initial setup complete. Restarting Server once.') SESSIONS.server.shutdown(mode='reset')
[ "def", "reset_server", "(", ")", ":", "from", "evennia", ".", "server", ".", "sessionhandler", "import", "SESSIONS", "logger", ".", "log_info", "(", "' Initial setup complete. Restarting Server once.'", ")", "SESSIONS", ".", "server", ".", "shutdown", "(", "mode", ...
we end the initialization by resetting the server .
train
false
29,049
def _paramsFileTail(): str = "\n}\n\nmod = importBaseDescription('base.py', config)\nlocals().update(mod.__dict__)\n" return str
[ "def", "_paramsFileTail", "(", ")", ":", "str", "=", "\"\\n}\\n\\nmod = importBaseDescription('base.py', config)\\nlocals().update(mod.__dict__)\\n\"", "return", "str" ]
this is the tail of every params file we generate .
train
false