Columns:
    id_within_dataset       int64     values 1 to 55.5k
    snippet                 string    lengths 19 to 14.2k
    tokens                  list      lengths 6 to 1.63k
    nl                      string    lengths 6 to 352
    split_within_dataset    string    1 value
    is_duplicated           bool      2 classes
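For concreteness, below is a minimal sketch of how records with this schema could be loaded and inspected. The use of the Hugging Face `datasets` library and the dataset path are assumptions for illustration only; the listing itself does not say how it was produced.

    from datasets import load_dataset

    # Hypothetical load; substitute the real dataset path for "path/to/dataset".
    ds = load_dataset("path/to/dataset", split="train")
    for row in ds.select(range(3)):
        # Each record pairs a raw code snippet with its token list and an
        # NL summary, plus split and duplication flags.
        print(row["id_within_dataset"], row["nl"])
        print(row["snippet"][:80])
        print(row["tokens"][:10])
        print(row["split_within_dataset"], row["is_duplicated"])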
11,673
def dodecahedral_graph(create_using=None):
    G = LCF_graph(20, [10, 7, 4, (-4), (-7), 10, (-4), 7, (-7), 4], 2, create_using)
    G.name = 'Dodecahedral Graph'
    return G
[ "def", "dodecahedral_graph", "(", "create_using", "=", "None", ")", ":", "G", "=", "LCF_graph", "(", "20", ",", "[", "10", ",", "7", ",", "4", ",", "(", "-", "4", ")", ",", "(", "-", "7", ")", ",", "10", ",", "(", "-", "4", ")", ",", "7", ",", "(", "-", "7", ")", ",", "4", "]", ",", "2", ",", "create_using", ")", "G", ".", "name", "=", "'Dodecahedral Graph'", "return", "G" ]
return the platonic dodecahedral graph .
train
false
11,674
def rescue(device, start, end):
    _validate_device(device)
    _validate_partition_boundary(start)
    _validate_partition_boundary(end)
    cmd = 'parted -m -s {0} rescue {1} {2}'.format(device, start, end)
    out = __salt__['cmd.run'](cmd).splitlines()
    return out
[ "def", "rescue", "(", "device", ",", "start", ",", "end", ")", ":", "_validate_device", "(", "device", ")", "_validate_partition_boundary", "(", "start", ")", "_validate_partition_boundary", "(", "end", ")", "cmd", "=", "'parted -m -s {0} rescue {1} {2}'", ".", "format", "(", "device", ",", "start", ",", "end", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "return", "out" ]
rescue a lost partition that was located somewhere between start and end .
train
true
11,675
def get_srpath():
    name = get_site()
    action = None
    if (c.render_style in ('mobile', 'compact')):
        action = c.render_style
    else:
        action = request.environ['pylons.routes_dict'].get('action')
    if (not action):
        return name
    return '-'.join((name, action))
[ "def", "get_srpath", "(", ")", ":", "name", "=", "get_site", "(", ")", "action", "=", "None", "if", "(", "c", ".", "render_style", "in", "(", "'mobile'", ",", "'compact'", ")", ")", ":", "action", "=", "c", ".", "render_style", "else", ":", "action", "=", "request", ".", "environ", "[", "'pylons.routes_dict'", "]", ".", "get", "(", "'action'", ")", "if", "(", "not", "action", ")", ":", "return", "name", "return", "'-'", ".", "join", "(", "(", "name", ",", "action", ")", ")" ]
return the srpath of the current request .
train
false
11,676
def _del_repo_from_file(alias, filepath):
    with open(filepath) as fhandle:
        output = []
        regex = re.compile(REPO_REGEXP)
        for line in fhandle:
            if regex.search(line):
                if line.startswith('#'):
                    line = line[1:]
                cols = line.strip().split()
                if (alias != cols[1]):
                    output.append(line)
    with open(filepath, 'w') as fhandle:
        fhandle.writelines(output)
[ "def", "_del_repo_from_file", "(", "alias", ",", "filepath", ")", ":", "with", "open", "(", "filepath", ")", "as", "fhandle", ":", "output", "=", "[", "]", "regex", "=", "re", ".", "compile", "(", "REPO_REGEXP", ")", "for", "line", "in", "fhandle", ":", "if", "regex", ".", "search", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "line", "=", "line", "[", "1", ":", "]", "cols", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "(", "alias", "!=", "cols", "[", "1", "]", ")", ":", "output", ".", "append", "(", "line", ")", "with", "open", "(", "filepath", ",", "'w'", ")", "as", "fhandle", ":", "fhandle", ".", "writelines", "(", "output", ")" ]
remove a repo from filepath .
train
false
11,677
def urlify(s, maxlen=80, keep_underscores=False):
    s = to_unicode(s)
    s = s.lower()
    s = unicodedata.normalize('NFKD', s)
    s = to_native(s, charset='ascii', errors='ignore')
    s = re.sub('&\\w+?;', '', s)
    if keep_underscores:
        s = re.sub('\\s+', '-', s)
        s = re.sub('[^\\w\\-]', '', s)
    else:
        s = re.sub('[\\s_]+', '-', s)
        s = re.sub('[^a-z0-9\\-]', '', s)
    s = re.sub('[-_][-_]+', '-', s)
    s = s.strip('-')
    return s[:maxlen]
[ "def", "urlify", "(", "s", ",", "maxlen", "=", "80", ",", "keep_underscores", "=", "False", ")", ":", "s", "=", "to_unicode", "(", "s", ")", "s", "=", "s", ".", "lower", "(", ")", "s", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "s", ")", "s", "=", "to_native", "(", "s", ",", "charset", "=", "'ascii'", ",", "errors", "=", "'ignore'", ")", "s", "=", "re", ".", "sub", "(", "'&\\\\w+?;'", ",", "''", ",", "s", ")", "if", "keep_underscores", ":", "s", "=", "re", ".", "sub", "(", "'\\\\s+'", ",", "'-'", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "'[^\\\\w\\\\-]'", ",", "''", ",", "s", ")", "else", ":", "s", "=", "re", ".", "sub", "(", "'[\\\\s_]+'", ",", "'-'", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "'[^a-z0-9\\\\-]'", ",", "''", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "'[-_][-_]+'", ",", "'-'", ",", "s", ")", "s", "=", "s", ".", "strip", "(", "'-'", ")", "return", "s", "[", ":", "maxlen", "]" ]
converts incoming string to a simplified ascii subset .
train
false
11,678
def DecodeVarLengthNumber(byte_str):
    value = 0
    num_bytes = 0
    for shift in xrange(0, 64, 7):
        (byte,) = struct.unpack('>B', byte_str[num_bytes:(num_bytes + 1)])
        num_bytes += 1
        if (byte & 128):
            value |= ((byte & 127) << shift)
        else:
            value |= (byte << shift)
            return (value, num_bytes)
    raise TypeError('string not decodable as variable length number')
[ "def", "DecodeVarLengthNumber", "(", "byte_str", ")", ":", "value", "=", "0", "num_bytes", "=", "0", "for", "shift", "in", "xrange", "(", "0", ",", "64", ",", "7", ")", ":", "(", "byte", ",", ")", "=", "struct", ".", "unpack", "(", "'>B'", ",", "byte_str", "[", "num_bytes", ":", "(", "num_bytes", "+", "1", ")", "]", ")", "num_bytes", "+=", "1", "if", "(", "byte", "&", "128", ")", ":", "value", "|=", "(", "(", "byte", "&", "127", ")", "<<", "shift", ")", "else", ":", "value", "|=", "(", "byte", "<<", "shift", ")", "return", "(", "value", ",", "num_bytes", ")", "raise", "TypeError", "(", "'string not decodable as variable length number'", ")" ]
interprets a raw byte string as a variable length encoded number and decodes .
train
false
11,679
def set_slotname(slot, name, host=None, admin_username=None, admin_password=None):
    return __execute_cmd('config -g cfgServerInfo -o cfgServerName -i {0} {1}'.format(slot, name), host=host, admin_username=admin_username, admin_password=admin_password)
[ "def", "set_slotname", "(", "slot", ",", "name", ",", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ")", ":", "return", "__execute_cmd", "(", "'config -g cfgServerInfo -o cfgServerName -i {0} {1}'", ".", "format", "(", "slot", ",", "name", ")", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ")" ]
set the name of a slot in a chassis .
train
true
11,680
def ParseAndReturnIncludePaths(appinfo_file, open_fn=open):
    try:
        appinfo_path = appinfo_file.name
        if (not os.path.isfile(appinfo_path)):
            raise Exception(('Name defined by appinfo_file does not appear to be a valid file: %s' % appinfo_path))
    except AttributeError:
        raise Exception('File object passed to ParseAndMerge does not define attribute "name" as as full file path.')
    appyaml = appinfo.LoadSingleAppInfo(appinfo_file)
    (appyaml, include_paths) = _MergeBuiltinsIncludes(appinfo_path, appyaml, open_fn)
    if (not appyaml.handlers):
        if appyaml.IsVm():
            appyaml.handlers = [appinfo.URLMap(url='.*', script='PLACEHOLDER')]
        else:
            raise appinfo_errors.MissingURLMapping('No URLMap entries found in application configuration')
    if (len(appyaml.handlers) > appinfo.MAX_URL_MAPS):
        raise appinfo_errors.TooManyURLMappings(('Found more than %d URLMap entries in application configuration' % appinfo.MAX_URL_MAPS))
    if ((appyaml.runtime == 'python27') and appyaml.threadsafe):
        for handler in appyaml.handlers:
            if (handler.script and (handler.script.endswith('.py') or ('/' in handler.script))):
                raise appinfo_errors.ThreadsafeWithCgiHandler(('Threadsafe cannot be enabled with CGI handler: %s' % handler.script))
    return (appyaml, include_paths)
[ "def", "ParseAndReturnIncludePaths", "(", "appinfo_file", ",", "open_fn", "=", "open", ")", ":", "try", ":", "appinfo_path", "=", "appinfo_file", ".", "name", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "appinfo_path", ")", ")", ":", "raise", "Exception", "(", "(", "'Name defined by appinfo_file does not appear to be a valid file: %s'", "%", "appinfo_path", ")", ")", "except", "AttributeError", ":", "raise", "Exception", "(", "'File object passed to ParseAndMerge does not define attribute \"name\" as as full file path.'", ")", "appyaml", "=", "appinfo", ".", "LoadSingleAppInfo", "(", "appinfo_file", ")", "(", "appyaml", ",", "include_paths", ")", "=", "_MergeBuiltinsIncludes", "(", "appinfo_path", ",", "appyaml", ",", "open_fn", ")", "if", "(", "not", "appyaml", ".", "handlers", ")", ":", "if", "appyaml", ".", "IsVm", "(", ")", ":", "appyaml", ".", "handlers", "=", "[", "appinfo", ".", "URLMap", "(", "url", "=", "'.*'", ",", "script", "=", "'PLACEHOLDER'", ")", "]", "else", ":", "raise", "appinfo_errors", ".", "MissingURLMapping", "(", "'No URLMap entries found in application configuration'", ")", "if", "(", "len", "(", "appyaml", ".", "handlers", ")", ">", "appinfo", ".", "MAX_URL_MAPS", ")", ":", "raise", "appinfo_errors", ".", "TooManyURLMappings", "(", "(", "'Found more than %d URLMap entries in application configuration'", "%", "appinfo", ".", "MAX_URL_MAPS", ")", ")", "if", "(", "(", "appyaml", ".", "runtime", "==", "'python27'", ")", "and", "appyaml", ".", "threadsafe", ")", ":", "for", "handler", "in", "appyaml", ".", "handlers", ":", "if", "(", "handler", ".", "script", "and", "(", "handler", ".", "script", ".", "endswith", "(", "'.py'", ")", "or", "(", "'/'", "in", "handler", ".", "script", ")", ")", ")", ":", "raise", "appinfo_errors", ".", "ThreadsafeWithCgiHandler", "(", "(", "'Threadsafe cannot be enabled with CGI handler: %s'", "%", "handler", ".", "script", ")", ")", "return", "(", "appyaml", ",", "include_paths", ")" ]
parse an appyaml file and merge referenced includes and builtins .
train
false
11,681
def handleManual(manualpois, filters, markers):
    for poi in manualpois:
        for (name, __, filter_function, __, __, __) in filters:
            result = filter_function(poi)
            if result:
                d = create_marker_from_filter_result(poi, result)
                markers[name]['raw'].append(d)
[ "def", "handleManual", "(", "manualpois", ",", "filters", ",", "markers", ")", ":", "for", "poi", "in", "manualpois", ":", "for", "(", "name", ",", "__", ",", "filter_function", ",", "__", ",", "__", ",", "__", ")", "in", "filters", ":", "result", "=", "filter_function", "(", "poi", ")", "if", "result", ":", "d", "=", "create_marker_from_filter_result", "(", "poi", ",", "result", ")", "markers", "[", "name", "]", "[", "'raw'", "]", ".", "append", "(", "d", ")" ]
add markers for manually defined pois to the list of markers .
train
false
11,683
def iconcat(a, b):
    if (not hasattr(a, '__getitem__')):
        msg = ("'%s' object can't be concatenated" % type(a).__name__)
        raise TypeError(msg)
    a += b
    return a
[ "def", "iconcat", "(", "a", ",", "b", ")", ":", "if", "(", "not", "hasattr", "(", "a", ",", "'__getitem__'", ")", ")", ":", "msg", "=", "(", "\"'%s' object can't be concatenated\"", "%", "type", "(", "a", ")", ".", "__name__", ")", "raise", "TypeError", "(", "msg", ")", "a", "+=", "b", "return", "a" ]
same as a += b .
train
true
11,684
def _gf_gcd(fp, gp, p):
    dom = fp.ring.domain
    while gp:
        rem = fp
        deg = gp.degree()
        lcinv = dom.invert(gp.LC, p)
        while True:
            degrem = rem.degree()
            if (degrem < deg):
                break
            rem = (rem - gp.mul_monom(((degrem - deg),)).mul_ground((lcinv * rem.LC))).trunc_ground(p)
        fp = gp
        gp = rem
    return fp.mul_ground(dom.invert(fp.LC, p)).trunc_ground(p)
[ "def", "_gf_gcd", "(", "fp", ",", "gp", ",", "p", ")", ":", "dom", "=", "fp", ".", "ring", ".", "domain", "while", "gp", ":", "rem", "=", "fp", "deg", "=", "gp", ".", "degree", "(", ")", "lcinv", "=", "dom", ".", "invert", "(", "gp", ".", "LC", ",", "p", ")", "while", "True", ":", "degrem", "=", "rem", ".", "degree", "(", ")", "if", "(", "degrem", "<", "deg", ")", ":", "break", "rem", "=", "(", "rem", "-", "gp", ".", "mul_monom", "(", "(", "(", "degrem", "-", "deg", ")", ",", ")", ")", ".", "mul_ground", "(", "(", "lcinv", "*", "rem", ".", "LC", ")", ")", ")", ".", "trunc_ground", "(", "p", ")", "fp", "=", "gp", "gp", "=", "rem", "return", "fp", ".", "mul_ground", "(", "dom", ".", "invert", "(", "fp", ".", "LC", ",", "p", ")", ")", ".", "trunc_ground", "(", "p", ")" ]
compute the gcd of two univariate polynomials in mathbb{z}_p[x] .
train
false
11,685
def fake_elsewhere(db, participant, platform):
    insert_fake_data(db, 'elsewhere', platform=platform, user_id=fake_text_id(), user_name=participant.username, participant=participant.username, extra_info=None)
[ "def", "fake_elsewhere", "(", "db", ",", "participant", ",", "platform", ")", ":", "insert_fake_data", "(", "db", ",", "'elsewhere'", ",", "platform", "=", "platform", ",", "user_id", "=", "fake_text_id", "(", ")", ",", "user_name", "=", "participant", ".", "username", ",", "participant", "=", "participant", ".", "username", ",", "extra_info", "=", "None", ")" ]
create a fake elsewhere .
train
false
11,686
def weak_lru_cache(maxsize=100):
    class desc(lazyval, ):
        def __get__(self, instance, owner):
            if (instance is None):
                return self
            try:
                return self._cache[instance]
            except KeyError:
                inst = ref(instance)

                @_weak_lru_cache(maxsize)
                @wraps(self._get)
                def wrapper(*args, **kwargs):
                    return self._get(inst(), *args, **kwargs)
                self._cache[instance] = wrapper
                return wrapper

        @_weak_lru_cache(maxsize)
        def __call__(self, *args, **kwargs):
            return self._get(*args, **kwargs)
    return desc
[ "def", "weak_lru_cache", "(", "maxsize", "=", "100", ")", ":", "class", "desc", "(", "lazyval", ",", ")", ":", "def", "__get__", "(", "self", ",", "instance", ",", "owner", ")", ":", "if", "(", "instance", "is", "None", ")", ":", "return", "self", "try", ":", "return", "self", ".", "_cache", "[", "instance", "]", "except", "KeyError", ":", "inst", "=", "ref", "(", "instance", ")", "@", "_weak_lru_cache", "(", "maxsize", ")", "@", "wraps", "(", "self", ".", "_get", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "self", ".", "_get", "(", "inst", "(", ")", ",", "*", "args", ",", "**", "kwargs", ")", "self", ".", "_cache", "[", "instance", "]", "=", "wrapper", "return", "wrapper", "@", "_weak_lru_cache", "(", "maxsize", ")", "def", "__call__", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "self", ".", "_get", "(", "*", "args", ",", "**", "kwargs", ")", "return", "desc" ]
weak least-recently-used cache decorator .
train
true
11,687
def reportUnhandledErrors(case, d):
    def cleanup():
        if isinstance(d.result, Failure):
            return d
    case.addCleanup(cleanup)
    return d
[ "def", "reportUnhandledErrors", "(", "case", ",", "d", ")", ":", "def", "cleanup", "(", ")", ":", "if", "isinstance", "(", "d", ".", "result", ",", "Failure", ")", ":", "return", "d", "case", ".", "addCleanup", "(", "cleanup", ")", "return", "d" ]
make sure that any unhandled errors from the given deferred are reported when the test case ends .
train
false
11,688
def station_vehicle():
    s3.prep = (lambda r: (r.method == 'import'))
    return s3_rest_controller()
[ "def", "station_vehicle", "(", ")", ":", "s3", ".", "prep", "=", "(", "lambda", "r", ":", "(", "r", ".", "method", "==", "'import'", ")", ")", "return", "s3_rest_controller", "(", ")" ]
vehicles of fire stations .
train
false
11,689
def get_numerical_gradient(f, tensors, delta=1e-05):
    gradients = []
    for i in range(len(tensors)):
        tensors[i] = tensors[i].astype(np.float64)
        gradients.append(np.zeros(tensors[i].shape))
    for (tensor, gradient) in zip(tensors, gradients):
        tensor_flat = tensor.reshape(((-1),))
        gradient_flat = gradient.reshape(((-1),))
        for idx in range(len(tensor_flat)):
            backup = tensor_flat[idx]
            tensor_flat[idx] = (tensor_flat[idx] + delta)
            f_inc = np.sum(f(np, *tensors))
            tensor_flat[idx] = (backup - delta)
            f_dec = np.sum(f(np, *tensors))
            tensor_flat[idx] = backup
            gradient_flat[idx] = ((f_inc - f_dec) / (2.0 * delta))
    return gradients
[ "def", "get_numerical_gradient", "(", "f", ",", "tensors", ",", "delta", "=", "1e-05", ")", ":", "gradients", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "tensors", ")", ")", ":", "tensors", "[", "i", "]", "=", "tensors", "[", "i", "]", ".", "astype", "(", "np", ".", "float64", ")", "gradients", ".", "append", "(", "np", ".", "zeros", "(", "tensors", "[", "i", "]", ".", "shape", ")", ")", "for", "(", "tensor", ",", "gradient", ")", "in", "zip", "(", "tensors", ",", "gradients", ")", ":", "tensor_flat", "=", "tensor", ".", "reshape", "(", "(", "(", "-", "1", ")", ",", ")", ")", "gradient_flat", "=", "gradient", ".", "reshape", "(", "(", "(", "-", "1", ")", ",", ")", ")", "for", "idx", "in", "range", "(", "len", "(", "tensor_flat", ")", ")", ":", "backup", "=", "tensor_flat", "[", "idx", "]", "tensor_flat", "[", "idx", "]", "=", "(", "tensor_flat", "[", "idx", "]", "+", "delta", ")", "f_inc", "=", "np", ".", "sum", "(", "f", "(", "np", ",", "*", "tensors", ")", ")", "tensor_flat", "[", "idx", "]", "=", "(", "backup", "-", "delta", ")", "f_dec", "=", "np", ".", "sum", "(", "f", "(", "np", ",", "*", "tensors", ")", ")", "tensor_flat", "[", "idx", "]", "=", "backup", "gradient_flat", "[", "idx", "]", "=", "(", "(", "f_inc", "-", "f_dec", ")", "/", "(", "2.0", "*", "delta", ")", ")", "return", "gradients" ]
sum all of fs elements to make the last layer error as one .
train
false
11,690
def getabsfile(object, _filename=None):
    if (_filename is None):
        _filename = (getsourcefile(object) or getfile(object))
    return os.path.normcase(os.path.abspath(_filename))
[ "def", "getabsfile", "(", "object", ",", "_filename", "=", "None", ")", ":", "if", "(", "_filename", "is", "None", ")", ":", "_filename", "=", "(", "getsourcefile", "(", "object", ")", "or", "getfile", "(", "object", ")", ")", "return", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "abspath", "(", "_filename", ")", ")" ]
return an absolute path to the source or compiled file for an object .
train
true
11,691
def red(text, attrib=None):
    return colorize(text, 'red', attrib)
[ "def", "red", "(", "text", ",", "attrib", "=", "None", ")", ":", "return", "colorize", "(", "text", ",", "'red'", ",", "attrib", ")" ]
wrapper for colorize .
train
false
11,692
def enumerate_states(*args, **options):
    state = args[0]
    if (not ((len(args) == 2) or (len(args) == 3))):
        raise NotImplementedError('Wrong number of arguments!')
    if (not isinstance(state, StateBase)):
        raise TypeError('First argument is not a state!')
    if (len(args) == 3):
        num_states = args[2]
        options['start_index'] = args[1]
    else:
        num_states = len(args[1])
        options['index_list'] = args[1]
    try:
        ret = state._enumerate_state(num_states, **options)
    except NotImplementedError:
        ret = []
    return ret
[ "def", "enumerate_states", "(", "*", "args", ",", "**", "options", ")", ":", "state", "=", "args", "[", "0", "]", "if", "(", "not", "(", "(", "len", "(", "args", ")", "==", "2", ")", "or", "(", "len", "(", "args", ")", "==", "3", ")", ")", ")", ":", "raise", "NotImplementedError", "(", "'Wrong number of arguments!'", ")", "if", "(", "not", "isinstance", "(", "state", ",", "StateBase", ")", ")", ":", "raise", "TypeError", "(", "'First argument is not a state!'", ")", "if", "(", "len", "(", "args", ")", "==", "3", ")", ":", "num_states", "=", "args", "[", "2", "]", "options", "[", "'start_index'", "]", "=", "args", "[", "1", "]", "else", ":", "num_states", "=", "len", "(", "args", "[", "1", "]", ")", "options", "[", "'index_list'", "]", "=", "args", "[", "1", "]", "try", ":", "ret", "=", "state", ".", "_enumerate_state", "(", "num_states", ",", "**", "options", ")", "except", "NotImplementedError", ":", "ret", "=", "[", "]", "return", "ret" ]
returns instances of the given state with dummy indices appended operates in two different modes: 1 .
train
false
11,693
def glob_to_re(pattern):
    pattern_re = fnmatch.translate(pattern)
    sep = os.sep
    if (os.sep == '\\'):
        sep = '\\\\\\\\'
    escaped = ('\\1[^%s]' % sep)
    pattern_re = re.sub('((?<!\\\\)(\\\\\\\\)*)\\.', escaped, pattern_re)
    return pattern_re
[ "def", "glob_to_re", "(", "pattern", ")", ":", "pattern_re", "=", "fnmatch", ".", "translate", "(", "pattern", ")", "sep", "=", "os", ".", "sep", "if", "(", "os", ".", "sep", "==", "'\\\\'", ")", ":", "sep", "=", "'\\\\\\\\\\\\\\\\'", "escaped", "=", "(", "'\\\\1[^%s]'", "%", "sep", ")", "pattern_re", "=", "re", ".", "sub", "(", "'((?<!\\\\\\\\)(\\\\\\\\\\\\\\\\)*)\\\\.'", ",", "escaped", ",", "pattern_re", ")", "return", "pattern_re" ]
translate a shell-like glob pattern to a regular expression; return a string containing the regex .
train
false
11,694
def _truncate_bitmap(what):
    for i in xrange((len(what) - 1), (-1), (-1)):
        if (what[i] != '\x00'):
            break
    return ''.join(what[0:(i + 1)])
[ "def", "_truncate_bitmap", "(", "what", ")", ":", "for", "i", "in", "xrange", "(", "(", "len", "(", "what", ")", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "if", "(", "what", "[", "i", "]", "!=", "'\\x00'", ")", ":", "break", "return", "''", ".", "join", "(", "what", "[", "0", ":", "(", "i", "+", "1", ")", "]", ")" ]
determine the index of greatest byte that isnt all zeros .
train
true
11,696
def importElementTree(module_names=None):
    if (module_names is None):
        module_names = elementtree_modules
    for mod_name in module_names:
        try:
            ElementTree = __import__(mod_name, None, None, ['unused'])
        except ImportError:
            pass
        else:
            try:
                ElementTree.XML('<unused/>')
            except (SystemExit, MemoryError, AssertionError):
                raise
            except:
                why = sys.exc_info()[1]
                log(('Not using ElementTree library %r because it failed to parse a trivial document: %s' % (mod_name, why)))
            else:
                return ElementTree
    else:
        raise ImportError(('No ElementTree library found. You may need to install one. Tried importing %r' % (module_names,)))
[ "def", "importElementTree", "(", "module_names", "=", "None", ")", ":", "if", "(", "module_names", "is", "None", ")", ":", "module_names", "=", "elementtree_modules", "for", "mod_name", "in", "module_names", ":", "try", ":", "ElementTree", "=", "__import__", "(", "mod_name", ",", "None", ",", "None", ",", "[", "'unused'", "]", ")", "except", "ImportError", ":", "pass", "else", ":", "try", ":", "ElementTree", ".", "XML", "(", "'<unused/>'", ")", "except", "(", "SystemExit", ",", "MemoryError", ",", "AssertionError", ")", ":", "raise", "except", ":", "why", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "log", "(", "(", "'Not using ElementTree library %r because it failed to parse a trivial document: %s'", "%", "(", "mod_name", ",", "why", ")", ")", ")", "else", ":", "return", "ElementTree", "else", ":", "raise", "ImportError", "(", "(", "'No ElementTree library found. You may need to install one. Tried importing %r'", "%", "(", "module_names", ",", ")", ")", ")" ]
find a working elementtree implementation .
train
false
11,697
def filter_method(item, settings=None):
    all_match = True
    default_filters = {}
    filters = {}
    if hasattr(settings, 'filters'):
        filters.update(default_filters)
        filters.update(settings.filters.__dict__)
    for (field, value) in filters.items():
        try:
            res = attrgetter(field)(item)
        except:
            res = None
        if (res != value):
            all_match = False
            break
    return all_match
[ "def", "filter_method", "(", "item", ",", "settings", "=", "None", ")", ":", "all_match", "=", "True", "default_filters", "=", "{", "}", "filters", "=", "{", "}", "if", "hasattr", "(", "settings", ",", "'filters'", ")", ":", "filters", ".", "update", "(", "default_filters", ")", "filters", ".", "update", "(", "settings", ".", "filters", ".", "__dict__", ")", "for", "(", "field", ",", "value", ")", "in", "filters", ".", "items", "(", ")", ":", "try", ":", "res", "=", "attrgetter", "(", "field", ")", "(", "item", ")", "except", ":", "res", "=", "None", "if", "(", "res", "!=", "value", ")", ":", "all_match", "=", "False", "break", "return", "all_match" ]
returns true if all the filters in the given settings evaluate to true .
train
false
11,698
def db_ensure_indexes():
    LOG.debug('Ensuring database indexes...')
    model_classes = get_model_classes()
    for model_class in model_classes:
        class_name = model_class.__name__
        LOG.debug(('Ensuring indexes for model "%s"...' % model_class.__name__))
        model_class.ensure_indexes()
        if (model_class.__name__ in INDEX_CLEANUP_MODEL_NAMES_BLACKLIST):
            LOG.debug(('Skipping index cleanup for blacklisted model "%s"...' % class_name))
            continue
        LOG.debug(('Removing extra indexes for model "%s"...' % class_name))
        removed_count = cleanup_extra_indexes(model_class=model_class)
        LOG.debug(('Removed "%s" extra indexes for model "%s"' % (removed_count, class_name)))
[ "def", "db_ensure_indexes", "(", ")", ":", "LOG", ".", "debug", "(", "'Ensuring database indexes...'", ")", "model_classes", "=", "get_model_classes", "(", ")", "for", "model_class", "in", "model_classes", ":", "class_name", "=", "model_class", ".", "__name__", "LOG", ".", "debug", "(", "(", "'Ensuring indexes for model \"%s\"...'", "%", "model_class", ".", "__name__", ")", ")", "model_class", ".", "ensure_indexes", "(", ")", "if", "(", "model_class", ".", "__name__", "in", "INDEX_CLEANUP_MODEL_NAMES_BLACKLIST", ")", ":", "LOG", ".", "debug", "(", "(", "'Skipping index cleanup for blacklisted model \"%s\"...'", "%", "class_name", ")", ")", "continue", "LOG", ".", "debug", "(", "(", "'Removing extra indexes for model \"%s\"...'", "%", "class_name", ")", ")", "removed_count", "=", "cleanup_extra_indexes", "(", "model_class", "=", "model_class", ")", "LOG", ".", "debug", "(", "(", "'Removed \"%s\" extra indexes for model \"%s\"'", "%", "(", "removed_count", ",", "class_name", ")", ")", ")" ]
this function ensures that indexes for all the models have been created and the extra indexes cleaned up .
train
false
11,699
def Name(name, prefix=None):
    return Leaf(token.NAME, name, prefix=prefix)
[ "def", "Name", "(", "name", ",", "prefix", "=", "None", ")", ":", "return", "Leaf", "(", "token", ".", "NAME", ",", "name", ",", "prefix", "=", "prefix", ")" ]
return a name leaf .
train
false
11,700
def bias_weights(length, param_list=None, name=''):
    bias_initialization = numpy.zeros(length).astype(theano.config.floatX)
    bias = theano.shared(bias_initialization, name=name)
    if (param_list is not None):
        param_list.append(bias)
    return bias
[ "def", "bias_weights", "(", "length", ",", "param_list", "=", "None", ",", "name", "=", "''", ")", ":", "bias_initialization", "=", "numpy", ".", "zeros", "(", "length", ")", ".", "astype", "(", "theano", ".", "config", ".", "floatX", ")", "bias", "=", "theano", ".", "shared", "(", "bias_initialization", ",", "name", "=", "name", ")", "if", "(", "param_list", "is", "not", "None", ")", ":", "param_list", ".", "append", "(", "bias", ")", "return", "bias" ]
theano shared variable for bias unit .
train
false
11,701
def guess_srv_port(port1, port2, proto='tcp'):
    if (not _PORTS_POPULATED):
        _set_ports()
    ports = _PORTS.get(proto, {})
    cmpval = cmp(ports.get(port1, 0), ports.get(port2, 0))
    if (cmpval == 0):
        return cmp(port2, port1)
    return cmpval
[ "def", "guess_srv_port", "(", "port1", ",", "port2", ",", "proto", "=", "'tcp'", ")", ":", "if", "(", "not", "_PORTS_POPULATED", ")", ":", "_set_ports", "(", ")", "ports", "=", "_PORTS", ".", "get", "(", "proto", ",", "{", "}", ")", "cmpval", "=", "cmp", "(", "ports", ".", "get", "(", "port1", ",", "0", ")", ",", "ports", ".", "get", "(", "port2", ",", "0", ")", ")", "if", "(", "cmpval", "==", "0", ")", ":", "return", "cmp", "(", "port2", ",", "port1", ")", "return", "cmpval" ]
returns 1 when port1 is probably the server port .
train
false
11,702
def _clean_data(api_response):
    data = {}
    data.update(api_response['DATA'])
    if (not data):
        response_data = api_response['DATA']
        data.update(response_data)
    return data
[ "def", "_clean_data", "(", "api_response", ")", ":", "data", "=", "{", "}", "data", ".", "update", "(", "api_response", "[", "'DATA'", "]", ")", "if", "(", "not", "data", ")", ":", "response_data", "=", "api_response", "[", "'DATA'", "]", "data", ".", "update", "(", "response_data", ")", "return", "data" ]
returns the data response from a linode api query as a single pre-formatted dictionary api_response the query to be cleaned .
train
true
11,704
def download(url=u'', method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
    return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
[ "def", "download", "(", "url", "=", "u''", ",", "method", "=", "GET", ",", "query", "=", "{", "}", ",", "timeout", "=", "10", ",", "cached", "=", "True", ",", "throttle", "=", "0", ",", "proxy", "=", "None", ",", "user_agent", "=", "USER_AGENT", ",", "referrer", "=", "REFERRER", ",", "authentication", "=", "None", ",", "unicode", "=", "False", ")", ":", "return", "URL", "(", "url", ",", "method", ",", "query", ")", ".", "download", "(", "timeout", ",", "cached", ",", "throttle", ",", "proxy", ",", "user_agent", ",", "referrer", ",", "authentication", ",", "unicode", ")" ]
send client contents of zipfile *zip_basename*-<timestamp> .
train
false
11,705
def _find_key(prefix, pip_list):
    try:
        match = next(iter((x for x in pip_list if (x.lower() == prefix.lower()))))
    except StopIteration:
        return None
    else:
        return match
[ "def", "_find_key", "(", "prefix", ",", "pip_list", ")", ":", "try", ":", "match", "=", "next", "(", "iter", "(", "(", "x", "for", "x", "in", "pip_list", "if", "(", "x", ".", "lower", "(", ")", "==", "prefix", ".", "lower", "(", ")", ")", ")", ")", ")", "except", "StopIteration", ":", "return", "None", "else", ":", "return", "match" ]
does a case-insensitive match in the pip_list for the desired package .
train
false
11,706
def cs_graph_components(x):
    try:
        shape = x.shape
    except AttributeError:
        raise ValueError(_msg0)
    if (not ((len(x.shape) == 2) and (x.shape[0] == x.shape[1]))):
        raise ValueError((_msg1 % x.shape))
    if isspmatrix(x):
        x = x.tocsr()
    else:
        x = csr_matrix(x)
    label = np.empty((shape[0],), dtype=x.indptr.dtype)
    n_comp = _cs_graph_components(shape[0], x.indptr, x.indices, label)
    return (n_comp, label)
[ "def", "cs_graph_components", "(", "x", ")", ":", "try", ":", "shape", "=", "x", ".", "shape", "except", "AttributeError", ":", "raise", "ValueError", "(", "_msg0", ")", "if", "(", "not", "(", "(", "len", "(", "x", ".", "shape", ")", "==", "2", ")", "and", "(", "x", ".", "shape", "[", "0", "]", "==", "x", ".", "shape", "[", "1", "]", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "_msg1", "%", "x", ".", "shape", ")", ")", "if", "isspmatrix", "(", "x", ")", ":", "x", "=", "x", ".", "tocsr", "(", ")", "else", ":", "x", "=", "csr_matrix", "(", "x", ")", "label", "=", "np", ".", "empty", "(", "(", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "x", ".", "indptr", ".", "dtype", ")", "n_comp", "=", "_cs_graph_components", "(", "shape", "[", "0", "]", ",", "x", ".", "indptr", ",", "x", ".", "indices", ",", "label", ")", "return", "(", "n_comp", ",", "label", ")" ]
determine connected components of a graph stored as a compressed sparse row or column matrix .
train
false
11,707
def get_i18n(factory=I18n, key=_i18n_registry_key, request=None):
    request = (request or webapp2.get_request())
    i18n = request.registry.get(key)
    if (not i18n):
        i18n = request.registry[key] = factory(request)
    return i18n
[ "def", "get_i18n", "(", "factory", "=", "I18n", ",", "key", "=", "_i18n_registry_key", ",", "request", "=", "None", ")", ":", "request", "=", "(", "request", "or", "webapp2", ".", "get_request", "(", ")", ")", "i18n", "=", "request", ".", "registry", ".", "get", "(", "key", ")", "if", "(", "not", "i18n", ")", ":", "i18n", "=", "request", ".", "registry", "[", "key", "]", "=", "factory", "(", "request", ")", "return", "i18n" ]
returns an instance of :class:i18n from the request registry .
train
false
11,708
def oracle_old_passwd(password, username, uppercase=True):
    (IV, pad) = (('\x00' * 8), '\x00')
    if isinstance(username, unicode):
        username = unicode.encode(username, UNICODE_ENCODING)
    if isinstance(password, unicode):
        password = unicode.encode(password, UNICODE_ENCODING)
    unistr = ''.join((('\x00%s' % c) for c in (username + password).upper()))
    cipher = des(hexdecode('0123456789ABCDEF'), CBC, IV, pad)
    encrypted = cipher.encrypt(unistr)
    cipher = des(encrypted[(-8):], CBC, IV, pad)
    encrypted = cipher.encrypt(unistr)
    retVal = hexencode(encrypted[(-8):])
    return (retVal.upper() if uppercase else retVal.lower())
[ "def", "oracle_old_passwd", "(", "password", ",", "username", ",", "uppercase", "=", "True", ")", ":", "(", "IV", ",", "pad", ")", "=", "(", "(", "'\\x00'", "*", "8", ")", ",", "'\\x00'", ")", "if", "isinstance", "(", "username", ",", "unicode", ")", ":", "username", "=", "unicode", ".", "encode", "(", "username", ",", "UNICODE_ENCODING", ")", "if", "isinstance", "(", "password", ",", "unicode", ")", ":", "password", "=", "unicode", ".", "encode", "(", "password", ",", "UNICODE_ENCODING", ")", "unistr", "=", "''", ".", "join", "(", "(", "(", "'\\x00%s'", "%", "c", ")", "for", "c", "in", "(", "username", "+", "password", ")", ".", "upper", "(", ")", ")", ")", "cipher", "=", "des", "(", "hexdecode", "(", "'0123456789ABCDEF'", ")", ",", "CBC", ",", "IV", ",", "pad", ")", "encrypted", "=", "cipher", ".", "encrypt", "(", "unistr", ")", "cipher", "=", "des", "(", "encrypted", "[", "(", "-", "8", ")", ":", "]", ",", "CBC", ",", "IV", ",", "pad", ")", "encrypted", "=", "cipher", ".", "encrypt", "(", "unistr", ")", "retVal", "=", "hexencode", "(", "encrypted", "[", "(", "-", "8", ")", ":", "]", ")", "return", "(", "retVal", ".", "upper", "(", ")", "if", "uppercase", "else", "retVal", ".", "lower", "(", ")", ")" ]
reference(s): URL .
train
false
11,710
def _gcm_handle_canonical_id(canonical_id, current_id, cloud_type):
    if GCMDevice.objects.filter(registration_id=canonical_id, cloud_message_type=cloud_type, active=True).exists():
        GCMDevice.objects.filter(registration_id=current_id, cloud_message_type=cloud_type).update(active=False)
    else:
        GCMDevice.objects.filter(registration_id=current_id, cloud_message_type=cloud_type).update(registration_id=canonical_id)
[ "def", "_gcm_handle_canonical_id", "(", "canonical_id", ",", "current_id", ",", "cloud_type", ")", ":", "if", "GCMDevice", ".", "objects", ".", "filter", "(", "registration_id", "=", "canonical_id", ",", "cloud_message_type", "=", "cloud_type", ",", "active", "=", "True", ")", ".", "exists", "(", ")", ":", "GCMDevice", ".", "objects", ".", "filter", "(", "registration_id", "=", "current_id", ",", "cloud_message_type", "=", "cloud_type", ")", ".", "update", "(", "active", "=", "False", ")", "else", ":", "GCMDevice", ".", "objects", ".", "filter", "(", "registration_id", "=", "current_id", ",", "cloud_message_type", "=", "cloud_type", ")", ".", "update", "(", "registration_id", "=", "canonical_id", ")" ]
handle situation when gcm server response contains canonical id .
train
false
11,713
def parse_path_metadata(source_path, settings=None, process=None):
    metadata = {}
    (dirname, basename) = os.path.split(source_path)
    (base, ext) = os.path.splitext(basename)
    subdir = os.path.basename(dirname)
    if settings:
        checks = []
        for (key, data) in [(u'FILENAME_METADATA', base), (u'PATH_METADATA', source_path)]:
            checks.append((settings.get(key, None), data))
        if settings.get(u'USE_FOLDER_AS_CATEGORY', None):
            checks.append((u'(?P<category>.*)', subdir))
        for (regexp, data) in checks:
            if (regexp and data):
                match = re.match(regexp, data)
                if match:
                    for (k, v) in match.groupdict().items():
                        k = k.lower()
                        if (k not in metadata):
                            if process:
                                v = process(k, v)
                            metadata[k] = v
    return metadata
[ "def", "parse_path_metadata", "(", "source_path", ",", "settings", "=", "None", ",", "process", "=", "None", ")", ":", "metadata", "=", "{", "}", "(", "dirname", ",", "basename", ")", "=", "os", ".", "path", ".", "split", "(", "source_path", ")", "(", "base", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "subdir", "=", "os", ".", "path", ".", "basename", "(", "dirname", ")", "if", "settings", ":", "checks", "=", "[", "]", "for", "(", "key", ",", "data", ")", "in", "[", "(", "u'FILENAME_METADATA'", ",", "base", ")", ",", "(", "u'PATH_METADATA'", ",", "source_path", ")", "]", ":", "checks", ".", "append", "(", "(", "settings", ".", "get", "(", "key", ",", "None", ")", ",", "data", ")", ")", "if", "settings", ".", "get", "(", "u'USE_FOLDER_AS_CATEGORY'", ",", "None", ")", ":", "checks", ".", "append", "(", "(", "u'(?P<category>.*)'", ",", "subdir", ")", ")", "for", "(", "regexp", ",", "data", ")", "in", "checks", ":", "if", "(", "regexp", "and", "data", ")", ":", "match", "=", "re", ".", "match", "(", "regexp", ",", "data", ")", "if", "match", ":", "for", "(", "k", ",", "v", ")", "in", "match", ".", "groupdict", "(", ")", ".", "items", "(", ")", ":", "k", "=", "k", ".", "lower", "(", ")", "if", "(", "k", "not", "in", "metadata", ")", ":", "if", "process", ":", "v", "=", "process", "(", "k", ",", "v", ")", "metadata", "[", "k", "]", "=", "v", "return", "metadata" ]
extract a metadata dictionary from a files path .
train
false
11,714
def _check_hour_range(hrs):
    if np.any((np.abs(hrs) == 24.0)):
        warn(IllegalHourWarning(hrs, u'Treating as 24 hr'))
    elif (np.any((hrs < (-24.0))) or np.any((hrs > 24.0))):
        raise IllegalHourError(hrs)
[ "def", "_check_hour_range", "(", "hrs", ")", ":", "if", "np", ".", "any", "(", "(", "np", ".", "abs", "(", "hrs", ")", "==", "24.0", ")", ")", ":", "warn", "(", "IllegalHourWarning", "(", "hrs", ",", "u'Treating as 24 hr'", ")", ")", "elif", "(", "np", ".", "any", "(", "(", "hrs", "<", "(", "-", "24.0", ")", ")", ")", "or", "np", ".", "any", "(", "(", "hrs", ">", "24.0", ")", ")", ")", ":", "raise", "IllegalHourError", "(", "hrs", ")" ]
checks that the given value is in the range .
train
false
11,715
def test_entity_ids():
    schema = vol.Schema(cv.entity_ids)
    options = ('invalid_entity', 'sensor.light,sensor_invalid', ['invalid_entity'], ['sensor.light', 'sensor_invalid'], ['sensor.light,sensor_invalid'])
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    options = ([], ['sensor.light'], 'sensor.light')
    for value in options:
        schema(value)
    assert (schema('sensor.LIGHT, light.kitchen ') == ['sensor.light', 'light.kitchen'])
[ "def", "test_entity_ids", "(", ")", ":", "schema", "=", "vol", ".", "Schema", "(", "cv", ".", "entity_ids", ")", "options", "=", "(", "'invalid_entity'", ",", "'sensor.light,sensor_invalid'", ",", "[", "'invalid_entity'", "]", ",", "[", "'sensor.light'", ",", "'sensor_invalid'", "]", ",", "[", "'sensor.light,sensor_invalid'", "]", ")", "for", "value", "in", "options", ":", "with", "pytest", ".", "raises", "(", "vol", ".", "MultipleInvalid", ")", ":", "schema", "(", "value", ")", "options", "=", "(", "[", "]", ",", "[", "'sensor.light'", "]", ",", "'sensor.light'", ")", "for", "value", "in", "options", ":", "schema", "(", "value", ")", "assert", "(", "schema", "(", "'sensor.LIGHT, light.kitchen '", ")", "==", "[", "'sensor.light'", ",", "'light.kitchen'", "]", ")" ]
test entity id validation .
train
false
11,716
@dispatch(Slice, h5py.Dataset)
def pre_compute(expr, data, scope=None, **kwargs):
    return data
[ "@", "dispatch", "(", "Slice", ",", "h5py", ".", "Dataset", ")", "def", "pre_compute", "(", "expr", ",", "data", ",", "scope", "=", "None", ",", "**", "kwargs", ")", ":", "return", "data" ]
dont push slices into memory .
train
false
11,717
def check_untested(funcdict, c_cls, p_cls):
    c_attr = set(dir(c_cls))
    p_attr = set(dir(p_cls))
    intersect = (c_attr & p_attr)
    funcdict['c_only'] = tuple(sorted((c_attr - intersect)))
    funcdict['p_only'] = tuple(sorted((p_attr - intersect)))
    tested = set()
    for lst in funcdict.values():
        for v in lst:
            v = (v.replace('context.', '') if (c_cls == C.Context) else v)
            tested.add(v)
    funcdict['untested'] = tuple(sorted((intersect - tested)))
[ "def", "check_untested", "(", "funcdict", ",", "c_cls", ",", "p_cls", ")", ":", "c_attr", "=", "set", "(", "dir", "(", "c_cls", ")", ")", "p_attr", "=", "set", "(", "dir", "(", "p_cls", ")", ")", "intersect", "=", "(", "c_attr", "&", "p_attr", ")", "funcdict", "[", "'c_only'", "]", "=", "tuple", "(", "sorted", "(", "(", "c_attr", "-", "intersect", ")", ")", ")", "funcdict", "[", "'p_only'", "]", "=", "tuple", "(", "sorted", "(", "(", "p_attr", "-", "intersect", ")", ")", ")", "tested", "=", "set", "(", ")", "for", "lst", "in", "funcdict", ".", "values", "(", ")", ":", "for", "v", "in", "lst", ":", "v", "=", "(", "v", ".", "replace", "(", "'context.'", ",", "''", ")", "if", "(", "c_cls", "==", "C", ".", "Context", ")", "else", "v", ")", "tested", ".", "add", "(", "v", ")", "funcdict", "[", "'untested'", "]", "=", "tuple", "(", "sorted", "(", "(", "intersect", "-", "tested", ")", ")", ")" ]
determine untested .
train
false
11,719
def format_time_diff(start=None, end=None):
    if (end is None):
        end = datetime.datetime.now()
    diff = (end - start)
    (minutes, seconds) = divmod(diff.seconds, 60)
    (hours, minutes) = divmod(minutes, 60)
    days = diff.days
    output = []
    written = False
    if days:
        written = True
        output.append(('%dd' % days))
    if (written or hours):
        written = True
        output.append(('%dh' % hours))
    if (written or minutes):
        output.append(('%dm' % minutes))
    output.append(('%ds' % seconds))
    return ':'.join(output)
[ "def", "format_time_diff", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "(", "end", "is", "None", ")", ":", "end", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "diff", "=", "(", "end", "-", "start", ")", "(", "minutes", ",", "seconds", ")", "=", "divmod", "(", "diff", ".", "seconds", ",", "60", ")", "(", "hours", ",", "minutes", ")", "=", "divmod", "(", "minutes", ",", "60", ")", "days", "=", "diff", ".", "days", "output", "=", "[", "]", "written", "=", "False", "if", "days", ":", "written", "=", "True", "output", ".", "append", "(", "(", "'%dd'", "%", "days", ")", ")", "if", "(", "written", "or", "hours", ")", ":", "written", "=", "True", "output", ".", "append", "(", "(", "'%dh'", "%", "hours", ")", ")", "if", "(", "written", "or", "minutes", ")", ":", "output", ".", "append", "(", "(", "'%dm'", "%", "minutes", ")", ")", "output", ".", "append", "(", "(", "'%ds'", "%", "seconds", ")", ")", "return", "':'", ".", "join", "(", "output", ")" ]
formats the difference between two times as xd:xh:xm:xs .
train
false
11,720
def must_generate_value(name, type, ignored_template_inputs, spec_info, spec, ignore_template_numbers):
    if (ignore_template_numbers and (type == u'Number')):
        return False
    if (spec.xor and (spec.xor[0] != name)):
        return False
    if (u'an existing directory name' in spec_info):
        return False
    if ((u'a list' in spec_info) or (u'a tuple' in spec_info)):
        return False
    if (u'a dictionary' in spec_info):
        return False
    if (u"' or '" in spec_info):
        return False
    if (not ignored_template_inputs):
        return True
    return (not (name in ignored_template_inputs))
[ "def", "must_generate_value", "(", "name", ",", "type", ",", "ignored_template_inputs", ",", "spec_info", ",", "spec", ",", "ignore_template_numbers", ")", ":", "if", "(", "ignore_template_numbers", "and", "(", "type", "==", "u'Number'", ")", ")", ":", "return", "False", "if", "(", "spec", ".", "xor", "and", "(", "spec", ".", "xor", "[", "0", "]", "!=", "name", ")", ")", ":", "return", "False", "if", "(", "u'an existing directory name'", "in", "spec_info", ")", ":", "return", "False", "if", "(", "(", "u'a list'", "in", "spec_info", ")", "or", "(", "u'a tuple'", "in", "spec_info", ")", ")", ":", "return", "False", "if", "(", "u'a dictionary'", "in", "spec_info", ")", ":", "return", "False", "if", "(", "u\"' or '\"", "in", "spec_info", ")", ":", "return", "False", "if", "(", "not", "ignored_template_inputs", ")", ":", "return", "True", "return", "(", "not", "(", "name", "in", "ignored_template_inputs", ")", ")" ]
return true if a temporary value must be generated for this input .
train
false
11,721
def resource_defaults_to(name, default, value, extra_args=None, cibname=None):
    return _item_present(name=name, item='resource', item_id='{0}={1}'.format(default, value), item_type=None, show='defaults', create='defaults', extra_args=extra_args, cibname=cibname)
[ "def", "resource_defaults_to", "(", "name", ",", "default", ",", "value", ",", "extra_args", "=", "None", ",", "cibname", "=", "None", ")", ":", "return", "_item_present", "(", "name", "=", "name", ",", "item", "=", "'resource'", ",", "item_id", "=", "'{0}={1}'", ".", "format", "(", "default", ",", "value", ")", ",", "item_type", "=", "None", ",", "show", "=", "'defaults'", ",", "create", "=", "'defaults'", ",", "extra_args", "=", "extra_args", ",", "cibname", "=", "cibname", ")" ]
ensure a resource default in the cluster is set to a given value should be run on one cluster node only can only be run on a node with a functional pacemaker/corosync name irrelevant .
train
true
11,722
def freqd_zpk(z, p, k, worN=None, whole=False):
    (z, p) = map(atleast_1d, (z, p))
    if whole:
        lastpoint = (2 * pi)
    else:
        lastpoint = pi
    if (worN is None):
        N = 512
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    elif isinstance(worN, int):
        N = worN
        w = numpy.linspace(0, lastpoint, N, endpoint=False)
    else:
        w = worN
    w = atleast_1d(w)
    zm1 = exp((1j * w))
    h = ((k * polyvalfromroots(zm1, z)) / polyvalfromroots(zm1, p))
    return (w, h)
[ "def", "freqd_zpk", "(", "z", ",", "p", ",", "k", ",", "worN", "=", "None", ",", "whole", "=", "False", ")", ":", "(", "z", ",", "p", ")", "=", "map", "(", "atleast_1d", ",", "(", "z", ",", "p", ")", ")", "if", "whole", ":", "lastpoint", "=", "(", "2", "*", "pi", ")", "else", ":", "lastpoint", "=", "pi", "if", "(", "worN", "is", "None", ")", ":", "N", "=", "512", "w", "=", "numpy", ".", "linspace", "(", "0", ",", "lastpoint", ",", "N", ",", "endpoint", "=", "False", ")", "elif", "isinstance", "(", "worN", ",", "int", ")", ":", "N", "=", "worN", "w", "=", "numpy", ".", "linspace", "(", "0", ",", "lastpoint", ",", "N", ",", "endpoint", "=", "False", ")", "else", ":", "w", "=", "worN", "w", "=", "atleast_1d", "(", "w", ")", "zm1", "=", "exp", "(", "(", "1j", "*", "w", ")", ")", "h", "=", "(", "(", "k", "*", "polyvalfromroots", "(", "zm1", ",", "z", ")", ")", "/", "polyvalfromroots", "(", "zm1", ",", "p", ")", ")", "return", "(", "w", ",", "h", ")" ]
compute the frequency response of a digital filter in zpk form .
train
false
11,724
def quantile(iterable, p=0.5, sort=True, a=1, b=(-1), c=0, d=1):
    s = (sorted(iterable) if (sort is True) else list(iterable))
    n = len(s)
    (f, i) = modf(((a + ((b + n) * p)) - 1))
    if (n == 0):
        raise ValueError('quantile() arg is an empty sequence')
    if (f == 0):
        return float(s[int(i)])
    if (i < 0):
        return float(s[int(i)])
    if (i >= n):
        return float(s[(-1)])
    i = int(floor(i))
    return (s[i] + ((s[(i + 1)] - s[i]) * (c + (d * f))))
[ "def", "quantile", "(", "iterable", ",", "p", "=", "0.5", ",", "sort", "=", "True", ",", "a", "=", "1", ",", "b", "=", "(", "-", "1", ")", ",", "c", "=", "0", ",", "d", "=", "1", ")", ":", "s", "=", "(", "sorted", "(", "iterable", ")", "if", "(", "sort", "is", "True", ")", "else", "list", "(", "iterable", ")", ")", "n", "=", "len", "(", "s", ")", "(", "f", ",", "i", ")", "=", "modf", "(", "(", "(", "a", "+", "(", "(", "b", "+", "n", ")", "*", "p", ")", ")", "-", "1", ")", ")", "if", "(", "n", "==", "0", ")", ":", "raise", "ValueError", "(", "'quantile() arg is an empty sequence'", ")", "if", "(", "f", "==", "0", ")", ":", "return", "float", "(", "s", "[", "int", "(", "i", ")", "]", ")", "if", "(", "i", "<", "0", ")", ":", "return", "float", "(", "s", "[", "int", "(", "i", ")", "]", ")", "if", "(", "i", ">=", "n", ")", ":", "return", "float", "(", "s", "[", "(", "-", "1", ")", "]", ")", "i", "=", "int", "(", "floor", "(", "i", ")", ")", "return", "(", "s", "[", "i", "]", "+", "(", "(", "s", "[", "(", "i", "+", "1", ")", "]", "-", "s", "[", "i", "]", ")", "*", "(", "c", "+", "(", "d", "*", "f", ")", ")", ")", ")" ]
returns the pth-percentile value in x .
train
false
11,725
def release_ownership_of_exploration(committer_id, exploration_id):
    _release_ownership_of_activity(committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
[ "def", "release_ownership_of_exploration", "(", "committer_id", ",", "exploration_id", ")", ":", "_release_ownership_of_activity", "(", "committer_id", ",", "exploration_id", ",", "feconf", ".", "ACTIVITY_TYPE_EXPLORATION", ")" ]
releases ownership of an exploration to the community .
train
false
11,728
def renderer_doc(*args):
    renderers_ = salt.loader.render(__opts__, [])
    docs = {}
    if (not args):
        for func in six.iterkeys(renderers_):
            docs[func] = renderers_[func].__doc__
        return _strip_rst(docs)
    for module in args:
        if (('*' in module) or ('.' in module)):
            for func in fnmatch.filter(renderers_, module):
                docs[func] = renderers_[func].__doc__
        else:
            moduledot = (module + '.')
            for func in six.iterkeys(renderers_):
                if func.startswith(moduledot):
                    docs[func] = renderers_[func].__doc__
    return _strip_rst(docs)
[ "def", "renderer_doc", "(", "*", "args", ")", ":", "renderers_", "=", "salt", ".", "loader", ".", "render", "(", "__opts__", ",", "[", "]", ")", "docs", "=", "{", "}", "if", "(", "not", "args", ")", ":", "for", "func", "in", "six", ".", "iterkeys", "(", "renderers_", ")", ":", "docs", "[", "func", "]", "=", "renderers_", "[", "func", "]", ".", "__doc__", "return", "_strip_rst", "(", "docs", ")", "for", "module", "in", "args", ":", "if", "(", "(", "'*'", "in", "module", ")", "or", "(", "'.'", "in", "module", ")", ")", ":", "for", "func", "in", "fnmatch", ".", "filter", "(", "renderers_", ",", "module", ")", ":", "docs", "[", "func", "]", "=", "renderers_", "[", "func", "]", ".", "__doc__", "else", ":", "moduledot", "=", "(", "module", "+", "'.'", ")", "for", "func", "in", "six", ".", "iterkeys", "(", "renderers_", ")", ":", "if", "func", ".", "startswith", "(", "moduledot", ")", ":", "docs", "[", "func", "]", "=", "renderers_", "[", "func", "]", ".", "__doc__", "return", "_strip_rst", "(", "docs", ")" ]
return the docstrings for all renderers .
train
true
11,729
def _emulate(func, *args, **kwargs):
    with raise_on_meta_error(funcname(func)):
        return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
[ "def", "_emulate", "(", "func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "with", "raise_on_meta_error", "(", "funcname", "(", "func", ")", ")", ":", "return", "func", "(", "*", "_extract_meta", "(", "args", ",", "True", ")", ",", "**", "_extract_meta", "(", "kwargs", ",", "True", ")", ")" ]
apply a function using args / kwargs .
train
false
11,730
def getAlterationFileLine(fileName):
    lines = getAlterationLines(fileName)
    if (len(lines) == 0):
        return []
    return getAlterationFileLineBlindly(fileName)
[ "def", "getAlterationFileLine", "(", "fileName", ")", ":", "lines", "=", "getAlterationLines", "(", "fileName", ")", "if", "(", "len", "(", "lines", ")", "==", "0", ")", ":", "return", "[", "]", "return", "getAlterationFileLineBlindly", "(", "fileName", ")" ]
get the alteration file line from the filename .
train
false
11,731
def MINMAX(ds, count, timeperiod=(- (2 ** 31))):
    ret = call_talib_with_ds(ds, count, talib.MINMAX, timeperiod)
    if (ret is None):
        ret = (None, None)
    return ret
[ "def", "MINMAX", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ")", ":", "ret", "=", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "MINMAX", ",", "timeperiod", ")", "if", "(", "ret", "is", "None", ")", ":", "ret", "=", "(", "None", ",", "None", ")", "return", "ret" ]
lowest and highest values over a specified period .
train
false
11,732
def denormalize(val):
    if (val.find('_') != (-1)):
        val = val.replace('_', '-')
    return val
[ "def", "denormalize", "(", "val", ")", ":", "if", "(", "val", ".", "find", "(", "'_'", ")", "!=", "(", "-", "1", ")", ")", ":", "val", "=", "val", ".", "replace", "(", "'_'", ",", "'-'", ")", "return", "val" ]
reverse the normalization done by the normalize function .
train
false
11,733
def _format_job_instance(job):
    ret = {'Function': job.get('fun', 'unknown-function'), 'Arguments': list(job.get('arg', [])), 'Target': job.get('tgt', 'unknown-target'), 'Target-type': job.get('tgt_type', []), 'User': job.get('user', 'root')}
    if ('metadata' in job):
        ret['Metadata'] = job.get('metadata', {})
    elif ('kwargs' in job):
        if ('metadata' in job['kwargs']):
            ret['Metadata'] = job['kwargs'].get('metadata', {})
    return ret
[ "def", "_format_job_instance", "(", "job", ")", ":", "ret", "=", "{", "'Function'", ":", "job", ".", "get", "(", "'fun'", ",", "'unknown-function'", ")", ",", "'Arguments'", ":", "list", "(", "job", ".", "get", "(", "'arg'", ",", "[", "]", ")", ")", ",", "'Target'", ":", "job", ".", "get", "(", "'tgt'", ",", "'unknown-target'", ")", ",", "'Target-type'", ":", "job", ".", "get", "(", "'tgt_type'", ",", "[", "]", ")", ",", "'User'", ":", "job", ".", "get", "(", "'user'", ",", "'root'", ")", "}", "if", "(", "'metadata'", "in", "job", ")", ":", "ret", "[", "'Metadata'", "]", "=", "job", ".", "get", "(", "'metadata'", ",", "{", "}", ")", "elif", "(", "'kwargs'", "in", "job", ")", ":", "if", "(", "'metadata'", "in", "job", "[", "'kwargs'", "]", ")", ":", "ret", "[", "'Metadata'", "]", "=", "job", "[", "'kwargs'", "]", ".", "get", "(", "'metadata'", ",", "{", "}", ")", "return", "ret" ]
format the job instance correctly .
train
true
11,735
@snippet
def sink_storage(client, to_delete):
    bucket = _sink_storage_setup(client)
    to_delete.append(bucket)
    SINK_NAME = ('robots-storage-%d' % (_millis(),))
    FILTER = 'textPayload:robot'
    DESTINATION = ('storage.googleapis.com/%s' % (bucket.name,))
    sink = client.sink(SINK_NAME, filter_=FILTER, destination=DESTINATION)
    assert (not sink.exists())
    sink.create()
    assert sink.exists()
    to_delete.insert(0, sink)
[ "@", "snippet", "def", "sink_storage", "(", "client", ",", "to_delete", ")", ":", "bucket", "=", "_sink_storage_setup", "(", "client", ")", "to_delete", ".", "append", "(", "bucket", ")", "SINK_NAME", "=", "(", "'robots-storage-%d'", "%", "(", "_millis", "(", ")", ",", ")", ")", "FILTER", "=", "'textPayload:robot'", "DESTINATION", "=", "(", "'storage.googleapis.com/%s'", "%", "(", "bucket", ".", "name", ",", ")", ")", "sink", "=", "client", ".", "sink", "(", "SINK_NAME", ",", "filter_", "=", "FILTER", ",", "destination", "=", "DESTINATION", ")", "assert", "(", "not", "sink", ".", "exists", "(", ")", ")", "sink", ".", "create", "(", ")", "assert", "sink", ".", "exists", "(", ")", "to_delete", ".", "insert", "(", "0", ",", "sink", ")" ]
sink log entries to storage .
train
true
11,736
@_docstring('work')
def get_works_by_iswc(iswc, includes=[]):
    return _do_mb_query('iswc', iswc, includes)
[ "@", "_docstring", "(", "'work'", ")", "def", "get_works_by_iswc", "(", "iswc", ",", "includes", "=", "[", "]", ")", ":", "return", "_do_mb_query", "(", "'iswc'", ",", "iswc", ",", "includes", ")" ]
search for works with an :musicbrainz:iswc .
train
false
11,739
def process_twitter_outbox():
    msg.process_outbox(contact_method='TWITTER')
[ "def", "process_twitter_outbox", "(", ")", ":", "msg", ".", "process_outbox", "(", "contact_method", "=", "'TWITTER'", ")" ]
send pending twitter messages .
train
false
11,740
def scipy2sparse(vec, eps=1e-09):
    vec = vec.tocsr()
    assert (vec.shape[0] == 1)
    return [(int(pos), float(val)) for (pos, val) in zip(vec.indices, vec.data) if (np.abs(val) > eps)]
[ "def", "scipy2sparse", "(", "vec", ",", "eps", "=", "1e-09", ")", ":", "vec", "=", "vec", ".", "tocsr", "(", ")", "assert", "(", "vec", ".", "shape", "[", "0", "]", "==", "1", ")", "return", "[", "(", "int", "(", "pos", ")", ",", "float", "(", "val", ")", ")", "for", "(", "pos", ",", "val", ")", "in", "zip", "(", "vec", ".", "indices", ",", "vec", ".", "data", ")", "if", "(", "np", ".", "abs", "(", "val", ")", ">", "eps", ")", "]" ]
convert a scipy .
train
false
11,741
@event(u'task.execute.started')
def load_task(task):
    if (not SimplePersistence.class_store[task.name]):
        SimplePersistence.load(task.name)
[ "@", "event", "(", "u'task.execute.started'", ")", "def", "load_task", "(", "task", ")", ":", "if", "(", "not", "SimplePersistence", ".", "class_store", "[", "task", ".", "name", "]", ")", ":", "SimplePersistence", ".", "load", "(", "task", ".", "name", ")" ]
imports task dynamically given a module and a task name .
train
false
11,742
def twips(val, mult=0.05):
    try:
        return (float(val) * mult)
    except (ValueError, TypeError, AttributeError, KeyError):
        if (val and val.endswith(u'pt') and (mult == 0.05)):
            return twips(val[:(-2)], mult=1.0)
[ "def", "twips", "(", "val", ",", "mult", "=", "0.05", ")", ":", "try", ":", "return", "(", "float", "(", "val", ")", "*", "mult", ")", "except", "(", "ValueError", ",", "TypeError", ",", "AttributeError", ",", "KeyError", ")", ":", "if", "(", "val", "and", "val", ".", "endswith", "(", "u'pt'", ")", "and", "(", "mult", "==", "0.05", ")", ")", ":", "return", "twips", "(", "val", "[", ":", "(", "-", "2", ")", "]", ",", "mult", "=", "1.0", ")" ]
parse val as either a pure number representing twentieths of a point or a number followed by the suffix pt .
train
false
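a quick illustration of the twips parsing rules above (a sketch; the inputs are hypothetical):

    print(twips('100'))   # 5.0   -- 100 twentieths of a point, scaled by 0.05
    print(twips('24pt'))  # 24.0  -- explicit 'pt' suffix is re-parsed at full scale
    print(twips('bad'))   # None  -- unparseable values fall through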
11,743
def testRemoteSwitches(remote='ubuntu2', link=RemoteGRELink): servers = ['localhost', remote] topo = TreeTopo(depth=4, fanout=2) net = MininetCluster(topo=topo, servers=servers, link=link, placement=RoundRobinPlacer) net.start() net.pingAll() net.stop()
[ "def", "testRemoteSwitches", "(", "remote", "=", "'ubuntu2'", ",", "link", "=", "RemoteGRELink", ")", ":", "servers", "=", "[", "'localhost'", ",", "remote", "]", "topo", "=", "TreeTopo", "(", "depth", "=", "4", ",", "fanout", "=", "2", ")", "net", "=", "MininetCluster", "(", "topo", "=", "topo", ",", "servers", "=", "servers", ",", "link", "=", "link", ",", "placement", "=", "RoundRobinPlacer", ")", "net", ".", "start", "(", ")", "net", ".", "pingAll", "(", ")", "net", ".", "stop", "(", ")" ]
test with local hosts and remote switches .
train
false
11,745
def list_disabled(): return __salt__['grains.get']('state_runs_disabled')
[ "def", "list_disabled", "(", ")", ":", "return", "__salt__", "[", "'grains.get'", "]", "(", "'state_runs_disabled'", ")" ]
list the states which are currently disabled .
train
false
11,746
@webob.dec.wsgify def delete_resource_provider(req): uuid = util.wsgi_path_item(req.environ, 'uuid') context = req.environ['placement.context'] try: resource_provider = objects.ResourceProvider.get_by_uuid(context, uuid) resource_provider.destroy() except exception.ResourceProviderInUse as exc: raise webob.exc.HTTPConflict((_('Unable to delete resource provider %(rp_uuid)s: %(error)s') % {'rp_uuid': uuid, 'error': exc}), json_formatter=util.json_error_formatter) except exception.NotFound as exc: raise webob.exc.HTTPNotFound((_('No resource provider with uuid %s found for delete') % uuid)) req.response.status = 204 req.response.content_type = None return req.response
[ "@", "webob", ".", "dec", ".", "wsgify", "def", "delete_resource_provider", "(", "req", ")", ":", "uuid", "=", "util", ".", "wsgi_path_item", "(", "req", ".", "environ", ",", "'uuid'", ")", "context", "=", "req", ".", "environ", "[", "'placement.context'", "]", "try", ":", "resource_provider", "=", "objects", ".", "ResourceProvider", ".", "get_by_uuid", "(", "context", ",", "uuid", ")", "resource_provider", ".", "destroy", "(", ")", "except", "exception", ".", "ResourceProviderInUse", "as", "exc", ":", "raise", "webob", ".", "exc", ".", "HTTPConflict", "(", "(", "_", "(", "'Unable to delete resource provider %(rp_uuid)s: %(error)s'", ")", "%", "{", "'rp_uuid'", ":", "uuid", ",", "'error'", ":", "exc", "}", ")", ",", "json_formatter", "=", "util", ".", "json_error_formatter", ")", "except", "exception", ".", "NotFound", "as", "exc", ":", "raise", "webob", ".", "exc", ".", "HTTPNotFound", "(", "(", "_", "(", "'No resource provider with uuid %s found for delete'", ")", "%", "uuid", ")", ")", "req", ".", "response", ".", "status", "=", "204", "req", ".", "response", ".", "content_type", "=", "None", "return", "req", ".", "response" ]
handle DELETE to destroy a single resource provider .
train
false
11,748
def parse_web_listing(url): dirs = [] files = [] r = requests.get(url, timeout=3.05) if (r.status_code != requests.codes.ok): raise Exception(('HTTP Status Code %s' % r.status_code)) for line in r.content.split('\n'): line = line.strip() match = re.match('^.*\\<a.+href\\=[\\\'\\"]([^\\\'\\"]+)[\\\'\\"].*\\>.*(\\w{1,4}-\\w{1,4}-\\w{1,4})', line, flags=re.IGNORECASE) if match: if match.group(1).endswith('/'): dirs.append(match.group(1)) elif match.group(1).lower().endswith(utils.image.SUPPORTED_EXTENSIONS): files.append(match.group(1)) return (dirs, files)
[ "def", "parse_web_listing", "(", "url", ")", ":", "dirs", "=", "[", "]", "files", "=", "[", "]", "r", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "3.05", ")", "if", "(", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ")", ":", "raise", "Exception", "(", "(", "'HTTP Status Code %s'", "%", "r", ".", "status_code", ")", ")", "for", "line", "in", "r", ".", "content", ".", "split", "(", "'\\n'", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "match", "=", "re", ".", "match", "(", "'^.*\\\\<a.+href\\\\=[\\\\\\'\\\\\"]([^\\\\\\'\\\\\"]+)[\\\\\\'\\\\\"].*\\\\>.*(\\\\w{1,4}-\\\\w{1,4}-\\\\w{1,4})'", ",", "line", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "if", "match", ":", "if", "match", ".", "group", "(", "1", ")", ".", "endswith", "(", "'/'", ")", ":", "dirs", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "elif", "match", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ".", "endswith", "(", "utils", ".", "image", ".", "SUPPORTED_EXTENSIONS", ")", ":", "files", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "return", "(", "dirs", ",", "files", ")" ]
utility for parse_folder(): parses an autoindexed folder listing into directories and files, returning the two lists .
train
false
11,750
@utils.arg('ip_range', metavar='<range>', help=_('Address range to delete.')) @deprecated_network def do_floating_ip_bulk_delete(cs, args): cs.floating_ips_bulk.delete(args.ip_range)
[ "@", "utils", ".", "arg", "(", "'ip_range'", ",", "metavar", "=", "'<range>'", ",", "help", "=", "_", "(", "'Address range to delete.'", ")", ")", "@", "deprecated_network", "def", "do_floating_ip_bulk_delete", "(", "cs", ",", "args", ")", ":", "cs", ".", "floating_ips_bulk", ".", "delete", "(", "args", ".", "ip_range", ")" ]
bulk delete floating ips by range .
train
false
11,751
def unpack_tuple(builder, tup, count=None): if (count is None): count = len(tup.type.elements) vals = [builder.extract_value(tup, i) for i in range(count)] return vals
[ "def", "unpack_tuple", "(", "builder", ",", "tup", ",", "count", "=", "None", ")", ":", "if", "(", "count", "is", "None", ")", ":", "count", "=", "len", "(", "tup", ".", "type", ".", "elements", ")", "vals", "=", "[", "builder", ".", "extract_value", "(", "tup", ",", "i", ")", "for", "i", "in", "range", "(", "count", ")", "]", "return", "vals" ]
unpack an array or structure of values .
train
false
11,753
def user_redirects(user): return Document.objects.filter(revisions__creator=user).filter(html__startswith='<p>REDIRECT <a').distinct()
[ "def", "user_redirects", "(", "user", ")", ":", "return", "Document", ".", "objects", ".", "filter", "(", "revisions__creator", "=", "user", ")", ".", "filter", "(", "html__startswith", "=", "'<p>REDIRECT <a'", ")", ".", "distinct", "(", ")" ]
return the redirects a user has contributed to .
train
false
11,754
def prepare_docstring(s, ignore=1): lines = s.expandtabs().splitlines() margin = sys.maxsize for line in lines[ignore:]: content = len(line.lstrip()) if content: indent = (len(line) - content) margin = min(margin, indent) for i in range(ignore): if (i < len(lines)): lines[i] = lines[i].lstrip() if (margin < sys.maxsize): for i in range(ignore, len(lines)): lines[i] = lines[i][margin:] while (lines and (not lines[0])): lines.pop(0) if (lines and lines[(-1)]): lines.append('') return lines
[ "def", "prepare_docstring", "(", "s", ",", "ignore", "=", "1", ")", ":", "lines", "=", "s", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "margin", "=", "sys", ".", "maxsize", "for", "line", "in", "lines", "[", "ignore", ":", "]", ":", "content", "=", "len", "(", "line", ".", "lstrip", "(", ")", ")", "if", "content", ":", "indent", "=", "(", "len", "(", "line", ")", "-", "content", ")", "margin", "=", "min", "(", "margin", ",", "indent", ")", "for", "i", "in", "range", "(", "ignore", ")", ":", "if", "(", "i", "<", "len", "(", "lines", ")", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", ".", "lstrip", "(", ")", "if", "(", "margin", "<", "sys", ".", "maxsize", ")", ":", "for", "i", "in", "range", "(", "ignore", ",", "len", "(", "lines", ")", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", "[", "margin", ":", "]", "while", "(", "lines", "and", "(", "not", "lines", "[", "0", "]", ")", ")", ":", "lines", ".", "pop", "(", "0", ")", "if", "(", "lines", "and", "lines", "[", "(", "-", "1", ")", "]", ")", ":", "lines", ".", "append", "(", "''", ")", "return", "lines" ]
convert a docstring into lines of parseable rest .
train
false
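a small worked example of prepare_docstring as defined above (the docstring literal is hypothetical):

    doc = "Summary line.\n\n    Indented body line.\n"
    print(prepare_docstring(doc))
    # ['Summary line.', '', 'Indented body line.', '']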
11,756
def get_issues_filters_data(project, querysets): data = OrderedDict([('types', _get_issues_types(project, querysets['types'])), ('statuses', _get_issues_statuses(project, querysets['statuses'])), ('priorities', _get_issues_priorities(project, querysets['priorities'])), ('severities', _get_issues_severities(project, querysets['severities'])), ('assigned_to', _get_issues_assigned_to(project, querysets['assigned_to'])), ('owners', _get_issues_owners(project, querysets['owners'])), ('tags', _get_issues_tags(project, querysets['tags']))]) return data
[ "def", "get_issues_filters_data", "(", "project", ",", "querysets", ")", ":", "data", "=", "OrderedDict", "(", "[", "(", "'types'", ",", "_get_issues_types", "(", "project", ",", "querysets", "[", "'types'", "]", ")", ")", ",", "(", "'statuses'", ",", "_get_issues_statuses", "(", "project", ",", "querysets", "[", "'statuses'", "]", ")", ")", ",", "(", "'priorities'", ",", "_get_issues_priorities", "(", "project", ",", "querysets", "[", "'priorities'", "]", ")", ")", ",", "(", "'severities'", ",", "_get_issues_severities", "(", "project", ",", "querysets", "[", "'severities'", "]", ")", ")", ",", "(", "'assigned_to'", ",", "_get_issues_assigned_to", "(", "project", ",", "querysets", "[", "'assigned_to'", "]", ")", ")", ",", "(", "'owners'", ",", "_get_issues_owners", "(", "project", ",", "querysets", "[", "'owners'", "]", ")", ")", ",", "(", "'tags'", ",", "_get_issues_tags", "(", "project", ",", "querysets", "[", "'tags'", "]", ")", ")", "]", ")", "return", "data" ]
given a project and a dict of issues querysets, return an ordered dict with the issues filters data (types, statuses, priorities, severities, assigned_to, owners and tags) .
train
false
11,757
def get_calling_mod(stack): form = stack[1] return getmodule(form[0]).__name__
[ "def", "get_calling_mod", "(", "stack", ")", ":", "form", "=", "stack", "[", "1", "]", "return", "getmodule", "(", "form", "[", "0", "]", ")", ".", "__name__" ]
retrieve the name of the calling module based on the call stack .
train
false
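a hedged usage sketch for get_calling_mod (assumes getmodule is inspect.getmodule, as the snippet's stack[1][0] frame access implies):

    import inspect

    def who_called_me():
        # stack()[1] is the caller's frame record; [0] is the frame object
        return get_calling_mod(inspect.stack())

    print(who_called_me())  # '__main__' when run as a script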
11,758
def literal_column(text, type_=None): return ColumnClause(text, type_=type_, is_literal=True)
[ "def", "literal_column", "(", "text", ",", "type_", "=", "None", ")", ":", "return", "ColumnClause", "(", "text", ",", "type_", "=", "type_", ",", "is_literal", "=", "True", ")" ]
return a textual column expression .
train
false
11,759
def do_vote_by_user(parser, token): bits = token.contents.split() if (len(bits) != 6): raise template.TemplateSyntaxError(("'%s' tag takes exactly five arguments" % bits[0])) if (bits[2] != 'on'): raise template.TemplateSyntaxError(("second argument to '%s' tag must be 'on'" % bits[0])) if (bits[4] != 'as'): raise template.TemplateSyntaxError(("fourth argument to '%s' tag must be 'as'" % bits[0])) return VoteByUserNode(bits[1], bits[3], bits[5])
[ "def", "do_vote_by_user", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "(", "len", "(", "bits", ")", "!=", "6", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "\"'%s' tag takes exactly five arguments\"", "%", "bits", "[", "0", "]", ")", ")", "if", "(", "bits", "[", "2", "]", "!=", "'on'", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "\"second argument to '%s' tag must be 'on'\"", "%", "bits", "[", "0", "]", ")", ")", "if", "(", "bits", "[", "4", "]", "!=", "'as'", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "\"fourth argument to '%s' tag must be 'as'\"", "%", "bits", "[", "0", "]", ")", ")", "return", "VoteByUserNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "3", "]", ",", "bits", "[", "5", "]", ")" ]
retrieves the vote cast by a user on a particular object and stores it in a context variable .
train
false
11,760
def onboarding_complete(request): return (setup_wizard_complete() and setup_blocks_complete(request))
[ "def", "onboarding_complete", "(", "request", ")", ":", "return", "(", "setup_wizard_complete", "(", ")", "and", "setup_blocks_complete", "(", "request", ")", ")" ]
check if the shop wizard and all setup blocks are complete . returns a boolean indicating whether onboarding is complete .
train
false
11,761
def getmembers(object, predicate=None): results = [] for key in dir(object): value = getattr(object, key) if ((not predicate) or predicate(value)): results.append((key, value)) results.sort() return results
[ "def", "getmembers", "(", "object", ",", "predicate", "=", "None", ")", ":", "results", "=", "[", "]", "for", "key", "in", "dir", "(", "object", ")", ":", "value", "=", "getattr", "(", "object", ",", "key", ")", "if", "(", "(", "not", "predicate", ")", "or", "predicate", "(", "value", ")", ")", ":", "results", ".", "append", "(", "(", "key", ",", "value", ")", ")", "results", ".", "sort", "(", ")", "return", "results" ]
return all members of an object as pairs sorted by name .
train
true
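a short example of the getmembers snippet above (the class is hypothetical; the predicate keeps only int-valued attributes):

    class Point:
        x = 1
        y = 2

    print(getmembers(Point, lambda v: isinstance(v, int)))
    # [('x', 1), ('y', 2)]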
11,762
def _isInt(x, precision=0.0001): xInt = int(round(x)) return ((abs((x - xInt)) < (precision * x)), xInt)
[ "def", "_isInt", "(", "x", ",", "precision", "=", "0.0001", ")", ":", "xInt", "=", "int", "(", "round", "(", "x", ")", ")", "return", "(", "(", "abs", "(", "(", "x", "-", "xInt", ")", ")", "<", "(", "precision", "*", "x", ")", ")", ",", "xInt", ")" ]
return, for a given floating point number, whether it is within the given relative precision of an integer, together with the rounded integer value .
train
true
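hedged examples of _isInt's relative-precision check (note the tolerance scales with x, so it degenerates near zero):

    print(_isInt(5.00001))  # (True, 5)  -- within 0.01% of an integer
    print(_isInt(5.2))      # (False, 5) -- too far from the rounded value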
11,764
def token_hex(nbytes=None): return binascii.hexlify(token_bytes(nbytes)).decode('ascii')
[ "def", "token_hex", "(", "nbytes", "=", "None", ")", ":", "return", "binascii", ".", "hexlify", "(", "token_bytes", "(", "nbytes", ")", ")", ".", "decode", "(", "'ascii'", ")" ]
return a random text string, in hexadecimal .
train
false
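the snippet above mirrors the standard library's secrets.token_hex (it depends on an undefined token_bytes helper); a quick equivalent check via the stdlib, where the output is random and the shown value is only illustrative:

    import secrets
    print(secrets.token_hex(8))  # e.g. '9f86d081884c7d65' (16 hex characters)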
11,765
def _getPredictedField(options): if ((not options['inferenceArgs']) or (not options['inferenceArgs']['predictedField'])): return (None, None) predictedField = options['inferenceArgs']['predictedField'] predictedFieldInfo = None includedFields = options['includedFields'] for info in includedFields: if (info['fieldName'] == predictedField): predictedFieldInfo = info break if (predictedFieldInfo is None): raise ValueError(("Predicted field '%s' does not exist in included fields." % predictedField)) predictedFieldType = predictedFieldInfo['fieldType'] return (predictedField, predictedFieldType)
[ "def", "_getPredictedField", "(", "options", ")", ":", "if", "(", "(", "not", "options", "[", "'inferenceArgs'", "]", ")", "or", "(", "not", "options", "[", "'inferenceArgs'", "]", "[", "'predictedField'", "]", ")", ")", ":", "return", "(", "None", ",", "None", ")", "predictedField", "=", "options", "[", "'inferenceArgs'", "]", "[", "'predictedField'", "]", "predictedFieldInfo", "=", "None", "includedFields", "=", "options", "[", "'includedFields'", "]", "for", "info", "in", "includedFields", ":", "if", "(", "info", "[", "'fieldName'", "]", "==", "predictedField", ")", ":", "predictedFieldInfo", "=", "info", "break", "if", "(", "predictedFieldInfo", "is", "None", ")", ":", "raise", "ValueError", "(", "(", "\"Predicted field '%s' does not exist in included fields.\"", "%", "predictedField", ")", ")", "predictedFieldType", "=", "predictedFieldInfo", "[", "'fieldType'", "]", "return", "(", "predictedField", ",", "predictedFieldType", ")" ]
gets the predicted field and its datatype from the options dictionary .
train
true
11,766
def FileExists(fname): import os try: os.stat(fname) return 1 except os.error as details: return 0
[ "def", "FileExists", "(", "fname", ")", ":", "import", "os", "try", ":", "os", ".", "stat", "(", "fname", ")", "return", "1", "except", "os", ".", "error", "as", "details", ":", "return", "0" ]
returns true if a given file exists, by stat-ing it on the local filesystem .
train
false
11,767
def read_html(io, match='.+', flavor=None, header=None, index_col=None, skiprows=None, attrs=None, parse_dates=False, tupleize_cols=False, thousands=',', encoding=None, decimal='.', converters=None, na_values=None, keep_default_na=True): _importers() if (isinstance(skiprows, numbers.Integral) and (skiprows < 0)): raise ValueError('cannot skip rows starting from the end of the data (you passed a negative value)') _validate_header_arg(header) return _parse(flavor=flavor, io=io, match=match, header=header, index_col=index_col, skiprows=skiprows, parse_dates=parse_dates, tupleize_cols=tupleize_cols, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, keep_default_na=keep_default_na)
[ "def", "read_html", "(", "io", ",", "match", "=", "'.+'", ",", "flavor", "=", "None", ",", "header", "=", "None", ",", "index_col", "=", "None", ",", "skiprows", "=", "None", ",", "attrs", "=", "None", ",", "parse_dates", "=", "False", ",", "tupleize_cols", "=", "False", ",", "thousands", "=", "','", ",", "encoding", "=", "None", ",", "decimal", "=", "'.'", ",", "converters", "=", "None", ",", "na_values", "=", "None", ",", "keep_default_na", "=", "True", ")", ":", "_importers", "(", ")", "if", "(", "isinstance", "(", "skiprows", ",", "numbers", ".", "Integral", ")", "and", "(", "skiprows", "<", "0", ")", ")", ":", "raise", "ValueError", "(", "'cannot skip rows starting from the end of the data (you passed a negative value)'", ")", "_validate_header_arg", "(", "header", ")", "return", "_parse", "(", "flavor", "=", "flavor", ",", "io", "=", "io", ",", "match", "=", "match", ",", "header", "=", "header", ",", "index_col", "=", "index_col", ",", "skiprows", "=", "skiprows", ",", "parse_dates", "=", "parse_dates", ",", "tupleize_cols", "=", "tupleize_cols", ",", "thousands", "=", "thousands", ",", "attrs", "=", "attrs", ",", "encoding", "=", "encoding", ",", "decimal", "=", "decimal", ",", "converters", "=", "converters", ",", "na_values", "=", "na_values", ",", "keep_default_na", "=", "keep_default_na", ")" ]
read html tables into a list of dataframe objects .
train
true
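a hedged usage sketch for the read_html snippet (pandas-style API; the URL and match pattern are hypothetical):

    import pandas as pd

    # returns a list of DataFrames, one per <table> whose text matches the pattern
    tables = pd.read_html('https://example.com/stats.html', match='Population')
    df = tables[0]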
11,768
def is_course_passed(course, grade_summary=None, student=None, request=None): nonzero_cutoffs = [cutoff for cutoff in course.grade_cutoffs.values() if (cutoff > 0)] success_cutoff = (min(nonzero_cutoffs) if nonzero_cutoffs else None) if (grade_summary is None): grade_summary = CourseGradeFactory().create(student, course).summary return (success_cutoff and (grade_summary['percent'] >= success_cutoff))
[ "def", "is_course_passed", "(", "course", ",", "grade_summary", "=", "None", ",", "student", "=", "None", ",", "request", "=", "None", ")", ":", "nonzero_cutoffs", "=", "[", "cutoff", "for", "cutoff", "in", "course", ".", "grade_cutoffs", ".", "values", "(", ")", "if", "(", "cutoff", ">", "0", ")", "]", "success_cutoff", "=", "(", "min", "(", "nonzero_cutoffs", ")", "if", "nonzero_cutoffs", "else", "None", ")", "if", "(", "grade_summary", "is", "None", ")", ":", "grade_summary", "=", "CourseGradeFactory", "(", ")", ".", "create", "(", "student", ",", "course", ")", ".", "summary", "return", "(", "success_cutoff", "and", "(", "grade_summary", "[", "'percent'", "]", ">=", "success_cutoff", ")", ")" ]
check a user's course passing status .
train
false
11,770
def _HashFromFileHandle(file_handle): pos = file_handle.tell() content_hash = _Hash(file_handle.read()) file_handle.seek(pos, 0) return content_hash
[ "def", "_HashFromFileHandle", "(", "file_handle", ")", ":", "pos", "=", "file_handle", ".", "tell", "(", ")", "content_hash", "=", "_Hash", "(", "file_handle", ".", "read", "(", ")", ")", "file_handle", ".", "seek", "(", "pos", ",", "0", ")", "return", "content_hash" ]
compute the hash of the content of the file pointed to by file_handle .
train
false
11,772
def test_should_call_prompt_with_process_json(mocker): mock_prompt = mocker.patch(u'cookiecutter.prompt.click.prompt', autospec=True) read_user_dict(u'name', {u'project_slug': u'pytest-plugin'}) assert (mock_prompt.call_args == mocker.call(u'name', type=click.STRING, default=u'default', value_proc=process_json))
[ "def", "test_should_call_prompt_with_process_json", "(", "mocker", ")", ":", "mock_prompt", "=", "mocker", ".", "patch", "(", "u'cookiecutter.prompt.click.prompt'", ",", "autospec", "=", "True", ")", "read_user_dict", "(", "u'name'", ",", "{", "u'project_slug'", ":", "u'pytest-plugin'", "}", ")", "assert", "(", "mock_prompt", ".", "call_args", "==", "mocker", ".", "call", "(", "u'name'", ",", "type", "=", "click", ".", "STRING", ",", "default", "=", "u'default'", ",", "value_proc", "=", "process_json", ")", ")" ]
test to make sure that process_json is actually being used to generate a processor for the user input .
train
false
11,773
@protocol.commands.add(u'volume', change=protocol.INT) def volume(context, change): if ((change < (-100)) or (change > 100)): raise exceptions.MpdArgError(u'Invalid volume value') old_volume = context.core.mixer.get_volume().get() if (old_volume is None): raise exceptions.MpdSystemError(u'problems setting volume') new_volume = min(max(0, (old_volume + change)), 100) success = context.core.mixer.set_volume(new_volume).get() if (not success): raise exceptions.MpdSystemError(u'problems setting volume')
[ "@", "protocol", ".", "commands", ".", "add", "(", "u'volume'", ",", "change", "=", "protocol", ".", "INT", ")", "def", "volume", "(", "context", ",", "change", ")", ":", "if", "(", "(", "change", "<", "(", "-", "100", ")", ")", "or", "(", "change", ">", "100", ")", ")", ":", "raise", "exceptions", ".", "MpdArgError", "(", "u'Invalid volume value'", ")", "old_volume", "=", "context", ".", "core", ".", "mixer", ".", "get_volume", "(", ")", ".", "get", "(", ")", "if", "(", "old_volume", "is", "None", ")", ":", "raise", "exceptions", ".", "MpdSystemError", "(", "u'problems setting volume'", ")", "new_volume", "=", "min", "(", "max", "(", "0", ",", "(", "old_volume", "+", "change", ")", ")", ",", "100", ")", "success", "=", "context", ".", "core", ".", "mixer", ".", "set_volume", "(", "new_volume", ")", ".", "get", "(", ")", "if", "(", "not", "success", ")", ":", "raise", "exceptions", ".", "MpdSystemError", "(", "u'problems setting volume'", ")" ]
change the volume by the given relative amount, between -100 and 100 .
train
false
11,774
def update_catalogs(resources=None, languages=None): cmd = makemessages.Command() opts = {'locale': ['en'], 'exclude': [], 'extensions': ['py', 'jinja'], 'domain': 'django', 'all': False, 'symlinks': False, 'ignore_patterns': [], 'use_default_ignore_patterns': True, 'no_wrap': False, 'no_location': False, 'no_obsolete': False, 'keep_pot': False, 'verbosity': 0} if (resources is not None): print '`update_catalogs` will always process all resources.' os.chdir(os.getcwd()) print 'Updating en catalogs for all taiga-back resourcess...' cmd.handle(**opts) contrib_dirs = _get_locale_dirs(None) for (name, dir_) in contrib_dirs: _check_diff(name, dir_)
[ "def", "update_catalogs", "(", "resources", "=", "None", ",", "languages", "=", "None", ")", ":", "cmd", "=", "makemessages", ".", "Command", "(", ")", "opts", "=", "{", "'locale'", ":", "[", "'en'", "]", ",", "'exclude'", ":", "[", "]", ",", "'extensions'", ":", "[", "'py'", ",", "'jinja'", "]", ",", "'domain'", ":", "'django'", ",", "'all'", ":", "False", ",", "'symlinks'", ":", "False", ",", "'ignore_patterns'", ":", "[", "]", ",", "'use_default_ignore_patterns'", ":", "True", ",", "'no_wrap'", ":", "False", ",", "'no_location'", ":", "False", ",", "'no_obsolete'", ":", "False", ",", "'keep_pot'", ":", "False", ",", "'verbosity'", ":", "0", "}", "if", "(", "resources", "is", "not", "None", ")", ":", "print", "'`update_catalogs` will always process all resources.'", "os", ".", "chdir", "(", "os", ".", "getcwd", "(", ")", ")", "print", "'Updating en catalogs for all taiga-back resourcess...'", "cmd", ".", "handle", "(", "**", "opts", ")", "contrib_dirs", "=", "_get_locale_dirs", "(", "None", ")", "for", "(", "name", ",", "dir_", ")", "in", "contrib_dirs", ":", "_check_diff", "(", "name", ",", "dir_", ")" ]
update the en/LC_MESSAGES/django.po files with new or updated translatable strings .
train
false
11,776
def populate_diff_chunks(files, enable_syntax_highlighting=True, request=None): from reviewboard.diffviewer.chunk_generator import get_diff_chunk_generator for diff_file in files: generator = get_diff_chunk_generator(request, diff_file[u'filediff'], diff_file[u'interfilediff'], diff_file[u'force_interdiff'], enable_syntax_highlighting) chunks = list(generator.get_chunks()) diff_file.update({u'chunks': chunks, u'num_chunks': len(chunks), u'changed_chunk_indexes': [], u'whitespace_only': (len(chunks) > 0)}) for (j, chunk) in enumerate(chunks): chunk[u'index'] = j if (chunk[u'change'] != u'equal'): diff_file[u'changed_chunk_indexes'].append(j) meta = chunk.get(u'meta', {}) if (not meta.get(u'whitespace_chunk', False)): diff_file[u'whitespace_only'] = False diff_file.update({u'num_changes': len(diff_file[u'changed_chunk_indexes']), u'chunks_loaded': True})
[ "def", "populate_diff_chunks", "(", "files", ",", "enable_syntax_highlighting", "=", "True", ",", "request", "=", "None", ")", ":", "from", "reviewboard", ".", "diffviewer", ".", "chunk_generator", "import", "get_diff_chunk_generator", "for", "diff_file", "in", "files", ":", "generator", "=", "get_diff_chunk_generator", "(", "request", ",", "diff_file", "[", "u'filediff'", "]", ",", "diff_file", "[", "u'interfilediff'", "]", ",", "diff_file", "[", "u'force_interdiff'", "]", ",", "enable_syntax_highlighting", ")", "chunks", "=", "list", "(", "generator", ".", "get_chunks", "(", ")", ")", "diff_file", ".", "update", "(", "{", "u'chunks'", ":", "chunks", ",", "u'num_chunks'", ":", "len", "(", "chunks", ")", ",", "u'changed_chunk_indexes'", ":", "[", "]", ",", "u'whitespace_only'", ":", "(", "len", "(", "chunks", ")", ">", "0", ")", "}", ")", "for", "(", "j", ",", "chunk", ")", "in", "enumerate", "(", "chunks", ")", ":", "chunk", "[", "u'index'", "]", "=", "j", "if", "(", "chunk", "[", "u'change'", "]", "!=", "u'equal'", ")", ":", "diff_file", "[", "u'changed_chunk_indexes'", "]", ".", "append", "(", "j", ")", "meta", "=", "chunk", ".", "get", "(", "u'meta'", ",", "{", "}", ")", "if", "(", "not", "meta", ".", "get", "(", "u'whitespace_chunk'", ",", "False", ")", ")", ":", "diff_file", "[", "u'whitespace_only'", "]", "=", "False", "diff_file", ".", "update", "(", "{", "u'num_changes'", ":", "len", "(", "diff_file", "[", "u'changed_chunk_indexes'", "]", ")", ",", "u'chunks_loaded'", ":", "True", "}", ")" ]
populates a list of diff files with chunk data .
train
false
11,777
def _match_progs(value, progs): if (not progs): return True for prog in progs: if prog.search(six.text_type(value)): return True return False
[ "def", "_match_progs", "(", "value", ",", "progs", ")", ":", "if", "(", "not", "progs", ")", ":", "return", "True", "for", "prog", "in", "progs", ":", "if", "prog", ".", "search", "(", "six", ".", "text_type", "(", "value", ")", ")", ":", "return", "True", "return", "False" ]
check if value is matching any of the compiled regexes in the progs list .
train
false
11,778
def list_platform_sets(server_url): config = _get_asam_configuration(server_url) if (not config): return False url = config['platformset_config_url'] data = {'manual': 'false'} auth = (config['username'], config['password']) try: html_content = _make_post_request(url, data, auth, verify=False) except Exception as exc: err_msg = 'Failed to look up existing platform sets' log.error('{0}:\n{1}'.format(err_msg, exc)) return {server_url: err_msg} parser = _parse_html_content(html_content) platform_set_list = _get_platform_sets(parser.data) if platform_set_list: return {server_url: platform_set_list} else: return {server_url: 'No existing platform sets found'}
[ "def", "list_platform_sets", "(", "server_url", ")", ":", "config", "=", "_get_asam_configuration", "(", "server_url", ")", "if", "(", "not", "config", ")", ":", "return", "False", "url", "=", "config", "[", "'platformset_config_url'", "]", "data", "=", "{", "'manual'", ":", "'false'", "}", "auth", "=", "(", "config", "[", "'username'", "]", ",", "config", "[", "'password'", "]", ")", "try", ":", "html_content", "=", "_make_post_request", "(", "url", ",", "data", ",", "auth", ",", "verify", "=", "False", ")", "except", "Exception", "as", "exc", ":", "err_msg", "=", "'Failed to look up existing platform sets'", "log", ".", "error", "(", "'{0}:\\n{1}'", ".", "format", "(", "err_msg", ",", "exc", ")", ")", "return", "{", "server_url", ":", "err_msg", "}", "parser", "=", "_parse_html_content", "(", "html_content", ")", "platform_set_list", "=", "_get_platform_sets", "(", "parser", ".", "data", ")", "if", "platform_set_list", ":", "return", "{", "server_url", ":", "platform_set_list", "}", "else", ":", "return", "{", "server_url", ":", "'No existing platform sets found'", "}" ]
list all asam platform sets present on the novell fan-out driver .
train
true
11,779
@rule(u'$nickname[,:]\\s+(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?["\u201c](.+?)["\u201d]\\? *$') @example(u'$nickname: "mon chien"? or $nickname: fr "mon chien"?') @priority(u'low') def tr(bot, trigger): (in_lang, out_lang, phrase) = trigger.groups() if ((len(phrase) > 350) and (not trigger.admin)): return bot.reply(u'Phrase must be under 350 characters.') if (phrase.strip() == u''): return bot.reply(u'You need to specify a string for me to translate!') in_lang = (in_lang or u'auto') out_lang = (out_lang or u'en') if (in_lang != out_lang): (msg, in_lang) = translate(phrase, in_lang, out_lang, verify_ssl=bot.config.core.verify_ssl) if ((sys.version_info.major < 3) and isinstance(msg, str)): msg = msg.decode(u'utf-8') if msg: msg = web.decode(msg) msg = (u'"%s" (%s to %s, translate.google.com)' % (msg, in_lang, out_lang)) else: msg = (u'The %s to %s translation failed, are you sure you specified valid language abbreviations?' % (in_lang, out_lang)) bot.reply(msg) else: bot.reply(u'Language guessing failed, so try suggesting one!')
[ "@", "rule", "(", "u'$nickname[,:]\\\\s+(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?[\"\\u201c](.+?)[\"\\u201d]\\\\? *$'", ")", "@", "example", "(", "u'$nickname: \"mon chien\"? or $nickname: fr \"mon chien\"?'", ")", "@", "priority", "(", "u'low'", ")", "def", "tr", "(", "bot", ",", "trigger", ")", ":", "(", "in_lang", ",", "out_lang", ",", "phrase", ")", "=", "trigger", ".", "groups", "(", ")", "if", "(", "(", "len", "(", "phrase", ")", ">", "350", ")", "and", "(", "not", "trigger", ".", "admin", ")", ")", ":", "return", "bot", ".", "reply", "(", "u'Phrase must be under 350 characters.'", ")", "if", "(", "phrase", ".", "strip", "(", ")", "==", "u''", ")", ":", "return", "bot", ".", "reply", "(", "u'You need to specify a string for me to translate!'", ")", "in_lang", "=", "(", "in_lang", "or", "u'auto'", ")", "out_lang", "=", "(", "out_lang", "or", "u'en'", ")", "if", "(", "in_lang", "!=", "out_lang", ")", ":", "(", "msg", ",", "in_lang", ")", "=", "translate", "(", "phrase", ",", "in_lang", ",", "out_lang", ",", "verify_ssl", "=", "bot", ".", "config", ".", "core", ".", "verify_ssl", ")", "if", "(", "(", "sys", ".", "version_info", ".", "major", "<", "3", ")", "and", "isinstance", "(", "msg", ",", "str", ")", ")", ":", "msg", "=", "msg", ".", "decode", "(", "u'utf-8'", ")", "if", "msg", ":", "msg", "=", "web", ".", "decode", "(", "msg", ")", "msg", "=", "(", "u'\"%s\" (%s to %s, translate.google.com)'", "%", "(", "msg", ",", "in_lang", ",", "out_lang", ")", ")", "else", ":", "msg", "=", "(", "u'The %s to %s translation failed, are you sure you specified valid language abbreviations?'", "%", "(", "in_lang", ",", "out_lang", ")", ")", "bot", ".", "reply", "(", "msg", ")", "else", ":", "bot", ".", "reply", "(", "u'Language guessing failed, so try suggesting one!'", ")" ]
translates a phrase .
train
false
11,780
def changes_command(args): changes = list_changes(args.project_id, args.name) for change in changes: print change
[ "def", "changes_command", "(", "args", ")", ":", "changes", "=", "list_changes", "(", "args", ".", "project_id", ",", "args", ".", "name", ")", "for", "change", "in", "changes", ":", "print", "change" ]
list all change records for a zone .
train
false
11,781
def strip_common_indent(lines): lines = [x for x in lines if (x.strip() != u'')] minindent = min([len(_match_indent(x).group(0)) for x in lines]) lines = [x[minindent:] for x in lines] return lines
[ "def", "strip_common_indent", "(", "lines", ")", ":", "lines", "=", "[", "x", "for", "x", "in", "lines", "if", "(", "x", ".", "strip", "(", ")", "!=", "u''", ")", "]", "minindent", "=", "min", "(", "[", "len", "(", "_match_indent", "(", "x", ")", ".", "group", "(", "0", ")", ")", "for", "x", "in", "lines", "]", ")", "lines", "=", "[", "x", "[", "minindent", ":", "]", "for", "x", "in", "lines", "]", "return", "lines" ]
strips empty lines and common indentation from the list of strings given in lines .
train
false
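an illustrative run of strip_common_indent (hedged: assumes the undefined _match_indent helper matches leading whitespace, e.g. re.compile(r'\s*').match):

    lines = ['    foo', '', '        bar', '    baz']
    print(strip_common_indent(lines))
    # ['foo', '    bar', 'baz']  -- blank line dropped, 4-space margin removed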
11,782
def package_to_requirement(package_name): match = re.search('^(.*?)-(dev|\\d.*)', package_name) if match: name = match.group(1) version = match.group(2) else: name = package_name version = '' if version: return ('%s==%s' % (name, version)) else: return name
[ "def", "package_to_requirement", "(", "package_name", ")", ":", "match", "=", "re", ".", "search", "(", "'^(.*?)-(dev|\\\\d.*)'", ",", "package_name", ")", "if", "match", ":", "name", "=", "match", ".", "group", "(", "1", ")", "version", "=", "match", ".", "group", "(", "2", ")", "else", ":", "name", "=", "package_name", "version", "=", "''", "if", "version", ":", "return", "(", "'%s==%s'", "%", "(", "name", ",", "version", ")", ")", "else", ":", "return", "name" ]
translate a package name like foo-1.2 into a requirement string like foo==1.2 .
train
true
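examples of the name/version split performed above (inputs are hypothetical):

    print(package_to_requirement('Foo-1.2'))  # 'Foo==1.2'
    print(package_to_requirement('Foo-dev'))  # 'Foo==dev'
    print(package_to_requirement('Foo'))      # 'Foo' -- no version part found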
11,784
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): a = ma.asarray(a).ravel() if (limits is None): n = float(a.count()) return (a.std(axis=axis, ddof=ddof) / ma.sqrt(n)) am = trima(a.ravel(), limits, inclusive) sd = np.sqrt(am.var(axis=axis, ddof=ddof)) return (sd / np.sqrt(am.count()))
[ "def", "tsem", "(", "a", ",", "limits", "=", "None", ",", "inclusive", "=", "(", "True", ",", "True", ")", ",", "axis", "=", "0", ",", "ddof", "=", "1", ")", ":", "a", "=", "ma", ".", "asarray", "(", "a", ")", ".", "ravel", "(", ")", "if", "(", "limits", "is", "None", ")", ":", "n", "=", "float", "(", "a", ".", "count", "(", ")", ")", "return", "(", "a", ".", "std", "(", "axis", "=", "axis", ",", "ddof", "=", "ddof", ")", "/", "ma", ".", "sqrt", "(", "n", ")", ")", "am", "=", "trima", "(", "a", ".", "ravel", "(", ")", ",", "limits", ",", "inclusive", ")", "sd", "=", "np", ".", "sqrt", "(", "am", ".", "var", "(", "axis", "=", "axis", ",", "ddof", "=", "ddof", ")", ")", "return", "(", "sd", "/", "np", ".", "sqrt", "(", "am", ".", "count", "(", ")", ")", ")" ]
compute the trimmed standard error of the mean .
train
false
11,785
def attach_total_comments_to_queryset(queryset, as_field='total_comments'): model = queryset.model sql = "\n SELECT COUNT(history_historyentry.id)\n FROM history_historyentry\n WHERE history_historyentry.key = CONCAT('{key_prefix}', {tbl}.id) AND\n history_historyentry.comment is not null AND\n history_historyentry.comment != ''\n " typename = get_typename_for_model_class(model) sql = sql.format(tbl=model._meta.db_table, key_prefix='{}:'.format(typename)) queryset = queryset.extra(select={as_field: sql}) return queryset
[ "def", "attach_total_comments_to_queryset", "(", "queryset", ",", "as_field", "=", "'total_comments'", ")", ":", "model", "=", "queryset", ".", "model", "sql", "=", "\"\\n SELECT COUNT(history_historyentry.id)\\n FROM history_historyentry\\n WHERE history_historyentry.key = CONCAT('{key_prefix}', {tbl}.id) AND\\n history_historyentry.comment is not null AND\\n history_historyentry.comment != ''\\n \"", "typename", "=", "get_typename_for_model_class", "(", "model", ")", "sql", "=", "sql", ".", "format", "(", "tbl", "=", "model", ".", "_meta", ".", "db_table", ",", "key_prefix", "=", "'{}:'", ".", "format", "(", "typename", ")", ")", "queryset", "=", "queryset", ".", "extra", "(", "select", "=", "{", "as_field", ":", "sql", "}", ")", "return", "queryset" ]
attach a total comments counter to each object of the queryset .
train
false
11,787
def ensure_metadata_list(text): if isinstance(text, six.text_type): if (u';' in text): text = text.split(u';') else: text = text.split(u',') return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))
[ "def", "ensure_metadata_list", "(", "text", ")", ":", "if", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "if", "(", "u';'", "in", "text", ")", ":", "text", "=", "text", ".", "split", "(", "u';'", ")", "else", ":", "text", "=", "text", ".", "split", "(", "u','", ")", "return", "list", "(", "OrderedDict", ".", "fromkeys", "(", "[", "v", "for", "v", "in", "(", "w", ".", "strip", "(", ")", "for", "w", "in", "text", ")", "if", "v", "]", ")", ")" ]
canonicalize the format of a list of authors or tags .
train
false
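hedged examples of the semicolon-over-comma splitting rule (assumes six and collections.OrderedDict are imported, as in the snippet's module):

    print(ensure_metadata_list(u'alice, bob , alice'))  # ['alice', 'bob'] -- deduped, stripped
    print(ensure_metadata_list(u'alice; bob,eve'))      # ['alice', 'bob,eve'] -- ';' wins over ','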
11,788
def GetRpcServer(options): rpc_server_class = HttpRpcServer def GetUserCredentials(): 'Prompts the user for a username and password.' global global_status st = global_status global_status = None email = options.email if (email is None): email = GetEmail(('Email (login for uploading to %s)' % options.server)) password = getpass.getpass(('Password for %s: ' % email)) global_status = st return (email, password) host = (options.host or options.server).lower() if ((host == 'localhost') or host.startswith('localhost:')): email = options.email if (email is None): email = 'test@example.com' logging.info(('Using debug user %s. Override with --email' % email)) server = rpc_server_class(options.server, (lambda : (email, 'password')), host_override=options.host, extra_headers={'Cookie': ('dev_appserver_login="%s:False"' % email)}, save_cookies=options.save_cookies) server.authenticated = True return server return rpc_server_class(options.server, GetUserCredentials, host_override=options.host, save_cookies=options.save_cookies)
[ "def", "GetRpcServer", "(", "options", ")", ":", "rpc_server_class", "=", "HttpRpcServer", "def", "GetUserCredentials", "(", ")", ":", "global", "global_status", "st", "=", "global_status", "global_status", "=", "None", "email", "=", "options", ".", "email", "if", "(", "email", "is", "None", ")", ":", "email", "=", "GetEmail", "(", "(", "'Email (login for uploading to %s)'", "%", "options", ".", "server", ")", ")", "password", "=", "getpass", ".", "getpass", "(", "(", "'Password for %s: '", "%", "email", ")", ")", "global_status", "=", "st", "return", "(", "email", ",", "password", ")", "host", "=", "(", "options", ".", "host", "or", "options", ".", "server", ")", ".", "lower", "(", ")", "if", "(", "(", "host", "==", "'localhost'", ")", "or", "host", ".", "startswith", "(", "'localhost:'", ")", ")", ":", "email", "=", "options", ".", "email", "if", "(", "email", "is", "None", ")", ":", "email", "=", "'test@example.com'", "logging", ".", "info", "(", "(", "'Using debug user %s. Override with --email'", "%", "email", ")", ")", "server", "=", "rpc_server_class", "(", "options", ".", "server", ",", "(", "lambda", ":", "(", "email", ",", "'password'", ")", ")", ",", "host_override", "=", "options", ".", "host", ",", "extra_headers", "=", "{", "'Cookie'", ":", "(", "'dev_appserver_login=\"%s:False\"'", "%", "email", ")", "}", ",", "save_cookies", "=", "options", ".", "save_cookies", ")", "server", ".", "authenticated", "=", "True", "return", "server", "return", "rpc_server_class", "(", "options", ".", "server", ",", "GetUserCredentials", ",", "host_override", "=", "options", ".", "host", ",", "save_cookies", "=", "options", ".", "save_cookies", ")" ]
returns an instance of an abstractrpcserver .
train
false
11,789
def hamming_dist(v1, v2): edits = (v1 != v2) return edits.sum()
[ "def", "hamming_dist", "(", "v1", ",", "v2", ")", ":", "edits", "=", "(", "v1", "!=", "v2", ")", "return", "edits", ".", "sum", "(", ")" ]
compute the hamming distance between two binary arrays .
train
false
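a minimal check of hamming_dist on numpy arrays:

    import numpy as np

    a = np.array([1, 0, 1, 1])
    b = np.array([1, 1, 0, 1])
    print(hamming_dist(a, b))  # 2 -- positions 1 and 2 differ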
11,790
def _get_evoked(): evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0)) evoked.crop(0, 0.2) return evoked
[ "def", "_get_evoked", "(", ")", ":", "evoked", "=", "read_evokeds", "(", "fname_data", ",", "condition", "=", "0", ",", "baseline", "=", "(", "None", ",", "0", ")", ")", "evoked", ".", "crop", "(", "0", ",", "0.2", ")", "return", "evoked" ]
get evoked data .
train
false
11,792
def dirinfo(path, opts=None): cmd = 'mfsdirinfo' ret = {} if opts: cmd += (' -' + opts) cmd += (' ' + path) out = __salt__['cmd.run_all'](cmd, python_shell=False) output = out['stdout'].splitlines() for line in output: if (not line): continue comps = line.split(':') ret[comps[0].strip()] = comps[1].strip() return ret
[ "def", "dirinfo", "(", "path", ",", "opts", "=", "None", ")", ":", "cmd", "=", "'mfsdirinfo'", "ret", "=", "{", "}", "if", "opts", ":", "cmd", "+=", "(", "' -'", "+", "opts", ")", "cmd", "+=", "(", "' '", "+", "path", ")", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "output", "=", "out", "[", "'stdout'", "]", ".", "splitlines", "(", ")", "for", "line", "in", "output", ":", "if", "(", "not", "line", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", "':'", ")", "ret", "[", "comps", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "comps", "[", "1", "]", ".", "strip", "(", ")", "return", "ret" ]
return information on a directory located on the moosefs filesystem .
train
true
11,793
def get_browser_versions(browser): html = get(settings.BROWSER_BASE_PAGE.format(browser=quote_plus(browser))) html = html.decode(u'iso-8859-1') html = html.split(u"<div id='liste'>")[1] html = html.split(u'</div>')[0] browsers_iter = re.finditer(u"\\?id=\\d+\\'>(.+?)</a", html, re.UNICODE) browsers = [] for browser in browsers_iter: if (u'more' in browser.group(1).lower()): continue browsers.append(browser.group(1)) if (len(browsers) == settings.BROWSERS_COUNT_LIMIT): break return browsers
[ "def", "get_browser_versions", "(", "browser", ")", ":", "html", "=", "get", "(", "settings", ".", "BROWSER_BASE_PAGE", ".", "format", "(", "browser", "=", "quote_plus", "(", "browser", ")", ")", ")", "html", "=", "html", ".", "decode", "(", "u'iso-8859-1'", ")", "html", "=", "html", ".", "split", "(", "u\"<div id='liste'>\"", ")", "[", "1", "]", "html", "=", "html", ".", "split", "(", "u'</div>'", ")", "[", "0", "]", "browsers_iter", "=", "re", ".", "finditer", "(", "u\"\\\\?id=\\\\d+\\\\'>(.+?)</a\"", ",", "html", ",", "re", ".", "UNICODE", ")", "browsers", "=", "[", "]", "for", "browser", "in", "browsers_iter", ":", "if", "(", "u'more'", "in", "browser", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ")", ":", "continue", "browsers", ".", "append", "(", "browser", ".", "group", "(", "1", ")", ")", "if", "(", "len", "(", "browsers", ")", "==", "settings", ".", "BROWSERS_COUNT_LIMIT", ")", ":", "break", "return", "browsers" ]
very very hardcoded/dirty re/split stuff .
train
false
11,794
def libvlc_media_list_item_at_index(p_ml, i_pos): f = (_Cfunctions.get('libvlc_media_list_item_at_index', None) or _Cfunction('libvlc_media_list_item_at_index', ((1,), (1,)), class_result(Media), ctypes.c_void_p, MediaList, ctypes.c_int)) return f(p_ml, i_pos)
[ "def", "libvlc_media_list_item_at_index", "(", "p_ml", ",", "i_pos", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_item_at_index'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_item_at_index'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "class_result", "(", "Media", ")", ",", "ctypes", ".", "c_void_p", ",", "MediaList", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_ml", ",", "i_pos", ")" ]
list media instance in media list at a position . the l{libvlc_media_list_lock} should be held upon entering this function .
train
true
11,796
def _connection_line(x, fig, sourceax, targetax): from matplotlib.lines import Line2D transFigure = fig.transFigure.inverted() tf = fig.transFigure (xt, yt) = transFigure.transform(targetax.transAxes.transform([0.5, 0.25])) (xs, _) = transFigure.transform(sourceax.transData.transform([x, 0])) (_, ys) = transFigure.transform(sourceax.transAxes.transform([0, 1])) return Line2D((xt, xs), (yt, ys), transform=tf, color='grey', linestyle='-', linewidth=1.5, alpha=0.66, zorder=0)
[ "def", "_connection_line", "(", "x", ",", "fig", ",", "sourceax", ",", "targetax", ")", ":", "from", "matplotlib", ".", "lines", "import", "Line2D", "transFigure", "=", "fig", ".", "transFigure", ".", "inverted", "(", ")", "tf", "=", "fig", ".", "transFigure", "(", "xt", ",", "yt", ")", "=", "transFigure", ".", "transform", "(", "targetax", ".", "transAxes", ".", "transform", "(", "[", "0.5", ",", "0.25", "]", ")", ")", "(", "xs", ",", "_", ")", "=", "transFigure", ".", "transform", "(", "sourceax", ".", "transData", ".", "transform", "(", "[", "x", ",", "0", "]", ")", ")", "(", "_", ",", "ys", ")", "=", "transFigure", ".", "transform", "(", "sourceax", ".", "transAxes", ".", "transform", "(", "[", "0", ",", "1", "]", ")", ")", "return", "Line2D", "(", "(", "xt", ",", "xs", ")", ",", "(", "yt", ",", "ys", ")", ",", "transform", "=", "tf", ",", "color", "=", "'grey'", ",", "linestyle", "=", "'-'", ",", "linewidth", "=", "1.5", ",", "alpha", "=", "0.66", ",", "zorder", "=", "0", ")" ]
connect time series and topoplots .
train
false
11,797
@pytest.fixture def bridge(): return readline.ReadlineBridge()
[ "@", "pytest", ".", "fixture", "def", "bridge", "(", ")", ":", "return", "readline", ".", "ReadlineBridge", "(", ")" ]
fixture providing a readlinebridge .
train
false
11,799
@testing.requires_testing_data def test_subject_info(): tempdir = _TempDir() raw = read_raw_fif(fif_fname).crop(0, 1) assert_true((raw.info['subject_info'] is None)) keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex', 'hand'] vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1] subject_info = dict() for (key, val) in zip(keys, vals): subject_info[key] = val raw.info['subject_info'] = subject_info out_fname = op.join(tempdir, 'test_subj_info_raw.fif') raw.save(out_fname, overwrite=True) raw_read = read_raw_fif(out_fname) for key in keys: assert_equal(subject_info[key], raw_read.info['subject_info'][key]) assert_equal(raw.info['meas_date'], raw_read.info['meas_date'])
[ "@", "testing", ".", "requires_testing_data", "def", "test_subject_info", "(", ")", ":", "tempdir", "=", "_TempDir", "(", ")", "raw", "=", "read_raw_fif", "(", "fif_fname", ")", ".", "crop", "(", "0", ",", "1", ")", "assert_true", "(", "(", "raw", ".", "info", "[", "'subject_info'", "]", "is", "None", ")", ")", "keys", "=", "[", "'id'", ",", "'his_id'", ",", "'last_name'", ",", "'first_name'", ",", "'birthday'", ",", "'sex'", ",", "'hand'", "]", "vals", "=", "[", "1", ",", "'foobar'", ",", "'bar'", ",", "'foo'", ",", "(", "1901", ",", "2", ",", "3", ")", ",", "0", ",", "1", "]", "subject_info", "=", "dict", "(", ")", "for", "(", "key", ",", "val", ")", "in", "zip", "(", "keys", ",", "vals", ")", ":", "subject_info", "[", "key", "]", "=", "val", "raw", ".", "info", "[", "'subject_info'", "]", "=", "subject_info", "out_fname", "=", "op", ".", "join", "(", "tempdir", ",", "'test_subj_info_raw.fif'", ")", "raw", ".", "save", "(", "out_fname", ",", "overwrite", "=", "True", ")", "raw_read", "=", "read_raw_fif", "(", "out_fname", ")", "for", "key", "in", "keys", ":", "assert_equal", "(", "subject_info", "[", "key", "]", ",", "raw_read", ".", "info", "[", "'subject_info'", "]", "[", "key", "]", ")", "assert_equal", "(", "raw", ".", "info", "[", "'meas_date'", "]", ",", "raw_read", ".", "info", "[", "'meas_date'", "]", ")" ]
test reading subject information .
train
false