id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
40,931
def poly_LC(f, K):
    """Return the leading coefficient of polynomial ``f`` over domain ``K``.

    An empty coefficient list represents the zero polynomial, whose
    leading coefficient is ``K.zero``.
    """
    if f:
        return f[0]
    return K.zero
[ "def", "poly_LC", "(", "f", ",", "K", ")", ":", "if", "(", "not", "f", ")", ":", "return", "K", ".", "zero", "else", ":", "return", "f", "[", "0", "]" ]
return leading coefficient of f .
train
false
40,932
def composition(f, g):
    """Return a composition of two functions: a callable computing ``g(f(x))``."""
    def composed(value):
        # Apply f first, then g to its result.
        return g(f(value))
    return composed
[ "def", "composition", "(", "f", ",", "g", ")", ":", "def", "fg", "(", "arg", ")", ":", "return", "g", "(", "f", "(", "arg", ")", ")", "return", "fg" ]
return a composition of two functions .
train
false
40,933
def exp_backoff_fn(fn, *args, **kwargs):
    """Call ``fn(*args, **kwargs)``, retrying with exponential backoff on Windows.

    Mostly for retrying file operations that fail on Windows (e.g. because a
    virus scanner holds the file open).  On non-Windows platforms the call
    passes straight through with no retry machinery.  Retries only on
    EPERM/EACCES; ENOENT/ENOTEMPTY and unknown errnos are re-raised.
    """
    max_tries = kwargs.pop(u'max_tries', MAX_TRIES)
    if not on_win:
        # No virus-scanner interference expected off Windows.
        return fn(*args, **kwargs)
    import random
    for attempt in range(max_tries):
        try:
            result = fn(*args, **kwargs)
        except (OSError, IOError) as exc:
            log.trace(repr(exc))
            if exc.errno in (EPERM, EACCES):
                if attempt == max_tries - 1:
                    raise
                # Exponential backoff with a little jitter.
                pause = (2 ** attempt + random.random()) * 0.1
                caller_frame = sys._getframe(1)
                log.trace(u'retrying %s/%s %s() in %g sec',
                          basename(caller_frame.f_code.co_filename),
                          caller_frame.f_lineno, fn.__name__, pause)
                sleep(pause)
            elif exc.errno in (ENOENT, ENOTEMPTY):
                raise
            else:
                log.warn(u'Uncaught backoff with errno %s %d',
                         errorcode[exc.errno], exc.errno)
                raise
        else:
            return result
[ "def", "exp_backoff_fn", "(", "fn", ",", "*", "args", ",", "**", "kwargs", ")", ":", "max_tries", "=", "kwargs", ".", "pop", "(", "u'max_tries'", ",", "MAX_TRIES", ")", "if", "(", "not", "on_win", ")", ":", "return", "fn", "(", "*", "args", ",", "**", "kwargs", ")", "import", "random", "for", "n", "in", "range", "(", "max_tries", ")", ":", "try", ":", "result", "=", "fn", "(", "*", "args", ",", "**", "kwargs", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "log", ".", "trace", "(", "repr", "(", "e", ")", ")", "if", "(", "e", ".", "errno", "in", "(", "EPERM", ",", "EACCES", ")", ")", ":", "if", "(", "n", "==", "(", "max_tries", "-", "1", ")", ")", ":", "raise", "sleep_time", "=", "(", "(", "(", "2", "**", "n", ")", "+", "random", ".", "random", "(", ")", ")", "*", "0.1", ")", "caller_frame", "=", "sys", ".", "_getframe", "(", "1", ")", "log", ".", "trace", "(", "u'retrying %s/%s %s() in %g sec'", ",", "basename", "(", "caller_frame", ".", "f_code", ".", "co_filename", ")", ",", "caller_frame", ".", "f_lineno", ",", "fn", ".", "__name__", ",", "sleep_time", ")", "sleep", "(", "sleep_time", ")", "elif", "(", "e", ".", "errno", "in", "(", "ENOENT", ",", "ENOTEMPTY", ")", ")", ":", "raise", "else", ":", "log", ".", "warn", "(", "u'Uncaught backoff with errno %s %d'", ",", "errorcode", "[", "e", ".", "errno", "]", ",", "e", ".", "errno", ")", "raise", "else", ":", "return", "result" ]
mostly for retrying file operations that fail on windows due to virus scanners .
train
false
40,934
def upload():
    """Push the built html docs to the pandas.pydata.org vbench area via rsync/ssh."""
    cmd = ('cd build/html; rsync -avz . '
           'pandas@pandas.pydata.org:/usr/share/nginx/pandas/'
           'pandas-docs/vbench/ -essh')
    os.system(cmd)
[ "def", "upload", "(", ")", ":", "os", ".", "system", "(", "'cd build/html; rsync -avz . pandas@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/vbench/ -essh'", ")" ]
upload the built html documentation to the pandas website docs area via rsync over ssh .
train
false
40,938
def redirect_aware_commmunicate(p, sys=_sys):
    """Variant of ``Popen.communicate`` that honors in-process io redirection.

    When stdout/stderr are being redirected, captured output is written
    through to the (possibly replaced) streams and dropped from the
    return value.
    """
    assert sys is not None
    out, err = p.communicate()
    if redirecting_io(sys=sys):
        if out:
            sys.stdout.write(out)
            out = None
        if err:
            sys.stderr.write(err)
            err = None
    return (out, err)
[ "def", "redirect_aware_commmunicate", "(", "p", ",", "sys", "=", "_sys", ")", ":", "assert", "(", "sys", "is", "not", "None", ")", "(", "out", ",", "err", ")", "=", "p", ".", "communicate", "(", ")", "if", "redirecting_io", "(", "sys", "=", "sys", ")", ":", "if", "out", ":", "sys", ".", "stdout", ".", "write", "(", "out", ")", "out", "=", "None", "if", "err", ":", "sys", ".", "stderr", ".", "write", "(", "err", ")", "err", "=", "None", "return", "(", "out", ",", "err", ")" ]
variant of process .
train
false
40,939
def _check_download_dir(link, download_dir, hashes):
    """Check download_dir for a previously downloaded file with correct hash.

    Returns the file's path when a matching file is found, else None.
    A file with a bad hash is deleted so it will be re-downloaded.
    """
    download_path = os.path.join(download_dir, link.filename)
    if not os.path.exists(download_path):
        return None
    logger.info('File was already downloaded %s', download_path)
    if hashes:
        try:
            hashes.check_against_path(download_path)
        except HashMismatch:
            logger.warning(
                'Previously-downloaded file %s has bad hash. Re-downloading.',
                download_path)
            os.unlink(download_path)
            return None
    return download_path
[ "def", "_check_download_dir", "(", "link", ",", "download_dir", ",", "hashes", ")", ":", "download_path", "=", "os", ".", "path", ".", "join", "(", "download_dir", ",", "link", ".", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "download_path", ")", ":", "logger", ".", "info", "(", "'File was already downloaded %s'", ",", "download_path", ")", "if", "hashes", ":", "try", ":", "hashes", ".", "check_against_path", "(", "download_path", ")", "except", "HashMismatch", ":", "logger", ".", "warning", "(", "'Previously-downloaded file %s has bad hash. Re-downloading.'", ",", "download_path", ")", "os", ".", "unlink", "(", "download_path", ")", "return", "None", "return", "download_path", "return", "None" ]
check download_dir for previously downloaded file with correct hash if a correct file is found return its path else none .
train
true
40,940
def report_traceback():
    """Return (formatted traceback text, unix timestamp) for the active exception.

    Returns (None, None) when formatting fails with AttributeError.
    """
    try:
        trace_text = traceback.format_exc()
        stamp = time.time()
    except AttributeError:
        return (None, None)
    return (trace_text, stamp)
[ "def", "report_traceback", "(", ")", ":", "try", ":", "formatted_lines", "=", "traceback", ".", "format_exc", "(", ")", "now", "=", "time", ".", "time", "(", ")", "return", "(", "formatted_lines", ",", "now", ")", "except", "AttributeError", ":", "return", "(", "None", ",", "None", ")" ]
reports a timestamp and full traceback for a given exception .
train
false
40,941
def loss(logits, labels):
    """Add mean cross entropy to the 'losses' collection and return the total.

    Returns the sum of everything currently in the 'losses' collection
    (which may include weight-decay terms added elsewhere), named
    'total_loss'.
    """
    labels = tf.cast(labels, tf.int64)
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    mean_loss = tf.reduce_mean(per_example, name='cross_entropy')
    tf.add_to_collection('losses', mean_loss)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
[ "def", "loss", "(", "logits", ",", "labels", ")", ":", "labels", "=", "tf", ".", "cast", "(", "labels", ",", "tf", ".", "int64", ")", "cross_entropy", "=", "tf", ".", "nn", ".", "sparse_softmax_cross_entropy_with_logits", "(", "labels", "=", "labels", ",", "logits", "=", "logits", ",", "name", "=", "'cross_entropy_per_example'", ")", "cross_entropy_mean", "=", "tf", ".", "reduce_mean", "(", "cross_entropy", ",", "name", "=", "'cross_entropy'", ")", "tf", ".", "add_to_collection", "(", "'losses'", ",", "cross_entropy_mean", ")", "return", "tf", ".", "add_n", "(", "tf", ".", "get_collection", "(", "'losses'", ")", ",", "name", "=", "'total_loss'", ")" ]
add l2loss to all the trainable variables .
train
true
40,942
def _merge(arr, temp, left, mid, right): i = k = left j = mid inv_count = 0 while ((i < mid) and (j <= right)): if (arr[i] < arr[j]): temp[k] = arr[i] k += 1 i += 1 else: temp[k] = arr[j] k += 1 j += 1 inv_count += (mid - i) while (i < mid): temp[k] = arr[i] k += 1 i += 1 if (j <= right): k += ((right - j) + 1) j += ((right - j) + 1) arr[left:(k + 1)] = temp[left:(k + 1)] else: arr[left:(right + 1)] = temp[left:(right + 1)] return inv_count
[ "def", "_merge", "(", "arr", ",", "temp", ",", "left", ",", "mid", ",", "right", ")", ":", "i", "=", "k", "=", "left", "j", "=", "mid", "inv_count", "=", "0", "while", "(", "(", "i", "<", "mid", ")", "and", "(", "j", "<=", "right", ")", ")", ":", "if", "(", "arr", "[", "i", "]", "<", "arr", "[", "j", "]", ")", ":", "temp", "[", "k", "]", "=", "arr", "[", "i", "]", "k", "+=", "1", "i", "+=", "1", "else", ":", "temp", "[", "k", "]", "=", "arr", "[", "j", "]", "k", "+=", "1", "j", "+=", "1", "inv_count", "+=", "(", "mid", "-", "i", ")", "while", "(", "i", "<", "mid", ")", ":", "temp", "[", "k", "]", "=", "arr", "[", "i", "]", "k", "+=", "1", "i", "+=", "1", "if", "(", "j", "<=", "right", ")", ":", "k", "+=", "(", "(", "right", "-", "j", ")", "+", "1", ")", "j", "+=", "(", "(", "right", "-", "j", ")", "+", "1", ")", "arr", "[", "left", ":", "(", "k", "+", "1", ")", "]", "=", "temp", "[", "left", ":", "(", "k", "+", "1", ")", "]", "else", ":", "arr", "[", "left", ":", "(", "right", "+", "1", ")", "]", "=", "temp", "[", "left", ":", "(", "right", "+", "1", ")", "]", "return", "inv_count" ]
merges two sorted arrays and calculates the inversion count .
train
false
40,943
def get_current_db():
    """Return the database currently in use, as efficiently as possible.

    Prefers the live GUI database when a GUI is running with a database
    open; otherwise falls back to opening the library database directly.
    """
    from calibre.gui2.ui import get_gui
    gui = get_gui()
    if gui is not None and gui.current_db is not None:
        return gui.current_db
    from calibre.library import db
    return db()
[ "def", "get_current_db", "(", ")", ":", "from", "calibre", ".", "gui2", ".", "ui", "import", "get_gui", "gui", "=", "get_gui", "(", ")", "if", "(", "(", "gui", "is", "not", "None", ")", "and", "(", "gui", ".", "current_db", "is", "not", "None", ")", ")", ":", "return", "gui", ".", "current_db", "from", "calibre", ".", "library", "import", "db", "return", "db", "(", ")" ]
this method will try to return the current database in use by the user as efficiently as possible .
train
false
40,944
@register.function
@jinja2.contextfunction
def docs_page_title(context, title=None):
    """Wrapper for docs page titles: suffix the Developer Hub branding."""
    devhub = _('Add-on Documentation :: Developer Hub')
    if title:
        title = '%s :: %s' % (title, devhub)
    else:
        title = devhub
    return page_title(context, title)
[ "@", "register", ".", "function", "@", "jinja2", ".", "contextfunction", "def", "docs_page_title", "(", "context", ",", "title", "=", "None", ")", ":", "devhub", "=", "_", "(", "'Add-on Documentation :: Developer Hub'", ")", "title", "=", "(", "(", "'%s :: %s'", "%", "(", "title", ",", "devhub", ")", ")", "if", "title", "else", "devhub", ")", "return", "page_title", "(", "context", ",", "title", ")" ]
wrapper for docs page titles .
train
false
40,945
def solc_parse_output(compiler_output):
    """Parse the compiler output into a dict of per-contract data.

    Hex bytecode is preserved under 'bin_hex' and decoded into 'bin';
    the 'abi'/'devdoc'/'userdoc' entries are parsed from their serialized
    string form when present.
    """
    result = yaml.safe_load(compiler_output)['contracts']
    if 'bin' in tuple(result.values())[0]:
        for contract in result.values():
            contract['bin_hex'] = contract['bin']
            try:
                contract['bin'] = decode_hex(contract['bin_hex'])
            except TypeError:
                # Leave the undecodable value as-is.
                pass
    for json_data in ('abi', 'devdoc', 'userdoc'):
        if json_data not in tuple(result.values())[0]:
            continue
        for contract in result.values():
            contract[json_data] = yaml.safe_load(contract[json_data])
    return result
[ "def", "solc_parse_output", "(", "compiler_output", ")", ":", "result", "=", "yaml", ".", "safe_load", "(", "compiler_output", ")", "[", "'contracts'", "]", "if", "(", "'bin'", "in", "tuple", "(", "result", ".", "values", "(", ")", ")", "[", "0", "]", ")", ":", "for", "value", "in", "result", ".", "values", "(", ")", ":", "value", "[", "'bin_hex'", "]", "=", "value", "[", "'bin'", "]", "try", ":", "value", "[", "'bin'", "]", "=", "decode_hex", "(", "value", "[", "'bin_hex'", "]", ")", "except", "TypeError", ":", "pass", "for", "json_data", "in", "(", "'abi'", ",", "'devdoc'", ",", "'userdoc'", ")", ":", "if", "(", "json_data", "not", "in", "tuple", "(", "result", ".", "values", "(", ")", ")", "[", "0", "]", ")", ":", "continue", "for", "value", "in", "result", ".", "values", "(", ")", ":", "value", "[", "json_data", "]", "=", "yaml", ".", "safe_load", "(", "value", "[", "json_data", "]", ")", "return", "result" ]
parses the compiler output .
train
true
40,948
def violin_stats(X, method, points=100):
    """Return a list of stats dicts usable for drawing a series of violin plots.

    For each dataset in ``X`` the estimator ``method`` is evaluated on
    ``points`` evenly spaced coordinates between the data min and max.
    """
    X = _reshape_2D(X)
    vpstats = []
    for sample in X:
        lo = np.min(sample)
        hi = np.max(sample)
        grid = np.linspace(lo, hi, points)
        vpstats.append({
            u'vals': method(sample, grid),
            u'coords': grid,
            u'mean': np.mean(sample),
            u'median': np.median(sample),
            u'min': lo,
            u'max': hi,
        })
    return vpstats
[ "def", "violin_stats", "(", "X", ",", "method", ",", "points", "=", "100", ")", ":", "vpstats", "=", "[", "]", "X", "=", "_reshape_2D", "(", "X", ")", "for", "x", "in", "X", ":", "stats", "=", "{", "}", "min_val", "=", "np", ".", "min", "(", "x", ")", "max_val", "=", "np", ".", "max", "(", "x", ")", "coords", "=", "np", ".", "linspace", "(", "min_val", ",", "max_val", ",", "points", ")", "stats", "[", "u'vals'", "]", "=", "method", "(", "x", ",", "coords", ")", "stats", "[", "u'coords'", "]", "=", "coords", "stats", "[", "u'mean'", "]", "=", "np", ".", "mean", "(", "x", ")", "stats", "[", "u'median'", "]", "=", "np", ".", "median", "(", "x", ")", "stats", "[", "u'min'", "]", "=", "min_val", "stats", "[", "u'max'", "]", "=", "max_val", "vpstats", ".", "append", "(", "stats", ")", "return", "vpstats" ]
returns a list of dictionaries of data which can be used to draw a series of violin plots .
train
false
40,950
@lru_cache()
def system_start_time():
    """Provide the unix time when the system started.

    Parses the 'btime' entry of /proc/stat.  Raises IOError when the
    entry cannot be parsed.  Cached: the boot time never changes while
    the process runs.
    """
    start_time, parameter = time.time(), 'system start time'
    btime_line = _get_line('/proc/stat', 'btime', parameter)
    try:
        result = float(btime_line.strip().split()[1])
        _log_runtime(parameter, '/proc/stat[btime]', start_time)
        return result
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        exc = IOError('unable to parse the /proc/stat btime entry: %s' % btime_line)
        _log_failure(parameter, exc)
        raise exc
[ "@", "lru_cache", "(", ")", "def", "system_start_time", "(", ")", ":", "(", "start_time", ",", "parameter", ")", "=", "(", "time", ".", "time", "(", ")", ",", "'system start time'", ")", "btime_line", "=", "_get_line", "(", "'/proc/stat'", ",", "'btime'", ",", "parameter", ")", "try", ":", "result", "=", "float", "(", "btime_line", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "1", "]", ")", "_log_runtime", "(", "parameter", ",", "'/proc/stat[btime]'", ",", "start_time", ")", "return", "result", "except", ":", "exc", "=", "IOError", "(", "(", "'unable to parse the /proc/stat btime entry: %s'", "%", "btime_line", ")", ")", "_log_failure", "(", "parameter", ",", "exc", ")", "raise", "exc" ]
provides the unix time when the system started .
train
false
40,951
def _extended_lookup(connection, project, key_pbs, missing=None, deferred=None,
                     eventual=False, transaction_id=None):
    """Repeat lookup until all keys are found (bounded by _MAX_LOOPS).

    ``missing``/``deferred`` must be None or empty lists.  When a
    ``deferred`` list is supplied, deferred keys are collected into it
    and the loop stops after a single pass instead of retrying them.
    """
    if missing is not None and missing != []:
        raise ValueError('missing must be None or an empty list')
    if deferred is not None and deferred != []:
        raise ValueError('deferred must be None or an empty list')
    results = []
    loop_num = 0
    while loop_num < _MAX_LOOPS:
        loop_num += 1
        results_found, missing_found, deferred_found = connection.lookup(
            project=project, key_pbs=key_pbs, eventual=eventual,
            transaction_id=transaction_id)
        results.extend(results_found)
        if missing is not None:
            missing.extend(missing_found)
        if deferred is not None:
            deferred.extend(deferred_found)
            break
        if not deferred_found:
            break
        # Retry only the keys the backend deferred.
        key_pbs = deferred_found
    return results
[ "def", "_extended_lookup", "(", "connection", ",", "project", ",", "key_pbs", ",", "missing", "=", "None", ",", "deferred", "=", "None", ",", "eventual", "=", "False", ",", "transaction_id", "=", "None", ")", ":", "if", "(", "(", "missing", "is", "not", "None", ")", "and", "(", "missing", "!=", "[", "]", ")", ")", ":", "raise", "ValueError", "(", "'missing must be None or an empty list'", ")", "if", "(", "(", "deferred", "is", "not", "None", ")", "and", "(", "deferred", "!=", "[", "]", ")", ")", ":", "raise", "ValueError", "(", "'deferred must be None or an empty list'", ")", "results", "=", "[", "]", "loop_num", "=", "0", "while", "(", "loop_num", "<", "_MAX_LOOPS", ")", ":", "loop_num", "+=", "1", "(", "results_found", ",", "missing_found", ",", "deferred_found", ")", "=", "connection", ".", "lookup", "(", "project", "=", "project", ",", "key_pbs", "=", "key_pbs", ",", "eventual", "=", "eventual", ",", "transaction_id", "=", "transaction_id", ")", "results", ".", "extend", "(", "results_found", ")", "if", "(", "missing", "is", "not", "None", ")", ":", "missing", ".", "extend", "(", "missing_found", ")", "if", "(", "deferred", "is", "not", "None", ")", ":", "deferred", ".", "extend", "(", "deferred_found", ")", "break", "if", "(", "len", "(", "deferred_found", ")", "==", "0", ")", ":", "break", "key_pbs", "=", "deferred_found", "return", "results" ]
repeat lookup until all keys found .
train
false
40,952
def get_theme_paths(themes, theme_dirs):
    """Get absolute base-dir paths for all the given themes.

    Themes not present in any of ``theme_dirs`` are skipped with a
    warning printed to stdout.
    """
    theme_paths = []
    for theme in themes:
        theme_base_dirs = get_theme_base_dirs(theme, theme_dirs)
        if not theme_base_dirs:
            print (
                "\x1b[91m\nSkipping '{theme}': \nTheme ({theme}) not found in any of the theme dirs ({theme_dirs}). \x1b[00m".format(
                    theme=theme, theme_dirs=', '.join(theme_dirs)),
            )
        theme_paths.extend(theme_base_dirs)
    return theme_paths
[ "def", "get_theme_paths", "(", "themes", ",", "theme_dirs", ")", ":", "theme_paths", "=", "[", "]", "for", "theme", "in", "themes", ":", "theme_base_dirs", "=", "get_theme_base_dirs", "(", "theme", ",", "theme_dirs", ")", "if", "(", "not", "theme_base_dirs", ")", ":", "print", "(", "\"\\x1b[91m\\nSkipping '{theme}': \\nTheme ({theme}) not found in any of the theme dirs ({theme_dirs}). \\x1b[00m\"", ".", "format", "(", "theme", "=", "theme", ",", "theme_dirs", "=", "', '", ".", "join", "(", "theme_dirs", ")", ")", ",", ")", "theme_paths", ".", "extend", "(", "theme_base_dirs", ")", "return", "theme_paths" ]
get absolute path for all the given themes .
train
false
40,954
def test_get_log_destinations_notempty(monkeypatch):
    """WALE_LOG_DESTINATION is set: it becomes the only destination."""
    monkeypatch.setenv('WALE_LOG_DESTINATION', 'syslog')
    destinations = log_help.get_log_destinations()
    assert destinations == ['syslog']
[ "def", "test_get_log_destinations_notempty", "(", "monkeypatch", ")", ":", "monkeypatch", ".", "setenv", "(", "'WALE_LOG_DESTINATION'", ",", "'syslog'", ")", "out", "=", "log_help", ".", "get_log_destinations", "(", ")", "assert", "(", "out", "==", "[", "'syslog'", "]", ")" ]
wale_log_destination is set .
train
false
40,955
def replace_surrogate_decode(mybytes):
    """Return a string decoded from ``mybytes`` using lone-surrogate escapes.

    ASCII values (<= 127) pass through; bytes 0x80-0xFF are mapped to
    U+DC80..U+DCFF (56320 + code); anything else raises
    NotASurrogateError.
    """
    decoded = []
    for ch in mybytes:
        # Items may be ints (py3 bytes) or 1-char strings (py2 str).
        code = ch if isinstance(ch, int) else ord(ch)
        if 128 <= code <= 255:
            decoded.append(_unichr(56320 + code))
        elif code <= 127:
            decoded.append(_unichr(code))
        else:
            raise NotASurrogateError
    return str().join(decoded)
[ "def", "replace_surrogate_decode", "(", "mybytes", ")", ":", "decoded", "=", "[", "]", "for", "ch", "in", "mybytes", ":", "if", "isinstance", "(", "ch", ",", "int", ")", ":", "code", "=", "ch", "else", ":", "code", "=", "ord", "(", "ch", ")", "if", "(", "128", "<=", "code", "<=", "255", ")", ":", "decoded", ".", "append", "(", "_unichr", "(", "(", "56320", "+", "code", ")", ")", ")", "elif", "(", "code", "<=", "127", ")", ":", "decoded", ".", "append", "(", "_unichr", "(", "code", ")", ")", "else", ":", "raise", "NotASurrogateError", "return", "str", "(", ")", ".", "join", "(", "decoded", ")" ]
returns a string .
train
true
40,956
@task.task(ignore_result=True)
def extend_access_token(profile, access_token):
    """Extend the access token via the profile and return the result."""
    return profile._extend_access_token(access_token)
[ "@", "task", ".", "task", "(", "ignore_result", "=", "True", ")", "def", "extend_access_token", "(", "profile", ",", "access_token", ")", ":", "results", "=", "profile", ".", "_extend_access_token", "(", "access_token", ")", "return", "results" ]
extends the access token to 60 days and saves it on the profile .
train
false
40,957
def datetime_parsing(text, base_date=None):
    """Extract datetime matches from a string of text.

    Returns a list of (matched text, parsed value, span) tuples sorted
    by position of the match.  ``base_date`` defaults to the current
    time *at call time*; the original signature used
    ``base_date=datetime.now()``, which is evaluated once at import time
    and silently freezes the reference date for every later call.
    """
    if base_date is None:
        base_date = datetime.now()
    matches = []
    found_array = []
    for expression, function in regex:
        for match in expression.finditer(text):
            matches.append((match.group(), function(match, base_date), match.span()))
    # Wrap each matched substring in <TAG> markers so a substring that
    # was already consumed by an earlier match is not reported again.
    for match, value, spans in matches:
        subn = re.subn('(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)',
                       '<TAG>' + match + '</TAG>', text)
        text = subn[0]
        is_substituted = subn[1]
        if is_substituted != 0:
            found_array.append((match, value, spans))
    return sorted(found_array, key=lambda m: m and m[2][0])
[ "def", "datetime_parsing", "(", "text", ",", "base_date", "=", "datetime", ".", "now", "(", ")", ")", ":", "matches", "=", "[", "]", "found_array", "=", "[", "]", "for", "(", "expression", ",", "function", ")", "in", "regex", ":", "for", "match", "in", "expression", ".", "finditer", "(", "text", ")", ":", "matches", ".", "append", "(", "(", "match", ".", "group", "(", ")", ",", "function", "(", "match", ",", "base_date", ")", ",", "match", ".", "span", "(", ")", ")", ")", "for", "(", "match", ",", "value", ",", "spans", ")", "in", "matches", ":", "subn", "=", "re", ".", "subn", "(", "(", "(", "'(?!<TAG[^>]*?>)'", "+", "match", ")", "+", "'(?![^<]*?</TAG>)'", ")", ",", "(", "(", "'<TAG>'", "+", "match", ")", "+", "'</TAG>'", ")", ",", "text", ")", "text", "=", "subn", "[", "0", "]", "is_substituted", "=", "subn", "[", "1", "]", "if", "(", "is_substituted", "!=", "0", ")", ":", "found_array", ".", "append", "(", "(", "match", ",", "value", ",", "spans", ")", ")", "return", "sorted", "(", "found_array", ",", "key", "=", "(", "lambda", "match", ":", "(", "match", "and", "match", "[", "2", "]", "[", "0", "]", ")", ")", ")" ]
extract datetime objects from a string of text .
train
true
40,960
def sift(seq, keyfunc):
    """Sift the sequence: group items of ``seq`` by ``keyfunc(item)``.

    Returns a defaultdict mapping each key to the list of items that
    produced it, in input order.
    """
    grouped = defaultdict(list)
    for item in seq:
        grouped[keyfunc(item)].append(item)
    return grouped
[ "def", "sift", "(", "seq", ",", "keyfunc", ")", ":", "m", "=", "defaultdict", "(", "list", ")", "for", "i", "in", "seq", ":", "m", "[", "keyfunc", "(", "i", ")", "]", ".", "append", "(", "i", ")", "return", "m" ]
sift the sequence .
train
false
40,961
@open_tab
def interact_page(data, socket):
    """Execute a js event from the front end on the virtual tab."""
    event = json.dumps(data.get('interaction', {}))
    try:
        socket.tab.evaljs('window.livePortiaPage.sendEvent(%s);' % event)
    except JsError as e:
        print(e)
    cookies(socket)
[ "@", "open_tab", "def", "interact_page", "(", "data", ",", "socket", ")", ":", "event", "=", "json", ".", "dumps", "(", "data", ".", "get", "(", "'interaction'", ",", "{", "}", ")", ")", "try", ":", "socket", ".", "tab", ".", "evaljs", "(", "(", "'window.livePortiaPage.sendEvent(%s);'", "%", "event", ")", ")", "except", "JsError", "as", "e", ":", "print", "e", "cookies", "(", "socket", ")" ]
execute js event from front end on virtual tab .
train
false
40,962
def cliques(graph, threshold=3):
    """Return all cliques in the graph with at least ``threshold`` nodes.

    Each clique is a sorted list of node ids; duplicate cliques are
    dropped.
    """
    found = []
    for node in graph.nodes:
        candidate = clique(graph, node.id)
        if len(candidate) >= threshold:
            candidate.sort()
            if candidate not in found:
                found.append(candidate)
    return found
[ "def", "cliques", "(", "graph", ",", "threshold", "=", "3", ")", ":", "a", "=", "[", "]", "for", "n", "in", "graph", ".", "nodes", ":", "c", "=", "clique", "(", "graph", ",", "n", ".", "id", ")", "if", "(", "len", "(", "c", ")", ">=", "threshold", ")", ":", "c", ".", "sort", "(", ")", "if", "(", "c", "not", "in", "a", ")", ":", "a", ".", "append", "(", "c", ")", "return", "a" ]
returns all cliques in the graph with at least the given number of nodes .
train
true
40,963
def ListImages(region):
    """Display the list of images in a given region, marking active ones.

    An image is marked ACTIVE when some running instance uses it.
    """
    all_images = GetImages(region, GetOwnerIDs(region))
    running_images = set(i.image_id for i in ListInstances(region))
    if not all_images:
        print('No images in region %s' % region)
        return
    print('# %-14s %-8s %-40s %-40s' % ('ID', 'Active', 'Name', 'Description'))
    for image in all_images:
        active_str = 'ACTIVE' if image.id in running_images else ''
        print('%-16s %-8s %-40s %-40s'
              % (image.id, active_str, image.name, image.description))
[ "def", "ListImages", "(", "region", ")", ":", "all_images", "=", "GetImages", "(", "region", ",", "GetOwnerIDs", "(", "region", ")", ")", "running_images", "=", "set", "(", "[", "i", ".", "image_id", "for", "i", "in", "ListInstances", "(", "region", ")", "]", ")", "if", "(", "len", "(", "all_images", ")", "==", "0", ")", ":", "print", "(", "'No images in region %s'", "%", "region", ")", "return", "print", "(", "'# %-14s %-8s %-40s %-40s'", "%", "(", "'ID'", ",", "'Active'", ",", "'Name'", ",", "'Description'", ")", ")", "for", "i", "in", "all_images", ":", "active_str", "=", "(", "'ACTIVE'", "if", "(", "i", ".", "id", "in", "running_images", ")", "else", "''", ")", "print", "(", "'%-16s %-8s %-40s %-40s'", "%", "(", "i", ".", "id", ",", "active_str", ",", "i", ".", "name", ",", "i", ".", "description", ")", ")" ]
display the list of images in a given region .
train
false
40,964
def _disable_privilege(privilege_name):
    """Disable the named privilege for this process."""
    # False selects the "disable" state in the shared helper.
    return _change_privilege_state(privilege_name, False)
[ "def", "_disable_privilege", "(", "privilege_name", ")", ":", "return", "_change_privilege_state", "(", "privilege_name", ",", "False", ")" ]
disables the named privilege for this process .
train
false
40,965
def _find_matching_button(category, component_type):
    """Find the single new-component button whose text equals ``component_type``."""
    buttons = world.css_find('div.new-component-{} button'.format(category))
    matching = [button for button in buttons if button.text == component_type]
    # Exactly one button must match; anything else means a broken page.
    assert_equal(len(matching), 1)
    return matching[0]
[ "def", "_find_matching_button", "(", "category", ",", "component_type", ")", ":", "buttons", "=", "world", ".", "css_find", "(", "'div.new-component-{} button'", ".", "format", "(", "category", ")", ")", "matched_buttons", "=", "[", "btn", "for", "btn", "in", "buttons", "if", "(", "btn", ".", "text", "==", "component_type", ")", "]", "assert_equal", "(", "len", "(", "matched_buttons", ")", ",", "1", ")", "return", "matched_buttons", "[", "0", "]" ]
find the button with the specified text .
train
false
40,967
def roots_chebyt(n, mu=False):
    """Gauss-Chebyshev (first kind) quadrature.

    Returns ``(x, w)`` — or ``(x, w, mu0)`` with ``mu0 = pi`` when
    ``mu`` is true — for an ``n``-point rule.  All weights equal
    ``pi / n``.  Raises ValueError unless ``n`` is a positive integer.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Nodes cos((2k-1)pi/(2m)) for k = m..1, yielding increasing order.
    x = np.cos(np.arange(2 * m - 1, 0, -2) * pi / (2 * m))
    w = np.full_like(x, pi / m)
    if mu:
        return (x, w, pi)
    return (x, w)
[ "def", "roots_chebyt", "(", "n", ",", "mu", "=", "False", ")", ":", "m", "=", "int", "(", "n", ")", "if", "(", "(", "n", "<", "1", ")", "or", "(", "n", "!=", "m", ")", ")", ":", "raise", "ValueError", "(", "'n must be a positive integer.'", ")", "x", "=", "np", ".", "cos", "(", "(", "(", "np", ".", "arange", "(", "(", "(", "2", "*", "m", ")", "-", "1", ")", ",", "0", ",", "(", "-", "2", ")", ")", "*", "pi", ")", "/", "(", "2", "*", "m", ")", ")", ")", "w", "=", "np", ".", "empty_like", "(", "x", ")", "w", ".", "fill", "(", "(", "pi", "/", "m", ")", ")", "if", "mu", ":", "return", "(", "x", ",", "w", ",", "pi", ")", "else", ":", "return", "(", "x", ",", "w", ")" ]
gauss-chebyshev quadrature .
train
false
40,968
def delay_exponential(base, growth_factor, attempts):
    """Calculate time to sleep based on an exponential backoff function.

    ``base`` may be the string 'rand' to use a random base in [0, 1);
    otherwise it must be a positive number.  The delay grows as
    ``base * growth_factor ** (attempts - 1)``.
    """
    if base == 'rand':
        base = random.random()
    elif base <= 0:
        raise ValueError("The 'base' param must be greater than 0, got: %s" % base)
    return base * growth_factor ** (attempts - 1)
[ "def", "delay_exponential", "(", "base", ",", "growth_factor", ",", "attempts", ")", ":", "if", "(", "base", "==", "'rand'", ")", ":", "base", "=", "random", ".", "random", "(", ")", "elif", "(", "base", "<=", "0", ")", ":", "raise", "ValueError", "(", "(", "\"The 'base' param must be greater than 0, got: %s\"", "%", "base", ")", ")", "time_to_sleep", "=", "(", "base", "*", "(", "growth_factor", "**", "(", "attempts", "-", "1", ")", ")", ")", "return", "time_to_sleep" ]
calculate time to sleep based on exponential function .
train
true
40,969
def _create_exploration(committer_id, exploration, commit_message, commit_cmds):
    """Persist a brand-new exploration.

    Validates the exploration, ensures that rights for the new
    exploration are saved first, commits the backing model, bumps the
    in-memory version, and creates the matching summary.
    """
    exploration.validate()
    rights_manager.create_new_exploration_rights(exploration.id, committer_id)
    model = exp_models.ExplorationModel(
        id=exploration.id,
        category=exploration.category,
        title=exploration.title,
        objective=exploration.objective,
        language_code=exploration.language_code,
        tags=exploration.tags,
        blurb=exploration.blurb,
        author_notes=exploration.author_notes,
        skin_customizations=exploration.skin_instance.to_dict()[
            'skin_customizations'],
        states_schema_version=exploration.states_schema_version,
        init_state_name=exploration.init_state_name,
        states={name: state.to_dict()
                for (name, state) in exploration.states.iteritems()},
        param_specs=exploration.param_specs_dict,
        param_changes=exploration.param_change_dicts)
    model.commit(committer_id, commit_message, commit_cmds)
    exploration.version += 1
    create_exploration_summary(exploration.id, committer_id)
[ "def", "_create_exploration", "(", "committer_id", ",", "exploration", ",", "commit_message", ",", "commit_cmds", ")", ":", "exploration", ".", "validate", "(", ")", "rights_manager", ".", "create_new_exploration_rights", "(", "exploration", ".", "id", ",", "committer_id", ")", "model", "=", "exp_models", ".", "ExplorationModel", "(", "id", "=", "exploration", ".", "id", ",", "category", "=", "exploration", ".", "category", ",", "title", "=", "exploration", ".", "title", ",", "objective", "=", "exploration", ".", "objective", ",", "language_code", "=", "exploration", ".", "language_code", ",", "tags", "=", "exploration", ".", "tags", ",", "blurb", "=", "exploration", ".", "blurb", ",", "author_notes", "=", "exploration", ".", "author_notes", ",", "skin_customizations", "=", "exploration", ".", "skin_instance", ".", "to_dict", "(", ")", "[", "'skin_customizations'", "]", ",", "states_schema_version", "=", "exploration", ".", "states_schema_version", ",", "init_state_name", "=", "exploration", ".", "init_state_name", ",", "states", "=", "{", "state_name", ":", "state", ".", "to_dict", "(", ")", "for", "(", "state_name", ",", "state", ")", "in", "exploration", ".", "states", ".", "iteritems", "(", ")", "}", ",", "param_specs", "=", "exploration", ".", "param_specs_dict", ",", "param_changes", "=", "exploration", ".", "param_change_dicts", ")", "model", ".", "commit", "(", "committer_id", ",", "commit_message", ",", "commit_cmds", ")", "exploration", ".", "version", "+=", "1", "create_exploration_summary", "(", "exploration", ".", "id", ",", "committer_id", ")" ]
ensures that rights for a new exploration are saved first .
train
false
40,970
def _external_auth_intercept(request, mode):
    """Allow external auth to intercept a login/registration request.

    Unknown modes fall through and return None.
    """
    handlers = {
        'login': external_auth_login,
        'register': external_auth_register,
    }
    handler = handlers.get(mode)
    if handler is not None:
        return handler(request)
[ "def", "_external_auth_intercept", "(", "request", ",", "mode", ")", ":", "if", "(", "mode", "==", "'login'", ")", ":", "return", "external_auth_login", "(", "request", ")", "elif", "(", "mode", "==", "'register'", ")", ":", "return", "external_auth_register", "(", "request", ")" ]
allow external auth to intercept a login/registration request .
train
false
40,971
def get_finalized_params(runnertype_parameter_info, action_parameter_info,
                         liveaction_parameters, action_context):
    """Render live params, then final params.  Left here to keep tests running."""
    live = render_live_params(runnertype_parameter_info, action_parameter_info,
                              liveaction_parameters, action_context)
    return render_final_params(runnertype_parameter_info, action_parameter_info,
                               live, action_context)
[ "def", "get_finalized_params", "(", "runnertype_parameter_info", ",", "action_parameter_info", ",", "liveaction_parameters", ",", "action_context", ")", ":", "params", "=", "render_live_params", "(", "runnertype_parameter_info", ",", "action_parameter_info", ",", "liveaction_parameters", ",", "action_context", ")", "return", "render_final_params", "(", "runnertype_parameter_info", ",", "action_parameter_info", ",", "params", ",", "action_context", ")" ]
left here to keep tests running .
train
false
40,972
def _csr_obtain_cert(config, le_client): (csr, typ) = config.actual_csr (certr, chain) = le_client.obtain_certificate_from_csr(config.domains, csr, typ) if config.dry_run: logger.debug('Dry run: skipping saving certificate to %s', config.cert_path) else: (cert_path, _, cert_fullchain) = le_client.save_certificate(certr, chain, config.cert_path, config.chain_path, config.fullchain_path) _report_new_cert(config, cert_path, cert_fullchain)
[ "def", "_csr_obtain_cert", "(", "config", ",", "le_client", ")", ":", "(", "csr", ",", "typ", ")", "=", "config", ".", "actual_csr", "(", "certr", ",", "chain", ")", "=", "le_client", ".", "obtain_certificate_from_csr", "(", "config", ".", "domains", ",", "csr", ",", "typ", ")", "if", "config", ".", "dry_run", ":", "logger", ".", "debug", "(", "'Dry run: skipping saving certificate to %s'", ",", "config", ".", "cert_path", ")", "else", ":", "(", "cert_path", ",", "_", ",", "cert_fullchain", ")", "=", "le_client", ".", "save_certificate", "(", "certr", ",", "chain", ",", "config", ".", "cert_path", ",", "config", ".", "chain_path", ",", "config", ".", "fullchain_path", ")", "_report_new_cert", "(", "config", ",", "cert_path", ",", "cert_fullchain", ")" ]
obtain a cert using a user-supplied csr this works differently in the csr case because we dont have the privkey .
train
false
40,973
def test_list_with_newlines(): t = ascii.read(['abc', '123\n', '456\n', '\n', '\n']) assert (t.colnames == ['abc']) assert (len(t) == 2) assert (t[0][0] == 123) assert (t[1][0] == 456)
[ "def", "test_list_with_newlines", "(", ")", ":", "t", "=", "ascii", ".", "read", "(", "[", "'abc'", ",", "'123\\n'", ",", "'456\\n'", ",", "'\\n'", ",", "'\\n'", "]", ")", "assert", "(", "t", ".", "colnames", "==", "[", "'abc'", "]", ")", "assert", "(", "len", "(", "t", ")", "==", "2", ")", "assert", "(", "t", "[", "0", "]", "[", "0", "]", "==", "123", ")", "assert", "(", "t", "[", "1", "]", "[", "0", "]", "==", "456", ")" ]
check that lists of strings where some strings consist of just a newline ") are parsed correctly .
train
false
40,975
def asymmetric_temporal_padding(x, left_pad=1, right_pad=1): input_shape = x.shape output_shape = (input_shape[0], ((input_shape[1] + left_pad) + right_pad), input_shape[2]) output = T.zeros(output_shape) return T.set_subtensor(output[:, left_pad:(x.shape[1] + left_pad), :], x)
[ "def", "asymmetric_temporal_padding", "(", "x", ",", "left_pad", "=", "1", ",", "right_pad", "=", "1", ")", ":", "input_shape", "=", "x", ".", "shape", "output_shape", "=", "(", "input_shape", "[", "0", "]", ",", "(", "(", "input_shape", "[", "1", "]", "+", "left_pad", ")", "+", "right_pad", ")", ",", "input_shape", "[", "2", "]", ")", "output", "=", "T", ".", "zeros", "(", "output_shape", ")", "return", "T", ".", "set_subtensor", "(", "output", "[", ":", ",", "left_pad", ":", "(", "x", ".", "shape", "[", "1", "]", "+", "left_pad", ")", ",", ":", "]", ",", "x", ")" ]
pad the middle dimension of a 3d tensor with "left_pad" zeros left and "right_pad" right .
train
false
40,976
def add_users_in_conference(id, user, users): if checking_conference(id): conferences = get_memcached(get_key('conferences')) for val in users: conferences[id]['users'][val] = {'messages': [], 'locations': []} set_memcached(get_key('conferences'), conferences) return get_new_message_for_user(user)
[ "def", "add_users_in_conference", "(", "id", ",", "user", ",", "users", ")", ":", "if", "checking_conference", "(", "id", ")", ":", "conferences", "=", "get_memcached", "(", "get_key", "(", "'conferences'", ")", ")", "for", "val", "in", "users", ":", "conferences", "[", "id", "]", "[", "'users'", "]", "[", "val", "]", "=", "{", "'messages'", ":", "[", "]", ",", "'locations'", ":", "[", "]", "}", "set_memcached", "(", "get_key", "(", "'conferences'", ")", ",", "conferences", ")", "return", "get_new_message_for_user", "(", "user", ")" ]
add users in conference .
train
false
40,977
def _clean_flags(args, caller): flags = '' if (args is None): return flags allowed = ('a', 'B', 'h', 'H', 'i', 'k', 'l', 'P', 't', 'T', 'x', 'v') for flag in args: if (flag in allowed): flags += flag else: raise CommandExecutionError('Invalid flag passed to {0}'.format(caller)) return flags
[ "def", "_clean_flags", "(", "args", ",", "caller", ")", ":", "flags", "=", "''", "if", "(", "args", "is", "None", ")", ":", "return", "flags", "allowed", "=", "(", "'a'", ",", "'B'", ",", "'h'", ",", "'H'", ",", "'i'", ",", "'k'", ",", "'l'", ",", "'P'", ",", "'t'", ",", "'T'", ",", "'x'", ",", "'v'", ")", "for", "flag", "in", "args", ":", "if", "(", "flag", "in", "allowed", ")", ":", "flags", "+=", "flag", "else", ":", "raise", "CommandExecutionError", "(", "'Invalid flag passed to {0}'", ".", "format", "(", "caller", ")", ")", "return", "flags" ]
sanitize flags passed into df .
train
true
40,980
def divisor_count(n, modulus=1): if (not modulus): return 0 elif (modulus != 1): (n, r) = divmod(n, modulus) if r: return 0 if (n == 0): return 0 return Mul(*[(v + 1) for (k, v) in factorint(n).items() if (k > 1)])
[ "def", "divisor_count", "(", "n", ",", "modulus", "=", "1", ")", ":", "if", "(", "not", "modulus", ")", ":", "return", "0", "elif", "(", "modulus", "!=", "1", ")", ":", "(", "n", ",", "r", ")", "=", "divmod", "(", "n", ",", "modulus", ")", "if", "r", ":", "return", "0", "if", "(", "n", "==", "0", ")", ":", "return", "0", "return", "Mul", "(", "*", "[", "(", "v", "+", "1", ")", "for", "(", "k", ",", "v", ")", "in", "factorint", "(", "n", ")", ".", "items", "(", ")", "if", "(", "k", ">", "1", ")", "]", ")" ]
return the number of divisors of n .
train
false
40,981
def urlunparse(components): (scheme, netloc, url, params, query, fragment, _coerce_result) = _coerce_args(*components) if params: url = ('%s;%s' % (url, params)) return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
[ "def", "urlunparse", "(", "components", ")", ":", "(", "scheme", ",", "netloc", ",", "url", ",", "params", ",", "query", ",", "fragment", ",", "_coerce_result", ")", "=", "_coerce_args", "(", "*", "components", ")", "if", "params", ":", "url", "=", "(", "'%s;%s'", "%", "(", "url", ",", "params", ")", ")", "return", "_coerce_result", "(", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "url", ",", "query", ",", "fragment", ")", ")", ")" ]
put a parsed url back together again .
train
true
40,982
def deconv_figs(layer_name, layer_data, fm_max=8, plot_size=120): vis_keys = dict() img_keys = dict() fig_dict = dict() for (fm_num, (fm_name, deconv_data, img_data)) in enumerate(layer_data): if (fm_num >= fm_max): break (img_h, img_w) = img_data.shape x_range = Range1d(start=0, end=img_w) y_range = Range1d(start=0, end=img_h) img_fig = image_fig(img_data, img_h, img_w, x_range, y_range, plot_size) deconv_fig = image_fig(deconv_data, img_h, img_w, x_range, y_range, plot_size) title = '{}_fmap_{:04d}'.format(layer_name, fm_num) vis_keys[fm_num] = ('vis_' + title) img_keys[fm_num] = ('img_' + title) fig_dict[vis_keys[fm_num]] = deconv_fig fig_dict[img_keys[fm_num]] = img_fig return (vis_keys, img_keys, fig_dict)
[ "def", "deconv_figs", "(", "layer_name", ",", "layer_data", ",", "fm_max", "=", "8", ",", "plot_size", "=", "120", ")", ":", "vis_keys", "=", "dict", "(", ")", "img_keys", "=", "dict", "(", ")", "fig_dict", "=", "dict", "(", ")", "for", "(", "fm_num", ",", "(", "fm_name", ",", "deconv_data", ",", "img_data", ")", ")", "in", "enumerate", "(", "layer_data", ")", ":", "if", "(", "fm_num", ">=", "fm_max", ")", ":", "break", "(", "img_h", ",", "img_w", ")", "=", "img_data", ".", "shape", "x_range", "=", "Range1d", "(", "start", "=", "0", ",", "end", "=", "img_w", ")", "y_range", "=", "Range1d", "(", "start", "=", "0", ",", "end", "=", "img_h", ")", "img_fig", "=", "image_fig", "(", "img_data", ",", "img_h", ",", "img_w", ",", "x_range", ",", "y_range", ",", "plot_size", ")", "deconv_fig", "=", "image_fig", "(", "deconv_data", ",", "img_h", ",", "img_w", ",", "x_range", ",", "y_range", ",", "plot_size", ")", "title", "=", "'{}_fmap_{:04d}'", ".", "format", "(", "layer_name", ",", "fm_num", ")", "vis_keys", "[", "fm_num", "]", "=", "(", "'vis_'", "+", "title", ")", "img_keys", "[", "fm_num", "]", "=", "(", "'img_'", "+", "title", ")", "fig_dict", "[", "vis_keys", "[", "fm_num", "]", "]", "=", "deconv_fig", "fig_dict", "[", "img_keys", "[", "fm_num", "]", "]", "=", "img_fig", "return", "(", "vis_keys", ",", "img_keys", ",", "fig_dict", ")" ]
helper function to generate deconv visualization figures arguments: layer_name : layer name layer_data : layer data to plot fm_max : max layers to process plot_size : plot size returns: tuple if vis_keys .
train
false
40,983
def _safe_toposort(data): if (len(data) == 0): return t = _toposort(data) while True: try: value = next(t) (yield value) except ValueError as err: log.debug(err.args[0]) if (not data): return (yield pop_key(data)) t = _toposort(data) continue except StopIteration: return
[ "def", "_safe_toposort", "(", "data", ")", ":", "if", "(", "len", "(", "data", ")", "==", "0", ")", ":", "return", "t", "=", "_toposort", "(", "data", ")", "while", "True", ":", "try", ":", "value", "=", "next", "(", "t", ")", "(", "yield", "value", ")", "except", "ValueError", "as", "err", ":", "log", ".", "debug", "(", "err", ".", "args", "[", "0", "]", ")", "if", "(", "not", "data", ")", ":", "return", "(", "yield", "pop_key", "(", "data", ")", ")", "t", "=", "_toposort", "(", "data", ")", "continue", "except", "StopIteration", ":", "return" ]
dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items .
train
false
40,984
def generate_int_id(minimum=0, maximum=MYSQL_MAX_INT, used_ids=None): if (used_ids is None): used_ids = [] cid = random.randint(minimum, maximum) while (cid in used_ids): cid = random.randint(minimum, maximum) return cid
[ "def", "generate_int_id", "(", "minimum", "=", "0", ",", "maximum", "=", "MYSQL_MAX_INT", ",", "used_ids", "=", "None", ")", ":", "if", "(", "used_ids", "is", "None", ")", ":", "used_ids", "=", "[", "]", "cid", "=", "random", ".", "randint", "(", "minimum", ",", "maximum", ")", "while", "(", "cid", "in", "used_ids", ")", ":", "cid", "=", "random", ".", "randint", "(", "minimum", ",", "maximum", ")", "return", "cid" ]
return a unique integer in the range [minimum .
train
false
40,985
def getEvaluatedStringDefault(defaultString, key, xmlElement=None): evaluatedString = getEvaluatedString(key, xmlElement) if (evaluatedString == None): return defaultString return evaluatedString
[ "def", "getEvaluatedStringDefault", "(", "defaultString", ",", "key", ",", "xmlElement", "=", "None", ")", ":", "evaluatedString", "=", "getEvaluatedString", "(", "key", ",", "xmlElement", ")", "if", "(", "evaluatedString", "==", "None", ")", ":", "return", "defaultString", "return", "evaluatedString" ]
get the evaluated value as a string .
train
false
40,988
def gradient_darker(grad, factor): if (type(grad) is QGradient): if (grad.type() == QGradient.LinearGradient): grad = sip.cast(grad, QLinearGradient) elif (grad.type() == QGradient.RadialGradient): grad = sip.cast(grad, QRadialGradient) if isinstance(grad, QLinearGradient): new_grad = QLinearGradient(grad.start(), grad.finalStop()) elif isinstance(grad, QRadialGradient): new_grad = QRadialGradient(grad.center(), grad.radius(), grad.focalPoint()) else: raise TypeError new_grad.setCoordinateMode(grad.coordinateMode()) for (pos, color) in grad.stops(): new_grad.setColorAt(pos, color.darker(factor)) return new_grad
[ "def", "gradient_darker", "(", "grad", ",", "factor", ")", ":", "if", "(", "type", "(", "grad", ")", "is", "QGradient", ")", ":", "if", "(", "grad", ".", "type", "(", ")", "==", "QGradient", ".", "LinearGradient", ")", ":", "grad", "=", "sip", ".", "cast", "(", "grad", ",", "QLinearGradient", ")", "elif", "(", "grad", ".", "type", "(", ")", "==", "QGradient", ".", "RadialGradient", ")", ":", "grad", "=", "sip", ".", "cast", "(", "grad", ",", "QRadialGradient", ")", "if", "isinstance", "(", "grad", ",", "QLinearGradient", ")", ":", "new_grad", "=", "QLinearGradient", "(", "grad", ".", "start", "(", ")", ",", "grad", ".", "finalStop", "(", ")", ")", "elif", "isinstance", "(", "grad", ",", "QRadialGradient", ")", ":", "new_grad", "=", "QRadialGradient", "(", "grad", ".", "center", "(", ")", ",", "grad", ".", "radius", "(", ")", ",", "grad", ".", "focalPoint", "(", ")", ")", "else", ":", "raise", "TypeError", "new_grad", ".", "setCoordinateMode", "(", "grad", ".", "coordinateMode", "(", ")", ")", "for", "(", "pos", ",", "color", ")", "in", "grad", ".", "stops", "(", ")", ":", "new_grad", ".", "setColorAt", "(", "pos", ",", "color", ".", "darker", "(", "factor", ")", ")", "return", "new_grad" ]
return a copy of the qgradient darkened by factor .
train
false
40,990
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None): hashfunc = (hashfunc or hashlib.sha1) mac = hmac.new(data, None, hashfunc) def _pseudorandom(x, mac=mac): h = mac.copy() h.update(x) return map(ord, h.digest()) buf = [] for block in xrange(1, ((- ((- keylen) // mac.digest_size)) + 1)): rv = u = _pseudorandom((salt + _pack_int(block))) for i in xrange((iterations - 1)): u = _pseudorandom(''.join(map(chr, u))) rv = starmap(xor, izip(rv, u)) buf.extend(rv) return ''.join(map(chr, buf))[:keylen]
[ "def", "pbkdf2_bin", "(", "data", ",", "salt", ",", "iterations", "=", "1000", ",", "keylen", "=", "24", ",", "hashfunc", "=", "None", ")", ":", "hashfunc", "=", "(", "hashfunc", "or", "hashlib", ".", "sha1", ")", "mac", "=", "hmac", ".", "new", "(", "data", ",", "None", ",", "hashfunc", ")", "def", "_pseudorandom", "(", "x", ",", "mac", "=", "mac", ")", ":", "h", "=", "mac", ".", "copy", "(", ")", "h", ".", "update", "(", "x", ")", "return", "map", "(", "ord", ",", "h", ".", "digest", "(", ")", ")", "buf", "=", "[", "]", "for", "block", "in", "xrange", "(", "1", ",", "(", "(", "-", "(", "(", "-", "keylen", ")", "//", "mac", ".", "digest_size", ")", ")", "+", "1", ")", ")", ":", "rv", "=", "u", "=", "_pseudorandom", "(", "(", "salt", "+", "_pack_int", "(", "block", ")", ")", ")", "for", "i", "in", "xrange", "(", "(", "iterations", "-", "1", ")", ")", ":", "u", "=", "_pseudorandom", "(", "''", ".", "join", "(", "map", "(", "chr", ",", "u", ")", ")", ")", "rv", "=", "starmap", "(", "xor", ",", "izip", "(", "rv", ",", "u", ")", ")", "buf", ".", "extend", "(", "rv", ")", "return", "''", ".", "join", "(", "map", "(", "chr", ",", "buf", ")", ")", "[", ":", "keylen", "]" ]
returns a binary digest for the pbkdf2 hash algorithm of data with the given salt .
train
true
40,991
def has_binding(api): module_name = api_to_module[api] import imp try: mod = import_module(module_name) imp.find_module('QtCore', mod.__path__) imp.find_module('QtGui', mod.__path__) imp.find_module('QtSvg', mod.__path__) if (api in (QT_API_PYQT5, QT_API_PYSIDE2)): imp.find_module('QtWidgets', mod.__path__) if (api == QT_API_PYSIDE): return check_version(mod.__version__, '1.0.3') else: return True except ImportError: return False
[ "def", "has_binding", "(", "api", ")", ":", "module_name", "=", "api_to_module", "[", "api", "]", "import", "imp", "try", ":", "mod", "=", "import_module", "(", "module_name", ")", "imp", ".", "find_module", "(", "'QtCore'", ",", "mod", ".", "__path__", ")", "imp", ".", "find_module", "(", "'QtGui'", ",", "mod", ".", "__path__", ")", "imp", ".", "find_module", "(", "'QtSvg'", ",", "mod", ".", "__path__", ")", "if", "(", "api", "in", "(", "QT_API_PYQT5", ",", "QT_API_PYSIDE2", ")", ")", ":", "imp", ".", "find_module", "(", "'QtWidgets'", ",", "mod", ".", "__path__", ")", "if", "(", "api", "==", "QT_API_PYSIDE", ")", ":", "return", "check_version", "(", "mod", ".", "__version__", ",", "'1.0.3'", ")", "else", ":", "return", "True", "except", "ImportError", ":", "return", "False" ]
safely check for pyqt4/5 .
train
false
40,992
def unset_macosx_deployment_target(): if ('MACOSX_DEPLOYMENT_TARGET' in os.environ): del os.environ['MACOSX_DEPLOYMENT_TARGET']
[ "def", "unset_macosx_deployment_target", "(", ")", ":", "if", "(", "'MACOSX_DEPLOYMENT_TARGET'", "in", "os", ".", "environ", ")", ":", "del", "os", ".", "environ", "[", "'MACOSX_DEPLOYMENT_TARGET'", "]" ]
unset macosx_deployment_target because we are not building portable libraries .
train
false
40,995
def fmtPercentage(float_value, point=1): fmt = ('%' + ('0.%(b)df' % {'b': point})) return (locale.format_string(fmt, float_value) + '%')
[ "def", "fmtPercentage", "(", "float_value", ",", "point", "=", "1", ")", ":", "fmt", "=", "(", "'%'", "+", "(", "'0.%(b)df'", "%", "{", "'b'", ":", "point", "}", ")", ")", "return", "(", "locale", ".", "format_string", "(", "fmt", ",", "float_value", ")", "+", "'%'", ")" ]
return float with percentage sign .
train
false
40,996
@treeio_login_required @handle_response_format @_process_mass_form def field_view(request, field_id, response_format='html'): field = get_object_or_404(ItemField, pk=field_id) if (not request.user.profile.has_permission(field)): return user_denied(request, message="You don't have access to this Field Type", response_format=response_format) context = _get_default_context(request) context.update({'field': field}) return render_to_response('infrastructure/field_view', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "treeio_login_required", "@", "handle_response_format", "@", "_process_mass_form", "def", "field_view", "(", "request", ",", "field_id", ",", "response_format", "=", "'html'", ")", ":", "field", "=", "get_object_or_404", "(", "ItemField", ",", "pk", "=", "field_id", ")", "if", "(", "not", "request", ".", "user", ".", "profile", ".", "has_permission", "(", "field", ")", ")", ":", "return", "user_denied", "(", "request", ",", "message", "=", "\"You don't have access to this Field Type\"", ",", "response_format", "=", "response_format", ")", "context", "=", "_get_default_context", "(", "request", ")", "context", ".", "update", "(", "{", "'field'", ":", "field", "}", ")", "return", "render_to_response", "(", "'infrastructure/field_view'", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
itemfield view .
train
false
40,998
def config_shortcut(action, context, name, parent): keystr = get_shortcut(context, name) qsc = QShortcut(QKeySequence(keystr), parent, action) qsc.setContext(Qt.WidgetWithChildrenShortcut) sc = Shortcut(data=(qsc, context, name)) return sc
[ "def", "config_shortcut", "(", "action", ",", "context", ",", "name", ",", "parent", ")", ":", "keystr", "=", "get_shortcut", "(", "context", ",", "name", ")", "qsc", "=", "QShortcut", "(", "QKeySequence", "(", "keystr", ")", ",", "parent", ",", "action", ")", "qsc", ".", "setContext", "(", "Qt", ".", "WidgetWithChildrenShortcut", ")", "sc", "=", "Shortcut", "(", "data", "=", "(", "qsc", ",", "context", ",", "name", ")", ")", "return", "sc" ]
create a shortcut namedtuple for a widget the data contained in this tuple will be registered in our shortcuts preferences page .
train
true
40,999
def get_cursor(source, spelling): root_cursor = (source if isinstance(source, Cursor) else source.cursor) for cursor in root_cursor.walk_preorder(): if (cursor.spelling == spelling): return cursor return None
[ "def", "get_cursor", "(", "source", ",", "spelling", ")", ":", "root_cursor", "=", "(", "source", "if", "isinstance", "(", "source", ",", "Cursor", ")", "else", "source", ".", "cursor", ")", "for", "cursor", "in", "root_cursor", ".", "walk_preorder", "(", ")", ":", "if", "(", "cursor", ".", "spelling", "==", "spelling", ")", ":", "return", "cursor", "return", "None" ]
obtain a cursor from a source object .
train
false
41,000
def get_changectx_for_changeset(repo, changeset_revision, **kwd): for changeset in repo.changelog: ctx = repo.changectx(changeset) if (str(ctx) == changeset_revision): return ctx return None
[ "def", "get_changectx_for_changeset", "(", "repo", ",", "changeset_revision", ",", "**", "kwd", ")", ":", "for", "changeset", "in", "repo", ".", "changelog", ":", "ctx", "=", "repo", ".", "changectx", "(", "changeset", ")", "if", "(", "str", "(", "ctx", ")", "==", "changeset_revision", ")", ":", "return", "ctx", "return", "None" ]
retrieve a specified changectx from a repository .
train
false
41,001
def minute_frame_to_session_frame(minute_frame, calendar): how = OrderedDict(((c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns)) return minute_frame.groupby(calendar.minute_to_session_label).agg(how)
[ "def", "minute_frame_to_session_frame", "(", "minute_frame", ",", "calendar", ")", ":", "how", "=", "OrderedDict", "(", "(", "(", "c", ",", "_MINUTE_TO_SESSION_OHCLV_HOW", "[", "c", "]", ")", "for", "c", "in", "minute_frame", ".", "columns", ")", ")", "return", "minute_frame", ".", "groupby", "(", "calendar", ".", "minute_to_session_label", ")", ".", "agg", "(", "how", ")" ]
resample a dataframe with minute data into the frame expected by a bcolzdailybarwriter .
train
false
41,002
def register_sentry(client, worker): def send_to_sentry(job, *exc_info): client.captureException(exc_info=exc_info, extra={u'job_id': job.id, u'func': job.func_name, u'args': job.args, u'kwargs': job.kwargs, u'description': job.description}) worker.push_exc_handler(send_to_sentry)
[ "def", "register_sentry", "(", "client", ",", "worker", ")", ":", "def", "send_to_sentry", "(", "job", ",", "*", "exc_info", ")", ":", "client", ".", "captureException", "(", "exc_info", "=", "exc_info", ",", "extra", "=", "{", "u'job_id'", ":", "job", ".", "id", ",", "u'func'", ":", "job", ".", "func_name", ",", "u'args'", ":", "job", ".", "args", ",", "u'kwargs'", ":", "job", ".", "kwargs", ",", "u'description'", ":", "job", ".", "description", "}", ")", "worker", ".", "push_exc_handler", "(", "send_to_sentry", ")" ]
given a raven client and an rq worker .
train
false
41,003
def progressbar(iterable=None, length=None, label=None, show_eta=True, show_percent=None, show_pos=False, item_show_func=None, fill_char='#', empty_char='-', bar_template='%(label)s [%(bar)s] %(info)s', info_sep=' ', width=36, file=None, color=None): from ._termui_impl import ProgressBar color = resolve_color_default(color) return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, show_percent=show_percent, show_pos=show_pos, item_show_func=item_show_func, fill_char=fill_char, empty_char=empty_char, bar_template=bar_template, info_sep=info_sep, file=file, label=label, width=width, color=color)
[ "def", "progressbar", "(", "iterable", "=", "None", ",", "length", "=", "None", ",", "label", "=", "None", ",", "show_eta", "=", "True", ",", "show_percent", "=", "None", ",", "show_pos", "=", "False", ",", "item_show_func", "=", "None", ",", "fill_char", "=", "'#'", ",", "empty_char", "=", "'-'", ",", "bar_template", "=", "'%(label)s [%(bar)s] %(info)s'", ",", "info_sep", "=", "' '", ",", "width", "=", "36", ",", "file", "=", "None", ",", "color", "=", "None", ")", ":", "from", ".", "_termui_impl", "import", "ProgressBar", "color", "=", "resolve_color_default", "(", "color", ")", "return", "ProgressBar", "(", "iterable", "=", "iterable", ",", "length", "=", "length", ",", "show_eta", "=", "show_eta", ",", "show_percent", "=", "show_percent", ",", "show_pos", "=", "show_pos", ",", "item_show_func", "=", "item_show_func", ",", "fill_char", "=", "fill_char", ",", "empty_char", "=", "empty_char", ",", "bar_template", "=", "bar_template", ",", "info_sep", "=", "info_sep", ",", "file", "=", "file", ",", "label", "=", "label", ",", "width", "=", "width", ",", "color", "=", "color", ")" ]
this function creates an iterable context manager that can be used to iterate over something while showing a progress bar .
train
true
41,004
def _create_sum_pdist(numsamples): fdist = FreqDist() for x in range(1, (((1 + numsamples) // 2) + 1)): for y in range(0, ((numsamples // 2) + 1)): fdist[(x + y)] += 1 return MLEProbDist(fdist)
[ "def", "_create_sum_pdist", "(", "numsamples", ")", ":", "fdist", "=", "FreqDist", "(", ")", "for", "x", "in", "range", "(", "1", ",", "(", "(", "(", "1", "+", "numsamples", ")", "//", "2", ")", "+", "1", ")", ")", ":", "for", "y", "in", "range", "(", "0", ",", "(", "(", "numsamples", "//", "2", ")", "+", "1", ")", ")", ":", "fdist", "[", "(", "x", "+", "y", ")", "]", "+=", "1", "return", "MLEProbDist", "(", "fdist", ")" ]
return the true probability distribution for the experiment _create_rand_fdist .
train
false
41,006
def fprob(f, dfn, dfd, direction='high'): if (f < 0.0): return nan elif (direction == 'high'): return (1.0 - fdist.cdf(f, dfn, dfd)) elif (direction == 'low'): return fdist.cdf(f, dfn, dfd) else: raise ValueError('Unknown direction.')
[ "def", "fprob", "(", "f", ",", "dfn", ",", "dfd", ",", "direction", "=", "'high'", ")", ":", "if", "(", "f", "<", "0.0", ")", ":", "return", "nan", "elif", "(", "direction", "==", "'high'", ")", ":", "return", "(", "1.0", "-", "fdist", ".", "cdf", "(", "f", ",", "dfn", ",", "dfd", ")", ")", "elif", "(", "direction", "==", "'low'", ")", ":", "return", "fdist", ".", "cdf", "(", "f", ",", "dfn", ",", "dfd", ")", "else", ":", "raise", "ValueError", "(", "'Unknown direction.'", ")" ]
calculate probability from f distribution paramaters f : float value of f statistic dfn : float degrees of freedom for ??? dfd : float degrees of freedom for ??? direction : str one of low or high .
train
false
41,010
def preBuildStatic(static): pass
[ "def", "preBuildStatic", "(", "static", ")", ":", "pass" ]
called before building a static file .
train
false
41,011
def finalize_process(proc, **kwargs): proc.wait(**kwargs)
[ "def", "finalize_process", "(", "proc", ",", "**", "kwargs", ")", ":", "proc", ".", "wait", "(", "**", "kwargs", ")" ]
wait for the process and handle its errors accordingly .
train
false
41,012
def git_deploy(appname, repo): appfolder = ((applications + '/') + appname) backup = mkdir_or_backup(appname) if exists(appfolder): with cd(appfolder): sudo('git pull origin master') sudo('chown -R www-data:www-data *') else: with cd(applications): sudo(('git clone git@github.com/%s %s' % (repo, name))) sudo(('chown -R www-data:www-data %s' % name))
[ "def", "git_deploy", "(", "appname", ",", "repo", ")", ":", "appfolder", "=", "(", "(", "applications", "+", "'/'", ")", "+", "appname", ")", "backup", "=", "mkdir_or_backup", "(", "appname", ")", "if", "exists", "(", "appfolder", ")", ":", "with", "cd", "(", "appfolder", ")", ":", "sudo", "(", "'git pull origin master'", ")", "sudo", "(", "'chown -R www-data:www-data *'", ")", "else", ":", "with", "cd", "(", "applications", ")", ":", "sudo", "(", "(", "'git clone git@github.com/%s %s'", "%", "(", "repo", ",", "name", ")", ")", ")", "sudo", "(", "(", "'chown -R www-data:www-data %s'", "%", "name", ")", ")" ]
fab -h username@host git_deploy:appname .
train
false
41,013
def _get_root_dir(): root_dir = op.abspath(op.dirname(__file__)) up_dir = op.join(root_dir, '..') if (op.isfile(op.join(up_dir, 'setup.py')) and all((op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')))): root_dir = op.abspath(up_dir) return root_dir
[ "def", "_get_root_dir", "(", ")", ":", "root_dir", "=", "op", ".", "abspath", "(", "op", ".", "dirname", "(", "__file__", ")", ")", "up_dir", "=", "op", ".", "join", "(", "root_dir", ",", "'..'", ")", "if", "(", "op", ".", "isfile", "(", "op", ".", "join", "(", "up_dir", ",", "'setup.py'", ")", ")", "and", "all", "(", "(", "op", ".", "isdir", "(", "op", ".", "join", "(", "up_dir", ",", "x", ")", ")", "for", "x", "in", "(", "'mne'", ",", "'examples'", ",", "'doc'", ")", ")", ")", ")", ":", "root_dir", "=", "op", ".", "abspath", "(", "up_dir", ")", "return", "root_dir" ]
get as close to the repo root as possible .
train
false
41,014
def _clean_check(cmd, target): try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise
[ "def", "_clean_check", "(", "cmd", ",", "target", ")", ":", "try", ":", "subprocess", ".", "check_call", "(", "cmd", ")", "except", "subprocess", ".", "CalledProcessError", ":", "if", "os", ".", "access", "(", "target", ",", "os", ".", "F_OK", ")", ":", "os", ".", "unlink", "(", "target", ")", "raise" ]
run the command to download target .
train
true
41,016
def get_sample_data(fname, asfileobj=True): import matplotlib if matplotlib.rcParams[u'examples.directory']: root = matplotlib.rcParams[u'examples.directory'] else: root = os.path.join(matplotlib._get_data_path(), u'sample_data') path = os.path.join(root, fname) if asfileobj: if (os.path.splitext(fname)[(-1)].lower() in (u'.csv', u'.xrc', u'.txt')): mode = u'r' else: mode = u'rb' (base, ext) = os.path.splitext(fname) if (ext == u'.gz'): return gzip.open(path, mode) else: return open(path, mode) else: return path
[ "def", "get_sample_data", "(", "fname", ",", "asfileobj", "=", "True", ")", ":", "import", "matplotlib", "if", "matplotlib", ".", "rcParams", "[", "u'examples.directory'", "]", ":", "root", "=", "matplotlib", ".", "rcParams", "[", "u'examples.directory'", "]", "else", ":", "root", "=", "os", ".", "path", ".", "join", "(", "matplotlib", ".", "_get_data_path", "(", ")", ",", "u'sample_data'", ")", "path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "fname", ")", "if", "asfileobj", ":", "if", "(", "os", ".", "path", ".", "splitext", "(", "fname", ")", "[", "(", "-", "1", ")", "]", ".", "lower", "(", ")", "in", "(", "u'.csv'", ",", "u'.xrc'", ",", "u'.txt'", ")", ")", ":", "mode", "=", "u'r'", "else", ":", "mode", "=", "u'rb'", "(", "base", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "if", "(", "ext", "==", "u'.gz'", ")", ":", "return", "gzip", ".", "open", "(", "path", ",", "mode", ")", "else", ":", "return", "open", "(", "path", ",", "mode", ")", "else", ":", "return", "path" ]
return a sample data file .
train
false
41,017
def get_resource_container(context): return context.get(RESOURCE_CONTAINER_VAR_NAME)
[ "def", "get_resource_container", "(", "context", ")", ":", "return", "context", ".", "get", "(", "RESOURCE_CONTAINER_VAR_NAME", ")" ]
get a resourcecontainer from a rendering context .
train
false
41,018
def entry_analyze(fig, data, n): data2 = data.sort_index(by='exit_profit') data_long = data2[data2['islong']] data_short = data2[(data2['islong'] == False)] exit_profit = data2['exit_profit'] entry_best = pd.concat([data_long.high_profit, data_short.low_profit]).reindex(data2.index) entry_worst = pd.concat([data_long.low_profit, data_short.high_profit]).reindex(data2.index) try: entry_nbar_best = data2['entry_nbar_best'] entry_nbar_worst = data2['entry_nbar_worst'] except Exception as e: entry_nbar_best = [] entry_nbar_worst = [] return plot_entry(fig, exit_profit, entry_best, entry_worst, entry_nbar_best, entry_nbar_worst, n)
[ "def", "entry_analyze", "(", "fig", ",", "data", ",", "n", ")", ":", "data2", "=", "data", ".", "sort_index", "(", "by", "=", "'exit_profit'", ")", "data_long", "=", "data2", "[", "data2", "[", "'islong'", "]", "]", "data_short", "=", "data2", "[", "(", "data2", "[", "'islong'", "]", "==", "False", ")", "]", "exit_profit", "=", "data2", "[", "'exit_profit'", "]", "entry_best", "=", "pd", ".", "concat", "(", "[", "data_long", ".", "high_profit", ",", "data_short", ".", "low_profit", "]", ")", ".", "reindex", "(", "data2", ".", "index", ")", "entry_worst", "=", "pd", ".", "concat", "(", "[", "data_long", ".", "low_profit", ",", "data_short", ".", "high_profit", "]", ")", ".", "reindex", "(", "data2", ".", "index", ")", "try", ":", "entry_nbar_best", "=", "data2", "[", "'entry_nbar_best'", "]", "entry_nbar_worst", "=", "data2", "[", "'entry_nbar_worst'", "]", "except", "Exception", "as", "e", ":", "entry_nbar_best", "=", "[", "]", "entry_nbar_worst", "=", "[", "]", "return", "plot_entry", "(", "fig", ",", "exit_profit", ",", "entry_best", ",", "entry_worst", ",", "entry_nbar_best", ",", "entry_nbar_worst", ",", "n", ")" ]
data: ���� .
train
false
41,019
def ConvertSpecials(p): if (p is None): p = 'None' elif (p.lower() == T('Default').lower()): p = '' return p
[ "def", "ConvertSpecials", "(", "p", ")", ":", "if", "(", "p", "is", "None", ")", ":", "p", "=", "'None'", "elif", "(", "p", ".", "lower", "(", ")", "==", "T", "(", "'Default'", ")", ".", "lower", "(", ")", ")", ":", "p", "=", "''", "return", "p" ]
convert none to none and default to .
train
false
41,020
def _resolve_path(path): result = '' for m in _ESCAPE_RE.finditer(path): if m.group('escaped'): result += m.group('escaped') elif m.group('unescaped'): result += expand_path(m.group('unescaped')) else: raise ValueError('No escaped character') return result
[ "def", "_resolve_path", "(", "path", ")", ":", "result", "=", "''", "for", "m", "in", "_ESCAPE_RE", ".", "finditer", "(", "path", ")", ":", "if", "m", ".", "group", "(", "'escaped'", ")", ":", "result", "+=", "m", ".", "group", "(", "'escaped'", ")", "elif", "m", ".", "group", "(", "'unescaped'", ")", ":", "result", "+=", "expand_path", "(", "m", ".", "group", "(", "'unescaped'", ")", ")", "else", ":", "raise", "ValueError", "(", "'No escaped character'", ")", "return", "result" ]
helper for :py:func:parse_setup_cmd .
train
false
41,021
def AuthSubTokenFromHttpBody(http_body): token_value = token_from_http_body(http_body) if token_value: return ('%s%s' % (AUTHSUB_AUTH_LABEL, token_value)) return None
[ "def", "AuthSubTokenFromHttpBody", "(", "http_body", ")", ":", "token_value", "=", "token_from_http_body", "(", "http_body", ")", "if", "token_value", ":", "return", "(", "'%s%s'", "%", "(", "AUTHSUB_AUTH_LABEL", ",", "token_value", ")", ")", "return", "None" ]
extracts the authsub token from an http body string .
train
false
41,022
def test_main1(): import tempfile tempdir = tempfile.mkdtemp() arglist = ('--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000' % tempdir) arglist = arglist.split(' ') tf.reset_default_graph() ts2s = CommandLine(arglist=arglist) assert os.path.exists(ts2s.weights_output_fn) os.system(('rm -rf %s' % tempdir))
[ "def", "test_main1", "(", ")", ":", "import", "tempfile", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "arglist", "=", "(", "'--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000'", "%", "tempdir", ")", "arglist", "=", "arglist", ".", "split", "(", "' '", ")", "tf", ".", "reset_default_graph", "(", ")", "ts2s", "=", "CommandLine", "(", "arglist", "=", "arglist", ")", "assert", "os", ".", "path", ".", "exists", "(", "ts2s", ".", "weights_output_fn", ")", "os", ".", "system", "(", "(", "'rm -rf %s'", "%", "tempdir", ")", ")" ]
integration test - training .
train
false
41,023
def _param_may_exist(may_exist): if may_exist: return '--may-exist ' else: return ''
[ "def", "_param_may_exist", "(", "may_exist", ")", ":", "if", "may_exist", ":", "return", "'--may-exist '", "else", ":", "return", "''" ]
returns --may-exist parameter for open vswitch command .
train
false
41,024
def getEvaluatedBoolean(defaultValue, elementNode, key): if (elementNode == None): return defaultValue if (key in elementNode.attributes): return euclidean.getBooleanFromValue(getEvaluatedValueObliviously(elementNode, key)) return defaultValue
[ "def", "getEvaluatedBoolean", "(", "defaultValue", ",", "elementNode", ",", "key", ")", ":", "if", "(", "elementNode", "==", "None", ")", ":", "return", "defaultValue", "if", "(", "key", "in", "elementNode", ".", "attributes", ")", ":", "return", "euclidean", ".", "getBooleanFromValue", "(", "getEvaluatedValueObliviously", "(", "elementNode", ",", "key", ")", ")", "return", "defaultValue" ]
get the evaluated boolean .
train
false
41,026
def get_course_overview_with_access(user, action, course_key, check_if_enrolled=False): try: course_overview = CourseOverview.get_from_id(course_key) except CourseOverview.DoesNotExist: raise Http404('Course not found.') check_course_access(course_overview, user, action, check_if_enrolled) return course_overview
[ "def", "get_course_overview_with_access", "(", "user", ",", "action", ",", "course_key", ",", "check_if_enrolled", "=", "False", ")", ":", "try", ":", "course_overview", "=", "CourseOverview", ".", "get_from_id", "(", "course_key", ")", "except", "CourseOverview", ".", "DoesNotExist", ":", "raise", "Http404", "(", "'Course not found.'", ")", "check_course_access", "(", "course_overview", ",", "user", ",", "action", ",", "check_if_enrolled", ")", "return", "course_overview" ]
given a course_key .
train
false
41,028
def config_present(name, value): name = name.lower() ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} config = _load_config() if isinstance(value, bool): value = ('true' if value else 'false') if (not value): value = '' if (name in config): if (str(config[name]) == str(value)): ret['result'] = True ret['comment'] = 'property {0} already has value "{1}"'.format(name, value) else: ret['result'] = True ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value) ret['changes'][name] = value config[name] = value else: ret['result'] = True ret['comment'] = 'added property {0} with value "{1}"'.format(name, value) ret['changes'][name] = value config[name] = value if ((not __opts__['test']) and (len(ret['changes']) > 0)): ret['result'] = _write_config(config) return ret
[ "def", "config_present", "(", "name", ",", "value", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "config", "=", "_load_config", "(", ")", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "value", "=", "(", "'true'", "if", "value", "else", "'false'", ")", "if", "(", "not", "value", ")", ":", "value", "=", "''", "if", "(", "name", "in", "config", ")", ":", "if", "(", "str", "(", "config", "[", "name", "]", ")", "==", "str", "(", "value", ")", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'property {0} already has value \"{1}\"'", ".", "format", "(", "name", ",", "value", ")", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'updated property {0} with value \"{1}\"'", ".", "format", "(", "name", ",", "value", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "value", "config", "[", "name", "]", "=", "value", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'added property {0} with value \"{1}\"'", ".", "format", "(", "name", ",", "value", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "value", "config", "[", "name", "]", "=", "value", "if", "(", "(", "not", "__opts__", "[", "'test'", "]", ")", "and", "(", "len", "(", "ret", "[", "'changes'", "]", ")", ">", "0", ")", ")", ":", "ret", "[", "'result'", "]", "=", "_write_config", "(", "config", ")", "return", "ret" ]
ensure a specific configuration line exists in the running config name config line to set examples: .
train
false
41,029
def find_module_file(module, dirlist): list = find_file(module, [], dirlist) if (not list): return module if (len(list) > 1): log.info(('WARNING: multiple copies of %s found' % module)) return os.path.join(list[0], module)
[ "def", "find_module_file", "(", "module", ",", "dirlist", ")", ":", "list", "=", "find_file", "(", "module", ",", "[", "]", ",", "dirlist", ")", "if", "(", "not", "list", ")", ":", "return", "module", "if", "(", "len", "(", "list", ")", ">", "1", ")", ":", "log", ".", "info", "(", "(", "'WARNING: multiple copies of %s found'", "%", "module", ")", ")", "return", "os", ".", "path", ".", "join", "(", "list", "[", "0", "]", ",", "module", ")" ]
find a module in a set of possible folders .
train
false
41,030
def monkeypatch_method(cls): def decorator(func): setattr(cls, func.__name__, func) return func return decorator
[ "def", "monkeypatch_method", "(", "cls", ")", ":", "def", "decorator", "(", "func", ")", ":", "setattr", "(", "cls", ",", "func", ".", "__name__", ",", "func", ")", "return", "func", "return", "decorator" ]
a decorator to add a single method to an existing class:: @monkeypatch_method def <newmethod>: pass .
train
true
41,031
def get_all_metrics(limit=None): metrics = [] def get_leafs_of_node(nodepath): for obj in get_matching_metrics(nodepath)['metrics']: if (int(obj['is_leaf']) == 1): metrics.append(obj['path']) else: get_leafs_of_node(obj['path']) get_leafs_of_node('') return metrics
[ "def", "get_all_metrics", "(", "limit", "=", "None", ")", ":", "metrics", "=", "[", "]", "def", "get_leafs_of_node", "(", "nodepath", ")", ":", "for", "obj", "in", "get_matching_metrics", "(", "nodepath", ")", "[", "'metrics'", "]", ":", "if", "(", "int", "(", "obj", "[", "'is_leaf'", "]", ")", "==", "1", ")", ":", "metrics", ".", "append", "(", "obj", "[", "'path'", "]", ")", "else", ":", "get_leafs_of_node", "(", "obj", "[", "'path'", "]", ")", "get_leafs_of_node", "(", "''", ")", "return", "metrics" ]
grabs all metrics by navigating find api recursively .
train
false
41,032
def empty_node_local_state(ideployer): return NodeLocalState(node_state=NodeState(uuid=ideployer.node_uuid, hostname=ideployer.hostname))
[ "def", "empty_node_local_state", "(", "ideployer", ")", ":", "return", "NodeLocalState", "(", "node_state", "=", "NodeState", "(", "uuid", "=", "ideployer", ".", "node_uuid", ",", "hostname", "=", "ideployer", ".", "hostname", ")", ")" ]
constructs an nodelocalstate from an ideployer .
train
false
41,034
def set_networks_kwarg(network, kwargs=None): params = (copy.copy(kwargs) or {}) if (kwargs and ('networks' in kwargs)): return params if network: if ('id' in network.keys()): params.update({'networks': [{'uuid': network['id']}]}) else: LOG.warning('The provided network dict: %s was invalid and did not contain an id', network) return params
[ "def", "set_networks_kwarg", "(", "network", ",", "kwargs", "=", "None", ")", ":", "params", "=", "(", "copy", ".", "copy", "(", "kwargs", ")", "or", "{", "}", ")", "if", "(", "kwargs", "and", "(", "'networks'", "in", "kwargs", ")", ")", ":", "return", "params", "if", "network", ":", "if", "(", "'id'", "in", "network", ".", "keys", "(", ")", ")", ":", "params", ".", "update", "(", "{", "'networks'", ":", "[", "{", "'uuid'", ":", "network", "[", "'id'", "]", "}", "]", "}", ")", "else", ":", "LOG", ".", "warning", "(", "'The provided network dict: %s was invalid and did not contain an id'", ",", "network", ")", "return", "params" ]
set networks kwargs for a server create if missing .
train
false
41,035
def _retry_recalculate_subsection_grade(task_func, exc=None, **kwargs): task_func.retry(kwargs=kwargs, exc=exc)
[ "def", "_retry_recalculate_subsection_grade", "(", "task_func", ",", "exc", "=", "None", ",", "**", "kwargs", ")", ":", "task_func", ".", "retry", "(", "kwargs", "=", "kwargs", ",", "exc", "=", "exc", ")" ]
calls retry for the recalculate_subsection_grade task with the given inputs .
train
false
41,036
def get_settings_var(directory=DIRECTORY): settings_var = {} settings_var['EXTENSIONS'] = EXTENSIONS settings_var['SELECT_FORMATS'] = SELECT_FORMATS settings_var['ADMIN_VERSIONS'] = ADMIN_VERSIONS settings_var['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL settings_var['MAX_UPLOAD_SIZE'] = MAX_UPLOAD_SIZE settings_var['NORMALIZE_FILENAME'] = NORMALIZE_FILENAME settings_var['CONVERT_FILENAME'] = CONVERT_FILENAME settings_var['SEARCH_TRAVERSE'] = SEARCH_TRAVERSE return settings_var
[ "def", "get_settings_var", "(", "directory", "=", "DIRECTORY", ")", ":", "settings_var", "=", "{", "}", "settings_var", "[", "'EXTENSIONS'", "]", "=", "EXTENSIONS", "settings_var", "[", "'SELECT_FORMATS'", "]", "=", "SELECT_FORMATS", "settings_var", "[", "'ADMIN_VERSIONS'", "]", "=", "ADMIN_VERSIONS", "settings_var", "[", "'ADMIN_THUMBNAIL'", "]", "=", "ADMIN_THUMBNAIL", "settings_var", "[", "'MAX_UPLOAD_SIZE'", "]", "=", "MAX_UPLOAD_SIZE", "settings_var", "[", "'NORMALIZE_FILENAME'", "]", "=", "NORMALIZE_FILENAME", "settings_var", "[", "'CONVERT_FILENAME'", "]", "=", "CONVERT_FILENAME", "settings_var", "[", "'SEARCH_TRAVERSE'", "]", "=", "SEARCH_TRAVERSE", "return", "settings_var" ]
get settings variables used for filebrowser listing .
train
false
41,038
def uniform(stdev, size): return numpy.random.uniform(low=((- stdev) * numpy.sqrt(3)), high=(stdev * numpy.sqrt(3)), size=size).astype(theano.config.floatX)
[ "def", "uniform", "(", "stdev", ",", "size", ")", ":", "return", "numpy", ".", "random", ".", "uniform", "(", "low", "=", "(", "(", "-", "stdev", ")", "*", "numpy", ".", "sqrt", "(", "3", ")", ")", ",", "high", "=", "(", "stdev", "*", "numpy", ".", "sqrt", "(", "3", ")", ")", ",", "size", "=", "size", ")", ".", "astype", "(", "theano", ".", "config", ".", "floatX", ")" ]
returns an array of uniformly-distributed samples over an interval .
train
false
41,040
def get_(*keyname): mdata = _check_mdata_get() ret = {} if (len(keyname) == 0): keyname = list_() for k in keyname: if mdata: cmd = '{0} {1}'.format(mdata, k) res = __salt__['cmd.run_all'](cmd) ret[k] = (res['stdout'] if (res['retcode'] == 0) else '') else: ret[k] = '' return ret
[ "def", "get_", "(", "*", "keyname", ")", ":", "mdata", "=", "_check_mdata_get", "(", ")", "ret", "=", "{", "}", "if", "(", "len", "(", "keyname", ")", "==", "0", ")", ":", "keyname", "=", "list_", "(", ")", "for", "k", "in", "keyname", ":", "if", "mdata", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "mdata", ",", "k", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "ret", "[", "k", "]", "=", "(", "res", "[", "'stdout'", "]", "if", "(", "res", "[", "'retcode'", "]", "==", "0", ")", "else", "''", ")", "else", ":", "ret", "[", "k", "]", "=", "''", "return", "ret" ]
get metadata keyname : string name of key .
train
false
41,041
def trigger_event(event=None, **kwargs): res = {'result': False, 'message': 'Something went wrong'} data = {} for value in ('value1', 'value2', 'value3', 'Value1', 'Value2', 'Value3'): if (value in kwargs): data[value.lower()] = kwargs[value] data['occurredat'] = time.strftime('%B %d, %Y %I:%M%p', time.localtime()) result = _query(event=event, method='POST', data=json.dumps(data)) if ('status' in result): if (result['status'] == 200): res['result'] = True res['message'] = result['text'] elif ('error' in result): res['message'] = result['error'] return res
[ "def", "trigger_event", "(", "event", "=", "None", ",", "**", "kwargs", ")", ":", "res", "=", "{", "'result'", ":", "False", ",", "'message'", ":", "'Something went wrong'", "}", "data", "=", "{", "}", "for", "value", "in", "(", "'value1'", ",", "'value2'", ",", "'value3'", ",", "'Value1'", ",", "'Value2'", ",", "'Value3'", ")", ":", "if", "(", "value", "in", "kwargs", ")", ":", "data", "[", "value", ".", "lower", "(", ")", "]", "=", "kwargs", "[", "value", "]", "data", "[", "'occurredat'", "]", "=", "time", ".", "strftime", "(", "'%B %d, %Y %I:%M%p'", ",", "time", ".", "localtime", "(", ")", ")", "result", "=", "_query", "(", "event", "=", "event", ",", "method", "=", "'POST'", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "if", "(", "'status'", "in", "result", ")", ":", "if", "(", "result", "[", "'status'", "]", "==", "200", ")", ":", "res", "[", "'result'", "]", "=", "True", "res", "[", "'message'", "]", "=", "result", "[", "'text'", "]", "elif", "(", "'error'", "in", "result", ")", ":", "res", "[", "'message'", "]", "=", "result", "[", "'error'", "]", "return", "res" ]
trigger a configured event in ifttt .
train
true
41,042
def skip_regex(lines_enum, options): skip_regex = (options.skip_requirements_regex if options else None) if skip_regex: pattern = re.compile(skip_regex) lines_enum = filterfalse((lambda e: pattern.search(e[1])), lines_enum) return lines_enum
[ "def", "skip_regex", "(", "lines_enum", ",", "options", ")", ":", "skip_regex", "=", "(", "options", ".", "skip_requirements_regex", "if", "options", "else", "None", ")", "if", "skip_regex", ":", "pattern", "=", "re", ".", "compile", "(", "skip_regex", ")", "lines_enum", "=", "filterfalse", "(", "(", "lambda", "e", ":", "pattern", ".", "search", "(", "e", "[", "1", "]", ")", ")", ",", "lines_enum", ")", "return", "lines_enum" ]
skip lines that match --skip-requirements-regex pattern note: the regex pattern is only built once .
train
true
41,043
@retry_on_failure def test_getfqdn(): pass
[ "@", "retry_on_failure", "def", "test_getfqdn", "(", ")", ":", "pass" ]
tests socket .
train
false
41,044
@pytest.mark.parametrize(u'testframe', totest_frames) def test_gcrs_altaz_bothroutes(testframe): sun = get_sun(testframe.obstime) sunaa_viaicrs = sun.transform_to(ICRS).transform_to(testframe) sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) moon = GCRS(MOONDIST_CART, obstime=testframe.obstime) moonaa_viaicrs = moon.transform_to(ICRS).transform_to(testframe) moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz) assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "u'testframe'", ",", "totest_frames", ")", "def", "test_gcrs_altaz_bothroutes", "(", "testframe", ")", ":", "sun", "=", "get_sun", "(", "testframe", ".", "obstime", ")", "sunaa_viaicrs", "=", "sun", ".", "transform_to", "(", "ICRS", ")", ".", "transform_to", "(", "testframe", ")", "sunaa_viaitrs", "=", "sun", ".", "transform_to", "(", "ITRS", "(", "obstime", "=", "testframe", ".", "obstime", ")", ")", ".", "transform_to", "(", "testframe", ")", "moon", "=", "GCRS", "(", "MOONDIST_CART", ",", "obstime", "=", "testframe", ".", "obstime", ")", "moonaa_viaicrs", "=", "moon", ".", "transform_to", "(", "ICRS", ")", ".", "transform_to", "(", "testframe", ")", "moonaa_viaitrs", "=", "moon", ".", "transform_to", "(", "ITRS", "(", "obstime", "=", "testframe", ".", "obstime", ")", ")", ".", "transform_to", "(", "testframe", ")", "assert_allclose", "(", "sunaa_viaicrs", ".", "cartesian", ".", "xyz", ",", "sunaa_viaitrs", ".", "cartesian", ".", "xyz", ")", "assert_allclose", "(", "moonaa_viaicrs", ".", "cartesian", ".", "xyz", ",", "moonaa_viaitrs", ".", "cartesian", ".", "xyz", ")" ]
repeat of both the moonish and sunish tests above to make sure the two routes through the coordinate graph are consistent with each other .
train
false
41,046
def mirror_x(clip, apply_to='mask'): return clip.fl_image((lambda f: f[:, ::(-1)]), apply_to=apply_to)
[ "def", "mirror_x", "(", "clip", ",", "apply_to", "=", "'mask'", ")", ":", "return", "clip", ".", "fl_image", "(", "(", "lambda", "f", ":", "f", "[", ":", ",", ":", ":", "(", "-", "1", ")", "]", ")", ",", "apply_to", "=", "apply_to", ")" ]
flips the clip horizontally .
train
false
41,048
def hashed(source_filename, prepared_options, thumbnail_extension, **kwargs): parts = u':'.join(([source_filename] + prepared_options)) short_sha = hashlib.sha1(parts.encode(u'utf-8')).digest() short_hash = base64.urlsafe_b64encode(short_sha[:9]).decode(u'utf-8') return u'.'.join([short_hash, thumbnail_extension])
[ "def", "hashed", "(", "source_filename", ",", "prepared_options", ",", "thumbnail_extension", ",", "**", "kwargs", ")", ":", "parts", "=", "u':'", ".", "join", "(", "(", "[", "source_filename", "]", "+", "prepared_options", ")", ")", "short_sha", "=", "hashlib", ".", "sha1", "(", "parts", ".", "encode", "(", "u'utf-8'", ")", ")", ".", "digest", "(", ")", "short_hash", "=", "base64", ".", "urlsafe_b64encode", "(", "short_sha", "[", ":", "9", "]", ")", ".", "decode", "(", "u'utf-8'", ")", "return", "u'.'", ".", "join", "(", "[", "short_hash", ",", "thumbnail_extension", "]", ")" ]
generate a short hashed thumbnail filename .
train
true
41,049
def mergedicts(d1, d2, remove=True): _setmerged(d1, d2) for k in d2: if ((k in d1) and isinstance(d1[k], dict) and isinstance(d2[k], dict)): mergedicts(d1[k], d2[k], remove) elif (remove and (d2[k] is REMOVE_THIS_KEY)): d1.pop(k, None) else: if (remove and isinstance(d2[k], dict)): _clear_special_values(d2[k]) d1[k] = d2[k]
[ "def", "mergedicts", "(", "d1", ",", "d2", ",", "remove", "=", "True", ")", ":", "_setmerged", "(", "d1", ",", "d2", ")", "for", "k", "in", "d2", ":", "if", "(", "(", "k", "in", "d1", ")", "and", "isinstance", "(", "d1", "[", "k", "]", ",", "dict", ")", "and", "isinstance", "(", "d2", "[", "k", "]", ",", "dict", ")", ")", ":", "mergedicts", "(", "d1", "[", "k", "]", ",", "d2", "[", "k", "]", ",", "remove", ")", "elif", "(", "remove", "and", "(", "d2", "[", "k", "]", "is", "REMOVE_THIS_KEY", ")", ")", ":", "d1", ".", "pop", "(", "k", ",", "None", ")", "else", ":", "if", "(", "remove", "and", "isinstance", "(", "d2", "[", "k", "]", ",", "dict", ")", ")", ":", "_clear_special_values", "(", "d2", "[", "k", "]", ")", "d1", "[", "k", "]", "=", "d2", "[", "k", "]" ]
recursively merge two dictionaries first dictionary is modified in-place .
train
false
41,050
def remove_site(name): pscmd = [] current_sites = list_sites() if (name not in current_sites): _LOG.debug('Site already absent: %s', name) return True pscmd.append("Remove-WebSite -Name '{0}'".format(name)) cmd_ret = _srvmgr(str().join(pscmd)) if (cmd_ret['retcode'] == 0): _LOG.debug('Site removed successfully: %s', name) return True _LOG.error('Unable to remove site: %s', name) return False
[ "def", "remove_site", "(", "name", ")", ":", "pscmd", "=", "[", "]", "current_sites", "=", "list_sites", "(", ")", "if", "(", "name", "not", "in", "current_sites", ")", ":", "_LOG", ".", "debug", "(", "'Site already absent: %s'", ",", "name", ")", "return", "True", "pscmd", ".", "append", "(", "\"Remove-WebSite -Name '{0}'\"", ".", "format", "(", "name", ")", ")", "cmd_ret", "=", "_srvmgr", "(", "str", "(", ")", ".", "join", "(", "pscmd", ")", ")", "if", "(", "cmd_ret", "[", "'retcode'", "]", "==", "0", ")", ":", "_LOG", ".", "debug", "(", "'Site removed successfully: %s'", ",", "name", ")", "return", "True", "_LOG", ".", "error", "(", "'Unable to remove site: %s'", ",", "name", ")", "return", "False" ]
delete a website from iis .
train
false
41,051
def freemem(): return utils.run('sync && echo 3 > /proc/sys/vm/drop_caches')
[ "def", "freemem", "(", ")", ":", "return", "utils", ".", "run", "(", "'sync && echo 3 > /proc/sys/vm/drop_caches'", ")" ]
return an int representing the amount of memory that has not been given to virtual machines on this node cli example: .
train
false
41,053
def create_vlan_interface(namespace, port_name, mac_address, ip_address, vlan_tag): ip_wrap = ip_lib.IPWrapper(namespace) dev_name = ('%s.%d' % (port_name, vlan_tag)) ip_wrap.add_vlan(dev_name, port_name, vlan_tag) dev = ip_wrap.device(dev_name) dev.addr.add(str(ip_address)) dev.link.set_address(mac_address) dev.link.set_up() return dev
[ "def", "create_vlan_interface", "(", "namespace", ",", "port_name", ",", "mac_address", ",", "ip_address", ",", "vlan_tag", ")", ":", "ip_wrap", "=", "ip_lib", ".", "IPWrapper", "(", "namespace", ")", "dev_name", "=", "(", "'%s.%d'", "%", "(", "port_name", ",", "vlan_tag", ")", ")", "ip_wrap", ".", "add_vlan", "(", "dev_name", ",", "port_name", ",", "vlan_tag", ")", "dev", "=", "ip_wrap", ".", "device", "(", "dev_name", ")", "dev", ".", "addr", ".", "add", "(", "str", "(", "ip_address", ")", ")", "dev", ".", "link", ".", "set_address", "(", "mac_address", ")", "dev", ".", "link", ".", "set_up", "(", ")", "return", "dev" ]
create a vlan interface in namespace with ip address .
train
false
41,054
def distrib_codename(): with settings(hide('running', 'stdout')): return run('lsb_release --codename --short')
[ "def", "distrib_codename", "(", ")", ":", "with", "settings", "(", "hide", "(", "'running'", ",", "'stdout'", ")", ")", ":", "return", "run", "(", "'lsb_release --codename --short'", ")" ]
get the codename of the linux distribution .
train
false
41,055
def get_default_username(distribution): return 'root'
[ "def", "get_default_username", "(", "distribution", ")", ":", "return", "'root'" ]
return the username available by default on a system .
train
false
41,056
def _is_one_arg_pos_call(call): return (isinstance(call, astroid.CallFunc) and (len(call.args) == 1) and (not isinstance(call.args[0], astroid.Keyword)))
[ "def", "_is_one_arg_pos_call", "(", "call", ")", ":", "return", "(", "isinstance", "(", "call", ",", "astroid", ".", "CallFunc", ")", "and", "(", "len", "(", "call", ".", "args", ")", "==", "1", ")", "and", "(", "not", "isinstance", "(", "call", ".", "args", "[", "0", "]", ",", "astroid", ".", "Keyword", ")", ")", ")" ]
is this a call with exactly 1 argument .
train
false
41,057
def get_debug_info(func): def debug_info_replacement(self, **form): try: if ('debugcount' not in form): raise ValueError('You must provide a debugcount parameter') debugcount = form.pop('debugcount') try: debugcount = int(debugcount) except ValueError: raise ValueError('Bad value for debugcount') if (debugcount not in self.debug_infos): raise ValueError(('Debug %s no longer found (maybe it has expired?)' % debugcount)) debug_info = self.debug_infos[debugcount] return func(self, debug_info=debug_info, **form) except ValueError as e: form['headers']['status'] = '500 Server Error' return ('<html>There was an error: %s</html>' % html_quote(e)) return debug_info_replacement
[ "def", "get_debug_info", "(", "func", ")", ":", "def", "debug_info_replacement", "(", "self", ",", "**", "form", ")", ":", "try", ":", "if", "(", "'debugcount'", "not", "in", "form", ")", ":", "raise", "ValueError", "(", "'You must provide a debugcount parameter'", ")", "debugcount", "=", "form", ".", "pop", "(", "'debugcount'", ")", "try", ":", "debugcount", "=", "int", "(", "debugcount", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Bad value for debugcount'", ")", "if", "(", "debugcount", "not", "in", "self", ".", "debug_infos", ")", ":", "raise", "ValueError", "(", "(", "'Debug %s no longer found (maybe it has expired?)'", "%", "debugcount", ")", ")", "debug_info", "=", "self", ".", "debug_infos", "[", "debugcount", "]", "return", "func", "(", "self", ",", "debug_info", "=", "debug_info", ",", "**", "form", ")", "except", "ValueError", "as", "e", ":", "form", "[", "'headers'", "]", "[", "'status'", "]", "=", "'500 Server Error'", "return", "(", "'<html>There was an error: %s</html>'", "%", "html_quote", "(", "e", ")", ")", "return", "debug_info_replacement" ]
a decorator (meant to be used under wsgiapp()) that resolves the debugcount variable to a debuginfo object .
train
false
41,058
def addLineXSegmentIntersection(lineLoopsIntersections, segmentFirstX, segmentSecondX, vector3First, vector3Second, y): xIntersection = euclidean.getXIntersectionIfExists(vector3First, vector3Second, y) if (xIntersection == None): return if (xIntersection < min(segmentFirstX, segmentSecondX)): return if (xIntersection <= max(segmentFirstX, segmentSecondX)): lineLoopsIntersections.append(xIntersection)
[ "def", "addLineXSegmentIntersection", "(", "lineLoopsIntersections", ",", "segmentFirstX", ",", "segmentSecondX", ",", "vector3First", ",", "vector3Second", ",", "y", ")", ":", "xIntersection", "=", "euclidean", ".", "getXIntersectionIfExists", "(", "vector3First", ",", "vector3Second", ",", "y", ")", "if", "(", "xIntersection", "==", "None", ")", ":", "return", "if", "(", "xIntersection", "<", "min", "(", "segmentFirstX", ",", "segmentSecondX", ")", ")", ":", "return", "if", "(", "xIntersection", "<=", "max", "(", "segmentFirstX", ",", "segmentSecondX", ")", ")", ":", "lineLoopsIntersections", ".", "append", "(", "xIntersection", ")" ]
add intersections of the line with the x segment .
train
false
41,059
@synchronized(DIR_LOCK) def create_dirs(dirpath): if (not os.path.exists(dirpath)): logging.info('Creating directories: %s', dirpath) if (not create_all_dirs(dirpath, True)): logging.error(T('Failed making (%s)'), clip_path(dirpath)) return None return dirpath
[ "@", "synchronized", "(", "DIR_LOCK", ")", "def", "create_dirs", "(", "dirpath", ")", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "dirpath", ")", ")", ":", "logging", ".", "info", "(", "'Creating directories: %s'", ",", "dirpath", ")", "if", "(", "not", "create_all_dirs", "(", "dirpath", ",", "True", ")", ")", ":", "logging", ".", "error", "(", "T", "(", "'Failed making (%s)'", ")", ",", "clip_path", "(", "dirpath", ")", ")", "return", "None", "return", "dirpath" ]
create directory tree .
train
false
41,060
def _make_upload_dt(): return datetime.datetime.utcnow().replace(tzinfo=utc)
[ "def", "_make_upload_dt", "(", ")", ":", "return", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")" ]
generate a server-side timestamp for the upload .
train
false
41,061
def update_jail(name): if is_jail(name): cmd = 'poudriere jail -u -j {0}'.format(name) ret = __salt__['cmd.run'](cmd) return ret else: return 'Could not find jail {0}'.format(name)
[ "def", "update_jail", "(", "name", ")", ":", "if", "is_jail", "(", "name", ")", ":", "cmd", "=", "'poudriere jail -u -j {0}'", ".", "format", "(", "name", ")", "ret", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "return", "ret", "else", ":", "return", "'Could not find jail {0}'", ".", "format", "(", "name", ")" ]
run freebsd-update on name poudriere jail cli example: .
train
true
41,062
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false