id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
22,823
def apt_sources(attrs=None, where=None):
    """Return apt_sources information from osquery (Debian-family systems only)."""
    if __grains__['os_family'] != 'Debian':
        return {'result': False, 'comment': 'Only available on Debian based systems.'}
    return _osquery_cmd(table='apt_sources', attrs=attrs, where=where)
[ "def", "apt_sources", "(", "attrs", "=", "None", ",", "where", "=", "None", ")", ":", "if", "(", "__grains__", "[", "'os_family'", "]", "==", "'Debian'", ")", ":", "return", "_osquery_cmd", "(", "table", "=", "'apt_sources'", ",", "attrs", "=", "attrs", ",", "where", "=", "where", ")", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'Only available on Debian based systems.'", "}" ]
return apt_sources information from osquery cli example: .
train
true
22,825
def _getText(nodelist): rc = [] for node in nodelist: if (node.nodeType == node.TEXT_NODE): rc.append(node.data) return ''.join(rc)
[ "def", "_getText", "(", "nodelist", ")", ":", "rc", "=", "[", "]", "for", "node", "in", "nodelist", ":", "if", "(", "node", ".", "nodeType", "==", "node", ".", "TEXT_NODE", ")", ":", "rc", ".", "append", "(", "node", ".", "data", ")", "return", "''", ".", "join", "(", "rc", ")" ]
simple function to return value from xml .
train
true
22,826
def cpuload():
    """Return the current CPU load percentage parsed from ``wmic cpu`` output (Windows)."""
    output = __salt__['cmd.run'](list2cmdline(['wmic', 'cpu'])).split('\r\n')
    # Locate the LoadPercentage column in the header row, then read the value
    # from the same column span in the first data row.
    begin = output[0].index('LoadPercentage')
    finish = output[1].index(' ', begin + 1)
    return int(output[1][begin:finish])
[ "def", "cpuload", "(", ")", ":", "cmd", "=", "list2cmdline", "(", "[", "'wmic'", ",", "'cpu'", "]", ")", "info", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "split", "(", "'\\r\\n'", ")", "column", "=", "info", "[", "0", "]", ".", "index", "(", "'LoadPercentage'", ")", "end", "=", "info", "[", "1", "]", ".", "index", "(", "' '", ",", "(", "column", "+", "1", ")", ")", "return", "int", "(", "info", "[", "1", "]", "[", "column", ":", "end", "]", ")" ]
calculate average cpu usage .
train
false
22,827
def normalize_interface(if_name):
    """Return the normalized (canonical) interface name for *if_name*."""
    def _digits_of(name):
        # Keep digits and '/' so names like "Eth1/2" yield "1/2".
        return ''.join(ch for ch in name if ch.isdigit() or ch == '/')

    lowered = if_name.lower()
    if_type = None
    # First matching two-letter prefix wins, same precedence as the original chain.
    for prefix, full_name in (('et', 'Ethernet'), ('vl', 'Vlan'), ('lo', 'loopback'),
                              ('po', 'port-channel'), ('nv', 'nve')):
        if lowered.startswith(prefix):
            if_type = full_name
            break

    parts = if_name.split(' ')
    if len(parts) == 2:
        number = parts[-1].strip()
    else:
        number = _digits_of(if_name)

    if if_type:
        return if_type + number
    return if_name
[ "def", "normalize_interface", "(", "if_name", ")", ":", "def", "_get_number", "(", "if_name", ")", ":", "digits", "=", "''", "for", "char", "in", "if_name", ":", "if", "(", "char", ".", "isdigit", "(", ")", "or", "(", "char", "==", "'/'", ")", ")", ":", "digits", "+=", "char", "return", "digits", "if", "if_name", ".", "lower", "(", ")", ".", "startswith", "(", "'et'", ")", ":", "if_type", "=", "'Ethernet'", "elif", "if_name", ".", "lower", "(", ")", ".", "startswith", "(", "'vl'", ")", ":", "if_type", "=", "'Vlan'", "elif", "if_name", ".", "lower", "(", ")", ".", "startswith", "(", "'lo'", ")", ":", "if_type", "=", "'loopback'", "elif", "if_name", ".", "lower", "(", ")", ".", "startswith", "(", "'po'", ")", ":", "if_type", "=", "'port-channel'", "elif", "if_name", ".", "lower", "(", ")", ".", "startswith", "(", "'nv'", ")", ":", "if_type", "=", "'nve'", "else", ":", "if_type", "=", "None", "number_list", "=", "if_name", ".", "split", "(", "' '", ")", "if", "(", "len", "(", "number_list", ")", "==", "2", ")", ":", "number", "=", "number_list", "[", "(", "-", "1", ")", "]", ".", "strip", "(", ")", "else", ":", "number", "=", "_get_number", "(", "if_name", ")", "if", "if_type", ":", "proper_interface", "=", "(", "if_type", "+", "number", ")", "else", ":", "proper_interface", "=", "if_name", "return", "proper_interface" ]
return the normalized interface name .
train
false
22,828
def dag_state(args):
    """Print the state of the DagRun for the given execution date (CLI helper)."""
    dag = get_dag(args)
    runs = DagRun.find(dag.dag_id, execution_date=args.execution_date)
    state = runs[0].state if len(runs) > 0 else None
    print(state)
[ "def", "dag_state", "(", "args", ")", ":", "dag", "=", "get_dag", "(", "args", ")", "dr", "=", "DagRun", ".", "find", "(", "dag", ".", "dag_id", ",", "execution_date", "=", "args", ".", "execution_date", ")", "print", "(", "(", "dr", "[", "0", "]", ".", "state", "if", "(", "len", "(", "dr", ")", ">", "0", ")", "else", "None", ")", ")" ]
returns the state of a dagrun at the command line .
train
true
22,829
def _open_yaml_file(file_str):
    """Open and parse a YAML file, exiting the process on parse errors.

    ``file_str`` is a path-like object exposing ``open()`` (e.g. a pathlib.Path).
    Returns the parsed YAML document, or terminates via ``sys.exit(1)`` on error.
    """
    with file_str.open(mode='r') as file:
        try:
            # SECURITY FIX: yaml.load without an explicit Loader can construct
            # arbitrary Python objects from tagged YAML; safe_load restricts
            # parsing to plain data types (and silences the deprecation warning).
            return yaml.safe_load(file)
        except yaml.YAMLError as exc:
            LOGGER.error('error in configuration file: %s' % str(exc))
            sys.exit(1)
[ "def", "_open_yaml_file", "(", "file_str", ")", ":", "with", "file_str", ".", "open", "(", "mode", "=", "'r'", ")", "as", "file", ":", "try", ":", "yaml_file", "=", "yaml", ".", "load", "(", "file", ")", "return", "yaml_file", "except", "yaml", ".", "YAMLError", "as", "exc", ":", "LOGGER", ".", "error", "(", "(", "'error in configuration file: %s'", "%", "str", "(", "exc", ")", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
opens yaml file .
train
false
22,830
@app.route('/robots.txt')
def view_robots_page():
    """Serve the robots.txt body as a plain-text response."""
    response = make_response()
    response.content_type = 'text/plain'
    response.data = ROBOT_TXT
    return response
[ "@", "app", ".", "route", "(", "'/robots.txt'", ")", "def", "view_robots_page", "(", ")", ":", "response", "=", "make_response", "(", ")", "response", ".", "data", "=", "ROBOT_TXT", "response", ".", "content_type", "=", "'text/plain'", "return", "response" ]
serve the robots . txt file as a plain-text response .
train
false
22,834
def merge_hash(a, b):
    """Recursively merge mapping *b* into *a*; keys from *b* take precedence.

    Neither input is mutated; a merged copy is returned.
    """
    _validate_mutable_mappings(a, b)
    # Trivial cases: nothing in a, or identical inputs -> a copy of b suffices.
    if a == {} or a == b:
        return b.copy()
    result = a.copy()
    for key, value in iteritems(b):
        # Recurse only when both sides hold nested mappings for this key.
        if key in result and isinstance(result[key], MutableMapping) and isinstance(value, MutableMapping):
            result[key] = merge_hash(result[key], value)
        else:
            result[key] = value
    return result
[ "def", "merge_hash", "(", "a", ",", "b", ")", ":", "_validate_mutable_mappings", "(", "a", ",", "b", ")", "if", "(", "(", "a", "==", "{", "}", ")", "or", "(", "a", "==", "b", ")", ")", ":", "return", "b", ".", "copy", "(", ")", "result", "=", "a", ".", "copy", "(", ")", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "b", ")", ":", "if", "(", "(", "k", "in", "result", ")", "and", "isinstance", "(", "result", "[", "k", "]", ",", "MutableMapping", ")", "and", "isinstance", "(", "v", ",", "MutableMapping", ")", ")", ":", "result", "[", "k", "]", "=", "merge_hash", "(", "result", "[", "k", "]", ",", "v", ")", "else", ":", "result", "[", "k", "]", "=", "v", "return", "result" ]
recursively merges hash b into a so that keys from b take precedence over keys from a .
train
false
22,835
def spectrogram(x, fs=1.0, window=('tukey', 0.25), nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=(-1), mode='psd'):
    """Compute a spectrogram with consecutive Fourier transforms."""
    window, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
    if noverlap is None:
        # Default overlap is one eighth of the segment length.
        noverlap = nperseg // 8
    freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft,
                                        detrend, return_onesided, scaling, axis,
                                        mode=mode)
    return (freqs, time, Pxy)
[ "def", "spectrogram", "(", "x", ",", "fs", "=", "1.0", ",", "window", "=", "(", "'tukey'", ",", "0.25", ")", ",", "nperseg", "=", "None", ",", "noverlap", "=", "None", ",", "nfft", "=", "None", ",", "detrend", "=", "'constant'", ",", "return_onesided", "=", "True", ",", "scaling", "=", "'density'", ",", "axis", "=", "(", "-", "1", ")", ",", "mode", "=", "'psd'", ")", ":", "(", "window", ",", "nperseg", ")", "=", "_triage_segments", "(", "window", ",", "nperseg", ",", "input_length", "=", "x", ".", "shape", "[", "(", "-", "1", ")", "]", ")", "if", "(", "noverlap", "is", "None", ")", ":", "noverlap", "=", "(", "nperseg", "//", "8", ")", "(", "freqs", ",", "time", ",", "Pxy", ")", "=", "_spectral_helper", "(", "x", ",", "x", ",", "fs", ",", "window", ",", "nperseg", ",", "noverlap", ",", "nfft", ",", "detrend", ",", "return_onesided", ",", "scaling", ",", "axis", ",", "mode", "=", "mode", ")", "return", "(", "freqs", ",", "time", ",", "Pxy", ")" ]
compute a spectrogram with consecutive fourier transforms .
train
false
22,836
def has_sound(sound):
    """Find out if a particular Windows event *sound* has a default sound configured.

    Returns True when a non-empty sound value is registered for the event,
    False otherwise (including when no mixer devices exist or on registry errors).
    """
    try:
        # BUG FIX: the original used ``is 0`` and ``is not u''`` — identity
        # comparisons against literals that only work by CPython interning
        # accident (and raise SyntaxWarning on modern interpreters).
        if ctypes.windll.winmm.mixerGetNumDevs() == 0:
            return False
        key = _winreg.OpenKeyEx(_winreg.HKEY_CURRENT_USER, 'AppEvents\\Schemes\\Apps\\.Default\\{0}\\.Default'.format(sound))
        value = _winreg.EnumValue(key, 0)[1]
        return value != u''
    except WindowsError:
        return False
[ "def", "has_sound", "(", "sound", ")", ":", "try", ":", "if", "(", "ctypes", ".", "windll", ".", "winmm", ".", "mixerGetNumDevs", "(", ")", "is", "0", ")", ":", "return", "False", "key", "=", "_winreg", ".", "OpenKeyEx", "(", "_winreg", ".", "HKEY_CURRENT_USER", ",", "'AppEvents\\\\Schemes\\\\Apps\\\\.Default\\\\{0}\\\\.Default'", ".", "format", "(", "sound", ")", ")", "value", "=", "_winreg", ".", "EnumValue", "(", "key", ",", "0", ")", "[", "1", "]", "if", "(", "value", "is", "not", "u''", ")", ":", "return", "True", "else", ":", "return", "False", "except", "WindowsError", ":", "return", "False" ]
find out if a particular event is configured with a default sound .
train
false
22,837
def tkVersionWarning(root):
    """Return a warning string if the Tk version in use is a known-problematic Cocoa build, else False."""
    if not isCocoaTk():
        return False
    patchlevel = root.tk.call('info', 'patchlevel')
    if patchlevel not in ('8.5.7', '8.5.9'):
        return False
    return 'WARNING: The version of Tcl/Tk ({0}) in use may be unstable.\\nVisit http://www.python.org/download/mac/tcltk/ for current information.'.format(patchlevel)
[ "def", "tkVersionWarning", "(", "root", ")", ":", "if", "isCocoaTk", "(", ")", ":", "patchlevel", "=", "root", ".", "tk", ".", "call", "(", "'info'", ",", "'patchlevel'", ")", "if", "(", "patchlevel", "not", "in", "(", "'8.5.7'", ",", "'8.5.9'", ")", ")", ":", "return", "False", "return", "'WARNING: The version of Tcl/Tk ({0}) in use may be unstable.\\\\nVisit http://www.python.org/download/mac/tcltk/ for current information.'", ".", "format", "(", "patchlevel", ")", "else", ":", "return", "False" ]
returns a string warning message if the tk version in use appears to be one known to cause problems with idle .
train
false
22,838
def indentcount_lines(lines):
    """Return the minimum indentation across all non-blank lines in *lines* (0 if none)."""
    indents = [len(line) - len(line.lstrip()) for line in lines if line.lstrip()]
    return min(indents) if indents else 0
[ "def", "indentcount_lines", "(", "lines", ")", ":", "indentno", "=", "sys", ".", "maxsize", "for", "line", "in", "lines", ":", "stripped", "=", "line", ".", "lstrip", "(", ")", "if", "stripped", ":", "indentno", "=", "min", "(", "indentno", ",", "(", "len", "(", "line", ")", "-", "len", "(", "stripped", ")", ")", ")", "if", "(", "indentno", "==", "sys", ".", "maxsize", ")", ":", "return", "0", "return", "indentno" ]
minimum indent for all lines in line list .
train
false
22,839
def setRequest(request, openid_request):
    """Store the OpenID request information in the session (None when falsy)."""
    request.session['openid_request'] = openid_request if openid_request else None
[ "def", "setRequest", "(", "request", ",", "openid_request", ")", ":", "if", "openid_request", ":", "request", ".", "session", "[", "'openid_request'", "]", "=", "openid_request", "else", ":", "request", ".", "session", "[", "'openid_request'", "]", "=", "None" ]
store the openid request information in the session .
train
false
22,840
def getTricomplexskewY(transformWords):
    """Get the skewY transform matrix (tricomplex form) from *transformWords*.

    The first word is interpreted as an angle in degrees.
    """
    angle = math.radians(float(transformWords[0]))
    return [complex(1.0, math.tan(angle)), complex(0.0, 1.0), complex()]
[ "def", "getTricomplexskewY", "(", "transformWords", ")", ":", "skewY", "=", "math", ".", "tan", "(", "math", ".", "radians", "(", "float", "(", "transformWords", "[", "0", "]", ")", ")", ")", "return", "[", "complex", "(", "1.0", ",", "skewY", ")", ",", "complex", "(", "0.0", ",", "1.0", ")", ",", "complex", "(", ")", "]" ]
get matrixsvg by transformwords .
train
false
22,841
def get_sensibleness(state):
    """Feature plane marking moves that are legal and do not fill the player's own eyes."""
    feature = np.zeros((1, state.size, state.size))
    for x, y in state.get_legal_moves(include_eyes=False):
        feature[0, x, y] = 1
    return feature
[ "def", "get_sensibleness", "(", "state", ")", ":", "feature", "=", "np", ".", "zeros", "(", "(", "1", ",", "state", ".", "size", ",", "state", ".", "size", ")", ")", "for", "(", "x", ",", "y", ")", "in", "state", ".", "get_legal_moves", "(", "include_eyes", "=", "False", ")", ":", "feature", "[", "(", "0", ",", "x", ",", "y", ")", "]", "=", "1", "return", "feature" ]
a move is sensible if it is legal and if it does not fill the current_players own eye .
train
false
22,842
def _is_unittest_unexpected_success_a_failure(): return (sys.version_info >= (3, 4))
[ "def", "_is_unittest_unexpected_success_a_failure", "(", ")", ":", "return", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ")" ]
return if the test suite should fail if a @expectedfailure unittest test passes .
train
false
22,843
def generate_counts(component, start_date, end_date):
    """Generate per-author credit data (words/changes) for the given component."""
    totals = {}
    for translation in component.translation_set.all():
        rows = Change.objects.content().filter(
            translation=translation,
            timestamp__range=(start_date, end_date),
        ).values_list(u'author__email', u'author__first_name', u'unit__num_words')
        for email, name, words in rows:
            if words is None:
                continue
            entry = totals.get(email)
            if entry is None:
                totals[email] = {u'name': name, u'email': email, u'words': words, u'count': 1}
            else:
                entry[u'words'] += words
                entry[u'count'] += 1
    return list(totals.values())
[ "def", "generate_counts", "(", "component", ",", "start_date", ",", "end_date", ")", ":", "result", "=", "{", "}", "for", "translation", "in", "component", ".", "translation_set", ".", "all", "(", ")", ":", "authors", "=", "Change", ".", "objects", ".", "content", "(", ")", ".", "filter", "(", "translation", "=", "translation", ",", "timestamp__range", "=", "(", "start_date", ",", "end_date", ")", ")", ".", "values_list", "(", "u'author__email'", ",", "u'author__first_name'", ",", "u'unit__num_words'", ")", "for", "(", "email", ",", "name", ",", "words", ")", "in", "authors", ":", "if", "(", "words", "is", "None", ")", ":", "continue", "if", "(", "email", "not", "in", "result", ")", ":", "result", "[", "email", "]", "=", "{", "u'name'", ":", "name", ",", "u'email'", ":", "email", ",", "u'words'", ":", "words", ",", "u'count'", ":", "1", "}", "else", ":", "result", "[", "email", "]", "[", "u'words'", "]", "+=", "words", "result", "[", "email", "]", "[", "u'count'", "]", "+=", "1", "return", "list", "(", "result", ".", "values", "(", ")", ")" ]
generates credits data for given component .
train
false
22,844
def process_rules(app, rules, prefix=''):
    """Add URL routes to the Flask/Werkzeug lookup table for each rule."""
    for rule in rules:
        if callable(rule.view_func_or_data):
            view_func = rule.view_func_or_data
            renderer_name = getattr(rule.renderer, '__name__', rule.renderer.__class__.__name__)
            endpoint = '{}__{}'.format(renderer_name, rule.view_func_or_data.__name__)
            view_functions[endpoint] = rule.view_func_or_data
        else:
            # Plain data: wrap it in a generated view and derive the endpoint
            # name from the routes themselves.
            view_func = data_to_lambda(rule.view_func_or_data)
            endpoint = '__'.join(route.replace('/', '') for route in rule.routes)
        wrapped_view_func = wrap_with_renderer(view_func, rule.renderer, rule.view_kwargs, debug_mode=app.debug)
        for url in rule.routes:
            try:
                app.add_url_rule(prefix + url, endpoint=endpoint + rule.endpoint_suffix, view_func=wrapped_view_func, methods=rule.methods)
            except AssertionError:
                raise AssertionError("URLRule({}, {})'s view function name is overwriting an existing endpoint".format(prefix + url, view_func.__name__ + rule.endpoint_suffix))
[ "def", "process_rules", "(", "app", ",", "rules", ",", "prefix", "=", "''", ")", ":", "for", "rule", "in", "rules", ":", "if", "callable", "(", "rule", ".", "view_func_or_data", ")", ":", "view_func", "=", "rule", ".", "view_func_or_data", "renderer_name", "=", "getattr", "(", "rule", ".", "renderer", ",", "'__name__'", ",", "rule", ".", "renderer", ".", "__class__", ".", "__name__", ")", "endpoint", "=", "'{}__{}'", ".", "format", "(", "renderer_name", ",", "rule", ".", "view_func_or_data", ".", "__name__", ")", "view_functions", "[", "endpoint", "]", "=", "rule", ".", "view_func_or_data", "else", ":", "view_func", "=", "data_to_lambda", "(", "rule", ".", "view_func_or_data", ")", "endpoint", "=", "'__'", ".", "join", "(", "(", "route", ".", "replace", "(", "'/'", ",", "''", ")", "for", "route", "in", "rule", ".", "routes", ")", ")", "wrapped_view_func", "=", "wrap_with_renderer", "(", "view_func", ",", "rule", ".", "renderer", ",", "rule", ".", "view_kwargs", ",", "debug_mode", "=", "app", ".", "debug", ")", "for", "url", "in", "rule", ".", "routes", ":", "try", ":", "app", ".", "add_url_rule", "(", "(", "prefix", "+", "url", ")", ",", "endpoint", "=", "(", "endpoint", "+", "rule", ".", "endpoint_suffix", ")", ",", "view_func", "=", "wrapped_view_func", ",", "methods", "=", "rule", ".", "methods", ")", "except", "AssertionError", ":", "raise", "AssertionError", "(", "\"URLRule({}, {})'s view function name is overwriting an existing endpoint\"", ".", "format", "(", "(", "prefix", "+", "url", ")", ",", "(", "view_func", ".", "__name__", "+", "rule", ".", "endpoint_suffix", ")", ")", ")" ]
add url routes to flask / werkzeug lookup table .
train
false
22,847
def get_transform(bounds, size=1024):
    """Return a TopoJSON transform dict and a point-transforming function.

    *bounds* is (min_lon, min_lat, max_lon, max_lat); points map into a
    ``size`` x ``size`` integer grid.
    """
    tx, ty = bounds[0], bounds[1]
    sx = (bounds[2] - bounds[0]) / size
    sy = (bounds[3] - bounds[1]) / size

    def forward(lon, lat):
        """Transform a longitude and latitude to TopoJSON integer space."""
        return (int(round((lon - tx) / sx)), int(round((lat - ty) / sy)))

    return (dict(translate=(tx, ty), scale=(sx, sy)), forward)
[ "def", "get_transform", "(", "bounds", ",", "size", "=", "1024", ")", ":", "(", "tx", ",", "ty", ")", "=", "(", "bounds", "[", "0", "]", ",", "bounds", "[", "1", "]", ")", "(", "sx", ",", "sy", ")", "=", "(", "(", "(", "bounds", "[", "2", "]", "-", "bounds", "[", "0", "]", ")", "/", "size", ")", ",", "(", "(", "bounds", "[", "3", "]", "-", "bounds", "[", "1", "]", ")", "/", "size", ")", ")", "def", "forward", "(", "lon", ",", "lat", ")", ":", "return", "(", "int", "(", "round", "(", "(", "(", "lon", "-", "tx", ")", "/", "sx", ")", ")", ")", ",", "int", "(", "round", "(", "(", "(", "lat", "-", "ty", ")", "/", "sy", ")", ")", ")", ")", "return", "(", "dict", "(", "translate", "=", "(", "tx", ",", "ty", ")", ",", "scale", "=", "(", "sx", ",", "sy", ")", ")", ",", "forward", ")" ]
return a topojson transform dictionary and a point-transforming function .
train
true
22,848
def read_denoiser_mapping(mapping_fh):
    """Read a denoiser cluster mapping from open file handle *mapping_fh*.

    Each line has the form ``centroid: member member ...``; returns a dict
    mapping centroid -> list of members.
    """
    denoiser_mapping = {}
    for line in mapping_fh:
        if line == '':
            continue
        centroid, members = line.split(':')
        denoiser_mapping[centroid] = members.split()
    return denoiser_mapping
[ "def", "read_denoiser_mapping", "(", "mapping_fh", ")", ":", "denoiser_mapping", "=", "{", "}", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "mapping_fh", ")", ":", "if", "(", "line", "==", "''", ")", ":", "continue", "(", "centroid", ",", "members", ")", "=", "line", ".", "split", "(", "':'", ")", "denoiser_mapping", "[", "centroid", "]", "=", "members", ".", "split", "(", ")", "return", "denoiser_mapping" ]
read the cluster mapping file handle mapping_fh: an open file handle to a cluster file .
train
false
22,849
def _is_include_directive(entry): return (isinstance(entry, list) and (len(entry) == 2) and (entry[0] == 'include') and isinstance(entry[1], str))
[ "def", "_is_include_directive", "(", "entry", ")", ":", "return", "(", "isinstance", "(", "entry", ",", "list", ")", "and", "(", "len", "(", "entry", ")", "==", "2", ")", "and", "(", "entry", "[", "0", "]", "==", "'include'", ")", "and", "isinstance", "(", "entry", "[", "1", "]", ",", "str", ")", ")" ]
checks if an nginx parsed entry is an include directive .
train
false
22,850
def adb_binary_or32bit_support():
    """Check whether the shipped ADB binary can execute (i.e. 32-bit support is present).

    Attempts to run the bundled adb; on failure prints a warning telling the
    user to configure ADB_BINARY in settings.py.
    """
    tools_dir = os.path.join(settings.BASE_DIR, 'DynamicAnalyzer/tools/')
    adb_path = getADB(tools_dir)
    try:
        fnull = open(os.devnull, 'w')
        subprocess.call([adb_path], stdout=fnull, stderr=fnull)
    except Exception:
        msg = "\n[WARNING] You don't have 32 bit execution support enabled or MobSF shipped ADB binary is not compatible with your OS.\nPlease set the 'ADB_BINARY' path in settings.py"
        # BUG FIX: the original compared the *function object* platform.system
        # to 'Windows' (always True); it must be called. Also uses print() so
        # the module parses on Python 3.
        if platform.system() != 'Windows':
            print((Color.BOLD + Color.ORANGE) + msg + Color.END)
        else:
            print(msg)
[ "def", "adb_binary_or32bit_support", "(", ")", ":", "tools_dir", "=", "os", ".", "path", ".", "join", "(", "settings", ".", "BASE_DIR", ",", "'DynamicAnalyzer/tools/'", ")", "adb_path", "=", "getADB", "(", "tools_dir", ")", "try", ":", "fnull", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "subprocess", ".", "call", "(", "[", "adb_path", "]", ",", "stdout", "=", "fnull", ",", "stderr", "=", "fnull", ")", "except", ":", "msg", "=", "\"\\n[WARNING] You don't have 32 bit execution support enabled or MobSF shipped ADB binary is not compatible with your OS.\\nPlease set the 'ADB_BINARY' path in settings.py\"", "if", "(", "platform", ".", "system", "!=", "'Windows'", ")", ":", "print", "(", "(", "(", "Color", ".", "BOLD", "+", "Color", ".", "ORANGE", ")", "+", "msg", ")", "+", "Color", ".", "END", ")", "else", ":", "print", "msg" ]
check if 32bit is supported .
train
false
22,851
def map_run(path, **kwargs):
    """Execute a Salt Cloud map file at *path* and return the run info."""
    return _get_client().map_run(path, **_filter_kwargs(kwargs))
[ "def", "map_run", "(", "path", ",", "**", "kwargs", ")", ":", "client", "=", "_get_client", "(", ")", "info", "=", "client", ".", "map_run", "(", "path", ",", "**", "_filter_kwargs", "(", "kwargs", ")", ")", "return", "info" ]
execute a salt cloud map file .
train
true
22,852
def trim_cxxflags(value):
    """Remove *value* from the CXXFLAGS variable in the make configuration."""
    return trim_var('CXXFLAGS', value)
[ "def", "trim_cxxflags", "(", "value", ")", ":", "return", "trim_var", "(", "'CXXFLAGS'", ",", "value", ")" ]
remove a value from cxxflags variable in the make .
train
false
22,853
def buildFlavorString():
    """Return a string listing the valid build flavors."""
    flavors = ' '.join(sorted(isoURLs))
    return 'valid build flavors: %s' % flavors
[ "def", "buildFlavorString", "(", ")", ":", "return", "(", "'valid build flavors: %s'", "%", "' '", ".", "join", "(", "sorted", "(", "isoURLs", ")", ")", ")" ]
return string listing valid build flavors .
train
false
22,857
def test_skip_pre():
    """Linkification is skipped inside <pre> tags when skip_pre=True."""
    plain = 'http://xx.com <pre>http://xx.com</pre>'
    outside_only = '<a href="http://xx.com" rel="nofollow">http://xx.com</a> <pre>http://xx.com</pre>'
    everywhere = '<a href="http://xx.com" rel="nofollow">http://xx.com</a> <pre><a href="http://xx.com" rel="nofollow">http://xx.com</a></pre>'
    eq_(outside_only, linkify(plain, skip_pre=True))
    eq_(everywhere, linkify(plain))
    # Pre-existing anchors only gain rel="nofollow", regardless of skip_pre.
    pre_linked = '<pre><a href="http://xx.com">xx</a></pre>'
    pre_nofollow = '<pre><a href="http://xx.com" rel="nofollow">xx</a></pre>'
    eq_(pre_nofollow, linkify(pre_linked))
    eq_(pre_nofollow, linkify(pre_linked, skip_pre=True))
[ "def", "test_skip_pre", "(", ")", ":", "simple", "=", "'http://xx.com <pre>http://xx.com</pre>'", "linked", "=", "'<a href=\"http://xx.com\" rel=\"nofollow\">http://xx.com</a> <pre>http://xx.com</pre>'", "all_linked", "=", "'<a href=\"http://xx.com\" rel=\"nofollow\">http://xx.com</a> <pre><a href=\"http://xx.com\" rel=\"nofollow\">http://xx.com</a></pre>'", "eq_", "(", "linked", ",", "linkify", "(", "simple", ",", "skip_pre", "=", "True", ")", ")", "eq_", "(", "all_linked", ",", "linkify", "(", "simple", ")", ")", "already_linked", "=", "'<pre><a href=\"http://xx.com\">xx</a></pre>'", "nofollowed", "=", "'<pre><a href=\"http://xx.com\" rel=\"nofollow\">xx</a></pre>'", "eq_", "(", "nofollowed", ",", "linkify", "(", "already_linked", ")", ")", "eq_", "(", "nofollowed", ",", "linkify", "(", "already_linked", ",", "skip_pre", "=", "True", ")", ")" ]
skip linkification in <pre> tags .
train
false
22,858
def closest_ds_partition(table, ds, before=True, schema='default', metastore_conn_id='metastore_default'):
    """Find the partition date in *table* closest to *ds* ('YYYY-MM-DD'), or None.

    Returns *ds* itself when an exact partition exists; otherwise the closest
    partition date (before *ds* when ``before`` is True) as an ISO string.
    """
    from airflow.hooks.hive_hooks import HiveMetastoreHook
    if '.' in table:
        schema, table = table.split('.')
    hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
    partitions = hook.get_partitions(schema=schema, table_name=table)
    if not partitions:
        return None
    part_vals = [list(p.values())[0] for p in partitions]
    if ds in part_vals:
        return ds
    parsed = [datetime.datetime.strptime(pv, '%Y-%m-%d') for pv in part_vals]
    target = datetime.datetime.strptime(ds, '%Y-%m-%d')
    return _closest_date(target, parsed, before_target=before).isoformat()
[ "def", "closest_ds_partition", "(", "table", ",", "ds", ",", "before", "=", "True", ",", "schema", "=", "'default'", ",", "metastore_conn_id", "=", "'metastore_default'", ")", ":", "from", "airflow", ".", "hooks", ".", "hive_hooks", "import", "HiveMetastoreHook", "if", "(", "'.'", "in", "table", ")", ":", "(", "schema", ",", "table", ")", "=", "table", ".", "split", "(", "'.'", ")", "hh", "=", "HiveMetastoreHook", "(", "metastore_conn_id", "=", "metastore_conn_id", ")", "partitions", "=", "hh", ".", "get_partitions", "(", "schema", "=", "schema", ",", "table_name", "=", "table", ")", "if", "(", "not", "partitions", ")", ":", "return", "None", "part_vals", "=", "[", "list", "(", "p", ".", "values", "(", ")", ")", "[", "0", "]", "for", "p", "in", "partitions", "]", "if", "(", "ds", "in", "part_vals", ")", ":", "return", "ds", "else", ":", "parts", "=", "[", "datetime", ".", "datetime", ".", "strptime", "(", "pv", ",", "'%Y-%m-%d'", ")", "for", "pv", "in", "part_vals", "]", "target_dt", "=", "datetime", ".", "datetime", ".", "strptime", "(", "ds", ",", "'%Y-%m-%d'", ")", "closest_ds", "=", "_closest_date", "(", "target_dt", ",", "parts", ",", "before_target", "=", "before", ")", "return", "closest_ds", ".", "isoformat", "(", ")" ]
this function finds the date in a list closest to the target date .
train
true
22,861
def get_currents_privs():
    """Get all privileges associated with the current process (Windows).

    Returns the TOKEN_PRIVS structure read from the process token.
    """
    GetTokenInformation = ctypes.windll.advapi32.GetTokenInformation
    GetTokenInformation.argtypes = [
        wintypes.HANDLE,
        ctypes.c_uint,
        ctypes.c_void_p,
        wintypes.DWORD,
        ctypes.POINTER(wintypes.DWORD),
    ]
    GetTokenInformation.restype = wintypes.BOOL
    return_length = wintypes.DWORD()
    # First call with a NULL buffer: return_length receives the required size.
    params = [get_process_token(), TOKEN_INFORMATION_CLASS.TokenPrivileges, None, 0, return_length]
    res = GetTokenInformation(*params)
    # Second call fills a buffer of exactly that size.
    buffer = ctypes.create_string_buffer(return_length.value)
    params[2] = buffer
    params[3] = return_length.value
    res = GetTokenInformation(*params)
    assert res > 0, 'Error in second GetTokenInformation (%d)' % res
    return ctypes.cast(buffer, ctypes.POINTER(TOKEN_PRIVS)).contents
[ "def", "get_currents_privs", "(", ")", ":", "GetTokenInformation", "=", "ctypes", ".", "windll", ".", "advapi32", ".", "GetTokenInformation", "GetTokenInformation", ".", "argtypes", "=", "[", "wintypes", ".", "HANDLE", ",", "ctypes", ".", "c_uint", ",", "ctypes", ".", "c_void_p", ",", "wintypes", ".", "DWORD", ",", "ctypes", ".", "POINTER", "(", "wintypes", ".", "DWORD", ")", "]", "GetTokenInformation", ".", "restype", "=", "wintypes", ".", "BOOL", "return_length", "=", "wintypes", ".", "DWORD", "(", ")", "params", "=", "[", "get_process_token", "(", ")", ",", "TOKEN_INFORMATION_CLASS", ".", "TokenPrivileges", ",", "None", ",", "0", ",", "return_length", "]", "res", "=", "GetTokenInformation", "(", "*", "params", ")", "buffer", "=", "ctypes", ".", "create_string_buffer", "(", "return_length", ".", "value", ")", "params", "[", "2", "]", "=", "buffer", "params", "[", "3", "]", "=", "return_length", ".", "value", "res", "=", "GetTokenInformation", "(", "*", "params", ")", "assert", "(", "res", ">", "0", ")", ",", "(", "'Error in second GetTokenInformation (%d)'", "%", "res", ")", "privileges", "=", "ctypes", ".", "cast", "(", "buffer", ",", "ctypes", ".", "POINTER", "(", "TOKEN_PRIVS", ")", ")", ".", "contents", "return", "privileges" ]
get all privileges associated with the current process .
train
false
22,862
def find_ranges(input_list):
    """Collapse runs of consecutive integers in *input_list* into "start->end" strings.

    Runs of length one are omitted from the result.
    """
    if not input_list:
        return []
    ranges = []
    run_start = input_list[0]
    prev = input_list[0]
    for num in input_list[1:]:
        if num == prev + 1:
            prev = num
            continue
        # Run broken: emit it unless it was a single number.
        if run_start != prev:
            ranges.append('{}->{}'.format(run_start, prev))
        run_start = num
        prev = num
    # Close out the final run.
    last = input_list[-1]
    if run_start != last:
        ranges.append('{}->{}'.format(run_start, last))
    return ranges
[ "def", "find_ranges", "(", "input_list", ")", ":", "if", "(", "len", "(", "input_list", ")", "==", "0", ")", ":", "return", "[", "]", "start", "=", "None", "end", "=", "None", "previous", "=", "None", "range_list", "=", "[", "]", "for", "num", "in", "input_list", ":", "if", "(", "start", "is", "None", ")", ":", "start", "=", "num", "previous", "=", "num", "continue", "if", "(", "num", "==", "(", "previous", "+", "1", ")", ")", ":", "previous", "=", "num", "continue", "else", ":", "end", "=", "previous", "if", "(", "start", "!=", "end", ")", ":", "range_list", ".", "append", "(", "'{}->{}'", ".", "format", "(", "start", ",", "end", ")", ")", "start", "=", "num", "previous", "=", "num", "end", "=", "input_list", "[", "(", "-", "1", ")", "]", "if", "(", "start", "!=", "end", ")", ":", "range_list", ".", "append", "(", "'{}->{}'", ".", "format", "(", "start", ",", "end", ")", ")", "return", "range_list" ]
collapse runs of consecutive integers in a list into start->end range strings .
train
false
22,863
def create_pip_index(scratch_directory, packages):
    """Create an ``index.html`` pip index file listing *packages*; return its path."""
    index_file = scratch_directory.child('index.html')
    with index_file.open('w') as output:
        template.flatten(None, PackagesElement(packages), output.write)
    return index_file
[ "def", "create_pip_index", "(", "scratch_directory", ",", "packages", ")", ":", "index_file", "=", "scratch_directory", ".", "child", "(", "'index.html'", ")", "with", "index_file", ".", "open", "(", "'w'", ")", "as", "f", ":", "template", ".", "flatten", "(", "None", ",", "PackagesElement", "(", "packages", ")", ",", "f", ".", "write", ")", "return", "index_file" ]
create an index file for pip .
train
false
22,864
def wait_for_volume_retype(client, volume_id, new_volume_type):
    """Wait until the volume reports *new_volume_type*; raise TimeoutException otherwise."""
    def _current_type():
        # One poll of the volume's current type.
        return client.show_volume(volume_id)['volume']['volume_type']

    current_volume_type = _current_type()
    start = int(time.time())
    while current_volume_type != new_volume_type:
        time.sleep(client.build_interval)
        current_volume_type = _current_type()
        if int(time.time()) - start >= client.build_timeout:
            message = ('Volume %s failed to reach %s volume type (current %s) within the required time (%s s).' % (volume_id, new_volume_type, current_volume_type, client.build_timeout))
            raise exceptions.TimeoutException(message)
[ "def", "wait_for_volume_retype", "(", "client", ",", "volume_id", ",", "new_volume_type", ")", ":", "body", "=", "client", ".", "show_volume", "(", "volume_id", ")", "[", "'volume'", "]", "current_volume_type", "=", "body", "[", "'volume_type'", "]", "start", "=", "int", "(", "time", ".", "time", "(", ")", ")", "while", "(", "current_volume_type", "!=", "new_volume_type", ")", ":", "time", ".", "sleep", "(", "client", ".", "build_interval", ")", "body", "=", "client", ".", "show_volume", "(", "volume_id", ")", "[", "'volume'", "]", "current_volume_type", "=", "body", "[", "'volume_type'", "]", "if", "(", "(", "int", "(", "time", ".", "time", "(", ")", ")", "-", "start", ")", ">=", "client", ".", "build_timeout", ")", ":", "message", "=", "(", "'Volume %s failed to reach %s volume type (current %s) within the required time (%s s).'", "%", "(", "volume_id", ",", "new_volume_type", ",", "current_volume_type", ",", "client", ".", "build_timeout", ")", ")", "raise", "exceptions", ".", "TimeoutException", "(", "message", ")" ]
waits for a volume to have a new volume type .
train
false
22,867
def batch_set_value(tuples): if tuples: assign_ops = [] feed_dict = {} for (x, value) in tuples: value = np.asarray(value) tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0]) if hasattr(x, '_assign_placeholder'): assign_placeholder = x._assign_placeholder assign_op = x._assign_op else: assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape) assign_op = x.assign(assign_placeholder) x._assign_placeholder = assign_placeholder x._assign_op = assign_op assign_ops.append(assign_op) feed_dict[assign_placeholder] = value get_session().run(assign_ops, feed_dict=feed_dict)
[ "def", "batch_set_value", "(", "tuples", ")", ":", "if", "tuples", ":", "assign_ops", "=", "[", "]", "feed_dict", "=", "{", "}", "for", "(", "x", ",", "value", ")", "in", "tuples", ":", "value", "=", "np", ".", "asarray", "(", "value", ")", "tf_dtype", "=", "_convert_string_dtype", "(", "x", ".", "dtype", ".", "name", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "if", "hasattr", "(", "x", ",", "'_assign_placeholder'", ")", ":", "assign_placeholder", "=", "x", ".", "_assign_placeholder", "assign_op", "=", "x", ".", "_assign_op", "else", ":", "assign_placeholder", "=", "tf", ".", "placeholder", "(", "tf_dtype", ",", "shape", "=", "value", ".", "shape", ")", "assign_op", "=", "x", ".", "assign", "(", "assign_placeholder", ")", "x", ".", "_assign_placeholder", "=", "assign_placeholder", "x", ".", "_assign_op", "=", "assign_op", "assign_ops", ".", "append", "(", "assign_op", ")", "feed_dict", "[", "assign_placeholder", "]", "=", "value", "get_session", "(", ")", ".", "run", "(", "assign_ops", ",", "feed_dict", "=", "feed_dict", ")" ]
sets the values of many tensor variables at once .
train
false
22,868
def compute_node_delete(context, compute_id): return IMPL.compute_node_delete(context, compute_id)
[ "def", "compute_node_delete", "(", "context", ",", "compute_id", ")", ":", "return", "IMPL", ".", "compute_node_delete", "(", "context", ",", "compute_id", ")" ]
delete a computenode record .
train
false
22,869
def max_flow_min_cost(G, s, t, capacity='capacity', weight='weight'): maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity) H = nx.DiGraph(G) H.add_node(s, demand=(- maxFlow)) H.add_node(t, demand=maxFlow) return min_cost_flow(H, capacity=capacity, weight=weight)
[ "def", "max_flow_min_cost", "(", "G", ",", "s", ",", "t", ",", "capacity", "=", "'capacity'", ",", "weight", "=", "'weight'", ")", ":", "maxFlow", "=", "nx", ".", "maximum_flow_value", "(", "G", ",", "s", ",", "t", ",", "capacity", "=", "capacity", ")", "H", "=", "nx", ".", "DiGraph", "(", "G", ")", "H", ".", "add_node", "(", "s", ",", "demand", "=", "(", "-", "maxFlow", ")", ")", "H", ".", "add_node", "(", "t", ",", "demand", "=", "maxFlow", ")", "return", "min_cost_flow", "(", "H", ",", "capacity", "=", "capacity", ",", "weight", "=", "weight", ")" ]
return a maximum -flow of minimum cost .
train
false
22,870
def image_to_string(im, cleanup=cleanup_scratch_flag, bool_digits=False): try: util.image_to_scratch(im, scratch_image_name) call_tesseract(scratch_image_name, scratch_text_name_root, bool_digits) text = util.retrieve_text(scratch_text_name_root) finally: if cleanup: util.perform_cleanup(scratch_image_name, scratch_text_name_root) return text
[ "def", "image_to_string", "(", "im", ",", "cleanup", "=", "cleanup_scratch_flag", ",", "bool_digits", "=", "False", ")", ":", "try", ":", "util", ".", "image_to_scratch", "(", "im", ",", "scratch_image_name", ")", "call_tesseract", "(", "scratch_image_name", ",", "scratch_text_name_root", ",", "bool_digits", ")", "text", "=", "util", ".", "retrieve_text", "(", "scratch_text_name_root", ")", "finally", ":", "if", "cleanup", ":", "util", ".", "perform_cleanup", "(", "scratch_image_name", ",", "scratch_text_name_root", ")", "return", "text" ]
converts im to file .
train
false
22,872
def test_shell_env(): with shell_env(KEY='value'): eq_(env.shell_env['KEY'], 'value') eq_(env.shell_env, {})
[ "def", "test_shell_env", "(", ")", ":", "with", "shell_env", "(", "KEY", "=", "'value'", ")", ":", "eq_", "(", "env", ".", "shell_env", "[", "'KEY'", "]", ",", "'value'", ")", "eq_", "(", "env", ".", "shell_env", ",", "{", "}", ")" ]
shell_env() sets the shell_env attribute in the env dict .
train
false
22,875
def process_mathjax_script(mathjax_settings): with open((os.path.dirname(os.path.realpath(__file__)) + '/mathjax_script_template'), 'r') as mathjax_script_template: mathjax_template = mathjax_script_template.read() return mathjax_template.format(**mathjax_settings)
[ "def", "process_mathjax_script", "(", "mathjax_settings", ")", ":", "with", "open", "(", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "+", "'/mathjax_script_template'", ")", ",", "'r'", ")", "as", "mathjax_script_template", ":", "mathjax_template", "=", "mathjax_script_template", ".", "read", "(", ")", "return", "mathjax_template", ".", "format", "(", "**", "mathjax_settings", ")" ]
load the mathjax script template from file .
train
true
22,877
def get_db(): if (not hasattr(g, 'sqlite_db')): g.sqlite_db = connect_db() return g.sqlite_db
[ "def", "get_db", "(", ")", ":", "if", "(", "not", "hasattr", "(", "g", ",", "'sqlite_db'", ")", ")", ":", "g", ".", "sqlite_db", "=", "connect_db", "(", ")", "return", "g", ".", "sqlite_db" ]
opens a new database connection if there is none yet for the current application context .
train
false
22,878
@cache_permission def can_upload_translation(user, translation): return (can_edit(user, translation, 'trans.upload_translation') and (can_translate(user, translation) or can_suggest(user, translation)))
[ "@", "cache_permission", "def", "can_upload_translation", "(", "user", ",", "translation", ")", ":", "return", "(", "can_edit", "(", "user", ",", "translation", ",", "'trans.upload_translation'", ")", "and", "(", "can_translate", "(", "user", ",", "translation", ")", "or", "can_suggest", "(", "user", ",", "translation", ")", ")", ")" ]
checks whether user can translate given translation .
train
false
22,879
def add_user_with_status_unrequested(user): _add_user(user, CourseCreator.UNREQUESTED)
[ "def", "add_user_with_status_unrequested", "(", "user", ")", ":", "_add_user", "(", "user", ",", "CourseCreator", ".", "UNREQUESTED", ")" ]
adds a user to the course creator table with status unrequested .
train
false
22,880
def floored_twelfth_of_a_year(date): timetuple = date.timetuple() year = timetuple.tm_year day_of_year = timetuple.tm_yday month0 = floor(((day_of_year / ((isleap(year) and 366.0) or 365.0)) * 12)) return ((((year - start_year) * 12) + month0) - start_month_0_indexed)
[ "def", "floored_twelfth_of_a_year", "(", "date", ")", ":", "timetuple", "=", "date", ".", "timetuple", "(", ")", "year", "=", "timetuple", ".", "tm_year", "day_of_year", "=", "timetuple", ".", "tm_yday", "month0", "=", "floor", "(", "(", "(", "day_of_year", "/", "(", "(", "isleap", "(", "year", ")", "and", "366.0", ")", "or", "365.0", ")", ")", "*", "12", ")", ")", "return", "(", "(", "(", "(", "year", "-", "start_year", ")", "*", "12", ")", "+", "month0", ")", "-", "start_month_0_indexed", ")" ]
this function converts a date to a month number by flooring to the nearest 12th of a year .
train
false
22,881
@image_comparison(baseline_images=[u'EventCollection_plot__set_ls_dash'], remove_text=True) def test__EventCollection__set_linestyle_single_dash(): (splt, coll, _) = generate_EventCollection_plot() new_linestyle = (0, (6.0, 6.0)) coll.set_linestyle(new_linestyle) assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))]) splt.set_title(u'EventCollection: set_linestyle')
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'EventCollection_plot__set_ls_dash'", "]", ",", "remove_text", "=", "True", ")", "def", "test__EventCollection__set_linestyle_single_dash", "(", ")", ":", "(", "splt", ",", "coll", ",", "_", ")", "=", "generate_EventCollection_plot", "(", ")", "new_linestyle", "=", "(", "0", ",", "(", "6.0", ",", "6.0", ")", ")", "coll", ".", "set_linestyle", "(", "new_linestyle", ")", "assert_equal", "(", "coll", ".", "get_linestyle", "(", ")", ",", "[", "(", "0", ",", "(", "6.0", ",", "6.0", ")", ")", "]", ")", "splt", ".", "set_title", "(", "u'EventCollection: set_linestyle'", ")" ]
check to make sure set_linestyle accepts a single dash pattern .
train
false
22,884
def get_cert_days(module, cert_file): if (not os.path.exists(cert_file)): return (-1) openssl_bin = module.get_bin_path('openssl', True) openssl_cert_cmd = [openssl_bin, 'x509', '-in', cert_file, '-noout', '-text'] (_, out, _) = module.run_command(openssl_cert_cmd, check_rc=True) try: not_after_str = re.search('\\s+Not After\\s*:\\s+(.*)', out.decode('utf8')).group(1) not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str, '%b %d %H:%M:%S %Y %Z'))) except AttributeError: module.fail_json(msg="No 'Not after' date found in {0}".format(cert_file)) except ValueError: module.fail_json(msg="Failed to parse 'Not after' date of {0}".format(cert_file)) now = datetime.datetime.utcnow() return (not_after - now).days
[ "def", "get_cert_days", "(", "module", ",", "cert_file", ")", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "cert_file", ")", ")", ":", "return", "(", "-", "1", ")", "openssl_bin", "=", "module", ".", "get_bin_path", "(", "'openssl'", ",", "True", ")", "openssl_cert_cmd", "=", "[", "openssl_bin", ",", "'x509'", ",", "'-in'", ",", "cert_file", ",", "'-noout'", ",", "'-text'", "]", "(", "_", ",", "out", ",", "_", ")", "=", "module", ".", "run_command", "(", "openssl_cert_cmd", ",", "check_rc", "=", "True", ")", "try", ":", "not_after_str", "=", "re", ".", "search", "(", "'\\\\s+Not After\\\\s*:\\\\s+(.*)'", ",", "out", ".", "decode", "(", "'utf8'", ")", ")", ".", "group", "(", "1", ")", "not_after", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "time", ".", "mktime", "(", "time", ".", "strptime", "(", "not_after_str", ",", "'%b %d %H:%M:%S %Y %Z'", ")", ")", ")", "except", "AttributeError", ":", "module", ".", "fail_json", "(", "msg", "=", "\"No 'Not after' date found in {0}\"", ".", "format", "(", "cert_file", ")", ")", "except", "ValueError", ":", "module", ".", "fail_json", "(", "msg", "=", "\"Failed to parse 'Not after' date of {0}\"", ".", "format", "(", "cert_file", ")", ")", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "return", "(", "not_after", "-", "now", ")", ".", "days" ]
return the days the certificate in cert_file remains valid and -1 if the file was not found .
train
false
22,885
def log_to_stdout(runlevel): get_logger().addHandler(_StdoutLogger(runlevel))
[ "def", "log_to_stdout", "(", "runlevel", ")", ":", "get_logger", "(", ")", ".", "addHandler", "(", "_StdoutLogger", "(", "runlevel", ")", ")" ]
logs further events to stdout .
train
false
22,886
def embed_item(item, imagepath, maxwidth=None, itempath=None, compare_threshold=0, ifempty=False, as_album=False): if compare_threshold: if (not check_art_similarity(item, imagepath, compare_threshold)): log.warn(u'Image not similar; skipping.') return if ifempty: art = get_art(item) if (not art): pass else: log.debug(u'embedart: media file contained art already {0}'.format(displayable_path(imagepath))) return if (maxwidth and (not as_album)): imagepath = resize_image(imagepath, maxwidth) try: log.debug(u'embedart: embedding {0}'.format(displayable_path(imagepath))) item['images'] = [_mediafile_image(imagepath, maxwidth)] except IOError as exc: log.error(u'embedart: could not read image file: {0}'.format(exc)) else: item.try_write(itempath) del item['images']
[ "def", "embed_item", "(", "item", ",", "imagepath", ",", "maxwidth", "=", "None", ",", "itempath", "=", "None", ",", "compare_threshold", "=", "0", ",", "ifempty", "=", "False", ",", "as_album", "=", "False", ")", ":", "if", "compare_threshold", ":", "if", "(", "not", "check_art_similarity", "(", "item", ",", "imagepath", ",", "compare_threshold", ")", ")", ":", "log", ".", "warn", "(", "u'Image not similar; skipping.'", ")", "return", "if", "ifempty", ":", "art", "=", "get_art", "(", "item", ")", "if", "(", "not", "art", ")", ":", "pass", "else", ":", "log", ".", "debug", "(", "u'embedart: media file contained art already {0}'", ".", "format", "(", "displayable_path", "(", "imagepath", ")", ")", ")", "return", "if", "(", "maxwidth", "and", "(", "not", "as_album", ")", ")", ":", "imagepath", "=", "resize_image", "(", "imagepath", ",", "maxwidth", ")", "try", ":", "log", ".", "debug", "(", "u'embedart: embedding {0}'", ".", "format", "(", "displayable_path", "(", "imagepath", ")", ")", ")", "item", "[", "'images'", "]", "=", "[", "_mediafile_image", "(", "imagepath", ",", "maxwidth", ")", "]", "except", "IOError", "as", "exc", ":", "log", ".", "error", "(", "u'embedart: could not read image file: {0}'", ".", "format", "(", "exc", ")", ")", "else", ":", "item", ".", "try_write", "(", "itempath", ")", "del", "item", "[", "'images'", "]" ]
embed an image into the items media file .
train
false
22,887
def unpad_dims(output, input, leftdims, rightdims): if (output.ndim == input.ndim): return output outshp = tensor.join(0, input.shape[:(- rightdims)], output.shape[(- rightdims):]) return GpuReshape(input.ndim)(output, outshp)
[ "def", "unpad_dims", "(", "output", ",", "input", ",", "leftdims", ",", "rightdims", ")", ":", "if", "(", "output", ".", "ndim", "==", "input", ".", "ndim", ")", ":", "return", "output", "outshp", "=", "tensor", ".", "join", "(", "0", ",", "input", ".", "shape", "[", ":", "(", "-", "rightdims", ")", "]", ",", "output", ".", "shape", "[", "(", "-", "rightdims", ")", ":", "]", ")", "return", "GpuReshape", "(", "input", ".", "ndim", ")", "(", "output", ",", "outshp", ")" ]
reshapes the output after pad_dims .
train
false
22,888
def should_transcode(item, fmt): if (config['convert']['never_convert_lossy_files'] and (not (item.format.lower() in LOSSLESS_FORMATS))): return False maxbr = config['convert']['max_bitrate'].get(int) return ((fmt.lower() != item.format.lower()) or (item.bitrate >= (1000 * maxbr)))
[ "def", "should_transcode", "(", "item", ",", "fmt", ")", ":", "if", "(", "config", "[", "'convert'", "]", "[", "'never_convert_lossy_files'", "]", "and", "(", "not", "(", "item", ".", "format", ".", "lower", "(", ")", "in", "LOSSLESS_FORMATS", ")", ")", ")", ":", "return", "False", "maxbr", "=", "config", "[", "'convert'", "]", "[", "'max_bitrate'", "]", ".", "get", "(", "int", ")", "return", "(", "(", "fmt", ".", "lower", "(", ")", "!=", "item", ".", "format", ".", "lower", "(", ")", ")", "or", "(", "item", ".", "bitrate", ">=", "(", "1000", "*", "maxbr", ")", ")", ")" ]
determine whether the item should be transcoded as part of conversion .
train
false
22,889
def load_data_wrapper(): (tr_d, va_d, te_d) = load_data() training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]] training_results = [vectorized_result(y) for y in tr_d[1]] training_data = zip(training_inputs, training_results) validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]] validation_data = zip(validation_inputs, va_d[1]) test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]] test_data = zip(test_inputs, te_d[1]) return (training_data, validation_data, test_data)
[ "def", "load_data_wrapper", "(", ")", ":", "(", "tr_d", ",", "va_d", ",", "te_d", ")", "=", "load_data", "(", ")", "training_inputs", "=", "[", "np", ".", "reshape", "(", "x", ",", "(", "784", ",", "1", ")", ")", "for", "x", "in", "tr_d", "[", "0", "]", "]", "training_results", "=", "[", "vectorized_result", "(", "y", ")", "for", "y", "in", "tr_d", "[", "1", "]", "]", "training_data", "=", "zip", "(", "training_inputs", ",", "training_results", ")", "validation_inputs", "=", "[", "np", ".", "reshape", "(", "x", ",", "(", "784", ",", "1", ")", ")", "for", "x", "in", "va_d", "[", "0", "]", "]", "validation_data", "=", "zip", "(", "validation_inputs", ",", "va_d", "[", "1", "]", ")", "test_inputs", "=", "[", "np", ".", "reshape", "(", "x", ",", "(", "784", ",", "1", ")", ")", "for", "x", "in", "te_d", "[", "0", "]", "]", "test_data", "=", "zip", "(", "test_inputs", ",", "te_d", "[", "1", "]", ")", "return", "(", "training_data", ",", "validation_data", ",", "test_data", ")" ]
return a tuple containing .
train
false
22,891
def calc_last_modified(request, *args, **kwargs): assert ('cache_name' in kwargs), 'Must specify cache_name as a keyword arg.' try: cache = get_cache(kwargs['cache_name']) assert (isinstance(cache, FileBasedCache) or isinstance(cache, LocMemCache)), 'requires file-based or mem-based cache.' except InvalidCacheBackendError: return None key = django_get_cache_key(request, cache=cache) if ((key is None) or (not cache.has_key(key))): return None if isinstance(cache, FileBasedCache): fname = cache._key_to_file(cache.make_key(key)) if (not os.path.exists(fname)): return None last_modified = datetime.datetime.fromtimestamp(os.path.getmtime(fname)) elif isinstance(cache, LocMemCache): creation_time = (cache._expire_info[cache.make_key(key)] - settings.CACHE_TIME) last_modified = datetime.datetime.fromtimestamp(creation_time) return last_modified
[ "def", "calc_last_modified", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "assert", "(", "'cache_name'", "in", "kwargs", ")", ",", "'Must specify cache_name as a keyword arg.'", "try", ":", "cache", "=", "get_cache", "(", "kwargs", "[", "'cache_name'", "]", ")", "assert", "(", "isinstance", "(", "cache", ",", "FileBasedCache", ")", "or", "isinstance", "(", "cache", ",", "LocMemCache", ")", ")", ",", "'requires file-based or mem-based cache.'", "except", "InvalidCacheBackendError", ":", "return", "None", "key", "=", "django_get_cache_key", "(", "request", ",", "cache", "=", "cache", ")", "if", "(", "(", "key", "is", "None", ")", "or", "(", "not", "cache", ".", "has_key", "(", "key", ")", ")", ")", ":", "return", "None", "if", "isinstance", "(", "cache", ",", "FileBasedCache", ")", ":", "fname", "=", "cache", ".", "_key_to_file", "(", "cache", ".", "make_key", "(", "key", ")", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ")", ":", "return", "None", "last_modified", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "fname", ")", ")", "elif", "isinstance", "(", "cache", ",", "LocMemCache", ")", ":", "creation_time", "=", "(", "cache", ".", "_expire_info", "[", "cache", ".", "make_key", "(", "key", ")", "]", "-", "settings", ".", "CACHE_TIME", ")", "last_modified", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "creation_time", ")", "return", "last_modified" ]
returns the files modified time as the last-modified date .
train
false
22,893
def metric_cleanup(): logging.debug('metric_cleanup') pass
[ "def", "metric_cleanup", "(", ")", ":", "logging", ".", "debug", "(", "'metric_cleanup'", ")", "pass" ]
clean up the metric module .
train
false
22,894
def depth2space(X, r): return Depth2Space(r)(X)
[ "def", "depth2space", "(", "X", ",", "r", ")", ":", "return", "Depth2Space", "(", "r", ")", "(", "X", ")" ]
computes the depth2space transformation for subpixel calculations .
train
false
22,895
def read_callback(): for host in ZK_HOSTS: try: zk = ZooKeeperServer(host) stats = zk.get_stats() for (k, v) in stats.items(): try: val = collectd.Values(plugin='zookeeper', meta={'0': True}) val.type = ('counter' if (k in COUNTERS) else 'gauge') val.type_instance = k val.values = [v] val.dispatch() except (TypeError, ValueError): collectd.error(('error dispatching stat; host=%s, key=%s, val=%s' % (host, k, v))) pass except socket.error as e: log(('unable to connect to server "%s"' % host)) return stats
[ "def", "read_callback", "(", ")", ":", "for", "host", "in", "ZK_HOSTS", ":", "try", ":", "zk", "=", "ZooKeeperServer", "(", "host", ")", "stats", "=", "zk", ".", "get_stats", "(", ")", "for", "(", "k", ",", "v", ")", "in", "stats", ".", "items", "(", ")", ":", "try", ":", "val", "=", "collectd", ".", "Values", "(", "plugin", "=", "'zookeeper'", ",", "meta", "=", "{", "'0'", ":", "True", "}", ")", "val", ".", "type", "=", "(", "'counter'", "if", "(", "k", "in", "COUNTERS", ")", "else", "'gauge'", ")", "val", ".", "type_instance", "=", "k", "val", ".", "values", "=", "[", "v", "]", "val", ".", "dispatch", "(", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "collectd", ".", "error", "(", "(", "'error dispatching stat; host=%s, key=%s, val=%s'", "%", "(", "host", ",", "k", ",", "v", ")", ")", ")", "pass", "except", "socket", ".", "error", "as", "e", ":", "log", "(", "(", "'unable to connect to server \"%s\"'", "%", "host", ")", ")", "return", "stats" ]
parse stats response from marathon .
train
false
22,897
def dedup(l, suffix='__'): new_l = [] seen = {} for s in l: if (s in seen): seen[s] += 1 s += (suffix + str(seen[s])) else: seen[s] = 0 new_l.append(s) return new_l
[ "def", "dedup", "(", "l", ",", "suffix", "=", "'__'", ")", ":", "new_l", "=", "[", "]", "seen", "=", "{", "}", "for", "s", "in", "l", ":", "if", "(", "s", "in", "seen", ")", ":", "seen", "[", "s", "]", "+=", "1", "s", "+=", "(", "suffix", "+", "str", "(", "seen", "[", "s", "]", ")", ")", "else", ":", "seen", "[", "s", "]", "=", "0", "new_l", ".", "append", "(", "s", ")", "return", "new_l" ]
de-duplicates a list of string by suffixing a counter always returns the same number of entries as provided .
train
false
22,899
def onload(options, tags): pass
[ "def", "onload", "(", "options", ",", "tags", ")", ":", "pass" ]
function called by tcollector when it starts up .
train
false
22,900
def handle_xblock_callback(request, course_id, usage_id, handler, suffix=None): if (not request.user.is_authenticated()): return HttpResponse('Unauthenticated', status=403) try: course_key = CourseKey.from_string(course_id) except InvalidKeyError: raise Http404('Invalid location') with modulestore().bulk_operations(course_key): try: course = modulestore().get_course(course_key) except ItemNotFoundError: raise Http404('invalid location') return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
[ "def", "handle_xblock_callback", "(", "request", ",", "course_id", ",", "usage_id", ",", "handler", ",", "suffix", "=", "None", ")", ":", "if", "(", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "HttpResponse", "(", "'Unauthenticated'", ",", "status", "=", "403", ")", "try", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "except", "InvalidKeyError", ":", "raise", "Http404", "(", "'Invalid location'", ")", "with", "modulestore", "(", ")", ".", "bulk_operations", "(", "course_key", ")", ":", "try", ":", "course", "=", "modulestore", "(", ")", ".", "get_course", "(", "course_key", ")", "except", "ItemNotFoundError", ":", "raise", "Http404", "(", "'invalid location'", ")", "return", "_invoke_xblock_handler", "(", "request", ",", "course_id", ",", "usage_id", ",", "handler", ",", "suffix", ",", "course", "=", "course", ")" ]
generic view for extensions .
train
false
22,902
def version_check(version, op=operator.ge): return op(pkg_resources.parse_version(qVersion()), pkg_resources.parse_version(version))
[ "def", "version_check", "(", "version", ",", "op", "=", "operator", ".", "ge", ")", ":", "return", "op", "(", "pkg_resources", ".", "parse_version", "(", "qVersion", "(", ")", ")", ",", "pkg_resources", ".", "parse_version", "(", "version", ")", ")" ]
check if the qt runtime version is the version supplied or newer .
train
false
22,903
def gn_graph(n, kernel=None, create_using=None, seed=None): if (create_using is None): create_using = nx.DiGraph() elif (not create_using.is_directed()): raise nx.NetworkXError('Directed Graph required in create_using') if (kernel is None): kernel = (lambda x: x) if (seed is not None): random.seed(seed) G = empty_graph(1, create_using) G.name = 'gn_graph({})'.format(n) if (n == 1): return G G.add_edge(1, 0) ds = [1, 1] for source in range(2, n): dist = [kernel(d) for d in ds] target = discrete_sequence(1, distribution=dist)[0] G.add_edge(source, target) ds.append(1) ds[target] += 1 return G
[ "def", "gn_graph", "(", "n", ",", "kernel", "=", "None", ",", "create_using", "=", "None", ",", "seed", "=", "None", ")", ":", "if", "(", "create_using", "is", "None", ")", ":", "create_using", "=", "nx", ".", "DiGraph", "(", ")", "elif", "(", "not", "create_using", ".", "is_directed", "(", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'Directed Graph required in create_using'", ")", "if", "(", "kernel", "is", "None", ")", ":", "kernel", "=", "(", "lambda", "x", ":", "x", ")", "if", "(", "seed", "is", "not", "None", ")", ":", "random", ".", "seed", "(", "seed", ")", "G", "=", "empty_graph", "(", "1", ",", "create_using", ")", "G", ".", "name", "=", "'gn_graph({})'", ".", "format", "(", "n", ")", "if", "(", "n", "==", "1", ")", ":", "return", "G", "G", ".", "add_edge", "(", "1", ",", "0", ")", "ds", "=", "[", "1", ",", "1", "]", "for", "source", "in", "range", "(", "2", ",", "n", ")", ":", "dist", "=", "[", "kernel", "(", "d", ")", "for", "d", "in", "ds", "]", "target", "=", "discrete_sequence", "(", "1", ",", "distribution", "=", "dist", ")", "[", "0", "]", "G", ".", "add_edge", "(", "source", ",", "target", ")", "ds", ".", "append", "(", "1", ")", "ds", "[", "target", "]", "+=", "1", "return", "G" ]
return the growing network digraph with n nodes .
train
false
22,905
def latest(name, rev=None, target=None, clean=False, user=None, identity=None, force=False, opts=False, update_head=True): ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if (not target): return _fail(ret, '"target option is required') is_repository = (os.path.isdir(target) and os.path.isdir('{0}/.hg'.format(target))) if is_repository: ret = _update_repo(ret, name, target, clean, user, identity, rev, opts, update_head) else: if os.path.isdir(target): fail = _handle_existing(ret, target, force) if (fail is not None): return fail else: log.debug('target {0} is not found, "hg clone" is required'.format(target)) if __opts__['test']: return _neutral_test(ret, 'Repository {0} is about to be cloned to {1}'.format(name, target)) _clone_repo(ret, target, name, user, identity, rev, opts) return ret
[ "def", "latest", "(", "name", ",", "rev", "=", "None", ",", "target", "=", "None", ",", "clean", "=", "False", ",", "user", "=", "None", ",", "identity", "=", "None", ",", "force", "=", "False", ",", "opts", "=", "False", ",", "update_head", "=", "True", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "(", "not", "target", ")", ":", "return", "_fail", "(", "ret", ",", "'\"target option is required'", ")", "is_repository", "=", "(", "os", ".", "path", ".", "isdir", "(", "target", ")", "and", "os", ".", "path", ".", "isdir", "(", "'{0}/.hg'", ".", "format", "(", "target", ")", ")", ")", "if", "is_repository", ":", "ret", "=", "_update_repo", "(", "ret", ",", "name", ",", "target", ",", "clean", ",", "user", ",", "identity", ",", "rev", ",", "opts", ",", "update_head", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "target", ")", ":", "fail", "=", "_handle_existing", "(", "ret", ",", "target", ",", "force", ")", "if", "(", "fail", "is", "not", "None", ")", ":", "return", "fail", "else", ":", "log", ".", "debug", "(", "'target {0} is not found, \"hg clone\" is required'", ".", "format", "(", "target", ")", ")", "if", "__opts__", "[", "'test'", "]", ":", "return", "_neutral_test", "(", "ret", ",", "'Repository {0} is about to be cloned to {1}'", ".", "format", "(", "name", ",", "target", ")", ")", "_clone_repo", "(", "ret", ",", "target", ",", "name", ",", "user", ",", "identity", ",", "rev", ",", "opts", ")", "return", "ret" ]
topics ordered by created time .
train
true
22,907
def get_seqs_to_keep_lookup_from_fasta_file(fasta_f): return set([seq_id.split()[0] for (seq_id, seq) in parse_fasta(fasta_f)])
[ "def", "get_seqs_to_keep_lookup_from_fasta_file", "(", "fasta_f", ")", ":", "return", "set", "(", "[", "seq_id", ".", "split", "(", ")", "[", "0", "]", "for", "(", "seq_id", ",", "seq", ")", "in", "parse_fasta", "(", "fasta_f", ")", "]", ")" ]
return the sequence ids within the fasta file .
train
false
22,908
def det_minor(M): n = M.rows if (n == 2): return ((M[(0, 0)] * M[(1, 1)]) - (M[(1, 0)] * M[(0, 1)])) else: return sum([(((1, (-1))[(i % 2)] * Add(*[(M[(0, i)] * d) for d in Add.make_args(det_minor(M.minorMatrix(0, i)))])) if M[(0, i)] else S.Zero) for i in range(n)])
[ "def", "det_minor", "(", "M", ")", ":", "n", "=", "M", ".", "rows", "if", "(", "n", "==", "2", ")", ":", "return", "(", "(", "M", "[", "(", "0", ",", "0", ")", "]", "*", "M", "[", "(", "1", ",", "1", ")", "]", ")", "-", "(", "M", "[", "(", "1", ",", "0", ")", "]", "*", "M", "[", "(", "0", ",", "1", ")", "]", ")", ")", "else", ":", "return", "sum", "(", "[", "(", "(", "(", "1", ",", "(", "-", "1", ")", ")", "[", "(", "i", "%", "2", ")", "]", "*", "Add", "(", "*", "[", "(", "M", "[", "(", "0", ",", "i", ")", "]", "*", "d", ")", "for", "d", "in", "Add", ".", "make_args", "(", "det_minor", "(", "M", ".", "minorMatrix", "(", "0", ",", "i", ")", ")", ")", "]", ")", ")", "if", "M", "[", "(", "0", ",", "i", ")", "]", "else", "S", ".", "Zero", ")", "for", "i", "in", "range", "(", "n", ")", "]", ")" ]
return the det(m) computed from minors without introducing new nesting in products .
train
false
22,910
def _config_file(): return __salt__['config.option']('poudriere.config')
[ "def", "_config_file", "(", ")", ":", "return", "__salt__", "[", "'config.option'", "]", "(", "'poudriere.config'", ")" ]
return the config file location to use .
train
false
22,911
def get_ip_info_for_instance(context, instance): if isinstance(instance, obj_base.NovaObject): nw_info = instance.info_cache.network_info else: info_cache = (instance.info_cache or {}) nw_info = info_cache.get('network_info') if (not nw_info): nw_info = [] return get_ip_info_for_instance_from_nw_info(nw_info)
[ "def", "get_ip_info_for_instance", "(", "context", ",", "instance", ")", ":", "if", "isinstance", "(", "instance", ",", "obj_base", ".", "NovaObject", ")", ":", "nw_info", "=", "instance", ".", "info_cache", ".", "network_info", "else", ":", "info_cache", "=", "(", "instance", ".", "info_cache", "or", "{", "}", ")", "nw_info", "=", "info_cache", ".", "get", "(", "'network_info'", ")", "if", "(", "not", "nw_info", ")", ":", "nw_info", "=", "[", "]", "return", "get_ip_info_for_instance_from_nw_info", "(", "nw_info", ")" ]
return a dictionary of ip information for an instance .
train
false
22,912
def del_export(exports='/etc/exports', path=None): edict = list_exports(exports) del edict[path] _write_exports(exports, edict) return edict
[ "def", "del_export", "(", "exports", "=", "'/etc/exports'", ",", "path", "=", "None", ")", ":", "edict", "=", "list_exports", "(", "exports", ")", "del", "edict", "[", "path", "]", "_write_exports", "(", "exports", ",", "edict", ")", "return", "edict" ]
remove an export cli example: .
train
true
22,913
def addWithLeastLength(loops, point, shortestAdditionalLength): shortestLoop = None shortestPointIndex = None for loop in loops: if (len(loop) > 2): for pointIndex in xrange(len(loop)): additionalLength = getAdditionalLoopLength(loop, point, pointIndex) if (additionalLength < shortestAdditionalLength): shortestAdditionalLength = additionalLength shortestLoop = loop shortestPointIndex = pointIndex if (shortestPointIndex != None): afterCenterComplex = shortestLoop[shortestPointIndex] afterEndComplex = shortestLoop[((shortestPointIndex + 1) % len(shortestLoop))] isInlineAfter = isInline(point, afterCenterComplex, afterEndComplex) beforeCenterComplex = shortestLoop[(((shortestPointIndex + len(shortestLoop)) - 1) % len(shortestLoop))] beforeEndComplex = shortestLoop[(((shortestPointIndex + len(shortestLoop)) - 2) % len(shortestLoop))] isInlineBefore = isInline(point, beforeCenterComplex, beforeEndComplex) if (isInlineAfter or isInlineBefore): shortestLoop.insert(shortestPointIndex, point)
[ "def", "addWithLeastLength", "(", "loops", ",", "point", ",", "shortestAdditionalLength", ")", ":", "shortestLoop", "=", "None", "shortestPointIndex", "=", "None", "for", "loop", "in", "loops", ":", "if", "(", "len", "(", "loop", ")", ">", "2", ")", ":", "for", "pointIndex", "in", "xrange", "(", "len", "(", "loop", ")", ")", ":", "additionalLength", "=", "getAdditionalLoopLength", "(", "loop", ",", "point", ",", "pointIndex", ")", "if", "(", "additionalLength", "<", "shortestAdditionalLength", ")", ":", "shortestAdditionalLength", "=", "additionalLength", "shortestLoop", "=", "loop", "shortestPointIndex", "=", "pointIndex", "if", "(", "shortestPointIndex", "!=", "None", ")", ":", "afterCenterComplex", "=", "shortestLoop", "[", "shortestPointIndex", "]", "afterEndComplex", "=", "shortestLoop", "[", "(", "(", "shortestPointIndex", "+", "1", ")", "%", "len", "(", "shortestLoop", ")", ")", "]", "isInlineAfter", "=", "isInline", "(", "point", ",", "afterCenterComplex", ",", "afterEndComplex", ")", "beforeCenterComplex", "=", "shortestLoop", "[", "(", "(", "(", "shortestPointIndex", "+", "len", "(", "shortestLoop", ")", ")", "-", "1", ")", "%", "len", "(", "shortestLoop", ")", ")", "]", "beforeEndComplex", "=", "shortestLoop", "[", "(", "(", "(", "shortestPointIndex", "+", "len", "(", "shortestLoop", ")", ")", "-", "2", ")", "%", "len", "(", "shortestLoop", ")", ")", "]", "isInlineBefore", "=", "isInline", "(", "point", ",", "beforeCenterComplex", ",", "beforeEndComplex", ")", "if", "(", "isInlineAfter", "or", "isInlineBefore", ")", ":", "shortestLoop", ".", "insert", "(", "shortestPointIndex", ",", "point", ")" ]
insert a point into a loop .
train
false
22,915
def get_calendar(name): return holiday_calendars[name]()
[ "def", "get_calendar", "(", "name", ")", ":", "return", "holiday_calendars", "[", "name", "]", "(", ")" ]
return an instance of a calendar based on its name .
train
false
22,916
def _read_int32(f): return np.int32(struct.unpack('>i', f.read(4))[0])
[ "def", "_read_int32", "(", "f", ")", ":", "return", "np", ".", "int32", "(", "struct", ".", "unpack", "(", "'>i'", ",", "f", ".", "read", "(", "4", ")", ")", "[", "0", "]", ")" ]
unpack a 4-byte integer from the current position in file f .
train
false
22,917
def options(name, conf_file=None): config = _read_config(conf_file) section_name = 'program:{0}'.format(name) if (section_name not in config.sections()): raise CommandExecutionError("Process '{0}' not found".format(name)) ret = {} for (key, val) in config.items(section_name): val = salt.utils.str_to_num(val.split(';')[0].strip()) if isinstance(val, string_types): if (val.lower() == 'true'): val = True elif (val.lower() == 'false'): val = False ret[key] = val return ret
[ "def", "options", "(", "name", ",", "conf_file", "=", "None", ")", ":", "config", "=", "_read_config", "(", "conf_file", ")", "section_name", "=", "'program:{0}'", ".", "format", "(", "name", ")", "if", "(", "section_name", "not", "in", "config", ".", "sections", "(", ")", ")", ":", "raise", "CommandExecutionError", "(", "\"Process '{0}' not found\"", ".", "format", "(", "name", ")", ")", "ret", "=", "{", "}", "for", "(", "key", ",", "val", ")", "in", "config", ".", "items", "(", "section_name", ")", ":", "val", "=", "salt", ".", "utils", ".", "str_to_num", "(", "val", ".", "split", "(", "';'", ")", "[", "0", "]", ".", "strip", "(", ")", ")", "if", "isinstance", "(", "val", ",", "string_types", ")", ":", "if", "(", "val", ".", "lower", "(", ")", "==", "'true'", ")", ":", "val", "=", "True", "elif", "(", "val", ".", "lower", "(", ")", "==", "'false'", ")", ":", "val", "=", "False", "ret", "[", "key", "]", "=", "val", "return", "ret" ]
sends a options request .
train
true
22,918
def validate_max_staleness(option, value): if ((value == (-1)) or (value == '-1')): return (-1) return validate_positive_integer(option, value)
[ "def", "validate_max_staleness", "(", "option", ",", "value", ")", ":", "if", "(", "(", "value", "==", "(", "-", "1", ")", ")", "or", "(", "value", "==", "'-1'", ")", ")", ":", "return", "(", "-", "1", ")", "return", "validate_positive_integer", "(", "option", ",", "value", ")" ]
validates maxstalenessseconds according to the max staleness spec .
train
false
22,921
def tag_model(cls, admin_cls=None, field_name=u'tags', sort_tags=False, select_field=False, auto_add_admin_field=True, admin_list_display=True): try: from tagging.registry import register as tagging_register except ImportError: from tagging import register as tagging_register cls.add_to_class(field_name, (TagSelectField if select_field else TagField)(field_name.capitalize(), blank=True)) try: tagging_register(cls, tag_descriptor_attr=(u'tagging_' + field_name)) except AlreadyRegistered: return if admin_cls: if admin_list_display: admin_cls.list_display.append(field_name) admin_cls.list_filter.append(field_name) if (auto_add_admin_field and hasattr(admin_cls, u'add_extension_options')): admin_cls.add_extension_options(_(u'Tagging'), {u'fields': (field_name,)}) if sort_tags: pre_save.connect(pre_save_handler, sender=cls)
[ "def", "tag_model", "(", "cls", ",", "admin_cls", "=", "None", ",", "field_name", "=", "u'tags'", ",", "sort_tags", "=", "False", ",", "select_field", "=", "False", ",", "auto_add_admin_field", "=", "True", ",", "admin_list_display", "=", "True", ")", ":", "try", ":", "from", "tagging", ".", "registry", "import", "register", "as", "tagging_register", "except", "ImportError", ":", "from", "tagging", "import", "register", "as", "tagging_register", "cls", ".", "add_to_class", "(", "field_name", ",", "(", "TagSelectField", "if", "select_field", "else", "TagField", ")", "(", "field_name", ".", "capitalize", "(", ")", ",", "blank", "=", "True", ")", ")", "try", ":", "tagging_register", "(", "cls", ",", "tag_descriptor_attr", "=", "(", "u'tagging_'", "+", "field_name", ")", ")", "except", "AlreadyRegistered", ":", "return", "if", "admin_cls", ":", "if", "admin_list_display", ":", "admin_cls", ".", "list_display", ".", "append", "(", "field_name", ")", "admin_cls", ".", "list_filter", ".", "append", "(", "field_name", ")", "if", "(", "auto_add_admin_field", "and", "hasattr", "(", "admin_cls", ",", "u'add_extension_options'", ")", ")", ":", "admin_cls", ".", "add_extension_options", "(", "_", "(", "u'Tagging'", ")", ",", "{", "u'fields'", ":", "(", "field_name", ",", ")", "}", ")", "if", "sort_tags", ":", "pre_save", ".", "connect", "(", "pre_save_handler", ",", "sender", "=", "cls", ")" ]
tag_model accepts a number of named parameters: admin_cls if set to a subclass of modeladmin .
train
false
22,923
def getWpadFiles(): domainParts = socket.gethostname().split('.') pacURLs = [] for ii in range(len(domainParts)): domain = '.'.join(domainParts[ii:]) pacURLs.append((('http://wpad.' + domain) + '/wpad.dat')) return list(set(pacURLs))
[ "def", "getWpadFiles", "(", ")", ":", "domainParts", "=", "socket", ".", "gethostname", "(", ")", ".", "split", "(", "'.'", ")", "pacURLs", "=", "[", "]", "for", "ii", "in", "range", "(", "len", "(", "domainParts", ")", ")", ":", "domain", "=", "'.'", ".", "join", "(", "domainParts", "[", "ii", ":", "]", ")", "pacURLs", ".", "append", "(", "(", "(", "'http://wpad.'", "+", "domain", ")", "+", "'/wpad.dat'", ")", ")", "return", "list", "(", "set", "(", "pacURLs", ")", ")" ]
return possible pac file locations from the standard set of .
train
false
22,924
def test_no_mpkg(data): finder = PackageFinder([data.find_links], [], session=PipSession()) req = InstallRequirement.from_line('pkgwithmpkg') found = finder.find_requirement(req, False) assert found.url.endswith('pkgwithmpkg-1.0.tar.gz'), found
[ "def", "test_no_mpkg", "(", "data", ")", ":", "finder", "=", "PackageFinder", "(", "[", "data", ".", "find_links", "]", ",", "[", "]", ",", "session", "=", "PipSession", "(", ")", ")", "req", "=", "InstallRequirement", ".", "from_line", "(", "'pkgwithmpkg'", ")", "found", "=", "finder", ".", "find_requirement", "(", "req", ",", "False", ")", "assert", "found", ".", "url", ".", "endswith", "(", "'pkgwithmpkg-1.0.tar.gz'", ")", ",", "found" ]
finder skips zipfiles with "macosx10" in the name .
train
false
22,925
def _cron_matched(cron, cmd, identifier=None): (ret, id_matched) = (False, None) cid = _cron_id(cron) if cid: if (not identifier): identifier = SALT_CRON_NO_IDENTIFIER eidentifier = _encode(identifier) if ((cron.get('cmd', None) != cmd) and (cid == SALT_CRON_NO_IDENTIFIER) and (eidentifier == SALT_CRON_NO_IDENTIFIER)): id_matched = False else: if ((cron.get('cmd', None) == cmd) and (cid == SALT_CRON_NO_IDENTIFIER) and identifier): cid = eidentifier id_matched = (eidentifier == cid) if (((id_matched is None) and (cmd == cron.get('cmd', None))) or id_matched): ret = True return ret
[ "def", "_cron_matched", "(", "cron", ",", "cmd", ",", "identifier", "=", "None", ")", ":", "(", "ret", ",", "id_matched", ")", "=", "(", "False", ",", "None", ")", "cid", "=", "_cron_id", "(", "cron", ")", "if", "cid", ":", "if", "(", "not", "identifier", ")", ":", "identifier", "=", "SALT_CRON_NO_IDENTIFIER", "eidentifier", "=", "_encode", "(", "identifier", ")", "if", "(", "(", "cron", ".", "get", "(", "'cmd'", ",", "None", ")", "!=", "cmd", ")", "and", "(", "cid", "==", "SALT_CRON_NO_IDENTIFIER", ")", "and", "(", "eidentifier", "==", "SALT_CRON_NO_IDENTIFIER", ")", ")", ":", "id_matched", "=", "False", "else", ":", "if", "(", "(", "cron", ".", "get", "(", "'cmd'", ",", "None", ")", "==", "cmd", ")", "and", "(", "cid", "==", "SALT_CRON_NO_IDENTIFIER", ")", "and", "identifier", ")", ":", "cid", "=", "eidentifier", "id_matched", "=", "(", "eidentifier", "==", "cid", ")", "if", "(", "(", "(", "id_matched", "is", "None", ")", "and", "(", "cmd", "==", "cron", ".", "get", "(", "'cmd'", ",", "None", ")", ")", ")", "or", "id_matched", ")", ":", "ret", "=", "True", "return", "ret" ]
check if: - we find a cron with same cmd .
train
true
22,926
def get_disk_info(virt_type, instance, block_device_info=None, image_meta=None, rescue=False): disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, 'disk') cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, 'cdrom') mapping = get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, block_device_info, image_meta, rescue) return {'disk_bus': disk_bus, 'cdrom_bus': cdrom_bus, 'mapping': mapping}
[ "def", "get_disk_info", "(", "virt_type", ",", "instance", ",", "block_device_info", "=", "None", ",", "image_meta", "=", "None", ",", "rescue", "=", "False", ")", ":", "disk_bus", "=", "get_disk_bus_for_device_type", "(", "virt_type", ",", "image_meta", ",", "'disk'", ")", "cdrom_bus", "=", "get_disk_bus_for_device_type", "(", "virt_type", ",", "image_meta", ",", "'cdrom'", ")", "mapping", "=", "get_disk_mapping", "(", "virt_type", ",", "instance", ",", "disk_bus", ",", "cdrom_bus", ",", "block_device_info", ",", "image_meta", ",", "rescue", ")", "return", "{", "'disk_bus'", ":", "disk_bus", ",", "'cdrom_bus'", ":", "cdrom_bus", ",", "'mapping'", ":", "mapping", "}" ]
determine guest disk mapping info .
train
false
22,927
def haddr_to_bin(string): try: return addrconv.mac.text_to_bin(string) except: raise ValueError
[ "def", "haddr_to_bin", "(", "string", ")", ":", "try", ":", "return", "addrconv", ".", "mac", ".", "text_to_bin", "(", "string", ")", "except", ":", "raise", "ValueError" ]
parse mac address string in human readable format into internal representation .
train
false
22,928
def get_purchase_params(cart, callback_url=None, extra_data=None): total_cost = cart.total_cost amount = '{0:0.2f}'.format(total_cost) params = OrderedDict() params['amount'] = amount params['currency'] = cart.currency params['orderNumber'] = 'OrderId: {0:d}'.format(cart.id) params['access_key'] = get_processor_config().get('ACCESS_KEY', '') params['profile_id'] = get_processor_config().get('PROFILE_ID', '') params['reference_number'] = cart.id params['transaction_type'] = 'sale' params['locale'] = 'en' params['signed_date_time'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') params['signed_field_names'] = 'access_key,profile_id,amount,currency,transaction_type,reference_number,signed_date_time,locale,transaction_uuid,signed_field_names,unsigned_field_names,orderNumber' params['unsigned_field_names'] = '' params['transaction_uuid'] = uuid.uuid4().hex params['payment_method'] = 'card' if (callback_url is not None): params['override_custom_receipt_page'] = callback_url params['override_custom_cancel_page'] = callback_url if (extra_data is not None): for (num, item) in enumerate(extra_data, start=1): key = u'merchant_defined_data{num}'.format(num=num) params[key] = item return params
[ "def", "get_purchase_params", "(", "cart", ",", "callback_url", "=", "None", ",", "extra_data", "=", "None", ")", ":", "total_cost", "=", "cart", ".", "total_cost", "amount", "=", "'{0:0.2f}'", ".", "format", "(", "total_cost", ")", "params", "=", "OrderedDict", "(", ")", "params", "[", "'amount'", "]", "=", "amount", "params", "[", "'currency'", "]", "=", "cart", ".", "currency", "params", "[", "'orderNumber'", "]", "=", "'OrderId: {0:d}'", ".", "format", "(", "cart", ".", "id", ")", "params", "[", "'access_key'", "]", "=", "get_processor_config", "(", ")", ".", "get", "(", "'ACCESS_KEY'", ",", "''", ")", "params", "[", "'profile_id'", "]", "=", "get_processor_config", "(", ")", ".", "get", "(", "'PROFILE_ID'", ",", "''", ")", "params", "[", "'reference_number'", "]", "=", "cart", ".", "id", "params", "[", "'transaction_type'", "]", "=", "'sale'", "params", "[", "'locale'", "]", "=", "'en'", "params", "[", "'signed_date_time'", "]", "=", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "params", "[", "'signed_field_names'", "]", "=", "'access_key,profile_id,amount,currency,transaction_type,reference_number,signed_date_time,locale,transaction_uuid,signed_field_names,unsigned_field_names,orderNumber'", "params", "[", "'unsigned_field_names'", "]", "=", "''", "params", "[", "'transaction_uuid'", "]", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "params", "[", "'payment_method'", "]", "=", "'card'", "if", "(", "callback_url", "is", "not", "None", ")", ":", "params", "[", "'override_custom_receipt_page'", "]", "=", "callback_url", "params", "[", "'override_custom_cancel_page'", "]", "=", "callback_url", "if", "(", "extra_data", "is", "not", "None", ")", ":", "for", "(", "num", ",", "item", ")", "in", "enumerate", "(", "extra_data", ",", "start", "=", "1", ")", ":", "key", "=", "u'merchant_defined_data{num}'", ".", "format", "(", "num", "=", "num", ")", "params", "[", "key", "]", "=", "item", "return", "params" ]
this method will build out a dictionary of parameters needed by cybersource to complete the transaction args: cart : the order model representing items in the users cart .
train
false
22,929
def find_config(config_path): possible_locations = [config_path, os.path.join(FLAGS.state_path, 'etc', 'cinder', config_path), os.path.join(FLAGS.state_path, 'etc', config_path), os.path.join(FLAGS.state_path, config_path), ('/etc/cinder/%s' % config_path)] for path in possible_locations: if os.path.exists(path): return os.path.abspath(path) raise exception.ConfigNotFound(path=os.path.abspath(config_path))
[ "def", "find_config", "(", "config_path", ")", ":", "possible_locations", "=", "[", "config_path", ",", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "state_path", ",", "'etc'", ",", "'cinder'", ",", "config_path", ")", ",", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "state_path", ",", "'etc'", ",", "config_path", ")", ",", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "state_path", ",", "config_path", ")", ",", "(", "'/etc/cinder/%s'", "%", "config_path", ")", "]", "for", "path", "in", "possible_locations", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "path", ")", "raise", "exception", ".", "ConfigNotFound", "(", "path", "=", "os", ".", "path", ".", "abspath", "(", "config_path", ")", ")" ]
find a configuration file using the given hint .
train
false
22,930
def writeXMLObject(absoluteFolderDirectory, derivation, fileNames, target, xmlObject): extension = evaluate.getEvaluatedString(xmlObject.getFabricationExtension(), derivation.elementNode, 'extension') fileNameRoot = derivation.fileName if (fileNameRoot == ''): fileNameRoot = evaluate.getEvaluatedString('', target, 'name') fileNameRoot = evaluate.getEvaluatedString(fileNameRoot, target, 'id') fileNameRoot += derivation.suffix fileName = ('%s.%s' % (fileNameRoot, extension)) suffixIndex = 2 while (fileName in fileNames): fileName = ('%s_%s.%s' % (fileNameRoot, suffixIndex, extension)) suffixIndex += 1 absoluteFileName = os.path.join(absoluteFolderDirectory, fileName) fileNames.append(fileName) archive.makeDirectory(absoluteFolderDirectory) if (not derivation.writeMatrix): xmlObject.matrix4X4 = matrix.Matrix() print 'The write tool generated the file:' print absoluteFileName archive.writeFileText(absoluteFileName, xmlObject.getFabricationText(derivation.addLayerTemplate))
[ "def", "writeXMLObject", "(", "absoluteFolderDirectory", ",", "derivation", ",", "fileNames", ",", "target", ",", "xmlObject", ")", ":", "extension", "=", "evaluate", ".", "getEvaluatedString", "(", "xmlObject", ".", "getFabricationExtension", "(", ")", ",", "derivation", ".", "elementNode", ",", "'extension'", ")", "fileNameRoot", "=", "derivation", ".", "fileName", "if", "(", "fileNameRoot", "==", "''", ")", ":", "fileNameRoot", "=", "evaluate", ".", "getEvaluatedString", "(", "''", ",", "target", ",", "'name'", ")", "fileNameRoot", "=", "evaluate", ".", "getEvaluatedString", "(", "fileNameRoot", ",", "target", ",", "'id'", ")", "fileNameRoot", "+=", "derivation", ".", "suffix", "fileName", "=", "(", "'%s.%s'", "%", "(", "fileNameRoot", ",", "extension", ")", ")", "suffixIndex", "=", "2", "while", "(", "fileName", "in", "fileNames", ")", ":", "fileName", "=", "(", "'%s_%s.%s'", "%", "(", "fileNameRoot", ",", "suffixIndex", ",", "extension", ")", ")", "suffixIndex", "+=", "1", "absoluteFileName", "=", "os", ".", "path", ".", "join", "(", "absoluteFolderDirectory", ",", "fileName", ")", "fileNames", ".", "append", "(", "fileName", ")", "archive", ".", "makeDirectory", "(", "absoluteFolderDirectory", ")", "if", "(", "not", "derivation", ".", "writeMatrix", ")", ":", "xmlObject", ".", "matrix4X4", "=", "matrix", ".", "Matrix", "(", ")", "print", "'The write tool generated the file:'", "print", "absoluteFileName", "archive", ".", "writeFileText", "(", "absoluteFileName", ",", "xmlObject", ".", "getFabricationText", "(", "derivation", ".", "addLayerTemplate", ")", ")" ]
write one instance of the xmlobject .
train
false
22,931
@require_role('admin') def asset_upload(request): if (request.method == 'POST'): excel_file = request.FILES.get('file_name', '') ret = excel_to_db(excel_file) if ret: smg = u'\u6279\u91cf\u6dfb\u52a0\u6210\u529f' else: emg = u'\u6279\u91cf\u6dfb\u52a0\u5931\u8d25,\u8bf7\u68c0\u67e5\u683c\u5f0f.' return my_render('jasset/asset_add_batch.html', locals(), request)
[ "@", "require_role", "(", "'admin'", ")", "def", "asset_upload", "(", "request", ")", ":", "if", "(", "request", ".", "method", "==", "'POST'", ")", ":", "excel_file", "=", "request", ".", "FILES", ".", "get", "(", "'file_name'", ",", "''", ")", "ret", "=", "excel_to_db", "(", "excel_file", ")", "if", "ret", ":", "smg", "=", "u'\\u6279\\u91cf\\u6dfb\\u52a0\\u6210\\u529f'", "else", ":", "emg", "=", "u'\\u6279\\u91cf\\u6dfb\\u52a0\\u5931\\u8d25,\\u8bf7\\u68c0\\u67e5\\u683c\\u5f0f.'", "return", "my_render", "(", "'jasset/asset_add_batch.html'", ",", "locals", "(", ")", ",", "request", ")" ]
upload asset excel file view .
train
false
22,932
def _dense_to_sparse(batch): if isinstance(batch, tuple): raise TypeError('Composite batches not supported.') assert (not isinstance(batch, list)) if is_symbolic_batch(batch): assert isinstance(batch, theano.tensor.TensorVariable) return theano.sparse.csr_from_dense(batch) else: assert isinstance(batch, np.ndarray), ('type of batch: %s' % type(batch)) return scipy.sparse.csr_matrix(batch)
[ "def", "_dense_to_sparse", "(", "batch", ")", ":", "if", "isinstance", "(", "batch", ",", "tuple", ")", ":", "raise", "TypeError", "(", "'Composite batches not supported.'", ")", "assert", "(", "not", "isinstance", "(", "batch", ",", "list", ")", ")", "if", "is_symbolic_batch", "(", "batch", ")", ":", "assert", "isinstance", "(", "batch", ",", "theano", ".", "tensor", ".", "TensorVariable", ")", "return", "theano", ".", "sparse", ".", "csr_from_dense", "(", "batch", ")", "else", ":", "assert", "isinstance", "(", "batch", ",", "np", ".", "ndarray", ")", ",", "(", "'type of batch: %s'", "%", "type", "(", "batch", ")", ")", "return", "scipy", ".", "sparse", ".", "csr_matrix", "(", "batch", ")" ]
casts dense batches to sparse batches .
train
false
22,933
def askyesnocancel(title=None, message=None, **options): s = _show(title, message, QUESTION, YESNOCANCEL, **options) s = str(s) if (s == CANCEL): return None return (s == YES)
[ "def", "askyesnocancel", "(", "title", "=", "None", ",", "message", "=", "None", ",", "**", "options", ")", ":", "s", "=", "_show", "(", "title", ",", "message", ",", "QUESTION", ",", "YESNOCANCEL", ",", "**", "options", ")", "s", "=", "str", "(", "s", ")", "if", "(", "s", "==", "CANCEL", ")", ":", "return", "None", "return", "(", "s", "==", "YES", ")" ]
ask a question; return true if the answer is yes .
train
false
22,934
def _parse_request(header_data, ignore_bad_cookies=False): cookies_dict = {} for line in Definitions.EOL.split(header_data.strip()): matches = Definitions.COOKIE_RE.finditer(line) matches = [item for item in matches] for match in matches: invalid = match.group('invalid') if invalid: if (not ignore_bad_cookies): raise InvalidCookieError(data=invalid) _report_invalid_cookie(invalid) continue name = match.group('name') values = cookies_dict.get(name) value = match.group('value').strip('"') if values: values.append(value) else: cookies_dict[name] = [value] if (not matches): if (not ignore_bad_cookies): raise InvalidCookieError(data=line) _report_invalid_cookie(line) return cookies_dict
[ "def", "_parse_request", "(", "header_data", ",", "ignore_bad_cookies", "=", "False", ")", ":", "cookies_dict", "=", "{", "}", "for", "line", "in", "Definitions", ".", "EOL", ".", "split", "(", "header_data", ".", "strip", "(", ")", ")", ":", "matches", "=", "Definitions", ".", "COOKIE_RE", ".", "finditer", "(", "line", ")", "matches", "=", "[", "item", "for", "item", "in", "matches", "]", "for", "match", "in", "matches", ":", "invalid", "=", "match", ".", "group", "(", "'invalid'", ")", "if", "invalid", ":", "if", "(", "not", "ignore_bad_cookies", ")", ":", "raise", "InvalidCookieError", "(", "data", "=", "invalid", ")", "_report_invalid_cookie", "(", "invalid", ")", "continue", "name", "=", "match", ".", "group", "(", "'name'", ")", "values", "=", "cookies_dict", ".", "get", "(", "name", ")", "value", "=", "match", ".", "group", "(", "'value'", ")", ".", "strip", "(", "'\"'", ")", "if", "values", ":", "values", ".", "append", "(", "value", ")", "else", ":", "cookies_dict", "[", "name", "]", "=", "[", "value", "]", "if", "(", "not", "matches", ")", ":", "if", "(", "not", "ignore_bad_cookies", ")", ":", "raise", "InvalidCookieError", "(", "data", "=", "line", ")", "_report_invalid_cookie", "(", "line", ")", "return", "cookies_dict" ]
turn one or more lines of cookie: header data into a dict mapping cookie names to cookie values .
train
true
22,935
@verbose def find_events(raw, stim_channel=None, output='onset', consecutive='increasing', min_duration=0, shortest_event=2, mask=None, uint_cast=False, mask_type='not_and', verbose=None): min_samples = (min_duration * raw.info['sfreq']) stim_channel = _get_stim_channel(stim_channel, raw.info) pick = pick_channels(raw.info['ch_names'], include=stim_channel) if (len(pick) == 0): raise ValueError('No stim channel found to extract event triggers.') (data, _) = raw[pick, :] events = _find_events(data, raw.first_samp, verbose=verbose, output=output, consecutive=consecutive, min_samples=min_samples, mask=mask, uint_cast=uint_cast, mask_type=mask_type) n_short_events = np.sum((np.diff(events[:, 0]) < shortest_event)) if (n_short_events > 0): raise ValueError(("You have %i events shorter than the shortest_event. These are very unusual and you may want to set min_duration to a larger value e.g. x / raw.info['sfreq']. Where x = 1 sample shorter than the shortest event length." % n_short_events)) return events
[ "@", "verbose", "def", "find_events", "(", "raw", ",", "stim_channel", "=", "None", ",", "output", "=", "'onset'", ",", "consecutive", "=", "'increasing'", ",", "min_duration", "=", "0", ",", "shortest_event", "=", "2", ",", "mask", "=", "None", ",", "uint_cast", "=", "False", ",", "mask_type", "=", "'not_and'", ",", "verbose", "=", "None", ")", ":", "min_samples", "=", "(", "min_duration", "*", "raw", ".", "info", "[", "'sfreq'", "]", ")", "stim_channel", "=", "_get_stim_channel", "(", "stim_channel", ",", "raw", ".", "info", ")", "pick", "=", "pick_channels", "(", "raw", ".", "info", "[", "'ch_names'", "]", ",", "include", "=", "stim_channel", ")", "if", "(", "len", "(", "pick", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'No stim channel found to extract event triggers.'", ")", "(", "data", ",", "_", ")", "=", "raw", "[", "pick", ",", ":", "]", "events", "=", "_find_events", "(", "data", ",", "raw", ".", "first_samp", ",", "verbose", "=", "verbose", ",", "output", "=", "output", ",", "consecutive", "=", "consecutive", ",", "min_samples", "=", "min_samples", ",", "mask", "=", "mask", ",", "uint_cast", "=", "uint_cast", ",", "mask_type", "=", "mask_type", ")", "n_short_events", "=", "np", ".", "sum", "(", "(", "np", ".", "diff", "(", "events", "[", ":", ",", "0", "]", ")", "<", "shortest_event", ")", ")", "if", "(", "n_short_events", ">", "0", ")", ":", "raise", "ValueError", "(", "(", "\"You have %i events shorter than the shortest_event. These are very unusual and you may want to set min_duration to a larger value e.g. x / raw.info['sfreq']. Where x = 1 sample shorter than the shortest event length.\"", "%", "n_short_events", ")", ")", "return", "events" ]
find events from raw file .
train
false
22,936
def _validate_sequenced_vhds(staging_path): seq_num = 0 filenames = os.listdir(staging_path) for filename in filenames: if (not filename.endswith('.vhd')): continue if (filename == 'swap.vhd'): continue vhd_path = os.path.join(staging_path, ('%d.vhd' % seq_num)) if (not os.path.exists(vhd_path)): raise Exception(('Corrupt image. Expected seq number: %d. Files: %s' % (seq_num, filenames))) seq_num += 1
[ "def", "_validate_sequenced_vhds", "(", "staging_path", ")", ":", "seq_num", "=", "0", "filenames", "=", "os", ".", "listdir", "(", "staging_path", ")", "for", "filename", "in", "filenames", ":", "if", "(", "not", "filename", ".", "endswith", "(", "'.vhd'", ")", ")", ":", "continue", "if", "(", "filename", "==", "'swap.vhd'", ")", ":", "continue", "vhd_path", "=", "os", ".", "path", ".", "join", "(", "staging_path", ",", "(", "'%d.vhd'", "%", "seq_num", ")", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "vhd_path", ")", ")", ":", "raise", "Exception", "(", "(", "'Corrupt image. Expected seq number: %d. Files: %s'", "%", "(", "seq_num", ",", "filenames", ")", ")", ")", "seq_num", "+=", "1" ]
this check ensures that the vhds in the staging area are sequenced properly from 0 to n-1 with no gaps .
train
false
22,937
def exp_about(extended=False): info = _('See http://openerp.com') if extended: return (info, odoo.release.version) return info
[ "def", "exp_about", "(", "extended", "=", "False", ")", ":", "info", "=", "_", "(", "'See http://openerp.com'", ")", "if", "extended", ":", "return", "(", "info", ",", "odoo", ".", "release", ".", "version", ")", "return", "info" ]
return information about the openerp server .
train
false
22,939
def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True, seed=None): random.seed(seed) if with_replacement: create_using = nx.MultiDiGraph() def sample(v, nodes): if (not self_loops): nodes = (nodes - {v}) return (random.choice(list(nodes)) for i in range(k)) else: create_using = nx.DiGraph() def sample(v, nodes): if (not self_loops): nodes = (nodes - {v}) return random.sample(nodes, k) G = nx.empty_graph(n, create_using=create_using) nodes = set(G) for u in G: G.add_edges_from(((u, v) for v in sample(u, nodes))) G.name = 'random_uniform_k_out_graph({}, {})'.format(n, k) return G
[ "def", "random_uniform_k_out_graph", "(", "n", ",", "k", ",", "self_loops", "=", "True", ",", "with_replacement", "=", "True", ",", "seed", "=", "None", ")", ":", "random", ".", "seed", "(", "seed", ")", "if", "with_replacement", ":", "create_using", "=", "nx", ".", "MultiDiGraph", "(", ")", "def", "sample", "(", "v", ",", "nodes", ")", ":", "if", "(", "not", "self_loops", ")", ":", "nodes", "=", "(", "nodes", "-", "{", "v", "}", ")", "return", "(", "random", ".", "choice", "(", "list", "(", "nodes", ")", ")", "for", "i", "in", "range", "(", "k", ")", ")", "else", ":", "create_using", "=", "nx", ".", "DiGraph", "(", ")", "def", "sample", "(", "v", ",", "nodes", ")", ":", "if", "(", "not", "self_loops", ")", ":", "nodes", "=", "(", "nodes", "-", "{", "v", "}", ")", "return", "random", ".", "sample", "(", "nodes", ",", "k", ")", "G", "=", "nx", ".", "empty_graph", "(", "n", ",", "create_using", "=", "create_using", ")", "nodes", "=", "set", "(", "G", ")", "for", "u", "in", "G", ":", "G", ".", "add_edges_from", "(", "(", "(", "u", ",", "v", ")", "for", "v", "in", "sample", "(", "u", ",", "nodes", ")", ")", ")", "G", ".", "name", "=", "'random_uniform_k_out_graph({}, {})'", ".", "format", "(", "n", ",", "k", ")", "return", "G" ]
returns a random k-out graph with uniform attachment .
train
false
22,940
def get_dict_attr(obj, attr, default=None): for obj in ([obj] + obj.__class__.mro()): if (attr in obj.__dict__): return obj.__dict__[attr] return default
[ "def", "get_dict_attr", "(", "obj", ",", "attr", ",", "default", "=", "None", ")", ":", "for", "obj", "in", "(", "[", "obj", "]", "+", "obj", ".", "__class__", ".", "mro", "(", ")", ")", ":", "if", "(", "attr", "in", "obj", ".", "__dict__", ")", ":", "return", "obj", ".", "__dict__", "[", "attr", "]", "return", "default" ]
get attribute of the object without triggering its __getattr__ .
train
false
22,941
def ip_interface(address): try: return IPv4Interface(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Interface(address) except (AddressValueError, NetmaskValueError): pass raise ValueError(('%r does not appear to be an IPv4 or IPv6 interface' % address))
[ "def", "ip_interface", "(", "address", ")", ":", "try", ":", "return", "IPv4Interface", "(", "address", ")", "except", "(", "AddressValueError", ",", "NetmaskValueError", ")", ":", "pass", "try", ":", "return", "IPv6Interface", "(", "address", ")", "except", "(", "AddressValueError", ",", "NetmaskValueError", ")", ":", "pass", "raise", "ValueError", "(", "(", "'%r does not appear to be an IPv4 or IPv6 interface'", "%", "address", ")", ")" ]
take an ip string/int and return an object of the correct type .
train
true
22,944
def etag_response_processor(page, request, response): etag = page.etag(request) if (etag is not None): response[u'ETag'] = ((u'"' + etag) + u'"')
[ "def", "etag_response_processor", "(", "page", ",", "request", ",", "response", ")", ":", "etag", "=", "page", ".", "etag", "(", "request", ")", "if", "(", "etag", "is", "not", "None", ")", ":", "response", "[", "u'ETag'", "]", "=", "(", "(", "u'\"'", "+", "etag", ")", "+", "u'\"'", ")" ]
response processor to set an etag header on outgoing responses .
train
false
22,945
def document(): return s3_rest_controller()
[ "def", "document", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
22,947
def parseproxyuri(proxyurl): groups = PROXY_REGEX.match(proxyurl).groups() return (groups[1], groups[3], groups[4])
[ "def", "parseproxyuri", "(", "proxyurl", ")", ":", "groups", "=", "PROXY_REGEX", ".", "match", "(", "proxyurl", ")", ".", "groups", "(", ")", "return", "(", "groups", "[", "1", "]", ",", "groups", "[", "3", "]", ",", "groups", "[", "4", "]", ")" ]
parses a http proxy uri in the format x://a .
train
false
22,949
def sum_smallest(x, k): x = Expression.cast_to_const(x) return (- sum_largest((- x), k))
[ "def", "sum_smallest", "(", "x", ",", "k", ")", ":", "x", "=", "Expression", ".", "cast_to_const", "(", "x", ")", "return", "(", "-", "sum_largest", "(", "(", "-", "x", ")", ",", "k", ")", ")" ]
sum of the smallest k values .
train
false
22,950
def test_install_from_future_wheel_version(script, data): from tests.lib import TestFailure package = data.packages.join('futurewheel-3.0-py2.py3-none-any.whl') result = script.pip('install', package, '--no-index', expect_error=True) with pytest.raises(TestFailure): result.assert_installed('futurewheel', without_egg_link=True, editable=False) package = data.packages.join('futurewheel-1.9-py2.py3-none-any.whl') result = script.pip('install', package, '--no-index', expect_error=False, expect_stderr=True) result.assert_installed('futurewheel', without_egg_link=True, editable=False)
[ "def", "test_install_from_future_wheel_version", "(", "script", ",", "data", ")", ":", "from", "tests", ".", "lib", "import", "TestFailure", "package", "=", "data", ".", "packages", ".", "join", "(", "'futurewheel-3.0-py2.py3-none-any.whl'", ")", "result", "=", "script", ".", "pip", "(", "'install'", ",", "package", ",", "'--no-index'", ",", "expect_error", "=", "True", ")", "with", "pytest", ".", "raises", "(", "TestFailure", ")", ":", "result", ".", "assert_installed", "(", "'futurewheel'", ",", "without_egg_link", "=", "True", ",", "editable", "=", "False", ")", "package", "=", "data", ".", "packages", ".", "join", "(", "'futurewheel-1.9-py2.py3-none-any.whl'", ")", "result", "=", "script", ".", "pip", "(", "'install'", ",", "package", ",", "'--no-index'", ",", "expect_error", "=", "False", ",", "expect_stderr", "=", "True", ")", "result", ".", "assert_installed", "(", "'futurewheel'", ",", "without_egg_link", "=", "True", ",", "editable", "=", "False", ")" ]
test installing a future wheel .
train
false
22,952
def getTransformedOutlineByPaths(paths, xmlElement, yAxisPointingUpward): aroundsFromPaths = intercircle.getAroundsFromPaths(paths, getStrokeRadius(xmlElement)) return getChainMatrixSVGIfNecessary(xmlElement, yAxisPointingUpward).getTransformedPaths(aroundsFromPaths)
[ "def", "getTransformedOutlineByPaths", "(", "paths", ",", "xmlElement", ",", "yAxisPointingUpward", ")", ":", "aroundsFromPaths", "=", "intercircle", ".", "getAroundsFromPaths", "(", "paths", ",", "getStrokeRadius", "(", "xmlElement", ")", ")", "return", "getChainMatrixSVGIfNecessary", "(", "xmlElement", ",", "yAxisPointingUpward", ")", ".", "getTransformedPaths", "(", "aroundsFromPaths", ")" ]
get the outline from the paths .
train
false
22,954
def _validate_image(values, mandatory_status=True): if mandatory_status: status = values.get('status') if (not status): msg = 'Image status is required.' raise exception.Invalid(msg) if (status not in STATUSES): msg = ("Invalid image status '%s' for image." % status) raise exception.Invalid(msg) _validate_db_int(min_disk=values.get('min_disk'), min_ram=values.get('min_ram')) return values
[ "def", "_validate_image", "(", "values", ",", "mandatory_status", "=", "True", ")", ":", "if", "mandatory_status", ":", "status", "=", "values", ".", "get", "(", "'status'", ")", "if", "(", "not", "status", ")", ":", "msg", "=", "'Image status is required.'", "raise", "exception", ".", "Invalid", "(", "msg", ")", "if", "(", "status", "not", "in", "STATUSES", ")", ":", "msg", "=", "(", "\"Invalid image status '%s' for image.\"", "%", "status", ")", "raise", "exception", ".", "Invalid", "(", "msg", ")", "_validate_db_int", "(", "min_disk", "=", "values", ".", "get", "(", "'min_disk'", ")", ",", "min_ram", "=", "values", ".", "get", "(", "'min_ram'", ")", ")", "return", "values" ]
validates the incoming data and raises a invalid exception if anything is out of order .
train
false
22,956
def collect_shared_vars(expressions): if isinstance(expressions, theano.Variable): expressions = [expressions] return [v for v in theano.gof.graph.inputs(reversed(expressions)) if isinstance(v, theano.compile.SharedVariable)]
[ "def", "collect_shared_vars", "(", "expressions", ")", ":", "if", "isinstance", "(", "expressions", ",", "theano", ".", "Variable", ")", ":", "expressions", "=", "[", "expressions", "]", "return", "[", "v", "for", "v", "in", "theano", ".", "gof", ".", "graph", ".", "inputs", "(", "reversed", "(", "expressions", ")", ")", "if", "isinstance", "(", "v", ",", "theano", ".", "compile", ".", "SharedVariable", ")", "]" ]
returns all shared variables the given expression(s) depend on .
train
false
22,957
def filepath_to_uri(path): if (path is None): return path return quote(force_bytes(path).replace('\\', '/'), safe="/~!*()'")
[ "def", "filepath_to_uri", "(", "path", ")", ":", "if", "(", "path", "is", "None", ")", ":", "return", "path", "return", "quote", "(", "force_bytes", "(", "path", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ",", "safe", "=", "\"/~!*()'\"", ")" ]
convert a file system path to a uri portion that is suitable for inclusion in a url .
train
false
22,958
def _extract_docstring(node): doc_node = None if (node is None): pass elif isinstance(node, Nodes.ExprStatNode): if node.expr.is_string_literal: doc_node = node.expr node = Nodes.StatListNode(node.pos, stats=[]) elif (isinstance(node, Nodes.StatListNode) and node.stats): stats = node.stats if isinstance(stats[0], Nodes.ExprStatNode): if stats[0].expr.is_string_literal: doc_node = stats[0].expr del stats[0] if (doc_node is None): doc = None elif isinstance(doc_node, ExprNodes.BytesNode): warning(node.pos, 'Python 3 requires docstrings to be unicode strings') doc = doc_node.value elif isinstance(doc_node, ExprNodes.StringNode): doc = doc_node.unicode_value if (doc is None): doc = doc_node.value else: doc = doc_node.value return (doc, node)
[ "def", "_extract_docstring", "(", "node", ")", ":", "doc_node", "=", "None", "if", "(", "node", "is", "None", ")", ":", "pass", "elif", "isinstance", "(", "node", ",", "Nodes", ".", "ExprStatNode", ")", ":", "if", "node", ".", "expr", ".", "is_string_literal", ":", "doc_node", "=", "node", ".", "expr", "node", "=", "Nodes", ".", "StatListNode", "(", "node", ".", "pos", ",", "stats", "=", "[", "]", ")", "elif", "(", "isinstance", "(", "node", ",", "Nodes", ".", "StatListNode", ")", "and", "node", ".", "stats", ")", ":", "stats", "=", "node", ".", "stats", "if", "isinstance", "(", "stats", "[", "0", "]", ",", "Nodes", ".", "ExprStatNode", ")", ":", "if", "stats", "[", "0", "]", ".", "expr", ".", "is_string_literal", ":", "doc_node", "=", "stats", "[", "0", "]", ".", "expr", "del", "stats", "[", "0", "]", "if", "(", "doc_node", "is", "None", ")", ":", "doc", "=", "None", "elif", "isinstance", "(", "doc_node", ",", "ExprNodes", ".", "BytesNode", ")", ":", "warning", "(", "node", ".", "pos", ",", "'Python 3 requires docstrings to be unicode strings'", ")", "doc", "=", "doc_node", ".", "value", "elif", "isinstance", "(", "doc_node", ",", "ExprNodes", ".", "StringNode", ")", ":", "doc", "=", "doc_node", ".", "unicode_value", "if", "(", "doc", "is", "None", ")", ":", "doc", "=", "doc_node", ".", "value", "else", ":", "doc", "=", "doc_node", ".", "value", "return", "(", "doc", ",", "node", ")" ]
extract a docstring from a statement or from the first statement in a list .
train
false
22,959
def match_pattern(nm, patterns): patterns = coerce_to_list(patterns) for pat in patterns: if fnmatch.fnmatch(nm, pat): return True return False
[ "def", "match_pattern", "(", "nm", ",", "patterns", ")", ":", "patterns", "=", "coerce_to_list", "(", "patterns", ")", "for", "pat", "in", "patterns", ":", "if", "fnmatch", ".", "fnmatch", "(", "nm", ",", "pat", ")", ":", "return", "True", "return", "False" ]
compares nm with the supplied patterns .
train
true
22,960
@contextlib.contextmanager def uncache(*names): for name in names: if (name in ('sys', 'marshal', 'imp')): raise ValueError('cannot uncache {0} as it will break _importlib'.format(name)) try: del sys.modules[name] except KeyError: pass try: (yield) finally: for name in names: try: del sys.modules[name] except KeyError: pass
[ "@", "contextlib", ".", "contextmanager", "def", "uncache", "(", "*", "names", ")", ":", "for", "name", "in", "names", ":", "if", "(", "name", "in", "(", "'sys'", ",", "'marshal'", ",", "'imp'", ")", ")", ":", "raise", "ValueError", "(", "'cannot uncache {0} as it will break _importlib'", ".", "format", "(", "name", ")", ")", "try", ":", "del", "sys", ".", "modules", "[", "name", "]", "except", "KeyError", ":", "pass", "try", ":", "(", "yield", ")", "finally", ":", "for", "name", "in", "names", ":", "try", ":", "del", "sys", ".", "modules", "[", "name", "]", "except", "KeyError", ":", "pass" ]
uncache a module from sys .
train
false
22,961
def _get_volume_id(path_or_id): if isinstance(path_or_id, int): return path_or_id volume_id = path_or_id[(path_or_id.find(':volume-') + 1):] if (volume_id == path_or_id): volume_id = path_or_id[(path_or_id.find('-volume--') + 1):] volume_id = volume_id.replace('volume--', '') else: volume_id = volume_id.replace('volume-', '') volume_id = volume_id[0:volume_id.find('-')] return int(volume_id)
[ "def", "_get_volume_id", "(", "path_or_id", ")", ":", "if", "isinstance", "(", "path_or_id", ",", "int", ")", ":", "return", "path_or_id", "volume_id", "=", "path_or_id", "[", "(", "path_or_id", ".", "find", "(", "':volume-'", ")", "+", "1", ")", ":", "]", "if", "(", "volume_id", "==", "path_or_id", ")", ":", "volume_id", "=", "path_or_id", "[", "(", "path_or_id", ".", "find", "(", "'-volume--'", ")", "+", "1", ")", ":", "]", "volume_id", "=", "volume_id", ".", "replace", "(", "'volume--'", ",", "''", ")", "else", ":", "volume_id", "=", "volume_id", ".", "replace", "(", "'volume-'", ",", "''", ")", "volume_id", "=", "volume_id", "[", "0", ":", "volume_id", ".", "find", "(", "'-'", ")", "]", "return", "int", "(", "volume_id", ")" ]
retrieve the volume id from device_path .
train
false