id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
32,963
def get_item_similarity(reference_exp_category, reference_exp_language_code, reference_exp_owner_ids, compared_exp_category, compared_exp_language_code, compared_exp_last_updated, compared_exp_owner_ids, compared_exp_status):
    """Rank compared_exp as a recommendation relative to reference_exp.

    Private explorations score 0.  Topic similarity dominates (weight 5);
    a publicized status, shared owners, a matching language code and a
    last-update within the past 7 days each add smaller bonuses.
    """
    if compared_exp_status == rights_manager.ACTIVITY_STATUS_PRIVATE:
        return 0

    score = 5 * get_topic_similarity(
        reference_exp_category, compared_exp_category)
    if compared_exp_status == rights_manager.ACTIVITY_STATUS_PUBLICIZED:
        score += 1
    if reference_exp_owner_ids == compared_exp_owner_ids:
        score += 1
    if reference_exp_language_code == compared_exp_language_code:
        score += 2
    # Recency bonus: updated within the last week.
    days_since_update = int(
        (datetime.datetime.utcnow() - compared_exp_last_updated).days)
    if days_since_update <= 7:
        score += 1
    return score
[ "def", "get_item_similarity", "(", "reference_exp_category", ",", "reference_exp_language_code", ",", "reference_exp_owner_ids", ",", "compared_exp_category", ",", "compared_exp_language_code", ",", "compared_exp_last_updated", ",", "compared_exp_owner_ids", ",", "compared_exp_stat...
returns the ranking of compared_exp to reference_exp as a recommendation .
train
false
32,965
def add_or_update_enrollment_attr(user_id, course_id, attributes): for attribute in attributes: _ENROLLMENT_ATTRIBUTES.append({'namespace': attribute['namespace'], 'name': attribute['name'], 'value': attribute['value']})
[ "def", "add_or_update_enrollment_attr", "(", "user_id", ",", "course_id", ",", "attributes", ")", ":", "for", "attribute", "in", "attributes", ":", "_ENROLLMENT_ATTRIBUTES", ".", "append", "(", "{", "'namespace'", ":", "attribute", "[", "'namespace'", "]", ",", ...
set enrollment attributes for the enrollment of given user in the course provided .
train
false
32,968
def grepValue(fn, variable):
    """Return the unquoted value of *variable* from file *fn*.

    Scans the file for the first line beginning with ``variable=`` and
    returns everything after the '=' with surrounding whitespace and
    single/double quotes stripped.

    Raises:
        RuntimeError: if no line defines the variable.
    """
    prefix = variable + '='
    # Fix: open inside a context manager so the handle is closed even on
    # the early return path (the original leaked the file object).
    with open(fn, 'r') as fp:
        for ln in fp:
            if ln.startswith(prefix):
                value = ln[len(prefix):].strip()
                return value.strip('"\'')
    raise RuntimeError('Cannot find variable %s' % variable)
[ "def", "grepValue", "(", "fn", ",", "variable", ")", ":", "variable", "=", "(", "variable", "+", "'='", ")", "for", "ln", "in", "open", "(", "fn", ",", "'r'", ")", ":", "if", "ln", ".", "startswith", "(", "variable", ")", ":", "value", "=", "ln",...
return the unquoted value of a variable from a file .
train
false
32,969
def generate_control_file(tests=(), kernel=None, label=None, profilers=(), client_control_file='', use_container=False, profile_only=None, upload_kernel_config=False):
    """Generate a client-side control file to load a kernel and run tests.

    With neither tests nor a client control file supplied, a dict for an
    empty client-side control file is returned immediately.  Otherwise the
    test/profiler/label names are resolved via rpc_utils and the control
    file text is generated and stored under cf_info['control_file'].
    """
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(
            tests, kernel, label, profilers))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects,
        kernels=kernel,
        platform=label,
        profilers=profiler_objects,
        is_server=cf_info['is_server'],
        client_control_file=client_control_file,
        profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info
[ "def", "generate_control_file", "(", "tests", "=", "(", ")", ",", "kernel", "=", "None", ",", "label", "=", "None", ",", "profilers", "=", "(", ")", ",", "client_control_file", "=", "''", ",", "use_container", "=", "False", ",", "profile_only", "=", "Non...
generates a client-side control file to load a kernel and run tests .
train
false
32,971
def vfs_normpath(path): (slash, dot) = ((u'/', u'.') if isinstance(path, unicode) else ('/', '.')) if (path == ''): return dot initial_slashes = path.startswith('/') if (initial_slashes and path.startswith('//') and (not path.startswith('///'))): initial_slashes = 2 comps = path.split('/') new_comps = [] for comp in comps: if (comp in ('', '.')): continue if ((comp != '..') or ((not initial_slashes) and (not new_comps)) or (new_comps and (new_comps[(-1)] == '..'))): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = slash.join(comps) if initial_slashes: path = ((slash * initial_slashes) + path) return (path or dot)
[ "def", "vfs_normpath", "(", "path", ")", ":", "(", "slash", ",", "dot", ")", "=", "(", "(", "u'/'", ",", "u'.'", ")", "if", "isinstance", "(", "path", ",", "unicode", ")", "else", "(", "'/'", ",", "'.'", ")", ")", "if", "(", "path", "==", "''",...
normalize path from posixpath .
train
false
32,972
@app.errorhandler(http_client.INTERNAL_SERVER_ERROR)
def unexpected_error(e):
    """Handle uncaught exceptions by returning Swagger-compliant JSON.

    Logs the full traceback and returns a 500 response whose JSON body
    carries the numeric code and the exception message.
    """
    # Fix: corrected 'occured' -> 'occurred' in the log message.
    logging.exception('An error occurred while processing the request.')
    response = jsonify({
        'code': http_client.INTERNAL_SERVER_ERROR,
        'message': 'Exception: {}'.format(e),
    })
    response.status_code = http_client.INTERNAL_SERVER_ERROR
    return response
[ "@", "app", ".", "errorhandler", "(", "http_client", ".", "INTERNAL_SERVER_ERROR", ")", "def", "unexpected_error", "(", "e", ")", ":", "logging", ".", "exception", "(", "'An error occured while processing the request.'", ")", "response", "=", "jsonify", "(", "{", ...
handle exceptions by returning swagger-compliant json .
train
false
32,973
def _normalize_params(params): if (type(params) == dict): params = list(params.items()) params = [(k, v) for (k, v) in params if (k not in ('oauth_signature', 'realm'))] params.sort() qs = parse.urlencode(params) qs = qs.replace('+', '%20') qs = qs.replace('%7E', '~') return qs
[ "def", "_normalize_params", "(", "params", ")", ":", "if", "(", "type", "(", "params", ")", "==", "dict", ")", ":", "params", "=", "list", "(", "params", ".", "items", "(", ")", ")", "params", "=", "[", "(", "k", ",", "v", ")", "for", "(", "k",...
returns a normalized query string sorted first by key .
train
false
32,974
def getreader(encoding):
    """Look up the codec for *encoding* and return its StreamReader
    class or factory function.

    Raises a LookupError when the encoding cannot be found.
    """
    codec_info = lookup(encoding)
    return codec_info.streamreader
[ "def", "getreader", "(", "encoding", ")", ":", "return", "lookup", "(", "encoding", ")", ".", "streamreader" ]
lookup up the codec for the given encoding and return its streamreader class or factory function .
train
false
32,975
def element_style(attrs, style_def, parent_style):
    """Return the final style dict for an element.

    Starts from a copy of *parent_style*, layers on each CSS class listed
    in attrs['class'] (looked up as '.<class>' in *style_def*), then
    applies inline attrs['style'] declarations, which take precedence.
    """
    style = parent_style.copy()
    if 'class' in attrs:
        for css_class in attrs['class'].split():
            # Fix: a class with no entry in style_def used to raise
            # KeyError; treat it as contributing no properties instead.
            style.update(style_def.get('.' + css_class, {}))
    if 'style' in attrs:
        immediate_style = dumb_property_dict(attrs['style'])
        style.update(immediate_style)
    return style
[ "def", "element_style", "(", "attrs", ",", "style_def", ",", "parent_style", ")", ":", "style", "=", "parent_style", ".", "copy", "(", ")", "if", "(", "'class'", "in", "attrs", ")", ":", "for", "css_class", "in", "attrs", "[", "'class'", "]", ".", "spl...
returns a hash of the final style attributes of the element .
train
true
32,977
@aborts def test_require_multiple_missing_keys(): require('foo', 'bar')
[ "@", "aborts", "def", "test_require_multiple_missing_keys", "(", ")", ":", "require", "(", "'foo'", ",", "'bar'", ")" ]
when given multiple non-existent keys .
train
false
32,979
def test_suggested_cased_column_names(cased_completer, complete_event): text = u'SELECT from users' position = len(u'SELECT ') result = set(cased_completer.get_completions(Document(text=text, cursor_position=position), complete_event)) assert (set(result) == set((((cased_funcs + cased_users_cols) + testdata.builtin_functions()) + testdata.keywords())))
[ "def", "test_suggested_cased_column_names", "(", "cased_completer", ",", "complete_event", ")", ":", "text", "=", "u'SELECT from users'", "position", "=", "len", "(", "u'SELECT '", ")", "result", "=", "set", "(", "cased_completer", ".", "get_completions", "(", "Doc...
suggest column and function names when selecting from table .
train
false
32,980
def format_date(utc, isoformat=False): u = datetime.strptime(utc.replace('+0000', 'UTC'), '%a %b %d %H:%M:%S %Z %Y') unew = datetime.combine(u.date(), time(u.time().hour, u.time().minute, u.time().second, tzinfo=UTC)) unew = unew.astimezone(Local) if isoformat: return unew.isoformat() else: return unew.strftime('%Y-%m-%d %H:%M:%S %Z')
[ "def", "format_date", "(", "utc", ",", "isoformat", "=", "False", ")", ":", "u", "=", "datetime", ".", "strptime", "(", "utc", ".", "replace", "(", "'+0000'", ",", "'UTC'", ")", ",", "'%a %b %d %H:%M:%S %Z %Y'", ")", "unew", "=", "datetime", ".", "combin...
return a date formatted as a string using a subset of qts formatting codes .
train
false
32,981
@contextmanager def preserve_stty_settings(): stty_settings = STTYSettings() stty_settings.save_stty_options() (yield) stty_settings.restore_ssty_options()
[ "@", "contextmanager", "def", "preserve_stty_settings", "(", ")", ":", "stty_settings", "=", "STTYSettings", "(", ")", "stty_settings", ".", "save_stty_options", "(", ")", "(", "yield", ")", "stty_settings", ".", "restore_ssty_options", "(", ")" ]
run potentially stty-modifying operations .
train
false
32,982
def getCharacterIntegerString(character, offset, splitLine, step):
    """Return *character* concatenated with its offset, quantized value.

    Extracts the float that follows *character* in *splitLine*, shifts it
    by *offset*, rounds to the nearest multiple of *step*, and returns
    e.g. 'X12'.  Returns None when the character has no float value in
    the line.
    """
    floatValue = getFloatFromCharacterSplitLine(character, splitLine)
    # Idiom fix: compare to None with 'is', not '=='.
    if floatValue is None:
        return None
    floatValue += offset
    integerValue = int(round(float(floatValue / step)))
    return character + str(integerValue)
[ "def", "getCharacterIntegerString", "(", "character", ",", "offset", ",", "splitLine", ",", "step", ")", ":", "floatValue", "=", "getFloatFromCharacterSplitLine", "(", "character", ",", "splitLine", ")", "if", "(", "floatValue", "==", "None", ")", ":", "return",...
get a character and integer string .
train
false
32,983
def load_settings(path):
    """Parse *path* as key=value lines and return them as a dict.

    Blank lines and comment lines (first non-blank character '#') are
    ignored; keys and values are whitespace-stripped.  Returns {} when
    the file does not exist.
    """
    if not os.path.exists(path):
        return {}
    # Fix: read inside a context manager (the original leaked the file
    # handle) and skip blank lines / indented comments, which previously
    # produced junk dict entries.
    with open(path, 'r') as fp:
        lines = [s for s in fp
                 if s.strip() and not s.strip().startswith('#')]
    return dict(
        (k.strip(), v.strip())
        for (k, _, v) in (s.partition('=') for s in lines))
[ "def", "load_settings", "(", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "comments", "=", "(", "lambda", "s", ":", "(", "s", "and", "(", "not", "s", ".", "startswith", "(", "'#'", ")", ")", ")", ")", "settin...
take given file path and return dictionary of any key=value pairs found .
train
false
32,985
def get_queue(name=DEFAULT_QUEUE_NAME): global _queues fullname = add_queue_name_prefix(name) try: return _queues[fullname] except KeyError: log.debug(u'Initializing background job queue "{}"'.format(name)) redis_conn = _connect() queue = _queues[fullname] = rq.Queue(fullname, connection=redis_conn) return queue
[ "def", "get_queue", "(", "name", "=", "DEFAULT_QUEUE_NAME", ")", ":", "global", "_queues", "fullname", "=", "add_queue_name_prefix", "(", "name", ")", "try", ":", "return", "_queues", "[", "fullname", "]", "except", "KeyError", ":", "log", ".", "debug", "(",...
returns a queue object tied to a redis connection .
train
false
32,986
def solarize(image, threshold=128):
    """Invert all pixel values at or above *threshold*.

    Builds a 256-entry lookup table mapping v -> v below the threshold
    and v -> 255 - v otherwise, then applies it to the image.
    """
    lut = [value if value < threshold else 255 - value
           for value in range(256)]
    return _lut(image, lut)
[ "def", "solarize", "(", "image", ",", "threshold", "=", "128", ")", ":", "lut", "=", "[", "]", "for", "i", "in", "range", "(", "256", ")", ":", "if", "(", "i", "<", "threshold", ")", ":", "lut", ".", "append", "(", "i", ")", "else", ":", "lut...
invert all pixel values above a threshold .
train
false
32,989
def msigma(i):
    """Return the Pauli matrix sigma_i for i in {1, 2, 3}.

    Raises:
        IndexError: for any other index.
    """
    pauli_entries = {
        1: ((0, 1), (1, 0)),
        2: ((0, (- I)), (I, 0)),
        3: ((1, 0), (0, (-1))),
    }
    if i not in pauli_entries:
        raise IndexError('Invalid Pauli index')
    return Matrix(pauli_entries[i])
[ "def", "msigma", "(", "i", ")", ":", "if", "(", "i", "==", "1", ")", ":", "mat", "=", "(", "(", "0", ",", "1", ")", ",", "(", "1", ",", "0", ")", ")", "elif", "(", "i", "==", "2", ")", ":", "mat", "=", "(", "(", "0", ",", "(", "-", ...
returns a pauli matrix sigma_i with i=1 .
train
false
32,992
def check_impl_detail(**guards): (guards, default) = _parse_guards(guards) return guards.get(platform.python_implementation().lower(), default)
[ "def", "check_impl_detail", "(", "**", "guards", ")", ":", "(", "guards", ",", "default", ")", "=", "_parse_guards", "(", "guards", ")", "return", "guards", ".", "get", "(", "platform", ".", "python_implementation", "(", ")", ".", "lower", "(", ")", ",",...
this function returns true or false depending on the host platform .
train
false
32,993
def yaml_to_workflow(has_yaml, galaxy_interface, workflow_directory): as_python = yaml.load(has_yaml) return python_to_workflow(as_python, galaxy_interface, workflow_directory)
[ "def", "yaml_to_workflow", "(", "has_yaml", ",", "galaxy_interface", ",", "workflow_directory", ")", ":", "as_python", "=", "yaml", ".", "load", "(", "has_yaml", ")", "return", "python_to_workflow", "(", "as_python", ",", "galaxy_interface", ",", "workflow_directory...
convert a format 2 workflow into standard galaxy format from supplied stream .
train
false
32,994
def tileset_exists(filename):
    """Return True if *filename* is a tileset with the expected tables.

    Probes the MBTiles 'metadata' and 'tiles' tables; any failure means
    the file is missing or has the wrong schema.
    """
    if not exists(filename):
        return False
    db = _connect(filename)
    db.text_factory = bytes
    try:
        db.execute('SELECT name, value FROM metadata LIMIT 1')
        db.execute('SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles LIMIT 1')
    # Fix: narrowed the bare 'except:', which also swallowed SystemExit
    # and KeyboardInterrupt.
    except Exception:
        return False
    return True
[ "def", "tileset_exists", "(", "filename", ")", ":", "if", "(", "not", "exists", "(", "filename", ")", ")", ":", "return", "False", "db", "=", "_connect", "(", "filename", ")", "db", ".", "text_factory", "=", "bytes", "try", ":", "db", ".", "execute", ...
return true if the tileset exists and appears to have the right tables .
train
false
32,996
def clean_session():
    """Delete the session file left over from a previous run, if any."""
    session_path = SESSION_FILE
    if os.path.exists(session_path):
        os.remove(session_path)
[ "def", "clean_session", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "SESSION_FILE", ")", ":", "os", ".", "remove", "(", "SESSION_FILE", ")" ]
removes previous session file .
train
false
32,997
def all_classes_in_module(module_name):
    """Return a list of all classes defined in the named module.

    Only attributes whose __module__ matches *module_name* are
    considered, so names re-exported from other modules are excluded.
    """
    assert sys.version_info >= (2, 7)
    import importlib
    module = importlib.import_module(module_name)
    objects = [
        getattr(module, obj_name) for obj_name in dir(module)
        if getattr(getattr(module, obj_name), '__module__', '') == module_name]
    # Fix: 'isinstance(obj, object)' was vacuously true, so every
    # non-function attribute (constants, instances) leaked through.
    # Test for classes explicitly, and materialize to a list so Python 3
    # callers get a sequence rather than a lazy filter object.
    return [obj for obj in objects if isinstance(obj, type)]
[ "def", "all_classes_in_module", "(", "module_name", ")", ":", "assert", "(", "sys", ".", "version_info", ">=", "(", "2", ",", "7", ")", ")", "import", "importlib", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "objects", "=", "...
returns all classes defined in the given module .
train
false
32,998
def new_public_project(email): from website.models import Node node = Node.load(email.data['nid']) if (not node): return False public = email.find_sent_of_same_type_and_user() return (node.is_public and (not len(public)))
[ "def", "new_public_project", "(", "email", ")", ":", "from", "website", ".", "models", "import", "Node", "node", "=", "Node", ".", "load", "(", "email", ".", "data", "[", "'nid'", "]", ")", "if", "(", "not", "node", ")", ":", "return", "False", "publ...
will check to make sure the project that triggered this presend is still public before sending the email .
train
false
32,999
def memory(since=0.0): ans = get_memory() ans /= float((1024 ** 2)) return (ans - since)
[ "def", "memory", "(", "since", "=", "0.0", ")", ":", "ans", "=", "get_memory", "(", ")", "ans", "/=", "float", "(", "(", "1024", "**", "2", ")", ")", "return", "(", "ans", "-", "since", ")" ]
return memory usage in bytes .
train
false
33,000
def read_float_matrix(fid, rows, cols): return _unpack_matrix(fid, rows, cols, dtype='>f4', out_dtype=np.float32)
[ "def", "read_float_matrix", "(", "fid", ",", "rows", ",", "cols", ")", ":", "return", "_unpack_matrix", "(", "fid", ",", "rows", ",", "cols", ",", "dtype", "=", "'>f4'", ",", "out_dtype", "=", "np", ".", "float32", ")" ]
read 32bit float matrix from bti file .
train
false
33,001
def convert_str(text): if six.PY2: return encodeutils.to_utf8(text) elif isinstance(text, bytes): return text.decode('utf-8') else: return text
[ "def", "convert_str", "(", "text", ")", ":", "if", "six", ".", "PY2", ":", "return", "encodeutils", ".", "to_utf8", "(", "text", ")", "elif", "isinstance", "(", "text", ",", "bytes", ")", ":", "return", "text", ".", "decode", "(", "'utf-8'", ")", "el...
convert to native string .
train
false
33,003
def unfollow_log(module, le_path, logs): removed_count = 0 for log in logs: if (not query_log_status(module, le_path, log)): continue if module.check_mode: module.exit_json(changed=True) (rc, out, err) = module.run_command([le_path, 'rm', log]) if query_log_status(module, le_path, log): module.fail_json(msg=("failed to remove '%s': %s" % (log, err.strip()))) removed_count += 1 if (removed_count > 0): module.exit_json(changed=True, msg=('removed %d package(s)' % removed_count)) module.exit_json(changed=False, msg='logs(s) already unfollowed')
[ "def", "unfollow_log", "(", "module", ",", "le_path", ",", "logs", ")", ":", "removed_count", "=", "0", "for", "log", "in", "logs", ":", "if", "(", "not", "query_log_status", "(", "module", ",", "le_path", ",", "log", ")", ")", ":", "continue", "if", ...
unfollows one or more logs if followed .
train
false
33,004
def median_high(name, num, minimum=0, maximum=0, ref=None): return calc(name, num, 'median_high', ref)
[ "def", "median_high", "(", "name", ",", "num", ",", "minimum", "=", "0", ",", "maximum", "=", "0", ",", "ref", "=", "None", ")", ":", "return", "calc", "(", "name", ",", "num", ",", "'median_high'", ",", "ref", ")" ]
return the high median of data .
train
false
33,005
def to_datetime(value): return (epoch + timedelta(seconds=value))
[ "def", "to_datetime", "(", "value", ")", ":", "return", "(", "epoch", "+", "timedelta", "(", "seconds", "=", "value", ")", ")" ]
convert a posix timestamp to a time zone aware datetime .
train
false
33,006
def charset_to_encoding(name):
    """Map a MySQL charset name to the matching Python codec name.

    Only 'utf8mb4' needs translation (Python's 'utf8' codec already
    covers the full range); every other name passes through unchanged.
    """
    return 'utf8' if name == 'utf8mb4' else name
[ "def", "charset_to_encoding", "(", "name", ")", ":", "if", "(", "name", "==", "'utf8mb4'", ")", ":", "return", "'utf8'", "return", "name" ]
convert mysqls charset name to pythons codec name .
train
false
33,007
def epsilonCheck(x, epsilon=1e-06):
    """Return True when *x* lies strictly within (-epsilon, epsilon).

    The tolerance is taken as an absolute value, so a negative epsilon
    behaves the same as its magnitude.
    """
    tolerance = abs(epsilon)
    return -tolerance < x < tolerance
[ "def", "epsilonCheck", "(", "x", ",", "epsilon", "=", "1e-06", ")", ":", "epsilon", "=", "abs", "(", "epsilon", ")", "return", "(", "(", "-", "epsilon", ")", "<", "x", "<", "epsilon", ")" ]
checks that x is in .
train
false
33,009
def argv_to_str(command_argv, quote=True):
    """Join an argv list into a single shell command string.

    None entries are skipped.  When *quote* is true each argument is
    shell-quoted; otherwise arguments are joined verbatim.
    """
    if quote:
        escape = shlex_quote
    else:
        escape = (lambda arg: arg)
    return ' '.join(escape(arg) for arg in command_argv if arg is not None)
[ "def", "argv_to_str", "(", "command_argv", ",", "quote", "=", "True", ")", ":", "map_func", "=", "(", "shlex_quote", "if", "quote", "else", "(", "lambda", "x", ":", "x", ")", ")", "return", "' '", ".", "join", "(", "[", "map_func", "(", "c", ")", "...
convert an argv command list to a string for shell subprocess .
train
false
33,010
def shelve_open(filename, flag='c', protocol=None, writeback=False): import dumbdbm return shelve.Shelf(dumbdbm.open(filename, flag), protocol, writeback)
[ "def", "shelve_open", "(", "filename", ",", "flag", "=", "'c'", ",", "protocol", "=", "None", ",", "writeback", "=", "False", ")", ":", "import", "dumbdbm", "return", "shelve", ".", "Shelf", "(", "dumbdbm", ".", "open", "(", "filename", ",", "flag", ")...
a more system-portable wrapper around shelve .
train
false
33,011
def get_test_descriptor_system(): field_data = DictFieldData({}) descriptor_system = MakoDescriptorSystem(load_item=Mock(name='get_test_descriptor_system.load_item'), resources_fs=Mock(name='get_test_descriptor_system.resources_fs'), error_tracker=Mock(name='get_test_descriptor_system.error_tracker'), render_template=mock_render_template, mixins=(InheritanceMixin, XModuleMixin), field_data=field_data, services={'field-data': field_data}) descriptor_system.get_asides = (lambda block: []) return descriptor_system
[ "def", "get_test_descriptor_system", "(", ")", ":", "field_data", "=", "DictFieldData", "(", "{", "}", ")", "descriptor_system", "=", "MakoDescriptorSystem", "(", "load_item", "=", "Mock", "(", "name", "=", "'get_test_descriptor_system.load_item'", ")", ",", "resour...
construct a test descriptorsystem instance .
train
false
33,012
# NOTE(review): Python 2 'print >> sys.stderr' syntax -- this function predates
# Python 3 and cannot be restyled without a compat rewrite.  Prints *msg* (when
# given) followed by the module usage text built from __doc__ (formatted with
# PROG_NAME), then exits with status 1.  The 'global __doc__' declaration is a
# no-op since __doc__ is only read here -- presumably leftover; confirm.
def usage(msg=None): global __doc__ if (msg is not None): print >>sys.stderr, msg print >>sys.stderr, (__doc__ % dict(PROG_NAME=PROG_NAME)) sys.exit(1)
[ "def", "usage", "(", "msg", "=", "None", ")", ":", "global", "__doc__", "if", "(", "msg", "is", "not", "None", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "msg", "print", ">>", "sys", ".", "stderr", ",", "(", "__doc__", "%", "dict", "(", ...
prints the usage .
train
false
33,013
def check_newest_version(hass, huuid): result = get_newest_version(huuid) if (result is None): return (newest, releasenotes) = result if ((newest is None) or ('dev' in CURRENT_VERSION)): return if (StrictVersion(newest) > StrictVersion(CURRENT_VERSION)): _LOGGER.info('The latest available version is %s', newest) hass.states.set(ENTITY_ID, newest, {ATTR_FRIENDLY_NAME: 'Update Available', ATTR_RELEASE_NOTES: releasenotes}) elif (StrictVersion(newest) == StrictVersion(CURRENT_VERSION)): _LOGGER.info('You are on the latest version (%s) of Home Assistant', newest)
[ "def", "check_newest_version", "(", "hass", ",", "huuid", ")", ":", "result", "=", "get_newest_version", "(", "huuid", ")", "if", "(", "result", "is", "None", ")", ":", "return", "(", "newest", ",", "releasenotes", ")", "=", "result", "if", "(", "(", "...
check if a new version is available and report if one is .
train
false
33,014
def calc_exact_neighbors(X, queries, n_queries, n_neighbors): print ('Building NearestNeighbors for %d samples in %d dimensions' % (X.shape[0], X.shape[1])) nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) average_time = 0 t0 = time() neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors, return_distance=False) average_time = ((time() - t0) / n_queries) return (neighbors, average_time)
[ "def", "calc_exact_neighbors", "(", "X", ",", "queries", ",", "n_queries", ",", "n_neighbors", ")", ":", "print", "(", "'Building NearestNeighbors for %d samples in %d dimensions'", "%", "(", "X", ".", "shape", "[", "0", "]", ",", "X", ".", "shape", "[", "1", ...
measures average times for exact neighbor queries .
train
false
33,015
def parse_attribute_map(filenames):
    """Build forward/backward attribute maps from map files.

    Each line of each file holds exactly three whitespace-separated
    fields: the attribute OID/name, its friendly name, and its name
    format.

    Returns:
        (forward, backward): forward maps (name, name_format) ->
        friendly_name; backward maps friendly_name -> (name, name_format).
    """
    forward = {}
    backward = {}
    for filename in filenames:
        # Fix: open via a context manager -- the original left every
        # file handle open.
        with open(filename) as fp:
            for line in fp:
                (name, friendly_name, name_format) = line.strip().split()
                forward[(name, name_format)] = friendly_name
                backward[friendly_name] = (name, name_format)
    return (forward, backward)
[ "def", "parse_attribute_map", "(", "filenames", ")", ":", "forward", "=", "{", "}", "backward", "=", "{", "}", "for", "filename", "in", "filenames", ":", "for", "line", "in", "open", "(", "filename", ")", ".", "readlines", "(", ")", ":", "(", "name", ...
expects a file with each line being composed of the oid for the attribute exactly one space .
train
true
33,016
def run_epylint(): from pylint.epylint import Run Run()
[ "def", "run_epylint", "(", ")", ":", "from", "pylint", ".", "epylint", "import", "Run", "Run", "(", ")" ]
run pylint .
train
false
33,017
def fixed_point(func, x0, args=(), xtol=1e-08, maxiter=500, method='del2'): use_accel = {'del2': True, 'iteration': False}[method] x0 = _asarray_validated(x0, as_inexact=True) return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
[ "def", "fixed_point", "(", "func", ",", "x0", ",", "args", "=", "(", ")", ",", "xtol", "=", "1e-08", ",", "maxiter", "=", "500", ",", "method", "=", "'del2'", ")", ":", "use_accel", "=", "{", "'del2'", ":", "True", ",", "'iteration'", ":", "False",...
find a fixed point of the function .
train
false
33,018
def dummyRequest(method, path, headers, body=''): parsed = urlparse(path) if parsed.query: new_path = ((parsed.path + '?') + parsed.query) else: new_path = parsed.path return _DummyRequest(next(_dummyRequestCounter), method, new_path, headers, body)
[ "def", "dummyRequest", "(", "method", ",", "path", ",", "headers", ",", "body", "=", "''", ")", ":", "parsed", "=", "urlparse", "(", "path", ")", "if", "parsed", ".", "query", ":", "new_path", "=", "(", "(", "parsed", ".", "path", "+", "'?'", ")", ...
construct a new dummy l{irequest} provider .
train
false
33,019
def gradle(registry, xml_parent, data): gradle = XML.SubElement(xml_parent, 'hudson.plugins.gradle.Gradle') XML.SubElement(gradle, 'description').text = '' XML.SubElement(gradle, 'tasks').text = data['tasks'] XML.SubElement(gradle, 'buildFile').text = '' XML.SubElement(gradle, 'rootBuildScriptDir').text = data.get('root-build-script-dir', '') XML.SubElement(gradle, 'gradleName').text = data.get('gradle-name', '') XML.SubElement(gradle, 'useWrapper').text = str(data.get('wrapper', False)).lower() XML.SubElement(gradle, 'makeExecutable').text = str(data.get('executable', False)).lower() switch_string = '\n'.join(data.get('switches', [])) XML.SubElement(gradle, 'switches').text = switch_string XML.SubElement(gradle, 'fromRootBuildScriptDir').text = str(data.get('use-root-dir', False)).lower()
[ "def", "gradle", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "gradle", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.gradle.Gradle'", ")", "XML", ".", "SubElement", "(", "gradle", ",", "'description'", ")", ".", "text...
yaml: gradle execute gradle tasks .
train
false
33,021
@mock_ec2 def test_eip_boto3_vpc_association(): session = boto3.session.Session(region_name=u'us-west-1') service = session.resource(u'ec2') client = session.client(u'ec2') vpc_res = client.create_vpc(CidrBlock=u'10.0.0.0/24') subnet_res = client.create_subnet(VpcId=vpc_res[u'Vpc'][u'VpcId'], CidrBlock=u'10.0.0.0/24') instance = service.create_instances(**{u'InstanceType': u't2.micro', u'ImageId': u'ami-test', u'MinCount': 1, u'MaxCount': 1, u'SubnetId': subnet_res[u'Subnet'][u'SubnetId']})[0] allocation_id = client.allocate_address(Domain=u'vpc')[u'AllocationId'] association_id = client.associate_address(InstanceId=instance.id, AllocationId=allocation_id, AllowReassociation=False) instance.load() instance.public_ip_address.should_not.be.none instance.public_dns_name.should_not.be.none
[ "@", "mock_ec2", "def", "test_eip_boto3_vpc_association", "(", ")", ":", "session", "=", "boto3", ".", "session", ".", "Session", "(", "region_name", "=", "u'us-west-1'", ")", "service", "=", "session", ".", "resource", "(", "u'ec2'", ")", "client", "=", "se...
associate eip to vpc instance in a new subnet with boto3 .
train
false
33,022
def reorder_tabs_handler(course_item, request): requested_tab_id_locators = request.json['tabs'] old_tab_list = course_item.tabs new_tab_list = [] for tab_id_locator in requested_tab_id_locators: tab = get_tab_by_tab_id_locator(old_tab_list, tab_id_locator) if (tab is None): return JsonResponse({'error': "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400) new_tab_list.append(tab) non_displayed_tabs = (set(old_tab_list) - set(new_tab_list)) new_tab_list.extend(non_displayed_tabs) try: CourseTabList.validate_tabs(new_tab_list) except InvalidTabsException as exception: return JsonResponse({'error': 'New list of tabs is not valid: {0}.'.format(str(exception))}, status=400) course_item.tabs = new_tab_list modulestore().update_item(course_item, request.user.id) return JsonResponse()
[ "def", "reorder_tabs_handler", "(", "course_item", ",", "request", ")", ":", "requested_tab_id_locators", "=", "request", ".", "json", "[", "'tabs'", "]", "old_tab_list", "=", "course_item", ".", "tabs", "new_tab_list", "=", "[", "]", "for", "tab_id_locator", "i...
helper function for handling reorder of tabs request .
train
false
33,024
def _has_method(arg, method): return (hasattr(arg, method) and callable(getattr(arg, method)))
[ "def", "_has_method", "(", "arg", ",", "method", ")", ":", "return", "(", "hasattr", "(", "arg", ",", "method", ")", "and", "callable", "(", "getattr", "(", "arg", ",", "method", ")", ")", ")" ]
returns true if the given object has a method with the given name .
train
true
33,025
def del_by_key(data, delete):
    """Return a new list with tuples removed by their first item.

    *delete* is either a single key string (matched with ==) or a
    collection of keys used for membership tests when a tuple's first
    item is itself a list or tuple.  The input sequence is not modified.

    Fixes: the original deleted from the list while enumerating it,
    which skipped the element immediately after each deletion, and it
    relied on the Python-2-only 'basestring' name.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3
    return [
        item for item in data
        if not ((isinstance(item[0], string_types) and item[0] == delete) or
                (isinstance(item[0], (list, tuple)) and item[0] in delete))]
[ "def", "del_by_key", "(", "data", ",", "delete", ")", ":", "data", "=", "list", "(", "data", ")", "for", "(", "idx", ",", "item", ")", "in", "enumerate", "(", "data", ")", ":", "if", "(", "(", "isinstance", "(", "item", "[", "0", "]", ",", "bas...
delete a tuple from a list of tuples based on its first item .
train
false
33,026
def capture_upstart(reactor, host, output_file): results = [] for (directory, service) in [('flocker', 'flocker-control'), ('flocker', 'flocker-dataset-agent'), ('flocker', 'flocker-container-agent'), ('flocker', 'flocker-docker-plugin'), ('upstart', 'docker')]: def pull_logs_for_process(directory=directory, service=service): path = FilePath('/var/log/').child(directory).child((service + '.log')) formatter = TailFormatter(output_file, host, service) ran = run_ssh(reactor=reactor, host=host, username='root', command=['tail', '-F', path.path], handle_stdout=formatter.handle_output_line) ran.addErrback(write_failure, logger=None) ran.addCallback((lambda ignored, formatter=formatter: formatter.handle_output_line(''))) return ran results.append(loop_until(reactor, pull_logs_for_process, repeat(2.0))) return gather_deferreds(results)
[ "def", "capture_upstart", "(", "reactor", ",", "host", ",", "output_file", ")", ":", "results", "=", "[", "]", "for", "(", "directory", ",", "service", ")", "in", "[", "(", "'flocker'", ",", "'flocker-control'", ")", ",", "(", "'flocker'", ",", "'flocker...
ssh into given machine and capture relevant logs .
train
false
33,027
@typecheck(str) def checker(aString): if (aString == ''): return 1 else: return 2
[ "@", "typecheck", "(", "str", ")", "def", "checker", "(", "aString", ")", ":", "if", "(", "aString", "==", "''", ")", ":", "return", "1", "else", ":", "return", "2" ]
decorator to register a function as a checker .
train
false
33,029
def test_sparse_activation(): trainer = yaml_parse.load(test_yaml) trainer.main_loop()
[ "def", "test_sparse_activation", "(", ")", ":", "trainer", "=", "yaml_parse", ".", "load", "(", "test_yaml", ")", "trainer", ".", "main_loop", "(", ")" ]
test autoencoder sparse activation cost .
train
false
33,030
def retrieve_seq_length_op(data): with tf.name_scope('GetLength'): used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2)) length = tf.reduce_sum(used, reduction_indices=1) length = tf.cast(length, tf.int32) return length
[ "def", "retrieve_seq_length_op", "(", "data", ")", ":", "with", "tf", ".", "name_scope", "(", "'GetLength'", ")", ":", "used", "=", "tf", ".", "sign", "(", "tf", ".", "reduce_max", "(", "tf", ".", "abs", "(", "data", ")", ",", "reduction_indices", "=",...
an op to compute the length of a sequence from input shape of [batch_size .
train
true
33,031
def generate_scala_test(target, source, env): target = str(target[0]) test_jar = str(source[0]) jars = [os.path.abspath(str(jar)) for jar in source] test_class_names = _get_all_test_class_names_in_jar(test_jar) return _generate_scala_test(target, jars, test_class_names, env)
[ "def", "generate_scala_test", "(", "target", ",", "source", ",", "env", ")", ":", "target", "=", "str", "(", "target", "[", "0", "]", ")", "test_jar", "=", "str", "(", "source", "[", "0", "]", ")", "jars", "=", "[", "os", ".", "path", ".", "abspa...
generate wrapper shell script for scala test .
train
false
33,032
def set_query_parameter(url, param_name, param_value): (scheme, netloc, path, query_string, fragment) = urlsplit(url) query_params = parse_qs(query_string) query_params[param_name] = [param_value] new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment))
[ "def", "set_query_parameter", "(", "url", ",", "param_name", ",", "param_value", ")", ":", "(", "scheme", ",", "netloc", ",", "path", ",", "query_string", ",", "fragment", ")", "=", "urlsplit", "(", "url", ")", "query_params", "=", "parse_qs", "(", "query_...
given a url .
train
true
33,035
def _collection_rights_to_search_dict(rights): doc = {} if (rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED): doc['is'] = 'featured' return doc
[ "def", "_collection_rights_to_search_dict", "(", "rights", ")", ":", "doc", "=", "{", "}", "if", "(", "rights", ".", "status", "==", "rights_manager", ".", "ACTIVITY_STATUS_PUBLICIZED", ")", ":", "doc", "[", "'is'", "]", "=", "'featured'", "return", "doc" ]
returns a search dict with information about the collection rights .
train
false
33,036
def get_sandbox_virtualenv_path(pack): if (pack in SYSTEM_PACK_NAMES): virtualenv_path = None else: system_base_path = cfg.CONF.system.base_path virtualenv_path = os.path.join(system_base_path, 'virtualenvs', pack) return virtualenv_path
[ "def", "get_sandbox_virtualenv_path", "(", "pack", ")", ":", "if", "(", "pack", "in", "SYSTEM_PACK_NAMES", ")", ":", "virtualenv_path", "=", "None", "else", ":", "system_base_path", "=", "cfg", ".", "CONF", ".", "system", ".", "base_path", "virtualenv_path", "...
return a path to the virtual environment for the provided pack .
train
false
33,037
def permissions_required(permissions, login_url=None): if (login_url is None): login_url = reverse_lazy('customer:login') def _check_permissions(user): outcome = check_permissions(user, permissions) if ((not outcome) and user.is_authenticated()): raise PermissionDenied else: return outcome return user_passes_test(_check_permissions, login_url=login_url)
[ "def", "permissions_required", "(", "permissions", ",", "login_url", "=", "None", ")", ":", "if", "(", "login_url", "is", "None", ")", ":", "login_url", "=", "reverse_lazy", "(", "'customer:login'", ")", "def", "_check_permissions", "(", "user", ")", ":", "o...
decorator that checks if a user has the given permissions .
train
false
33,038
def collect_storage_host(hosts): message = '\nSetting up high-availability masters requires a storage host. Please provide a\nhost that will be configured as a Registry Storage.\n\nNote: Containerized storage hosts are not currently supported.\n' click.echo(message) host_props = {} first_master = next((host for host in hosts if host.is_master())) hostname_or_ip = click.prompt('Enter hostname or IP address', value_proc=validate_prompt_hostname, default=first_master.connect_to) (existing, existing_host) = is_host_already_node_or_master(hostname_or_ip, hosts) if (existing and existing_host.is_node()): existing_host.roles.append('storage') else: host_props['connect_to'] = hostname_or_ip host_props['preconfigured'] = False host_props['roles'] = ['storage'] storage = Host(**host_props) hosts.append(storage)
[ "def", "collect_storage_host", "(", "hosts", ")", ":", "message", "=", "'\\nSetting up high-availability masters requires a storage host. Please provide a\\nhost that will be configured as a Registry Storage.\\n\\nNote: Containerized storage hosts are not currently supported.\\n'", "click", ".",...
get a valid host for storage from the user and append it to the list of hosts .
train
false
33,039
def ResolveAppId(app): if (app is None): app = os.environ.get('APPLICATION_ID', '') ValidateString(app, 'app', datastore_errors.BadArgumentError) return app
[ "def", "ResolveAppId", "(", "app", ")", ":", "if", "(", "app", "is", "None", ")", ":", "app", "=", "os", ".", "environ", ".", "get", "(", "'APPLICATION_ID'", ",", "''", ")", "ValidateString", "(", "app", ",", "'app'", ",", "datastore_errors", ".", "B...
validate app id .
train
false
33,040
def _validate_trigger_type(trigger_type): required_fields = ['name'] for field in required_fields: if (field not in trigger_type): raise TriggerTypeRegistrationException(('Invalid trigger type. Missing field "%s"' % field))
[ "def", "_validate_trigger_type", "(", "trigger_type", ")", ":", "required_fields", "=", "[", "'name'", "]", "for", "field", "in", "required_fields", ":", "if", "(", "field", "not", "in", "trigger_type", ")", ":", "raise", "TriggerTypeRegistrationException", "(", ...
xxx: we need validator objects that define the required and optional fields .
train
false
33,041
def _oembed_for(thing, **embed_options): if isinstance(thing, Comment): return _oembed_comment(thing, **embed_options) elif isinstance(thing, Link): return _oembed_post(thing, **embed_options) raise NotImplementedError("Unable to render oembed for thing '%r'", thing)
[ "def", "_oembed_for", "(", "thing", ",", "**", "embed_options", ")", ":", "if", "isinstance", "(", "thing", ",", "Comment", ")", ":", "return", "_oembed_comment", "(", "thing", ",", "**", "embed_options", ")", "elif", "isinstance", "(", "thing", ",", "Link...
given a thing .
train
false
33,042
def biweight_location(a, c=6.0, M=None, axis=None): a = np.asanyarray(a) if (M is None): M = np.median(a, axis=axis) if (axis is not None): M = np.expand_dims(M, axis=axis) d = (a - M) mad = median_absolute_deviation(a, axis=axis) if (axis is not None): mad = np.expand_dims(mad, axis=axis) u = (d / (c * mad)) mask = (np.abs(u) >= 1) u = ((1 - (u ** 2)) ** 2) u[mask] = 0 return (M.squeeze() + ((d * u).sum(axis=axis) / u.sum(axis=axis)))
[ "def", "biweight_location", "(", "a", ",", "c", "=", "6.0", ",", "M", "=", "None", ",", "axis", "=", "None", ")", ":", "a", "=", "np", ".", "asanyarray", "(", "a", ")", "if", "(", "M", "is", "None", ")", ":", "M", "=", "np", ".", "median", ...
compute the biweight location .
train
false
33,043
def p_seen_struct(p): val = _make_empty_struct(p[2]) setattr(thrift_stack[(-1)], p[2], val) p[0] = val
[ "def", "p_seen_struct", "(", "p", ")", ":", "val", "=", "_make_empty_struct", "(", "p", "[", "2", "]", ")", "setattr", "(", "thrift_stack", "[", "(", "-", "1", ")", "]", ",", "p", "[", "2", "]", ",", "val", ")", "p", "[", "0", "]", "=", "val"...
seen_struct : struct identifier .
train
false
33,044
def net_io_counters(): with open_text(('%s/net/dev' % get_procfs_path())) as f: lines = f.readlines() retdict = {} for line in lines[2:]: colon = line.rfind(':') assert (colon > 0), repr(line) name = line[:colon].strip() fields = line[(colon + 1):].strip().split() (bytes_recv, packets_recv, errin, dropin, fifoin, framein, compressedin, multicastin, bytes_sent, packets_sent, errout, dropout, fifoout, collisionsout, carrierout, compressedout) = map(int, fields) retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv, errin, errout, dropin, dropout) return retdict
[ "def", "net_io_counters", "(", ")", ":", "with", "open_text", "(", "(", "'%s/net/dev'", "%", "get_procfs_path", "(", ")", ")", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "retdict", "=", "{", "}", "for", "line", "in", "lines",...
return network i/o statistics for every network interface installed on the system as a dict of raw tuples .
train
false
33,045
def get_queue_by_name(name): for queue in settings.CELERY_QUEUES: if (queue.name == name): return queue
[ "def", "get_queue_by_name", "(", "name", ")", ":", "for", "queue", "in", "settings", ".", "CELERY_QUEUES", ":", "if", "(", "queue", ".", "name", "==", "name", ")", ":", "return", "queue" ]
lookup a celery queue object by its name .
train
false
33,046
def collapse_samples(table, mapping_f, collapse_fields, collapse_mode): collapsed_metadata = _collapse_metadata(mapping_f, collapse_fields) (new_index_to_group, old_index_to_new_index) = _group_by_sample_metadata(collapsed_metadata) partition_f = partial(_sample_id_from_group_id, sid_to_group_id=old_index_to_new_index) collapse_fns = get_collapse_fns() try: collapse_f = collapse_fns[collapse_mode] except KeyError: raise KeyError(('Unknown collapse function %s. Valid choices are: %s.' % (collapse_mode, ', '.join(collapse_fns.keys())))) output_table = table.collapse(partition_f, collapse_f=collapse_f, norm=False, axis='sample') return (collapsed_metadata, output_table)
[ "def", "collapse_samples", "(", "table", ",", "mapping_f", ",", "collapse_fields", ",", "collapse_mode", ")", ":", "collapsed_metadata", "=", "_collapse_metadata", "(", "mapping_f", ",", "collapse_fields", ")", "(", "new_index_to_group", ",", "old_index_to_new_index", ...
collapse samples in a biom table and sample metadata parameters table : biom .
train
false
33,047
def feature_edit_check(request, layername): layer = _resolve_layer(request, layername) datastore = ogc_server_settings.DATASTORE feature_edit = (getattr(settings, 'GEOGIG_DATASTORE', None) or datastore) if (request.user.has_perm('change_layer_data', obj=layer) and (layer.storeType == 'dataStore') and feature_edit): return HttpResponse(json.dumps({'authorized': True}), content_type='application/json') else: return HttpResponse(json.dumps({'authorized': False}), content_type='application/json')
[ "def", "feature_edit_check", "(", "request", ",", "layername", ")", ":", "layer", "=", "_resolve_layer", "(", "request", ",", "layername", ")", "datastore", "=", "ogc_server_settings", ".", "DATASTORE", "feature_edit", "=", "(", "getattr", "(", "settings", ",", ...
if the layer is not a raster and the user has edit permission .
train
false
33,048
def test_conv_tanh_basic(): yaml_file = os.path.join(pylearn2.__path__[0], 'models/tests/conv_elemwise_tanh.yaml') with open(yaml_file) as yamlh: yaml_lines = yamlh.readlines() yaml_str = ''.join(yaml_lines) train = yaml_parse.load(yaml_str) train.main_loop()
[ "def", "test_conv_tanh_basic", "(", ")", ":", "yaml_file", "=", "os", ".", "path", ".", "join", "(", "pylearn2", ".", "__path__", "[", "0", "]", ",", "'models/tests/conv_elemwise_tanh.yaml'", ")", "with", "open", "(", "yaml_file", ")", "as", "yamlh", ":", ...
tests that we can load a convolutional tanh model and train it for a few epochs on a dummy dataset-- tiny model and dataset .
train
false
33,050
def assert_is(expr1, expr2, msg=None): assert_true((expr2 is expr2), msg)
[ "def", "assert_is", "(", "expr1", ",", "expr2", ",", "msg", "=", "None", ")", ":", "assert_true", "(", "(", "expr2", "is", "expr2", ")", ",", "msg", ")" ]
fake assert_is without message .
train
false
33,052
def segment_to_fraction(distance): if np.isnan(distance): return fr.Fraction(0, 1) else: fract = fr.Fraction(distance).limit_denominator(10) return fr.Fraction(fract.denominator, fract.numerator)
[ "def", "segment_to_fraction", "(", "distance", ")", ":", "if", "np", ".", "isnan", "(", "distance", ")", ":", "return", "fr", ".", "Fraction", "(", "0", ",", "1", ")", "else", ":", "fract", "=", "fr", ".", "Fraction", "(", "distance", ")", ".", "li...
converts lengths of which the plane cuts the axes to fraction .
train
false
33,054
def test_iso_init(): t1 = Time('2000:001:00:00:00.00000001', scale='tai') t2 = Time('3000:001:13:00:00.00000002', scale='tai') dt = (t2 - t1) assert allclose_jd2(dt.jd2, (((13.0 / 24.0) + (1e-08 / 86400.0)) - 1.0))
[ "def", "test_iso_init", "(", ")", ":", "t1", "=", "Time", "(", "'2000:001:00:00:00.00000001'", ",", "scale", "=", "'tai'", ")", "t2", "=", "Time", "(", "'3000:001:13:00:00.00000002'", ",", "scale", "=", "'tai'", ")", "dt", "=", "(", "t2", "-", "t1", ")",...
check when initializing from iso date .
train
false
33,056
def surrogateescape_handler(exc): mystring = exc.object[exc.start:exc.end] try: if isinstance(exc, UnicodeDecodeError): decoded = replace_surrogate_decode(mystring) elif isinstance(exc, UnicodeEncodeError): decoded = replace_surrogate_encode(mystring) else: raise exc except NotASurrogateError: raise exc return (decoded, exc.end)
[ "def", "surrogateescape_handler", "(", "exc", ")", ":", "mystring", "=", "exc", ".", "object", "[", "exc", ".", "start", ":", "exc", ".", "end", "]", "try", ":", "if", "isinstance", "(", "exc", ",", "UnicodeDecodeError", ")", ":", "decoded", "=", "repl...
pure python implementation of the pep 383: the "surrogateescape" error handler of python 3 .
train
true
33,057
def parseargs(parser): (options, args) = parser.parse_args() filenames = [arg for arg in args if arg.endswith('.py')] if filenames: if (len(filenames) > 1): parser.error('only one filename is acceptable') explicitfile = filenames[0] args.remove(explicitfile) else: explicitfile = None testlib.ENABLE_DBC = options.dbc newargs = parser.newargs if options.skipped: newargs.extend(['--skip', options.skipped]) if options.restart: options.exitfirst = True newargs += args return (options, explicitfile)
[ "def", "parseargs", "(", "parser", ")", ":", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "filenames", "=", "[", "arg", "for", "arg", "in", "args", "if", "arg", ".", "endswith", "(", "'.py'", ")", "]", "if", "filena...
parse the command line and return .
train
false
33,058
def mldata_filename(dataname): dataname = dataname.lower().replace(' ', '-') return re.sub('[().]', '', dataname)
[ "def", "mldata_filename", "(", "dataname", ")", ":", "dataname", "=", "dataname", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'-'", ")", "return", "re", ".", "sub", "(", "'[().]'", ",", "''", ",", "dataname", ")" ]
convert a raw name for a data set in a mldata .
train
false
33,060
def is_following(user, actor): return Follow.objects.is_following(user, actor)
[ "def", "is_following", "(", "user", ",", "actor", ")", ":", "return", "Follow", ".", "objects", ".", "is_following", "(", "user", ",", "actor", ")" ]
checks if a "follow" relationship exists .
train
false
33,061
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) def do_unlock(cs, args): _find_server(cs, args.server).unlock()
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "def", "do_unlock", "(", "cs", ",", "args", ")", ":", "_find_server", "(", "cs", ",", "args", ".", "server", ...
unlock a server .
train
false
33,062
def allows_tag_namespace_for(model_obj, ns, user): if (user.is_staff or user.is_superuser): return True if (not ns.startswith('system:')): return True return False
[ "def", "allows_tag_namespace_for", "(", "model_obj", ",", "ns", ",", "user", ")", ":", "if", "(", "user", ".", "is_staff", "or", "user", ".", "is_superuser", ")", ":", "return", "True", "if", "(", "not", "ns", ".", "startswith", "(", "'system:'", ")", ...
decide whether a tag namespace is editable by a user .
train
false
33,063
def _hello_file(tmpdir): file = (tmpdir / 'hashable') file.write('hello') return file
[ "def", "_hello_file", "(", "tmpdir", ")", ":", "file", "=", "(", "tmpdir", "/", "'hashable'", ")", "file", ".", "write", "(", "'hello'", ")", "return", "file" ]
return a temp file to hash containing "hello" .
train
false
33,067
@deferred_performer def treq_get(dispatcher, intent): action = startAction(action_type=u'flocker:provision:_effect:treq_get') with action.context(): Message.log(url=intent.url) d = DeferredContext(get(intent.url, persistent=False)) d.addActionFinish() return d.result
[ "@", "deferred_performer", "def", "treq_get", "(", "dispatcher", ",", "intent", ")", ":", "action", "=", "startAction", "(", "action_type", "=", "u'flocker:provision:_effect:treq_get'", ")", "with", "action", ".", "context", "(", ")", ":", "Message", ".", "log",...
performer to execute an http get .
train
false
33,068
def writeSettings(repository): profilesDirectoryPath = archive.getProfilesPath(getProfileBaseName(repository)) archive.makeDirectory(os.path.dirname(profilesDirectoryPath)) archive.writeFileText(profilesDirectoryPath, getRepositoryText(repository)) for setting in repository.preferences: setting.updateSaveListeners()
[ "def", "writeSettings", "(", "repository", ")", ":", "profilesDirectoryPath", "=", "archive", ".", "getProfilesPath", "(", "getProfileBaseName", "(", "repository", ")", ")", "archive", ".", "makeDirectory", "(", "os", ".", "path", ".", "dirname", "(", "profilesD...
write the settings to a file .
train
false
33,070
def _makesalt(): binarysalt = ''.join([pack('@H', randint(0, 65535)) for i in range(3)]) return b64encode(binarysalt, './')
[ "def", "_makesalt", "(", ")", ":", "binarysalt", "=", "''", ".", "join", "(", "[", "pack", "(", "'@H'", ",", "randint", "(", "0", ",", "65535", ")", ")", "for", "i", "in", "range", "(", "3", ")", "]", ")", "return", "b64encode", "(", "binarysalt"...
return a 48-bit pseudorandom salt for crypt() .
train
false
33,071
def get_exercise_parents_lookup_table(): global exercise_parents_lookup_table if exercise_parents_lookup_table: return exercise_parents_lookup_table tree = get_topic_nodes_with_children(parent='root') for topic in tree: for subtopic_id in topic['children']: exercises = get_topic_contents(topic_id=subtopic_id, kinds=['Exercise']) for ex in exercises: if (ex['id'] not in exercise_parents_lookup_table): exercise_parents_lookup_table[ex['id']] = {'subtopic_id': subtopic_id, 'topic_id': topic['id']} return exercise_parents_lookup_table
[ "def", "get_exercise_parents_lookup_table", "(", ")", ":", "global", "exercise_parents_lookup_table", "if", "exercise_parents_lookup_table", ":", "return", "exercise_parents_lookup_table", "tree", "=", "get_topic_nodes_with_children", "(", "parent", "=", "'root'", ")", "for",...
return a dictionary with exercise ids as keys and topic_ids as values .
train
false
33,073
def get_duplicate_emails(): return get_user_model().objects.hide_meta().values('email').annotate(Count('email')).filter(email__count__gt=1).values_list('email', flat=True)
[ "def", "get_duplicate_emails", "(", ")", ":", "return", "get_user_model", "(", ")", ".", "objects", ".", "hide_meta", "(", ")", ".", "values", "(", "'email'", ")", ".", "annotate", "(", "Count", "(", "'email'", ")", ")", ".", "filter", "(", "email__count...
get a list of emails that occur more than once in user accounts .
train
false
33,074
def return_json_file(request): match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE']) if match: charset = match.group(1) else: charset = settings.DEFAULT_CHARSET obj_dict = json.loads(request.body.decode(charset)) obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False) response = HttpResponse(obj_json.encode(charset), status=200, content_type=('application/json; charset=%s' % charset)) response['Content-Disposition'] = 'attachment; filename=testfile.json' return response
[ "def", "return_json_file", "(", "request", ")", ":", "match", "=", "CONTENT_TYPE_RE", ".", "match", "(", "request", ".", "META", "[", "'CONTENT_TYPE'", "]", ")", "if", "match", ":", "charset", "=", "match", ".", "group", "(", "1", ")", "else", ":", "ch...
a view that parses and returns a json string as a file .
train
false
33,076
def _do_path_update(path, update_path, key, name): path = op.abspath(path) if (update_path is None): if (get_config(key, '') != path): update_path = True if ('--update-dataset-path' in sys.argv): answer = 'y' else: msg = ('Do you want to set the path:\n %s\nas the default %s dataset path in the mne-python config [y]/n? ' % (path, name)) answer = input(msg) if (answer.lower() == 'n'): update_path = False else: update_path = False if (update_path is True): set_config(key, path, set_env=False) return path
[ "def", "_do_path_update", "(", "path", ",", "update_path", ",", "key", ",", "name", ")", ":", "path", "=", "op", ".", "abspath", "(", "path", ")", "if", "(", "update_path", "is", "None", ")", ":", "if", "(", "get_config", "(", "key", ",", "''", ")"...
helper to update path .
train
false
33,077
def _has_instructor_access_to_descriptor(user, descriptor, course_key): return _has_instructor_access_to_location(user, descriptor.location, course_key)
[ "def", "_has_instructor_access_to_descriptor", "(", "user", ",", "descriptor", ",", "course_key", ")", ":", "return", "_has_instructor_access_to_location", "(", "user", ",", "descriptor", ".", "location", ",", "course_key", ")" ]
helper method that checks whether the user has staff access to the course of the location .
train
false
33,078
def _filter_boxes(boxes, min_size): ws = ((boxes[:, 2] - boxes[:, 0]) + 1) hs = ((boxes[:, 3] - boxes[:, 1]) + 1) keep = np.where(((ws >= min_size) & (hs >= min_size)))[0] return keep
[ "def", "_filter_boxes", "(", "boxes", ",", "min_size", ")", ":", "ws", "=", "(", "(", "boxes", "[", ":", ",", "2", "]", "-", "boxes", "[", ":", ",", "0", "]", ")", "+", "1", ")", "hs", "=", "(", "(", "boxes", "[", ":", ",", "3", "]", "-",...
remove all boxes with any side smaller than min_size .
train
false
33,079
def _rem_redundancy(l1, terms): essential = [] for x in terms: temporary = [] for y in l1: if _compare_term(x, y): temporary.append(y) if (len(temporary) == 1): if (temporary[0] not in essential): essential.append(temporary[0]) for x in terms: for y in essential: if _compare_term(x, y): break else: for z in l1: if _compare_term(x, z): if (z not in essential): essential.append(z) break return essential
[ "def", "_rem_redundancy", "(", "l1", ",", "terms", ")", ":", "essential", "=", "[", "]", "for", "x", "in", "terms", ":", "temporary", "=", "[", "]", "for", "y", "in", "l1", ":", "if", "_compare_term", "(", "x", ",", "y", ")", ":", "temporary", "....
after the truth table has been sufficiently simplified .
train
false
33,081
def get_axes_bounds(fig): (x_min, x_max, y_min, y_max) = ([], [], [], []) for axes_obj in fig.get_axes(): bounds = axes_obj.get_position().bounds x_min.append(bounds[0]) x_max.append((bounds[0] + bounds[2])) y_min.append(bounds[1]) y_max.append((bounds[1] + bounds[3])) (x_min, y_min, x_max, y_max) = (min(x_min), min(y_min), max(x_max), max(y_max)) return ((x_min, x_max), (y_min, y_max))
[ "def", "get_axes_bounds", "(", "fig", ")", ":", "(", "x_min", ",", "x_max", ",", "y_min", ",", "y_max", ")", "=", "(", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "for", "axes_obj", "in", "fig", ".", "get_axes", "(", ")", ":",...
return the entire axes space for figure .
train
false
33,082
def _list_filesystems(reactor, pool): listing = zfs_command(reactor, ['list', '-d', '1', '-H', '-p', '-o', 'name,mountpoint,refquota', pool]) def listed(output, pool): for line in output.splitlines(): (name, mountpoint, refquota) = line.split(' DCTB ') name = name[(len(pool) + 1):] if name: refquota = int(refquota.decode('ascii')) if (refquota == 0): refquota = None (yield _DatasetInfo(dataset=name, mountpoint=mountpoint, refquota=refquota)) listing.addCallback(listed, pool) return listing
[ "def", "_list_filesystems", "(", "reactor", ",", "pool", ")", ":", "listing", "=", "zfs_command", "(", "reactor", ",", "[", "'list'", ",", "'-d'", ",", "'1'", ",", "'-H'", ",", "'-p'", ",", "'-o'", ",", "'name,mountpoint,refquota'", ",", "pool", "]", ")"...
get a listing of all filesystems on a given pool .
train
false
33,083
def figure_to_rgb_array(fig, shape=None): array = np.frombuffer(_get_buffer(fig, dpi=fig.dpi, format='raw').read(), dtype='uint8') if (shape is None): (w, h) = fig.canvas.get_width_height() shape = (h, w, 4) return array.reshape(*shape)
[ "def", "figure_to_rgb_array", "(", "fig", ",", "shape", "=", "None", ")", ":", "array", "=", "np", ".", "frombuffer", "(", "_get_buffer", "(", "fig", ",", "dpi", "=", "fig", ".", "dpi", ",", "format", "=", "'raw'", ")", ".", "read", "(", ")", ",", ...
converts figure to a numpy array parameters fig : matplotlib .
train
false
33,084
def _rec_degree_in(g, v, i, j): if (i == j): return dmp_degree(g, v) (v, i) = ((v - 1), (i + 1)) return max([_rec_degree_in(c, v, i, j) for c in g])
[ "def", "_rec_degree_in", "(", "g", ",", "v", ",", "i", ",", "j", ")", ":", "if", "(", "i", "==", "j", ")", ":", "return", "dmp_degree", "(", "g", ",", "v", ")", "(", "v", ",", "i", ")", "=", "(", "(", "v", "-", "1", ")", ",", "(", "i", ...
recursive helper function for :func:dmp_degree_in .
train
false
33,085
@jinja2.contextfunction @library.global_function def has_perm(context, perm, obj): return access.has_perm(context['request'].user, perm, obj)
[ "@", "jinja2", ".", "contextfunction", "@", "library", ".", "global_function", "def", "has_perm", "(", "context", ",", "perm", ",", "obj", ")", ":", "return", "access", ".", "has_perm", "(", "context", "[", "'request'", "]", ".", "user", ",", "perm", ","...
check if the user has a permission on a specific object .
train
false
33,088
def dirname(p): return split(p)[0]
[ "def", "dirname", "(", "p", ")", ":", "return", "split", "(", "p", ")", "[", "0", "]" ]
an os .
train
false
33,089
def reference_property_to_reference(refprop): ref = entity_pb.Reference() app_id = clean_app_id(refprop.app()) ref.set_app(app_id) if refprop.has_name_space(): ref.set_name_space(refprop.name_space()) for pathelem in refprop.pathelement_list(): ref.mutable_path().add_element().CopyFrom(pathelem) return ref
[ "def", "reference_property_to_reference", "(", "refprop", ")", ":", "ref", "=", "entity_pb", ".", "Reference", "(", ")", "app_id", "=", "clean_app_id", "(", "refprop", ".", "app", "(", ")", ")", "ref", ".", "set_app", "(", "app_id", ")", "if", "refprop", ...
creates a reference from a referenceproperty .
train
false
33,090
def make_jobs(commands, job_prefix, queue, jobs_dir='jobs/', walltime='72:00:00', ncpus=1, nodes=1, keep_output='oe'): filenames = [] create_dir(jobs_dir) for command in commands: (fd, job_name) = mkstemp(dir=jobs_dir, prefix=(job_prefix + '_'), suffix='.txt') close(fd) out_fh = open(job_name, 'w') out_fh.write((QSUB_TEXT % (walltime, ncpus, nodes, queue, job_prefix, keep_output, command))) out_fh.close() filenames.append(job_name) return filenames
[ "def", "make_jobs", "(", "commands", ",", "job_prefix", ",", "queue", ",", "jobs_dir", "=", "'jobs/'", ",", "walltime", "=", "'72:00:00'", ",", "ncpus", "=", "1", ",", "nodes", "=", "1", ",", "keep_output", "=", "'oe'", ")", ":", "filenames", "=", "[",...
prepare qsub text files .
train
false
33,091
def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change, case_changes, val_map, is_authors=False): kval = kmap(val) item_id = rid_map.get(kval, None) if (item_id is None): if is_authors: aus = author_to_author_sort(val) db.execute(u'INSERT INTO authors(name,sort) VALUES (?,?)', (val.replace(u',', u'|'), aus)) else: db.execute((u'INSERT INTO %s(%s) VALUES (?)' % (m[u'table'], m[u'column'])), (val,)) item_id = rid_map[kval] = db.last_insert_rowid() table.id_map[item_id] = val table.col_book_map[item_id] = set() if is_authors: table.asort_map[item_id] = aus table.alink_map[item_id] = u'' elif (allow_case_change and (val != table.id_map[item_id])): case_changes[item_id] = val val_map[val] = item_id
[ "def", "get_db_id", "(", "val", ",", "db", ",", "m", ",", "table", ",", "kmap", ",", "rid_map", ",", "allow_case_change", ",", "case_changes", ",", "val_map", ",", "is_authors", "=", "False", ")", ":", "kval", "=", "kmap", "(", "val", ")", "item_id", ...
get the db id for the value val .
train
false
33,092
def levenshtein(s1, s2): if (len(s1) < len(s2)): return levenshtein(s2, s1) if (not s1): return len(s2) previous_row = xrange((len(s2) + 1)) for (i, c1) in enumerate(s1): current_row = [(i + 1)] for (j, c2) in enumerate(s2): insertions = (previous_row[(j + 1)] + 1) deletions = (current_row[j] + 1) substitutions = (previous_row[j] + (c1 != c2)) current_row.append(min(insertions, deletions, substitutions)) previous_row = current_row return previous_row[(-1)]
[ "def", "levenshtein", "(", "s1", ",", "s2", ")", ":", "if", "(", "len", "(", "s1", ")", "<", "len", "(", "s2", ")", ")", ":", "return", "levenshtein", "(", "s2", ",", "s1", ")", "if", "(", "not", "s1", ")", ":", "return", "len", "(", "s2", ...
measures the amount of difference between two strings .
train
false
33,093
def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None): services = (services or SERVICES) languages = (language_list(languages) if (languages is not None) else language_list(LANGUAGES)) if isinstance(paths, basestring): paths = [paths] order = (order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]) subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for (video, subtitles) in subtitles_by_video.iteritems(): subtitles.sort(key=(lambda s: key_subtitles(s, video, languages, services, order)), reverse=True) results = [] service_instances = {} tasks = create_download_tasks(subtitles_by_video, languages, multi) for task in tasks: try: result = consume_task(task, service_instances) results.append((task.video, result)) except: logger.error((u'Error consuming task %r' % task), exc_info=True) for service_instance in service_instances.itervalues(): service_instance.terminate() return group_by_video(results)
[ "def", "download_subtitles", "(", "paths", ",", "languages", "=", "None", ",", "services", "=", "None", ",", "force", "=", "True", ",", "multi", "=", "False", ",", "cache_dir", "=", "None", ",", "max_depth", "=", "3", ",", "scan_filter", "=", "None", "...
download :attr:~subliminal .
train
false
33,094
def get_can_use_couchdb(): if has_module(u'storages.backends.couchdb', members=[u'CouchDBStorage']): return (True, None) else: return (False, _(u'CouchDB depends on django-storages, which is not installed'))
[ "def", "get_can_use_couchdb", "(", ")", ":", "if", "has_module", "(", "u'storages.backends.couchdb'", ",", "members", "=", "[", "u'CouchDBStorage'", "]", ")", ":", "return", "(", "True", ",", "None", ")", "else", ":", "return", "(", "False", ",", "_", "(",...
check whether django-storages is installed .
train
false
33,095
def get_user_model_name(): return getattr(settings, u'AUTH_USER_MODEL', u'auth.User')
[ "def", "get_user_model_name", "(", ")", ":", "return", "getattr", "(", "settings", ",", "u'AUTH_USER_MODEL'", ",", "u'auth.User'", ")" ]
returns the app_label .
train
false