Dataset columns:
    id_within_dataset: int64, 1 to 55.5k
    snippet: string, length 19 to 14.2k
    tokens: list, length 6 to 1.63k
    nl: string, length 6 to 352
    split_within_dataset: string, 1 class
    is_duplicated: bool, 2 classes
4,985
def run_recover_tasks(host, guest, instance, on_migration_failure):
    while on_migration_failure:
        task = on_migration_failure.popleft()
        if task == 'unpause':
            try:
                state = guest.get_power_state(host)
                if state == power_state.PAUSED:
                    guest.resume()
            except Exception as e:
                LOG.warning(_LW('Failed to resume paused instance before live-migration rollback %s'), e, instance=instance)
        else:
            LOG.warning(_LW("Unknown migration task '%(task)s'"), {'task': task}, instance=instance)
run any pending migration recovery tasks .
train
false
4,986
def strip_trailing_semicolon(query):
    s = _SEMICOLON_WHITESPACE.split(query, 2)
    if len(s) > 1:
        assert len(s) == 2
        assert s[1] == ''
    return s[0]
strip a trailing semicolon from the query, as a convenience .
train
false
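A quick usage sketch for strip_trailing_semicolon above. The module-level _SEMICOLON_WHITESPACE regex is not included in the row, so the definition here is an assumption about what it matches:

    import re

    # assumed definition; the dataset row does not show the module constant
    _SEMICOLON_WHITESPACE = re.compile(r';\s*$')

    print(strip_trailing_semicolon('SELECT 1;'))  # 'SELECT 1'
    print(strip_trailing_semicolon('SELECT 1'))   # 'SELECT 1' (unchanged)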
4,988
def _get_node(lb, node_id=None, address=None, port=None):
    for node in getattr(lb, 'nodes', []):
        match_list = []
        if node_id is not None:
            match_list.append(getattr(node, 'id', None) == node_id)
        if address is not None:
            match_list.append(getattr(node, 'address', None) == address)
        if port is not None:
            match_list.append(getattr(node, 'port', None) == port)
        if match_list and all(match_list):
            return node
    return None
helper function that returns the first node matching the given id, address, and/or port .
train
false
4,989
def fxa_authorize(request):
    return render(request, 'commonplace/fxa_authorize.html')
a page to mimic commonplace's fxa-authorize page to handle login .
train
false
4,990
def chshell(name, shell):
    pre_info = info(name)
    if not pre_info:
        raise CommandExecutionError("User '{0}' does not exist".format(name))
    if shell == pre_info['shell']:
        return True
    cmd = ['pw', 'usermod', '-s', shell, '-n', name]
    __salt__['cmd.run'](cmd, python_shell=False)
    return info(name).get('shell') == shell
change the default shell of the user .
train
true
4,991
def getNextChildIndex(elementNode):
    for childNodeIndex, childNode in enumerate(elementNode.parentNode.childNodes):
        if childNode == elementNode:
            return childNodeIndex + 1
    return len(elementNode.parentNode.childNodes)
get the next child index .
train
false
4,993
def get_url_path(path, use_directory_urls=True):
    path = get_html_path(path)
    url = u'/' + path.replace(os.path.sep, u'/')
    if use_directory_urls:
        return url[:-len(u'index.html')]
    return url
map a source file path to an output html path .
train
false
4,994
def _clean_object_name(name):
    return urlquote(name, safe='') if name else None
return the url encoded name .
train
false
4,995
@pytest.fixture
def _history():
    history = InMemoryHistory()
    history.append(u'alpha beta gamma delta')
    history.append(u'one two three four')
    return history
prefilled history .
train
false
4,996
def simplify_regex(pattern):
    pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
    pattern = non_named_group_matcher.sub('<var>', pattern)
    pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
    if not pattern.startswith('/'):
        pattern = '/' + pattern
    return pattern
clean up urlpattern regexes into something more readable by humans .
train
false
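simplify_regex depends on two module-level matchers that the row does not show; the definitions below are plausible assumptions, not the module's actual patterns:

    import re

    # assumed definitions of the module-level matchers
    named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
    non_named_group_matcher = re.compile(r'\(.+?\)')

    print(simplify_regex(r'^articles/(?P<year>\d{4})/$'))
    # '/articles/<year>/'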
4,999
def _unpick_search(sort, allowed_fields=None, total=None):
    sorts = []
    split_sort = sort.split(',')
    for part in split_sort:
        split_part = part.strip().split()
        field = split_part[0]
        if len(split_part) > 1:
            order = split_part[1].lower()
        else:
            order = 'asc'
        if allowed_fields:
            if field not in allowed_fields:
                raise ValidationError('Cannot sort by field `%s`' % field)
        if order not in ['asc', 'desc']:
            raise ValidationError('Invalid sort direction `%s`' % order)
        sorts.append((field, order))
    if total and (len(sorts) > total):
        raise ValidationError('Too many sort criteria provided only %s allowed' % total)
    return sorts
this is a helper function that takes a sort string eg 'name asc' and returns a list of (field, order) pairs .
train
false
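A sketch of calling _unpick_search; ValidationError here is a stand-in for whatever exception class the surrounding module defines:

    class ValidationError(Exception):  # stand-in for the module's exception
        pass

    print(_unpick_search('name asc, created desc'))
    # [('name', 'asc'), ('created', 'desc')]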
5,000
def _fill_and_one_pad_stride(stride, n):
    try:
        return (1,) + _fill_shape(stride, n) + (1,)
    except TypeError:
        raise base.IncompatibleShapeError(
            'stride is {} ({}), must be either an integer or an iterable of integers of size {}'.format(
                stride, type(stride), n))
expands the provided stride to size n and pads it with 1s .
train
false
5,001
def make_sample_query_from_filter(sample_filter, require_meter=True):
    meter = sample_filter.meter
    if (not meter) and require_meter:
        raise RuntimeError('Missing required meter specifier')
    (start_row, end_row, ts_query) = make_timestamp_query(
        make_general_rowkey_scan,
        start=sample_filter.start_timestamp,
        start_op=sample_filter.start_timestamp_op,
        end=sample_filter.end_timestamp,
        end_op=sample_filter.end_timestamp_op,
        some_id=meter)
    kwargs = dict(user_id=sample_filter.user,
                  project_id=sample_filter.project,
                  counter_name=meter,
                  resource_id=sample_filter.resource,
                  source=sample_filter.source,
                  message_id=sample_filter.message_id)
    q = make_query(metaquery=sample_filter.metaquery, **kwargs)
    if q:
        res_q = (q + ' AND ' + ts_query) if ts_query else q
    else:
        res_q = ts_query if ts_query else None
    need_timestamp = (sample_filter.start_timestamp or sample_filter.end_timestamp) is not None
    columns = get_meter_columns(metaquery=sample_filter.metaquery,
                                need_timestamp=need_timestamp, **kwargs)
    return (res_q, start_row, end_row, columns)
return a query dictionary based on the settings in the filter .
train
false
5,002
def _PutSigningKeyset(io_loop, secret):
    _GetSecretsManager().PutSecret(secret, json.dumps(secrets.CreateSigningKeyset(secret)))
    io_loop.stop()
creates a new keyczar crypt keyset used for signing and signature verification and writes it to secrets subdir .
train
false
5,003
def check_phylip_reject_duplicate():
    handle = StringIO()
    sequences = [SeqRecord(Seq('AAAA'), id='longsequencename1'),
                 SeqRecord(Seq('AAAA'), id='longsequencename2'),
                 SeqRecord(Seq('AAAA'), id='other_sequence')]
    alignment = MultipleSeqAlignment(sequences)
    try:
        AlignIO.write(alignment, handle, 'phylip')
        assert False, 'Duplicate IDs after truncation are not allowed.'
    except ValueError as e:
        assert "Repeated name 'longsequen'" in str(e)
ensure that attempting to write sequences with duplicate ids after truncation fails for phylip format .
train
false
5,004
def idz_sfrmi(l, m):
    return _id.idz_sfrmi(l, m)
initialize data for :func:idz_sfrm .
train
false
5,005
def write_bem_solution(fname, bem):
    _check_bem_size(bem['surfs'])
    with start_file(fname) as fid:
        start_block(fid, FIFF.FIFFB_BEM)
        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, bem['surfs'][0]['coord_frame'])
        _write_bem_surfaces_block(fid, bem['surfs'])
        if 'solution' in bem:
            if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
                raise RuntimeError('Only linear collocation supported')
            write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
            write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION, bem['solution'])
        end_block(fid, FIFF.FIFFB_BEM)
        end_file(fid)
write a bem model with solution .
train
false
5,006
def test_tf_mxne():
    alpha_space = 10.0
    alpha_time = 5.0
    (M, G, active_set) = _generate_tf_data()
    (X_hat_tf, active_set_hat_tf, E) = tf_mixed_norm_solver(
        M, G, alpha_space, alpha_time, maxit=200, tol=1e-08,
        verbose=True, n_orient=1, tstep=4, wsize=32)
    assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
test convergence of tf-mxne solver .
train
false
5,007
@contextmanager
def inside_dir(dirpath):
    old_path = os.getcwd()
    try:
        os.chdir(dirpath)
        yield
    finally:
        os.chdir(old_path)
execute code from inside the given directory .
train
false
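Usage follows the standard context-manager pattern; a minimal sketch:

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        with inside_dir(tmp):
            print(os.getcwd())  # now inside tmp
    # back in the original working directory here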
5,008
def find_ndk_project_root(source):
    ndk_directory = os.path.abspath(source)
    while ndk_directory != '/':
        if os.path.exists(os.path.join(ndk_directory, 'jni')):
            break
        ndk_directory = os.path.dirname(ndk_directory)
    else:
        return None
    return ndk_directory
given a directory path, walk upward to find the ndk project root (the directory containing jni) .
train
false
5,009
def sdm_zero():
    return []
return the zero module element .
train
false
5,010
def _iterate_sagittal_slices(array, limits=None):
    shape = array.shape[0]
    for ind in range(shape):
        if limits and (ind not in limits):
            continue
        yield (ind, array[ind, :, :])
iterate sagittal slices .
train
false
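A small numpy sketch of the slice iterator:

    import numpy as np

    vol = np.zeros((3, 4, 5))
    for ind, sl in _iterate_sagittal_slices(vol, limits=[0, 2]):
        print(ind, sl.shape)  # prints 0 (4, 5), then 2 (4, 5); index 1 is skipped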
5,011
@hsa.jit(device=True)
def device_scan(tid, data, temp, inclusive):
    lane = tid & (_WARPSIZE - 1)
    warpid = tid >> 6
    temp[tid] = data
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    warp_scan_res = warp_scan(tid, temp, inclusive)
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    if lane == (_WARPSIZE - 1):
        temp[warpid] = temp[tid]
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    if warpid == 0:
        warp_scan(tid, temp, True)
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    if warpid > 0:
        warp_scan_res += temp[warpid - 1]
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    if tid == (temp.size - 1):
        if inclusive:
            temp[0] = warp_scan_res
        else:
            temp[0] = warp_scan_res + data
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    prefixsum = temp[0]
    hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
    return (warp_scan_res, prefixsum)
perform a scan within the block . args: tid: thread id; data: scalar input for tid; temp: shared memory for temporary work .
train
false
5,012
def runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw):
    for (args, kwargs) in suppressedWarnings:
        warnings.filterwarnings(*args, **kwargs)
    addedFilters = warnings.filters[:len(suppressedWarnings)]
    try:
        result = f(*a, **kw)
    except:
        exc_info = sys.exc_info()
        _resetWarningFilters(None, addedFilters)
        raise exc_info[0], exc_info[1], exc_info[2]
    else:
        if isinstance(result, defer.Deferred):
            result.addBoth(_resetWarningFilters, addedFilters)
        else:
            _resetWarningFilters(None, addedFilters)
        return result
run c{f} with the given warnings suppressed .
train
false
5,013
def _val_or_dict(tk, options, *args):
    options = _format_optdict(options)
    res = tk.call(*(args + options))
    if len(options) % 2:
        return res
    return _dict_from_tcltuple(tk.splitlist(res))
format options then call the tk command with args and options and return the appropriate result .
train
false
5,014
def add_or_update_given_trace_db(trace_db, action_executions=None, rules=None, trigger_instances=None):
    if trace_db is None:
        raise ValueError('trace_db should be non-None.')
    if not action_executions:
        action_executions = []
    action_executions = [_to_trace_component_db(component=action_execution)
                         for action_execution in action_executions]
    if not rules:
        rules = []
    rules = [_to_trace_component_db(component=rule) for rule in rules]
    if not trigger_instances:
        trigger_instances = []
    trigger_instances = [_to_trace_component_db(component=trigger_instance)
                         for trigger_instance in trigger_instances]
    if trace_db.id:
        return Trace.push_components(trace_db,
                                     action_executions=action_executions,
                                     rules=rules,
                                     trigger_instances=trigger_instances)
    trace_db.action_executions = action_executions
    trace_db.rules = rules
    trace_db.trigger_instances = trigger_instances
    return Trace.add_or_update(trace_db)
update an existing trace or add a new one .
train
false
5,015
def wait_for_download_folder():
    while not cfg.download_dir.test_path():
        logging.debug('Waiting for "incomplete" folder')
        time.sleep(2.0)
wait for download folder to become available .
train
false
5,017
def add_checks(actions):
    if FETCH in actions:
        actions.setdefault(CHECK_FETCH, [True])
    if EXTRACT in actions:
        actions.setdefault(CHECK_EXTRACT, [True])
adds appropriate checks to a given dict of actions .
train
false
5,019
def _ros_sort(df, observations, censorship, warn=False):
    censored = sort_values(df[df[censorship]], observations, axis=0)
    uncensored = sort_values(df[~df[censorship]], observations, axis=0)
    if censored[observations].max() > uncensored[observations].max():
        censored = censored[censored[observations] <= uncensored[observations].max()]
        if warn:
            msg = 'Dropping censored observations greater than the max uncensored observation.'
            warnings.warn(msg)
    return censored.append(uncensored)[[observations, censorship]].reset_index(drop=True)
this function prepares a dataframe for ros .
train
false
5,020
def as_string(value):
    if six.PY2:
        buffer_types = (buffer, memoryview)
    else:
        buffer_types = memoryview
    if value is None:
        return u''
    elif isinstance(value, buffer_types):
        return bytes(value).decode('utf-8', 'ignore')
    elif isinstance(value, bytes):
        return value.decode('utf-8', 'ignore')
    else:
        return six.text_type(value)
convert a value to a unicode object for matching with a query .
train
true
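A sketch of as_string in action (requires six):

    import six

    print(as_string(None))            # u''
    print(as_string(b'caf\xc3\xa9'))  # u'café'
    print(as_string(42))              # u'42'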
5,021
def _get_eth_link(vif, ifc_num):
    link_id = vif.get('devname')
    if not link_id:
        link_id = 'interface%d' % ifc_num
    if vif.get('type') == 'ethernet':
        nic_type = 'phy'
    else:
        nic_type = vif.get('type')
    link = {'id': link_id,
            'vif_id': vif['id'],
            'type': nic_type,
            'mtu': vif['network']['meta'].get('mtu'),
            'ethernet_mac_address': vif.get('address')}
    return link
get a vif or physical nic representation .
train
false
5,022
def is_valid_hidden_service_address(entry):
    try:
        return bool(HS_ADDRESS_PATTERN.match(entry))
    except TypeError:
        return False
checks if a string is a valid format for being a hidden service address .
train
false
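HS_ADDRESS_PATTERN is not part of the row; the pattern below, matching a 16-character base32 v2 onion address, is an assumption:

    import re

    # assumed pattern for a v2 hidden service address (16 base32 characters)
    HS_ADDRESS_PATTERN = re.compile('^[a-z2-7]{16}$')

    print(is_valid_hidden_service_address('duskgytldkxiuqc6'))  # True
    print(is_valid_hidden_service_address('not an address'))    # False
    print(is_valid_hidden_service_address(None))                # False (TypeError caught)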
5,023
@receiver(job_was_approved)
def on_job_was_approved(sender, job, approving_user, **kwargs):
    send_job_review_message(job, approving_user,
                            'jobs/email/job_was_approved_subject.txt',
                            'jobs/email/job_was_approved.txt')
handle approving job offer .
train
false
5,024
def find_gui_and_backend(gui=None, gui_select=None):
    import matplotlib
    if gui and (gui != 'auto'):
        backend = backends[gui]
        if gui == 'agg':
            gui = None
    else:
        backend = matplotlib.rcParamsOrig['backend']
        gui = backend2gui.get(backend, None)
        if gui_select and (gui != gui_select):
            gui = gui_select
            backend = backends[gui]
    return (gui, backend)
given a gui string return the gui and mpl backend .
train
false
5,026
@register.filter
def all_ancestors_are_published(page, language):
    page = page.parent
    while page:
        if not page.is_published(language):
            return False
        page = page.parent
    return True
returns false if any of the ancestors of page are unpublished .
train
false
5,027
def generate_adjlist(G, delimiter=' '):
    directed = G.is_directed()
    seen = set()
    for s, nbrs in G.adjacency():
        line = make_str(s) + delimiter
        for t, data in nbrs.items():
            if (not directed) and (t in seen):
                continue
            if G.is_multigraph():
                for d in data.values():
                    line += make_str(t) + delimiter
            else:
                line += make_str(t) + delimiter
        if not directed:
            seen.add(s)
        yield line[:-len(delimiter)]
generate a single line of the graph g in adjacency list format .
train
false
5,028
def grains_refresh():
    DETAILS['grains_cache'] = {}
    return grains()
refresh the grains from the proxy device .
train
false
5,029
def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
    noise = evoked.copy()
    noise.data = _generate_noise(evoked.info, cov, iir_filter, random_state,
                                 evoked.data.shape[1])[0]
    return noise
create noise as a multivariate gaussian .
train
false
5,030
def username():
    global _username
    if not _username:
        uid = os.getuid()
        _username = pwd_from_uid(uid)[0] or ('user%d' % uid)
    return _username
return the current user's username .
train
false
5,031
def normalize_slice(s):
    (start, stop, step) = (s.start, s.stop, s.step)
    if start is None:
        start = 0
    if step is None:
        step = 1
    if (start < 0) or (step < 0) or ((stop is not None) and (stop < 0)):
        raise NotImplementedError()
    return slice(start, stop, step)
replace nones in slices with integers .
train
false
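normalize_slice is self-contained; a couple of examples:

    print(normalize_slice(slice(None, 10, None)))  # slice(0, 10, 1)
    print(normalize_slice(slice(2, None, 3)))      # slice(2, None, 3)
    # normalize_slice(slice(-1, None, None)) raises NotImplementedError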
5,032
def get_attrib_file(path, size):
    attribs = []
    path = os.path.join(path, ATTRIB_FILE)
    try:
        f = open(path, 'r')
    except:
        return [None for unused in xrange(size)]
    for unused in xrange(size):
        line = f.readline().strip('\r\n ')
        if line:
            if line.lower() == 'none':
                line = None
            try:
                line = int(line)
            except:
                pass
            attribs.append(line)
        else:
            attribs.append(None)
    f.close()
    return attribs
read a job's attributes from file .
train
false
5,033
def regexp2pattern(string):
    if type(string) is REGEXP_T:
        flags = string.flags
        string = string.pattern
        if string.startswith('^'):
            string = string[1:]
        else:
            string = '.*' + string
        if string.endswith('$'):
            string = string[:-1]
        else:
            string += '.*'
        return (string, flags)
    else:
        return (re.escape(string), 0)
this function takes a regexp or a string and returns a pattern and some flags .
train
false
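REGEXP_T is presumably the compiled-pattern type; a sketch under that assumption:

    import re

    REGEXP_T = type(re.compile(''))  # assumed definition of the type alias

    print(regexp2pattern(re.compile('^foo')))  # ('foo.*', 32) on Python 3
    print(regexp2pattern('a.b'))               # ('a\\.b', 0); plain strings are escaped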
5,034
def relevant_issues(issues, after):
    logging.info('finding relevant issues after {}...'.format(after))
    seen = set()
    for issue in issues:
        if relevent_issue(issue, after) and (issue['title'] not in seen):
            seen.add(issue['title'])
            yield issue
yields relevant closed issues given a list of issues .
train
true
5,036
def write_index(group):
    return u'%s_%s' % (settings.ES_INDEX_PREFIX, settings.ES_WRITE_INDEXES[group])
return the name of the write index for the given group .
train
false
5,037
def determine_disk_image_type(image_meta):
    if not image_meta.obj_attr_is_set('disk_format'):
        return None
    disk_format_map = {'ami': ImageType.DISK,
                       'aki': ImageType.KERNEL,
                       'ari': ImageType.RAMDISK,
                       'raw': ImageType.DISK_RAW,
                       'vhd': ImageType.DISK_VHD,
                       'iso': ImageType.DISK_ISO}
    try:
        image_type = disk_format_map[image_meta.disk_format]
    except KeyError:
        raise exception.InvalidDiskFormat(disk_format=image_meta.disk_format)
    LOG.debug('Detected %(type)s format for image %(image)s',
              {'type': ImageType.to_string(image_type), 'image': image_meta})
    return image_type
disk image types are used to determine where the kernel will reside within an image .
train
false
5,041
def test_tie_situation():
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)], voting='hard')
    assert_equal(clf1.fit(X, y).predict(X)[73], 2)
    assert_equal(clf2.fit(X, y).predict(X)[73], 1)
    assert_equal(eclf.fit(X, y).predict(X)[73], 1)
check voting classifier selects smaller class label in tie situation .
train
false
5,042
def merge_pr(num):
    pr = gh_api.get_pull_request(gh_project, num)
    repo = pr['head']['repo']['clone_url']
    branch = pr['head']['ref']
    mergeable = merge_branch(repo=repo, branch=branch)
    if not mergeable:
        cmd = 'git pull ' + repo + ' ' + branch
        not_merged[str(num)] = cmd
        print '=============================================================================='
        print 'Something went wrong merging this branch, you can try it manually by running :'
        print cmd
        print '=============================================================================='
try to merge the branch of pr num into current branch .
train
false
5,043
def verify_challenge(uri):
    while True:
        try:
            resp = urlopen(uri)
            challenge_status = json.loads(resp.read().decode('utf8'))
        except IOError as e:
            raise ValueError('Error checking challenge: {0} {1}'.format(
                e.code, json.loads(e.read().decode('utf8'))))
        if challenge_status['status'] == 'pending':
            time.sleep(2)
        elif challenge_status['status'] == 'valid':
            LOGGER.info('Domain verified!')
            break
        else:
            raise ValueError('Domain challenge did not pass: {0}'.format(challenge_status))
loop until our challenge is verified .
train
true
5,044
def _find_option_with_arg(argv, short_opts=None, long_opts=None):
    for i, arg in enumerate(argv):
        if arg.startswith(u'-'):
            if long_opts and arg.startswith(u'--'):
                (name, sep, val) = arg.partition(u'=')
                if name in long_opts:
                    return val if sep else argv[i + 1]
            if short_opts and (arg in short_opts):
                return argv[i + 1]
    raise KeyError(u'|'.join(short_opts or ([] + long_opts) or []))
search argv for options specifying short and longopt alternatives .
train
false
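A sketch of option lookup against a fake argv:

    print(_find_option_with_arg([u'worker', u'-A', u'proj'],
                                short_opts=[u'-A']))       # u'proj'
    print(_find_option_with_arg([u'worker', u'--app=proj'],
                                long_opts=[u'--app']))     # u'proj'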
5,045
def split_filename(fname):
    special_extensions = [u'.nii.gz', u'.tar.gz']
    pth = os.path.dirname(fname)
    fname = os.path.basename(fname)
    ext = None
    for special_ext in special_extensions:
        ext_len = len(special_ext)
        if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
            ext = fname[-ext_len:]
            fname = fname[:-ext_len]
            break
    if not ext:
        (fname, ext) = os.path.splitext(fname)
    return (pth, fname, ext)
split a filename into path, base name, and extension, handling special double extensions like .nii.gz .
train
false
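split_filename needs only os; two examples showing the special double extensions:

    print(split_filename(u'/data/sub01.nii.gz'))  # (u'/data', u'sub01', u'.nii.gz')
    print(split_filename(u'/data/report.txt'))    # (u'/data', u'report', u'.txt')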
5,046
def poll_until(predicate, steps, sleep=None):
    if sleep is None:
        sleep = time.sleep
    for step in steps:
        result = predicate()
        if result:
            return result
        sleep(step)
    result = predicate()
    if result:
        return result
    raise LoopExceeded(predicate, result)
perform steps until a non-false result is returned .
train
false
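A sketch of poll_until with a recording fake sleep; LoopExceeded is a stand-in for the module's own exception class:

    class LoopExceeded(Exception):  # stand-in for the module's exception
        pass

    results = iter([False, False, 'ready'])
    naps = []
    value = poll_until(lambda: next(results), steps=[0.1, 0.2, 0.4],
                       sleep=naps.append)
    print(value)  # 'ready'
    print(naps)   # [0.1, 0.2]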
5,047
@commands(u'suggest')
def suggest(bot, trigger):
    if not trigger.group(2):
        return bot.reply(u'No query term.')
    query = trigger.group(2)
    uri = u'http://websitedev.de/temp-bin/suggest.pl?q='
    answer = web.get(uri + query.replace(u'+', u'%2B'))
    if answer:
        bot.say(answer)
    else:
        bot.reply(u'Sorry, no result.')
fetch suggestions for a query term from a web service and reply with the result .
train
false
5,049
def rs_swap(a, b):
    d = {}
    for rsa in a:
        d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0]
    return d
build a dictionary to swap randomsymbols based on their underlying symbol .
train
false
5,050
def _check_for_int(x):
    try:
        y = int(x)
    except (OverflowError, ValueError):
        pass
    else:
        if (x == x) and (y == x):
            return y
    return x
this is a compatibility function that takes a c{float} and converts it to an c{int} if the values are equal .
train
true
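_check_for_int is self-contained; a few examples of its branches:

    print(_check_for_int(3.0))           # 3 (converted to int)
    print(_check_for_int(3.5))           # 3.5 (left alone)
    print(_check_for_int(float('inf')))  # inf (OverflowError caught, left alone)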
5,052
def unzipIter(filename, directory='.', overwrite=0):
    zf = zipfile.ZipFile(filename, 'r')
    names = zf.namelist()
    if not os.path.exists(directory):
        os.makedirs(directory)
    remaining = len(zf.namelist())
    for entry in names:
        remaining -= 1
        isdir = zf.getinfo(entry).external_attr & DIR_BIT
        f = os.path.join(directory, entry)
        if isdir:
            if not os.path.exists(f):
                os.makedirs(f)
        else:
            fdir = os.path.split(f)[0]
            if not os.path.exists(fdir):
                os.makedirs(fdir)
            if overwrite or (not os.path.exists(f)):
                outfile = file(f, 'wb')
                outfile.write(zf.read(entry))
                outfile.close()
        yield remaining
return a generator for the zipfile .
train
false
5,053
def package_dirname(package):
    if isinstance(package, str):
        package = __import__(package, fromlist=[''])
    filename = package.__file__
    dirname = os.path.dirname(filename)
    return dirname
return the directory path where package is located .
train
false
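package_dirname accepts either a module object or an importable name, for example:

    import json

    print(package_dirname('json'))  # e.g. '/usr/lib/python3.x/json'
    print(package_dirname(json))    # same directory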
5,054
def random_string_from_module(module):
    return random.choice(string_from_module(module))
returns a random global string from a module .
train
false
5,055
def _build_bounding_box_lookup(bounding_box_file):
    lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
    images_to_bboxes = {}
    num_bbox = 0
    num_image = 0
    for l in lines:
        if l:
            parts = l.split(',')
            assert len(parts) == 5, 'Failed to parse: %s' % l
            filename = parts[0]
            xmin = float(parts[1])
            ymin = float(parts[2])
            xmax = float(parts[3])
            ymax = float(parts[4])
            box = [xmin, ymin, xmax, ymax]
            if filename not in images_to_bboxes:
                images_to_bboxes[filename] = []
                num_image += 1
            images_to_bboxes[filename].append(box)
            num_bbox += 1
    print('Successfully read %d bounding boxes across %d images.' % (num_bbox, num_image))
    return images_to_bboxes
build a lookup from image file to bounding boxes .
train
true
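A hedged sketch of the input format _build_bounding_box_lookup expects; bbox.csv and the image name are hypothetical, and TensorFlow must be importable as tf for the file read:

# each line of bbox.csv: filename,xmin,ymin,xmax,ymax
lookup = _build_bounding_box_lookup('bbox.csv')
for xmin, ymin, xmax, ymax in lookup.get('n01440764_10026.JPEG', []):
    print(xmin, ymin, xmax, ymax)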
5,056
def convert_PostalAddressProperty(model, prop, kwargs): return get_TextField(kwargs)
[ "def", "convert_PostalAddressProperty", "(", "model", ",", "prop", ",", "kwargs", ")", ":", "return", "get_TextField", "(", "kwargs", ")" ]
returns a form field for a db.PostalAddressProperty .
train
false
5,057
def _flatten_artist_credit(credit): artist_parts = [] artist_sort_parts = [] artist_credit_parts = [] for el in credit: if isinstance(el, basestring): artist_parts.append(el) artist_credit_parts.append(el) artist_sort_parts.append(el) else: alias = _preferred_alias(el['artist'].get('alias-list', ())) if alias: cur_artist_name = alias['alias'] else: cur_artist_name = el['artist']['name'] artist_parts.append(cur_artist_name) if alias: artist_sort_parts.append(alias['sort-name']) elif ('sort-name' in el['artist']): artist_sort_parts.append(el['artist']['sort-name']) else: artist_sort_parts.append(cur_artist_name) if ('name' in el): artist_credit_parts.append(el['name']) else: artist_credit_parts.append(cur_artist_name) return (''.join(artist_parts), ''.join(artist_sort_parts), ''.join(artist_credit_parts))
[ "def", "_flatten_artist_credit", "(", "credit", ")", ":", "artist_parts", "=", "[", "]", "artist_sort_parts", "=", "[", "]", "artist_credit_parts", "=", "[", "]", "for", "el", "in", "credit", ":", "if", "isinstance", "(", "el", ",", "basestring", ")", ":", "artist_parts", ".", "append", "(", "el", ")", "artist_credit_parts", ".", "append", "(", "el", ")", "artist_sort_parts", ".", "append", "(", "el", ")", "else", ":", "alias", "=", "_preferred_alias", "(", "el", "[", "'artist'", "]", ".", "get", "(", "'alias-list'", ",", "(", ")", ")", ")", "if", "alias", ":", "cur_artist_name", "=", "alias", "[", "'alias'", "]", "else", ":", "cur_artist_name", "=", "el", "[", "'artist'", "]", "[", "'name'", "]", "artist_parts", ".", "append", "(", "cur_artist_name", ")", "if", "alias", ":", "artist_sort_parts", ".", "append", "(", "alias", "[", "'sort-name'", "]", ")", "elif", "(", "'sort-name'", "in", "el", "[", "'artist'", "]", ")", ":", "artist_sort_parts", ".", "append", "(", "el", "[", "'artist'", "]", "[", "'sort-name'", "]", ")", "else", ":", "artist_sort_parts", ".", "append", "(", "cur_artist_name", ")", "if", "(", "'name'", "in", "el", ")", ":", "artist_credit_parts", ".", "append", "(", "el", "[", "'name'", "]", ")", "else", ":", "artist_credit_parts", ".", "append", "(", "cur_artist_name", ")", "return", "(", "''", ".", "join", "(", "artist_parts", ")", ",", "''", ".", "join", "(", "artist_sort_parts", ")", ",", "''", ".", "join", "(", "artist_credit_parts", ")", ")" ]
given a list representing an artist-credit block , flatten the credit into joined artist name , sort name , and credit name strings .
train
false
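A sketch of _flatten_artist_credit on a MusicBrainz-style credit list; it assumes the unshown _preferred_alias helper returns None when no aliases are present (the snippet is Python 2, using basestring):

credit = [
    {'artist': {'name': 'Artist A', 'sort-name': 'A, Artist'}},
    ' feat. ',
    {'name': 'B.', 'artist': {'name': 'Artist B'}},
]
name, sort_name, credit_name = _flatten_artist_credit(credit)
# name        == 'Artist A feat. Artist B'
# sort_name   == 'A, Artist feat. Artist B'   (sort-name used when available)
# credit_name == 'Artist A feat. B.'          (per-credit 'name' wins)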
5,058
def adjust_string(original, length): if (not (length > 0)): raise AssertionError resulting_string = original if (len(resulting_string) > length): unit_db = (resulting_string.endswith('dB') and (resulting_string.find('.') != (-1))) if ((len(resulting_string.strip()) > length) and unit_db): resulting_string = resulting_string[:(-2)] if (len(resulting_string) > length): for char in (' ', '_', 'i', 'o', 'u', 'e', 'a'): offset = (0 if (char == ' ') else 1) while ((len(resulting_string) > length) and (resulting_string.rfind(char, offset) > 0)): char_pos = resulting_string.rfind(char, offset) resulting_string = (resulting_string[:char_pos] + resulting_string[(char_pos + 1):]) resulting_string = resulting_string[:length] resulting_string = (resulting_string.ljust(length) if (len(resulting_string) < length) else resulting_string) return resulting_string
[ "def", "adjust_string", "(", "original", ",", "length", ")", ":", "if", "(", "not", "(", "length", ">", "0", ")", ")", ":", "raise", "AssertionError", "resulting_string", "=", "original", "if", "(", "len", "(", "resulting_string", ")", ">", "length", ")", ":", "unit_db", "=", "(", "resulting_string", ".", "endswith", "(", "'dB'", ")", "and", "(", "resulting_string", ".", "find", "(", "'.'", ")", "!=", "(", "-", "1", ")", ")", ")", "if", "(", "(", "len", "(", "resulting_string", ".", "strip", "(", ")", ")", ">", "length", ")", "and", "unit_db", ")", ":", "resulting_string", "=", "resulting_string", "[", ":", "(", "-", "2", ")", "]", "if", "(", "len", "(", "resulting_string", ")", ">", "length", ")", ":", "for", "char", "in", "(", "' '", ",", "'_'", ",", "'i'", ",", "'o'", ",", "'u'", ",", "'e'", ",", "'a'", ")", ":", "offset", "=", "(", "0", "if", "(", "char", "==", "' '", ")", "else", "1", ")", "while", "(", "(", "len", "(", "resulting_string", ")", ">", "length", ")", "and", "(", "resulting_string", ".", "rfind", "(", "char", ",", "offset", ")", ">", "0", ")", ")", ":", "char_pos", "=", "resulting_string", ".", "rfind", "(", "char", ",", "offset", ")", "resulting_string", "=", "(", "resulting_string", "[", ":", "char_pos", "]", "+", "resulting_string", "[", "(", "char_pos", "+", "1", ")", ":", "]", ")", "resulting_string", "=", "resulting_string", "[", ":", "length", "]", "resulting_string", "=", "(", "(", "len", "(", "resulting_string", ")", "<", "length", ")", "and", "resulting_string", ".", "ljust", "(", "length", ")", ")", "return", "resulting_string" ]
brings the string to the given length by either removing characters or adding spaces .
train
false
5,059
def testAgent(path, agent, port=DEFAULT_PORT): agent = adaptAgentObject(BenchmarkingAgent(agent)) experiment = RLCExperiment(path, str(port)) experiment.start() clientAgent = ClientAgent(agent) clientAgent.connect(DEFAULT_HOST, port, CLIENT_TIMEOUT) logging.info('Agent connected') clientAgent.runAgentEventLoop() clientAgent.close() logging.info('Agent finished') experiment.stop() return agent.agent.benchmark
[ "def", "testAgent", "(", "path", ",", "agent", ",", "port", "=", "DEFAULT_PORT", ")", ":", "agent", "=", "adaptAgentObject", "(", "BenchmarkingAgent", "(", "agent", ")", ")", "experiment", "=", "RLCExperiment", "(", "path", ",", "str", "(", "port", ")", ")", "experiment", ".", "start", "(", ")", "clientAgent", "=", "ClientAgent", "(", "agent", ")", "clientAgent", ".", "connect", "(", "DEFAULT_HOST", ",", "port", ",", "CLIENT_TIMEOUT", ")", "logging", ".", "info", "(", "'Agent connected'", ")", "clientAgent", ".", "runAgentEventLoop", "(", ")", "clientAgent", ".", "close", "(", ")", "logging", ".", "info", "(", "'Agent finished'", ")", "experiment", ".", "stop", "(", ")", "return", "agent", ".", "agent", ".", "benchmark" ]
test an agent once on a rlcompetition experiment .
train
false
5,060
@np.deprecate(new_name='expm') def expm3(A, q=20): A = _asarray_square(A) n = A.shape[0] t = A.dtype.char if (t not in ['f', 'F', 'd', 'D']): A = A.astype('d') t = 'd' eA = np.identity(n, dtype=t) trm = np.identity(n, dtype=t) castfunc = cast[t] for k in range(1, q): trm[:] = (trm.dot(A) / castfunc(k)) eA += trm return eA
[ "@", "np", ".", "deprecate", "(", "new_name", "=", "'expm'", ")", "def", "expm3", "(", "A", ",", "q", "=", "20", ")", ":", "A", "=", "_asarray_square", "(", "A", ")", "n", "=", "A", ".", "shape", "[", "0", "]", "t", "=", "A", ".", "dtype", ".", "char", "if", "(", "t", "not", "in", "[", "'f'", ",", "'F'", ",", "'d'", ",", "'D'", "]", ")", ":", "A", "=", "A", ".", "astype", "(", "'d'", ")", "t", "=", "'d'", "eA", "=", "np", ".", "identity", "(", "n", ",", "dtype", "=", "t", ")", "trm", "=", "np", ".", "identity", "(", "n", ",", "dtype", "=", "t", ")", "castfunc", "=", "cast", "[", "t", "]", "for", "k", "in", "range", "(", "1", ",", "q", ")", ":", "trm", "[", ":", "]", "=", "(", "trm", ".", "dot", "(", "A", ")", "/", "castfunc", "(", "k", ")", ")", "eA", "+=", "trm", "return", "eA" ]
compute the matrix exponential using taylor series .
train
false
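A worked check for expm3; the helpers _asarray_square and cast are assumed to come from scipy.linalg.matfuncs, where this function lives. For a skew-symmetric generator the 20-term Taylor series should approximate a rotation:

import numpy as np
A = np.array([[0.0, 1.0], [-1.0, 0.0]])
R = expm3(A)
# R is close to [[cos(1), sin(1)], [-sin(1), cos(1)]]
assert np.allclose(R, [[np.cos(1), np.sin(1)], [-np.sin(1), np.cos(1)]], atol=1e-6)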
5,061
@login_required @require_POST def add_member(request, group_slug): prof = get_object_or_404(GroupProfile, slug=group_slug) if (not _user_can_edit(request.user, prof)): raise PermissionDenied form = AddUserForm(request.POST) if form.is_valid(): for user in form.cleaned_data['users']: user.groups.add(prof.group) msg = _('{users} added to the group successfully!').format(users=request.POST.get('users')) messages.add_message(request, messages.SUCCESS, msg) return HttpResponseRedirect(prof.get_absolute_url()) msg = _('There were errors adding members to the group, see below.') messages.add_message(request, messages.ERROR, msg) return profile(request, group_slug, member_form=form)
[ "@", "login_required", "@", "require_POST", "def", "add_member", "(", "request", ",", "group_slug", ")", ":", "prof", "=", "get_object_or_404", "(", "GroupProfile", ",", "slug", "=", "group_slug", ")", "if", "(", "not", "_user_can_edit", "(", "request", ".", "user", ",", "prof", ")", ")", ":", "raise", "PermissionDenied", "form", "=", "AddUserForm", "(", "request", ".", "POST", ")", "if", "form", ".", "is_valid", "(", ")", ":", "for", "user", "in", "form", ".", "cleaned_data", "[", "'users'", "]", ":", "user", ".", "groups", ".", "add", "(", "prof", ".", "group", ")", "msg", "=", "_", "(", "'{users} added to the group successfully!'", ")", ".", "format", "(", "users", "=", "request", ".", "POST", ".", "get", "(", "'users'", ")", ")", "messages", ".", "add_message", "(", "request", ",", "messages", ".", "SUCCESS", ",", "msg", ")", "return", "HttpResponseRedirect", "(", "prof", ".", "get_absolute_url", "(", ")", ")", "msg", "=", "_", "(", "'There were errors adding members to the group, see below.'", ")", "messages", ".", "add_message", "(", "request", ",", "messages", ".", "ERROR", ",", "msg", ")", "return", "profile", "(", "request", ",", "group_slug", ",", "member_form", "=", "form", ")" ]
add a member to the group .
train
false
5,062
def isstdin(): if (not _state): raise RuntimeError('no active input()') return _state.isstdin()
[ "def", "isstdin", "(", ")", ":", "if", "(", "not", "_state", ")", ":", "raise", "RuntimeError", "(", "'no active input()'", ")", "return", "_state", ".", "isstdin", "(", ")" ]
returns true if the last line was read from sys.stdin .
train
false
5,063
def files_contains(file1, file2, attributes=None): local_file = open(file1, 'U').readlines() history_data = open(file2, 'U').read() lines_diff = int(attributes.get('lines_diff', 0)) line_diff_count = 0 while local_file: contains = local_file.pop(0).rstrip('\n\r') if (contains not in history_data): line_diff_count += 1 if (line_diff_count > lines_diff): raise AssertionError(("Failed to find '%s' in history data. (lines_diff=%i):\n" % (contains, lines_diff)))
[ "def", "files_contains", "(", "file1", ",", "file2", ",", "attributes", "=", "None", ")", ":", "local_file", "=", "open", "(", "file1", ",", "'U'", ")", ".", "readlines", "(", ")", "history_data", "=", "open", "(", "file2", ",", "'U'", ")", ".", "read", "(", ")", "lines_diff", "=", "int", "(", "attributes", ".", "get", "(", "'lines_diff'", ",", "0", ")", ")", "line_diff_count", "=", "0", "while", "local_file", ":", "contains", "=", "local_file", ".", "pop", "(", "0", ")", ".", "rstrip", "(", "'\\n\\r'", ")", "if", "(", "contains", "not", "in", "history_data", ")", ":", "line_diff_count", "+=", "1", "if", "(", "line_diff_count", ">", "lines_diff", ")", ":", "raise", "AssertionError", "(", "(", "\"Failed to find '%s' in history data. (lines_diff=%i):\\n\"", "%", "(", "contains", ",", "lines_diff", ")", ")", ")" ]
check that the contents of file2 contain each line of file1 , allowing up to lines_diff missing lines .
train
false
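A short sketch of files_contains; the filenames are hypothetical. It raises AssertionError as soon as more than lines_diff lines of the first file are missing from the second:

files_contains('expected_lines.txt', 'actual_output.txt',
               attributes={'lines_diff': 2})   # tolerate up to two missing lines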
5,065
def new_post_mails(reply, users_and_watches): post_url = add_utm(reply.get_absolute_url(), 'kbforums-post') c = {'post': reply.content, 'post_html': reply.content_parsed, 'author': reply.creator, 'host': Site.objects.get_current().domain, 'thread': reply.thread.title, 'forum': reply.thread.document.title, 'post_url': post_url} return emails_with_users_and_watches(subject=_lazy(u'Re: {forum} - {thread}'), text_template='kbforums/email/new_post.ltxt', html_template='kbforums/email/new_post.html', context_vars=c, users_and_watches=users_and_watches)
[ "def", "new_post_mails", "(", "reply", ",", "users_and_watches", ")", ":", "post_url", "=", "add_utm", "(", "reply", ".", "get_absolute_url", "(", ")", ",", "'kbforums-post'", ")", "c", "=", "{", "'post'", ":", "reply", ".", "content", ",", "'post_html'", ":", "reply", ".", "content_parsed", ",", "'author'", ":", "reply", ".", "creator", ",", "'host'", ":", "Site", ".", "objects", ".", "get_current", "(", ")", ".", "domain", ",", "'thread'", ":", "reply", ".", "thread", ".", "title", ",", "'forum'", ":", "reply", ".", "thread", ".", "document", ".", "title", ",", "'post_url'", ":", "post_url", "}", "return", "emails_with_users_and_watches", "(", "subject", "=", "_lazy", "(", "u'Re: {forum} - {thread}'", ")", ",", "text_template", "=", "'kbforums/email/new_post.ltxt'", ",", "html_template", "=", "'kbforums/email/new_post.html'", ",", "context_vars", "=", "c", ",", "users_and_watches", "=", "users_and_watches", ")" ]
return an iterable of EmailMessages to send when a new post is created .
train
false
5,066
def cds(): rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) rebulk.regex('cd-?(?P<cd>\\d+)(?:-?of-?(?P<cd_count>\\d+))?', validator={'cd': (lambda match: (0 < match.value < 100)), 'cd_count': (lambda match: (0 < match.value < 100))}, formatter={'cd': int, 'cd_count': int}, children=True, private_parent=True, properties={'cd': [None], 'cd_count': [None]}) rebulk.regex('(?P<cd_count>\\d+)-?cds?', validator={'cd': (lambda match: (0 < match.value < 100)), 'cd_count': (lambda match: (0 < match.value < 100))}, formatter={'cd_count': int}, children=True, private_parent=True, properties={'cd': [None], 'cd_count': [None]}) return rebulk
[ "def", "cds", "(", ")", ":", "rebulk", "=", "Rebulk", "(", ")", ".", "regex_defaults", "(", "flags", "=", "re", ".", "IGNORECASE", ",", "abbreviations", "=", "[", "dash", "]", ")", "rebulk", ".", "regex", "(", "'cd-?(?P<cd>\\\\d+)(?:-?of-?(?P<cd_count>\\\\d+))?'", ",", "validator", "=", "{", "'cd'", ":", "(", "lambda", "match", ":", "(", "0", "<", "match", ".", "value", "<", "100", ")", ")", ",", "'cd_count'", ":", "(", "lambda", "match", ":", "(", "0", "<", "match", ".", "value", "<", "100", ")", ")", "}", ",", "formatter", "=", "{", "'cd'", ":", "int", ",", "'cd_count'", ":", "int", "}", ",", "children", "=", "True", ",", "private_parent", "=", "True", ",", "properties", "=", "{", "'cd'", ":", "[", "None", "]", ",", "'cd_count'", ":", "[", "None", "]", "}", ")", "rebulk", ".", "regex", "(", "'(?P<cd_count>\\\\d+)-?cds?'", ",", "validator", "=", "{", "'cd'", ":", "(", "lambda", "match", ":", "(", "0", "<", "match", ".", "value", "<", "100", ")", ")", ",", "'cd_count'", ":", "(", "lambda", "match", ":", "(", "0", "<", "match", ".", "value", "<", "100", ")", ")", "}", ",", "formatter", "=", "{", "'cd_count'", ":", "int", "}", ",", "children", "=", "True", ",", "private_parent", "=", "True", ",", "properties", "=", "{", "'cd'", ":", "[", "None", "]", ",", "'cd_count'", ":", "[", "None", "]", "}", ")", "return", "rebulk" ]
builder for the rebulk object that matches cd and cd_count properties .
train
false
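A hedged sketch of using the object built by cds(); it assumes the dash abbreviation imported in the surrounding guessit module and the standard Rebulk.matches API:

rebulk = cds()
matches = rebulk.matches('Some.Movie.cd1-of-2.avi')
# expect child matches named 'cd' (value 1) and 'cd_count' (value 2)
for match in matches:
    print(match.name, match.value)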
5,069
def _is_single_paragraph(node): if (len(node) == 0): return False elif (len(node) > 1): for subnode in node[1:]: if (not isinstance(subnode, nodes.system_message)): return False if isinstance(node[0], nodes.paragraph): return True return False
[ "def", "_is_single_paragraph", "(", "node", ")", ":", "if", "(", "len", "(", "node", ")", "==", "0", ")", ":", "return", "False", "elif", "(", "len", "(", "node", ")", ">", "1", ")", ":", "for", "subnode", "in", "node", "[", "1", ":", "]", ":", "if", "(", "not", "isinstance", "(", "subnode", ",", "nodes", ".", "system_message", ")", ")", ":", "return", "False", "if", "isinstance", "(", "node", "[", "0", "]", ",", "nodes", ".", "paragraph", ")", ":", "return", "True", "return", "False" ]
true if the node only contains one paragraph .
train
false
5,074
def dispatch_method(self, basename, arg, **options): method_name = ('%s_%s' % (basename, arg.__class__.__name__)) if hasattr(self, method_name): f = getattr(self, method_name) result = f(arg, **options) if (result is not None): return result raise NotImplementedError(("%s.%s can't handle: %r" % (self.__class__.__name__, basename, arg)))
[ "def", "dispatch_method", "(", "self", ",", "basename", ",", "arg", ",", "**", "options", ")", ":", "method_name", "=", "(", "'%s_%s'", "%", "(", "basename", ",", "arg", ".", "__class__", ".", "__name__", ")", ")", "if", "hasattr", "(", "self", ",", "method_name", ")", ":", "f", "=", "getattr", "(", "self", ",", "method_name", ")", "result", "=", "f", "(", "arg", ",", "**", "options", ")", "if", "(", "result", "is", "not", "None", ")", ":", "return", "result", "raise", "NotImplementedError", "(", "(", "\"%s.%s can't handle: %r\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "basename", ",", "arg", ")", ")", ")" ]
dispatch a method to the proper handlers .
train
false
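A sketch of the naming convention dispatch_method relies on: it looks for a handler called <basename>_<ClassName> on self, so assigning the function as a class attribute makes it a reusable visitor:

class Renderer(object):
    dispatch_method = dispatch_method   # reuse the helper as a method

    def render_str(self, arg, **options):
        return 'text: %s' % arg

    def render_int(self, arg, **options):
        return 'number: %d' % arg

print(Renderer().dispatch_method('render', 'hi'))   # -> text: hi
print(Renderer().dispatch_method('render', 42))     # -> number: 42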
5,075
def convert_sysv_runlevel(level): runlevel = str(level) if (runlevel == '0'): target = 'poweroff.target' elif (runlevel in ['1', 's', 'single']): target = 'rescue.target' elif (runlevel in ['2', '3', '4']): target = 'multi-user.target' elif (runlevel == '5'): target = 'graphical.target' elif (runlevel == '6'): target = 'reboot.target' else: raise ValueError(('unknown runlevel %s' % level)) return target
[ "def", "convert_sysv_runlevel", "(", "level", ")", ":", "runlevel", "=", "str", "(", "level", ")", "if", "(", "runlevel", "==", "'0'", ")", ":", "target", "=", "'poweroff.target'", "elif", "(", "runlevel", "in", "[", "'1'", ",", "'s'", ",", "'single'", "]", ")", ":", "target", "=", "'rescue.target'", "elif", "(", "runlevel", "in", "[", "'2'", ",", "'3'", ",", "'4'", "]", ")", ":", "target", "=", "'multi-user.target'", "elif", "(", "runlevel", "==", "'5'", ")", ":", "target", "=", "'graphical.target'", "elif", "(", "runlevel", "==", "'6'", ")", ":", "target", "=", "'reboot.target'", "else", ":", "raise", "ValueError", "(", "(", "'unknown runlevel %s'", "%", "level", ")", ")", "return", "target" ]
convert runlevel to systemd target .
train
false
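Usage of convert_sysv_runlevel is a direct mapping; unknown levels raise ValueError:

print(convert_sysv_runlevel(3))     # multi-user.target
print(convert_sysv_runlevel('s'))   # rescue.target
print(convert_sysv_runlevel(0))     # poweroff.target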
5,076
def _path_to_string(path): return '.'.join(path)
[ "def", "_path_to_string", "(", "path", ")", ":", "return", "'.'", ".", "join", "(", "path", ")" ]
convert a list of path elements into a single dot-separated path string .
train
false
5,077
def argrelmax(data, axis=0, order=1, mode='clip'): return argrelextrema(data, np.greater, axis, order, mode)
[ "def", "argrelmax", "(", "data", ",", "axis", "=", "0", ",", "order", "=", "1", ",", "mode", "=", "'clip'", ")", ":", "return", "argrelextrema", "(", "data", ",", "np", ".", "greater", ",", "axis", ",", "order", ",", "mode", ")" ]
calculate the relative maxima of data .
train
true
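A small check for argrelmax, which delegates to argrelextrema (assumed in scope from scipy.signal); by the definition of a relative maximum with order=1, indices 3 and 6 qualify:

import numpy as np
x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
print(argrelmax(x))   # (array([3, 6]),)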
5,078
def min_weighted_vertex_cover(G, weight=None): cost = dict(G.nodes(data=weight, default=1)) for (u, v) in G.edges(): min_cost = min(cost[u], cost[v]) cost[u] -= min_cost cost[v] -= min_cost return {u for (u, c) in cost.items() if (c == 0)}
[ "def", "min_weighted_vertex_cover", "(", "G", ",", "weight", "=", "None", ")", ":", "cost", "=", "dict", "(", "G", ".", "nodes", "(", "data", "=", "weight", ",", "default", "=", "1", ")", ")", "for", "(", "u", ",", "v", ")", "in", "G", ".", "edges", "(", ")", ":", "min_cost", "=", "min", "(", "cost", "[", "u", "]", ",", "cost", "[", "v", "]", ")", "cost", "[", "u", "]", "-=", "min_cost", "cost", "[", "v", "]", "-=", "min_cost", "return", "{", "u", "for", "(", "u", ",", "c", ")", "in", "cost", ".", "items", "(", ")", "if", "(", "c", "==", "0", ")", "}" ]
returns an approximate minimum weighted vertex cover .
train
false
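A quick sketch showing that min_weighted_vertex_cover returns a genuine cover (it is a 2-approximation via the local-ratio step above, not necessarily minimum):

import networkx as nx
G = nx.path_graph(4)   # edges 0-1, 1-2, 2-3
cover = min_weighted_vertex_cover(G)
# every edge must have at least one endpoint in the cover
assert all(u in cover or v in cover for u, v in G.edges())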
5,079
@must_have_permission('write') @must_not_be_registration @must_have_addon('forward', 'node') def forward_config_put(auth, node_addon, **kwargs): try: node_addon.url = request.json['url'] node_addon.label = request.json.get('label') except (KeyError, TypeError, ValueError): raise HTTPError(http.BAD_REQUEST) try: dirty_fields = node_addon.get_dirty_fields() node_addon.save() except ValidationValueError: raise HTTPError(http.BAD_REQUEST) if ('url' in dirty_fields): node_addon.owner.add_log(action='forward_url_changed', params=dict(node=node_addon.owner._id, project=node_addon.owner.parent_id, forward_url=node_addon.url), auth=auth, save=True) return {}
[ "@", "must_have_permission", "(", "'write'", ")", "@", "must_not_be_registration", "@", "must_have_addon", "(", "'forward'", ",", "'node'", ")", "def", "forward_config_put", "(", "auth", ",", "node_addon", ",", "**", "kwargs", ")", ":", "try", ":", "node_addon", ".", "url", "=", "request", ".", "json", "[", "'url'", "]", "node_addon", ".", "label", "=", "request", ".", "json", ".", "get", "(", "'label'", ")", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ")", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ")", "try", ":", "dirty_fields", "=", "node_addon", ".", "get_dirty_fields", "(", ")", "node_addon", ".", "save", "(", ")", "except", "ValidationValueError", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ")", "if", "(", "'url'", "in", "dirty_fields", ")", ":", "node_addon", ".", "owner", ".", "add_log", "(", "action", "=", "'forward_url_changed'", ",", "params", "=", "dict", "(", "node", "=", "node_addon", ".", "owner", ".", "_id", ",", "project", "=", "node_addon", ".", "owner", ".", "parent_id", ",", "forward_url", "=", "node_addon", ".", "url", ")", ",", "auth", "=", "auth", ",", "save", "=", "True", ")", "return", "{", "}" ]
set configuration for forward node settings .
train
false
5,080
def stem_text(text): text = utils.to_unicode(text) p = PorterStemmer() return ' '.join((p.stem(word) for word in text.split()))
[ "def", "stem_text", "(", "text", ")", ":", "text", "=", "utils", ".", "to_unicode", "(", "text", ")", "p", "=", "PorterStemmer", "(", ")", "return", "' '", ".", "join", "(", "(", "p", ".", "stem", "(", "word", ")", "for", "word", "in", "text", ".", "split", "(", ")", ")", ")" ]
return lowercase and stemmed version of string text .
train
false
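A sketch of stem_text; the exact stems depend on gensim's PorterStemmer implementation, so the output shown is approximate:

print(stem_text(u'Running processes easily'))   # roughly: u'run process easili'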
5,082
def profile_func(filename=None): def proffunc(fun): def profiled_func(*args, **kwargs): logging.info('Profiling function {0}'.format(fun.__name__)) try: profiler = cProfile.Profile() retval = profiler.runcall(fun, *args, **kwargs) profiler.dump_stats((filename or '{0}_func.profile'.format(fun.__name__))) except IOError: logging.exception('Could not open profile file {0}'.format(filename)) return retval return profiled_func return proffunc
[ "def", "profile_func", "(", "filename", "=", "None", ")", ":", "def", "proffunc", "(", "fun", ")", ":", "def", "profiled_func", "(", "*", "args", ",", "**", "kwargs", ")", ":", "logging", ".", "info", "(", "'Profiling function {0}'", ".", "format", "(", "fun", ".", "__name__", ")", ")", "try", ":", "profiler", "=", "cProfile", ".", "Profile", "(", ")", "retval", "=", "profiler", ".", "runcall", "(", "fun", ",", "*", "args", ",", "**", "kwargs", ")", "profiler", ".", "dump_stats", "(", "(", "filename", "or", "'{0}_func.profile'", ".", "format", "(", "fun", ".", "__name__", ")", ")", ")", "except", "IOError", ":", "logging", ".", "exception", "(", "'Could not open profile file {0}'", ".", "format", "(", "filename", ")", ")", "return", "retval", "return", "profiled_func", "return", "proffunc" ]
decorator for adding profiling to a nested function in salt .
train
true
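A usage sketch for the profile_func decorator; /tmp/busywork.profile is a hypothetical output path, and logging/cProfile are assumed imported as in the snippet:

@profile_func('/tmp/busywork.profile')
def busywork():
    return sum(i * i for i in range(100000))

busywork()   # runs under cProfile and dumps stats to /tmp/busywork.profile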
5,087
def getTranslateTetragrid(elementNode, prefix): translation = getCumulativeVector3Remove(Vector3(), elementNode, prefix) if translation.getIsDefault(): return None return getTranslateTetragridByTranslation(translation)
[ "def", "getTranslateTetragrid", "(", "elementNode", ",", "prefix", ")", ":", "translation", "=", "getCumulativeVector3Remove", "(", "Vector3", "(", ")", ",", "elementNode", ",", "prefix", ")", "if", "translation", ".", "getIsDefault", "(", ")", ":", "return", "None", "return", "getTranslateTetragridByTranslation", "(", "translation", ")" ]
get translate matrix and delete the translate attributes .
train
false
5,088
def unsafe_inline_enabled(response): non_report_only_policies = retrieve_csp_policies(response) report_only_policies = retrieve_csp_policies(response, True) policies_all = merge_policies_dict(non_report_only_policies, report_only_policies) if (len(policies_all) > 0): for directive_name in policies_all: if ((directive_name.lower() != CSP_DIRECTIVE_SCRIPT) and (directive_name.lower() != CSP_DIRECTIVE_STYLE)): continue for directive_value in policies_all[directive_name]: if (directive_value.strip().lower() == CSP_DIRECTIVE_VALUE_UNSAFE_INLINE): return True return False
[ "def", "unsafe_inline_enabled", "(", "response", ")", ":", "non_report_only_policies", "=", "retrieve_csp_policies", "(", "response", ")", "report_only_policies", "=", "retrieve_csp_policies", "(", "response", ",", "True", ")", "policies_all", "=", "merge_policies_dict", "(", "non_report_only_policies", ",", "report_only_policies", ")", "if", "(", "len", "(", "policies_all", ")", ">", "0", ")", ":", "for", "directive_name", "in", "policies_all", ":", "if", "(", "(", "directive_name", ".", "lower", "(", ")", "!=", "CSP_DIRECTIVE_SCRIPT", ")", "and", "(", "directive_name", ".", "lower", "(", ")", "!=", "CSP_DIRECTIVE_STYLE", ")", ")", ":", "continue", "for", "directive_value", "in", "policies_all", "[", "directive_name", "]", ":", "if", "(", "directive_value", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "CSP_DIRECTIVE_VALUE_UNSAFE_INLINE", ")", ":", "return", "True", "return", "False" ]
method to detect if csp policies for script/style are specified with the unsafe-inline directive value enabled .
train
false
5,089
def norm_dlldy(y): return (- y)
[ "def", "norm_dlldy", "(", "y", ")", ":", "return", "(", "-", "y", ")" ]
derivative of log pdf of standard normal with respect to y .
train
false
5,090
def isfloat(x): try: a = float(x) except ValueError: return False else: return True
[ "def", "isfloat", "(", "x", ")", ":", "try", ":", "a", "=", "float", "(", "x", ")", "except", "ValueError", ":", "return", "False", "else", ":", "return", "True" ]
check if the argument can be converted to a float .
train
false
5,092
@transaction.atomic def mass_get_or_create(model_class, base_queryset, id_field, default_dict, global_defaults): current_instances = list(base_queryset) current_ids = set([unicode(getattr(c, id_field)) for c in current_instances]) given_ids = map(unicode, default_dict.keys()) new_ids = [g for g in given_ids if (g not in current_ids)] prepared_models = [] for new_id in new_ids: defaults = default_dict[new_id] defaults[id_field] = new_id defaults.update(global_defaults) model_instance = model_class(**defaults) prepared_models.append(model_instance) if hasattr(model_class.objects, 'bulk_create'): model_class.objects.bulk_create(prepared_models) else: [m.save() for m in prepared_models] inserted_model_instances = prepared_models return (current_instances, inserted_model_instances)
[ "@", "transaction", ".", "atomic", "def", "mass_get_or_create", "(", "model_class", ",", "base_queryset", ",", "id_field", ",", "default_dict", ",", "global_defaults", ")", ":", "current_instances", "=", "list", "(", "base_queryset", ")", "current_ids", "=", "set", "(", "[", "unicode", "(", "getattr", "(", "c", ",", "id_field", ")", ")", "for", "c", "in", "current_instances", "]", ")", "given_ids", "=", "map", "(", "unicode", ",", "default_dict", ".", "keys", "(", ")", ")", "new_ids", "=", "[", "g", "for", "g", "in", "given_ids", "if", "(", "g", "not", "in", "current_ids", ")", "]", "prepared_models", "=", "[", "]", "for", "new_id", "in", "new_ids", ":", "defaults", "=", "default_dict", "[", "new_id", "]", "defaults", "[", "id_field", "]", "=", "new_id", "defaults", ".", "update", "(", "global_defaults", ")", "model_instance", "=", "model_class", "(", "**", "defaults", ")", "prepared_models", ".", "append", "(", "model_instance", ")", "if", "hasattr", "(", "model_class", ".", "objects", ",", "'bulk_create'", ")", ":", "model_class", ".", "objects", ".", "bulk_create", "(", "prepared_models", ")", "else", ":", "[", "m", ".", "save", "(", ")", "for", "m", "in", "prepared_models", "]", "inserted_model_instances", "=", "prepared_models", "return", "(", "current_instances", ",", "inserted_model_instances", ")" ]
updates the data by inserting all records that are not yet present ; does not delete existing records that are absent from the new data .
train
false
5,093
def decode_utf8(string): if isinstance(string, str): for encoding in (('utf-8',), ('windows-1252',), ('utf-8', 'ignore')): try: return string.decode(*encoding) except: pass return string return unicode(string)
[ "def", "decode_utf8", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "str", ")", ":", "for", "encoding", "in", "(", "(", "'utf-8'", ",", ")", ",", "(", "'windows-1252'", ",", ")", ",", "(", "'utf-8'", ",", "'ignore'", ")", ")", ":", "try", ":", "return", "string", ".", "decode", "(", "*", "encoding", ")", "except", ":", "pass", "return", "string", "return", "unicode", "(", "string", ")" ]
returns the given string as a unicode string .
train
false
5,097
def add_callers(target, source): new_callers = {} for (func, caller) in target.iteritems(): new_callers[func] = caller for (func, caller) in source.iteritems(): if (func in new_callers): new_callers[func] = tuple([(i[0] + i[1]) for i in zip(caller, new_callers[func])]) else: new_callers[func] = caller return new_callers
[ "def", "add_callers", "(", "target", ",", "source", ")", ":", "new_callers", "=", "{", "}", "for", "(", "func", ",", "caller", ")", "in", "target", ".", "iteritems", "(", ")", ":", "new_callers", "[", "func", "]", "=", "caller", "for", "(", "func", ",", "caller", ")", "in", "source", ".", "iteritems", "(", ")", ":", "if", "(", "func", "in", "new_callers", ")", ":", "new_callers", "[", "func", "]", "=", "tuple", "(", "[", "(", "i", "[", "0", "]", "+", "i", "[", "1", "]", ")", "for", "i", "in", "zip", "(", "caller", ",", "new_callers", "[", "func", "]", ")", "]", ")", "else", ":", "new_callers", "[", "func", "]", "=", "caller", "return", "new_callers" ]
combine two caller lists in a single list .
train
false
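A small worked example of add_callers merging pstats-style caller tuples elementwise (note the snippet is Python 2, using iteritems):

target = {'f': (1, 2)}
source = {'f': (3, 4), 'g': (5, 6)}
print(add_callers(target, source))   # {'f': (4, 6), 'g': (5, 6)}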
5,098
def _strip_cache_events(data, opts): event_data = copy.deepcopy(data) strip_fields = opts.get('cache_event_strip_fields', []) for field in strip_fields: if (field in event_data): del event_data[field] return event_data
[ "def", "_strip_cache_events", "(", "data", ",", "opts", ")", ":", "event_data", "=", "copy", ".", "deepcopy", "(", "data", ")", "strip_fields", "=", "opts", ".", "get", "(", "'cache_event_strip_fields'", ",", "[", "]", ")", "for", "field", "in", "strip_fields", ":", "if", "(", "field", "in", "event_data", ")", ":", "del", "event_data", "[", "field", "]", "return", "event_data" ]
strip out user-configured sensitive event data .
train
true
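A sketch of _strip_cache_events; the field names are hypothetical. Because of the deepcopy, the original event dict is left untouched:

opts = {'cache_event_strip_fields': ['password']}
data = {'id': 'minion1', 'password': 'hunter2'}
print(_strip_cache_events(data, opts))   # {'id': 'minion1'}
print(data)                              # still contains 'password'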
5,099
def _compute_node_to_inventory_dict(compute_node): result = {} if (compute_node.vcpus > 0): result[VCPU] = {'total': compute_node.vcpus, 'reserved': 0, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, 'allocation_ratio': compute_node.cpu_allocation_ratio} if (compute_node.memory_mb > 0): result[MEMORY_MB] = {'total': compute_node.memory_mb, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio} if (compute_node.local_gb > 0): result[DISK_GB] = {'total': compute_node.local_gb, 'reserved': (CONF.reserved_host_disk_mb * 1024), 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio} return result
[ "def", "_compute_node_to_inventory_dict", "(", "compute_node", ")", ":", "result", "=", "{", "}", "if", "(", "compute_node", ".", "vcpus", ">", "0", ")", ":", "result", "[", "VCPU", "]", "=", "{", "'total'", ":", "compute_node", ".", "vcpus", ",", "'reserved'", ":", "0", ",", "'min_unit'", ":", "1", ",", "'max_unit'", ":", "compute_node", ".", "vcpus", ",", "'step_size'", ":", "1", ",", "'allocation_ratio'", ":", "compute_node", ".", "cpu_allocation_ratio", "}", "if", "(", "compute_node", ".", "memory_mb", ">", "0", ")", ":", "result", "[", "MEMORY_MB", "]", "=", "{", "'total'", ":", "compute_node", ".", "memory_mb", ",", "'reserved'", ":", "CONF", ".", "reserved_host_memory_mb", ",", "'min_unit'", ":", "1", ",", "'max_unit'", ":", "compute_node", ".", "memory_mb", ",", "'step_size'", ":", "1", ",", "'allocation_ratio'", ":", "compute_node", ".", "ram_allocation_ratio", "}", "if", "(", "compute_node", ".", "local_gb", ">", "0", ")", ":", "result", "[", "DISK_GB", "]", "=", "{", "'total'", ":", "compute_node", ".", "local_gb", ",", "'reserved'", ":", "(", "CONF", ".", "reserved_host_disk_mb", "*", "1024", ")", ",", "'min_unit'", ":", "1", ",", "'max_unit'", ":", "compute_node", ".", "local_gb", ",", "'step_size'", ":", "1", ",", "'allocation_ratio'", ":", "compute_node", ".", "disk_allocation_ratio", "}", "return", "result" ]
given a supplied objects.ComputeNode , return a dict of inventory information keyed by resource class .
train
false
5,100
def docs_up_to_date(path): if (hasattr(sys, 'frozen') or (not is_git_repo())): return True html_path = os.path.join(qutebrowser.basedir, 'html', 'doc', path) filename = os.path.splitext(path)[0] asciidoc_path = os.path.join(qutebrowser.basedir, os.path.pardir, 'doc', 'help', (filename + '.asciidoc')) try: html_time = os.path.getmtime(html_path) asciidoc_time = os.path.getmtime(asciidoc_path) except FileNotFoundError: return True return (asciidoc_time <= html_time)
[ "def", "docs_up_to_date", "(", "path", ")", ":", "if", "(", "hasattr", "(", "sys", ",", "'frozen'", ")", "or", "(", "not", "is_git_repo", "(", ")", ")", ")", ":", "return", "True", "html_path", "=", "os", ".", "path", ".", "join", "(", "qutebrowser", ".", "basedir", ",", "'html'", ",", "'doc'", ",", "path", ")", "filename", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "0", "]", "asciidoc_path", "=", "os", ".", "path", ".", "join", "(", "qutebrowser", ".", "basedir", ",", "os", ".", "path", ".", "pardir", ",", "'doc'", ",", "'help'", ",", "(", "filename", "+", "'.asciidoc'", ")", ")", "try", ":", "html_time", "=", "os", ".", "path", ".", "getmtime", "(", "html_path", ")", "asciidoc_time", "=", "os", ".", "path", ".", "getmtime", "(", "asciidoc_path", ")", "except", "FileNotFoundError", ":", "return", "True", "return", "(", "asciidoc_time", "<=", "html_time", ")" ]
check if the generated html documentation is up to date .
train
false
5,101
def __determine_list_kwargs(options): list_kwargs = __determine_delete_kwargs(options) list_kwargs.update(__kwargs_option_to_dict(options.list_kwargs)) return list_kwargs
[ "def", "__determine_list_kwargs", "(", "options", ")", ":", "list_kwargs", "=", "__determine_delete_kwargs", "(", "options", ")", "list_kwargs", ".", "update", "(", "__kwargs_option_to_dict", "(", "options", ".", "list_kwargs", ")", ")", "return", "list_kwargs" ]
determine the standard keyword arguments to pass to list() method .
train
false
5,103
def _kb_readout(request, readout_slug, readouts, locale=None, mode=None, product=None): if (readout_slug not in readouts): raise Http404 return readouts[readout_slug](request, locale=locale, mode=mode, product=product)
[ "def", "_kb_readout", "(", "request", ",", "readout_slug", ",", "readouts", ",", "locale", "=", "None", ",", "mode", "=", "None", ",", "product", "=", "None", ")", ":", "if", "(", "readout_slug", "not", "in", "readouts", ")", ":", "raise", "Http404", "return", "readouts", "[", "readout_slug", "]", "(", "request", ",", "locale", "=", "locale", ",", "mode", "=", "mode", ",", "product", "=", "product", ")" ]
instantiate and return the readout with the given slug .
train
false
5,104
def MergeGlobalXcodeSettingsToSpec(global_dict, spec): global_xcode_settings = global_dict.get('xcode_settings', {}) for config in spec['configurations'].values(): if ('xcode_settings' in config): new_settings = global_xcode_settings.copy() new_settings.update(config['xcode_settings']) config['xcode_settings'] = new_settings
[ "def", "MergeGlobalXcodeSettingsToSpec", "(", "global_dict", ",", "spec", ")", ":", "global_xcode_settings", "=", "global_dict", ".", "get", "(", "'xcode_settings'", ",", "{", "}", ")", "for", "config", "in", "spec", "[", "'configurations'", "]", ".", "values", "(", ")", ":", "if", "(", "'xcode_settings'", "in", "config", ")", ":", "new_settings", "=", "global_xcode_settings", ".", "copy", "(", ")", "new_settings", ".", "update", "(", "config", "[", "'xcode_settings'", "]", ")", "config", "[", "'xcode_settings'", "]", "=", "new_settings" ]
merges the global xcode_settings dictionary into each configuration of the target represented by spec .
train
false
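A worked example of MergeGlobalXcodeSettingsToSpec; per-configuration settings override the global ones because they are applied last in the merge:

global_dict = {'xcode_settings': {'SDKROOT': 'macosx', 'ARCHS': 'x86_64'}}
spec = {'configurations': {'Debug': {'xcode_settings': {'ARCHS': 'arm64'}}}}
MergeGlobalXcodeSettingsToSpec(global_dict, spec)
print(spec['configurations']['Debug']['xcode_settings'])
# {'SDKROOT': 'macosx', 'ARCHS': 'arm64'}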
5,105
def serialize_field(value): if isinstance(value, basestring): return value return json.dumps(value, cls=EdxJSONEncoder)
[ "def", "serialize_field", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "return", "value", "return", "json", ".", "dumps", "(", "value", ",", "cls", "=", "EdxJSONEncoder", ")" ]
return a string version of the value .
train
false
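A short sketch of serialize_field; EdxJSONEncoder comes from the surrounding edx module and is assumed importable, and the basestring check makes this Python 2 code:

print(serialize_field('already a string'))    # already a string
print(serialize_field({'max_attempts': 3}))   # {"max_attempts": 3}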
5,106
def _rec_eval_in(g, a, v, i, j, K): if (i == j): return dmp_eval(g, a, v, K) (v, i) = ((v - 1), (i + 1)) return dmp_strip([_rec_eval_in(c, a, v, i, j, K) for c in g], v)
[ "def", "_rec_eval_in", "(", "g", ",", "a", ",", "v", ",", "i", ",", "j", ",", "K", ")", ":", "if", "(", "i", "==", "j", ")", ":", "return", "dmp_eval", "(", "g", ",", "a", ",", "v", ",", "K", ")", "(", "v", ",", "i", ")", "=", "(", "(", "v", "-", "1", ")", ",", "(", "i", "+", "1", ")", ")", "return", "dmp_strip", "(", "[", "_rec_eval_in", "(", "c", ",", "a", ",", "v", ",", "i", ",", "j", ",", "K", ")", "for", "c", "in", "g", "]", ",", "v", ")" ]
recursive helper for :func:dmp_eval_in .
train
false
5,107
def getSharedFace(firstEdge, faces, secondEdge): for firstEdgeFaceIndex in firstEdge.faceIndexes: for secondEdgeFaceIndex in secondEdge.faceIndexes: if (firstEdgeFaceIndex == secondEdgeFaceIndex): return faces[firstEdgeFaceIndex] return None
[ "def", "getSharedFace", "(", "firstEdge", ",", "faces", ",", "secondEdge", ")", ":", "for", "firstEdgeFaceIndex", "in", "firstEdge", ".", "faceIndexes", ":", "for", "secondEdgeFaceIndex", "in", "secondEdge", ".", "faceIndexes", ":", "if", "(", "firstEdgeFaceIndex", "==", "secondEdgeFaceIndex", ")", ":", "return", "faces", "[", "firstEdgeFaceIndex", "]", "return", "None" ]
get the face which is shared by two edges .
train
false
5,108
def deflate_long(n, add_sign_padding=True): s = bytes() n = long(n) while ((n != 0) and (n != (-1))): s = (struct.pack('>I', (n & xffffffff)) + s) n >>= 32 for i in enumerate(s): if ((n == 0) and (i[1] != deflate_zero)): break if ((n == (-1)) and (i[1] != deflate_ff)): break else: i = (0,) if (n == 0): s = zero_byte else: s = max_byte s = s[i[0]:] if add_sign_padding: if ((n == 0) and (byte_ord(s[0]) >= 128)): s = (zero_byte + s) if ((n == (-1)) and (byte_ord(s[0]) < 128)): s = (max_byte + s) return s
[ "def", "deflate_long", "(", "n", ",", "add_sign_padding", "=", "True", ")", ":", "s", "=", "bytes", "(", ")", "n", "=", "long", "(", "n", ")", "while", "(", "(", "n", "!=", "0", ")", "and", "(", "n", "!=", "(", "-", "1", ")", ")", ")", ":", "s", "=", "(", "struct", ".", "pack", "(", "'>I'", ",", "(", "n", "&", "xffffffff", ")", ")", "+", "s", ")", "n", ">>=", "32", "for", "i", "in", "enumerate", "(", "s", ")", ":", "if", "(", "(", "n", "==", "0", ")", "and", "(", "i", "[", "1", "]", "!=", "deflate_zero", ")", ")", ":", "break", "if", "(", "(", "n", "==", "(", "-", "1", ")", ")", "and", "(", "i", "[", "1", "]", "!=", "deflate_ff", ")", ")", ":", "break", "else", ":", "i", "=", "(", "0", ",", ")", "if", "(", "n", "==", "0", ")", ":", "s", "=", "zero_byte", "else", ":", "s", "=", "max_byte", "s", "=", "s", "[", "i", "[", "0", "]", ":", "]", "if", "add_sign_padding", ":", "if", "(", "(", "n", "==", "0", ")", "and", "(", "byte_ord", "(", "s", "[", "0", "]", ")", ">=", "128", ")", ")", ":", "s", "=", "(", "zero_byte", "+", "s", ")", "if", "(", "(", "n", "==", "(", "-", "1", ")", ")", "and", "(", "byte_ord", "(", "s", "[", "0", "]", ")", "<", "128", ")", ")", ":", "s", "=", "(", "max_byte", "+", "s", ")", "return", "s" ]
turns a long-int into a normalized byte string .
train
true
5,109
def promoted_param(registry, xml_parent, data): pdef = base_param(registry, xml_parent, data, False, 'hudson.plugins.promoted__builds.parameters.PromotedBuildParameterDefinition') try: XML.SubElement(pdef, 'projectName').text = data['project-name'] except KeyError: raise MissingAttributeError('project-name') XML.SubElement(pdef, 'promotionProcessName').text = data.get('promotion-name', None)
[ "def", "promoted_param", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "pdef", "=", "base_param", "(", "registry", ",", "xml_parent", ",", "data", ",", "False", ",", "'hudson.plugins.promoted__builds.parameters.PromotedBuildParameterDefinition'", ")", "try", ":", "XML", ".", "SubElement", "(", "pdef", ",", "'projectName'", ")", ".", "text", "=", "data", "[", "'project-name'", "]", "except", "KeyError", ":", "raise", "MissingAttributeError", "(", "'project-name'", ")", "XML", ".", "SubElement", "(", "pdef", ",", "'promotionProcessName'", ")", ".", "text", "=", "data", ".", "get", "(", "'promotion-name'", ",", "None", ")" ]
yaml: promoted build . a promoted build parameter .
train
false
5,110
def snapshot_in_progress(client, repository=None, snapshot=None): allsnaps = get_snapshot_data(client, repository=repository) inprogress = [snap['snapshot'] for snap in allsnaps if (('state' in snap.keys()) and (snap['state'] == 'IN_PROGRESS'))] if snapshot: return (snapshot if (snapshot in inprogress) else False) elif (len(inprogress) == 0): return False elif (len(inprogress) == 1): return inprogress[0] else: raise CuratorException('More than 1 snapshot in progress: {0}'.format(inprogress))
[ "def", "snapshot_in_progress", "(", "client", ",", "repository", "=", "None", ",", "snapshot", "=", "None", ")", ":", "allsnaps", "=", "get_snapshot_data", "(", "client", ",", "repository", "=", "repository", ")", "inprogress", "=", "[", "snap", "[", "'snapshot'", "]", "for", "snap", "in", "allsnaps", "if", "(", "(", "'state'", "in", "snap", ".", "keys", "(", ")", ")", "and", "(", "snap", "[", "'state'", "]", "==", "'IN_PROGRESS'", ")", ")", "]", "if", "snapshot", ":", "return", "(", "snapshot", "if", "(", "snapshot", "in", "inprogress", ")", "else", "False", ")", "elif", "(", "len", "(", "inprogress", ")", "==", "0", ")", ":", "return", "False", "elif", "(", "len", "(", "inprogress", ")", "==", "1", ")", ":", "return", "inprogress", "[", "0", "]", "else", ":", "raise", "CuratorException", "(", "'More than 1 snapshot in progress: {0}'", ".", "format", "(", "inprogress", ")", ")" ]
determine whether the provided snapshot in repository is in_progress .
train
false
5,111
def discovery_agent(rpyc_port): data = struct.pack('<H', rpyc_port) s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(('', UDP_DISCOVERY_PORT)) log('discovery_agent: started') while True: (query, addr) = s.recvfrom(MAX_DGRAM_SIZE) if (query == QUERY_MAGIC): log('discovery_agent: now answering', addr) s.sendto(data, addr)
[ "def", "discovery_agent", "(", "rpyc_port", ")", ":", "data", "=", "struct", ".", "pack", "(", "'<H'", ",", "rpyc_port", ")", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "s", ".", "bind", "(", "(", "''", ",", "UDP_DISCOVERY_PORT", ")", ")", "log", "(", "'discovery_agent: started'", ")", "while", "True", ":", "(", "query", ",", "addr", ")", "=", "s", ".", "recvfrom", "(", "MAX_DGRAM_SIZE", ")", "if", "(", "query", "==", "QUERY_MAGIC", ")", ":", "log", "(", "'discovery_agent: now answering'", ",", "addr", ")", "s", ".", "sendto", "(", "data", ",", "addr", ")" ]
answers broadcast queries with the port of the rpyc server on this machine .
train
false
5,113
def consistencygroup_destroy(context, consistencygroup_id): return IMPL.consistencygroup_destroy(context, consistencygroup_id)
[ "def", "consistencygroup_destroy", "(", "context", ",", "consistencygroup_id", ")", ":", "return", "IMPL", ".", "consistencygroup_destroy", "(", "context", ",", "consistencygroup_id", ")" ]
destroy the consistencygroup or raise if it does not exist .
train
false
5,115
def obedient_process(*args, **kwargs): stopped = [] def handler(sig, frame): send('SIGINT') stopped.append(1) signal.signal(signal.SIGINT, handler) send('STARTED') while (not stopped): signal.pause() send('STOPPED')
[ "def", "obedient_process", "(", "*", "args", ",", "**", "kwargs", ")", ":", "stopped", "=", "[", "]", "def", "handler", "(", "sig", ",", "frame", ")", ":", "send", "(", "'SIGINT'", ")", "stopped", ".", "append", "(", "1", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "handler", ")", "send", "(", "'STARTED'", ")", "while", "(", "not", "stopped", ")", ":", "signal", ".", "pause", "(", ")", "send", "(", "'STOPPED'", ")" ]
waits for sigint and exits normally .
train
false
5,116
def startswith_(a, fragment, msg=None): assert a.startswith(fragment), (msg or ('%r does not start with %r' % (a, fragment)))
[ "def", "startswith_", "(", "a", ",", "fragment", ",", "msg", "=", "None", ")", ":", "assert", "a", ".", "startswith", "(", "fragment", ")", ",", "(", "msg", "or", "(", "'%r does not start with %r'", "%", "(", "a", ",", "fragment", ")", ")", ")" ]
asserts that a starts with fragment .
train
false
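A usage sketch of the startswith_ assertion helper:

startswith_('hello world', 'hello')   # passes silently
try:
    startswith_('hello', 'world', msg='bad prefix')
except AssertionError as e:
    print(e)   # bad prefix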