Columns: id_within_dataset (int64, 1 to 55.5k); snippet (string, lengths 19 to 14.2k); tokens (list, lengths 6 to 1.63k); nl (string, lengths 6 to 352); split_within_dataset (string, 1 value); is_duplicated (bool, 2 classes)
2,021
def holdAcknowledge():
    a = TpPd(pd=3)
    b = MessageType(mesType=25)
    packet = (a / b)
    return packet
hold acknowledge section 9.
train
true
2,022
def get_project_hierarchy(context, project_id, subtree_as_ids=False, parents_as_ids=False, is_admin_project=False):
    keystone = _keystone_client(context)
    generic_project = GenericProjectInfo(project_id, keystone.version)
    if (keystone.version == 'v3'):
        project = keystone.projects.get(project_id, subtree_as_ids=subtree_as_ids, parents_as_ids=parents_as_ids)
        generic_project.parent_id = None
        if (project.parent_id != project.domain_id):
            generic_project.parent_id = project.parent_id
        generic_project.subtree = (project.subtree if subtree_as_ids else None)
        generic_project.parents = None
        if parents_as_ids:
            generic_project.parents = _filter_domain_id_from_parents(project.domain_id, project.parents)
        generic_project.is_admin_project = is_admin_project
    return generic_project
a helper method to get the project hierarchy.
train
false
2,023
def egquery(**keywds):
    cgi = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/egquery.fcgi'
    variables = {}
    variables.update(keywds)
    return _open(cgi, variables)
egquery provides entrez database counts for a global search.
train
false
2,024
def rank_est(A, atol=1e-13, rtol=0):
    A = np.atleast_2d(A)
    s = svd(A, compute_uv=False)
    tol = max(atol, (rtol * s[0]))
    rank = int((s >= tol).sum())
    return rank
estimate the rank of a matrix.
train
true
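A quick usage sketch for rank_est (assuming numpy is imported as np and svd comes from scipy.linalg, as the snippet implies):

import numpy as np
from scipy.linalg import svd

A = np.outer([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])  # rank-1 3x3 matrix
print(rank_est(A))  # 1: only one singular value exceeds the tolerance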
2,025
def waic(trace, model=None, n_eff=False, pointwise=False):
    model = modelcontext(model)
    log_py = log_post_trace(trace, model)
    lppd_i = logsumexp(log_py, axis=0, b=(1.0 / log_py.shape[0]))
    vars_lpd = np.var(log_py, axis=0)
    if np.any((vars_lpd > 0.4)):
        warnings.warn('For one or more samples the posterior variance of the\n log predictive densities exceeds 0.4. This could be indication of\n WAIC starting to fail see http://arxiv.org/abs/1507.04544 for details\n ')
    waic_i = ((-2) * (lppd_i - vars_lpd))
    waic_se = np.sqrt((len(waic_i) * np.var(waic_i)))
    waic = np.sum(waic_i)
    p_waic = np.sum(vars_lpd)
    if n_eff:
        return (waic, waic_se, p_waic)
    elif pointwise:
        return (waic, waic_se, waic_i, p_waic)
    else:
        return (waic, waic_se)
calculate the widely applicable information criterion (waic).
train
false
2,026
def get_array_section_has_problem(course_id):
    course = modulestore().get_course(course_id, depth=4)
    b_section_has_problem = ([False] * len(course.get_children()))
    i = 0
    for section in course.get_children():
        for subsection in section.get_children():
            for unit in subsection.get_children():
                for child in unit.get_children():
                    if (child.location.category == 'problem'):
                        b_section_has_problem[i] = True
                        break
                if b_section_has_problem[i]:
                    break
            if b_section_has_problem[i]:
                break
        i += 1
    return b_section_has_problem
returns an array of true/false indicating whether each section has problems.
train
false
2,027
def OpenDocumentChart():
    doc = OpenDocument('application/vnd.oasis.opendocument.chart')
    doc.chart = Chart()
    doc.body.addElement(doc.chart)
    return doc
creates a chart document.
train
false
2,028
def get_themes():
    themes = {}
    builtins = pkg_resources.get_entry_map(dist=u'mkdocs', group=u'mkdocs.themes')
    for theme in pkg_resources.iter_entry_points(group=u'mkdocs.themes'):
        if ((theme.name in builtins) and (theme.dist.key != u'mkdocs')):
            raise exceptions.ConfigurationError(u'The theme {0} is a builtin theme but {1} provides a theme with the same name'.format(theme.name, theme.dist.key))
        elif (theme.name in themes):
            multiple_packages = [themes[theme.name].dist.key, theme.dist.key]
            log.warning(u"The theme %s is provided by the Python packages '%s'. The one in %s will be used.", theme.name, u','.join(multiple_packages), theme.dist.key)
        themes[theme.name] = theme
    return themes
returns the list of available themes.
train
false
2,029
def crc_finalize(crc):
    return (crc & _MASK)
finalize crc-32c checksum.
train
false
2,030
@logic.validate(logic.schema.default_activity_list_schema)
def package_activity_list(context, data_dict):
    _check_access('package_show', context, data_dict)
    model = context['model']
    package_ref = data_dict.get('id')
    package = model.Package.get(package_ref)
    if (package is None):
        raise logic.NotFound
    offset = int(data_dict.get('offset', 0))
    limit = int(data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    _activity_objects = model.activity.package_activity_list(package.id, limit=limit, offset=offset)
    activity_objects = _filter_activity_by_user(_activity_objects, _activity_stream_get_filtered_users())
    return model_dictize.activity_list_dictize(activity_objects, context)
return the given dataset's public activity stream.
train
false
2,032
def string_concat(*strings):
    return ''.join([str(el) for el in strings])
lazy variant of string concatenation.
train
false
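Despite the "lazy" wording in the description, this version joins eagerly; a minimal usage check:

print(string_concat('py', 3, '.', 'x'))  # 'py3.x': every argument is str()-ed, then joined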
2,035
def modifies_known_mutable(obj, attr):
    for (typespec, unsafe) in _mutable_spec:
        if isinstance(obj, typespec):
            return (attr in unsafe)
    return False
this function checks if an attribute on a builtin mutable object would modify it if called.
train
true
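The snippet depends on an external _mutable_spec table; a hypothetical stand-in (the real table in the source library may differ) makes the check runnable:

# hypothetical _mutable_spec: pairs of (type, unsafe attribute names)
_mutable_spec = (
    (list, frozenset(['append', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort'])),
    (dict, frozenset(['clear', 'pop', 'popitem', 'setdefault', 'update'])),
)

print(modifies_known_mutable([], 'append'))  # True: list.append mutates
print(modifies_known_mutable([], 'count'))   # False: list.count is read-only
print(modifies_known_mutable((), 'count'))   # False: tuples have no entry in the spec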
2,036
def getChainMatrixSVGIfNecessary(elementNode, yAxisPointingUpward):
    matrixSVG = MatrixSVG()
    if yAxisPointingUpward:
        return matrixSVG
    return getChainMatrixSVG(elementNode, matrixSVG)
get chain matrixSVG by svgElement and yAxisPointingUpward.
train
false
2,037
def initRpc(config):
    rpc_data = {'connect': '127.0.0.1', 'port': '8336', 'user': 'PLACEHOLDER', 'password': 'PLACEHOLDER', 'clienttimeout': '900'}
    try:
        fptr = open(config, 'r')
        lines = fptr.readlines()
        fptr.close()
    except:
        return None
    for line in lines:
        if (not line.startswith('rpc')):
            continue
        key_val = line.split(None, 1)[0]
        (key, val) = key_val.split('=', 1)
        if ((not key) or (not val)):
            continue
        rpc_data[key[3:]] = val
    url = ('http://%(user)s:%(password)s@%(connect)s:%(port)s' % rpc_data)
    return (url, int(rpc_data['clienttimeout']))
initialize namecoin rpc.
train
false
2,038
def get_view(request):
    return HttpResponse('Hello world')
a simple view that expects a GET request.
train
false
2,040
def _get_next_prev_month(generic_view, naive_result, is_previous, use_first_day):
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()
    if allow_empty:
        result = naive_result
    else:
        if is_previous:
            lookup = {('%s__lte' % date_field): naive_result}
            ordering = ('-%s' % date_field)
        else:
            lookup = {('%s__gte' % date_field): naive_result}
            ordering = date_field
        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
        try:
            result = getattr(qs[0], date_field)
        except IndexError:
            result = None
    if hasattr(result, 'date'):
        result = result.date()
    if (result and use_first_day):
        result = result.replace(day=1)
    if (result and (allow_future or (result < datetime.date.today()))):
        return result
    else:
        return None
helper: get the next or the previous valid date.
train
false
2,041
@pytest.fixture(scope='session')
def unicode_encode_err():
    return UnicodeEncodeError('ascii', '', 0, 2, 'fake exception')
provide a fake UnicodeEncodeError exception.
train
false
2,042
def read_numpy(fh, byteorder, dtype, count):
    dtype = ('b' if (dtype[(-1)] == 's') else (byteorder + dtype[(-1)]))
    return fh.read_array(dtype, count)
read tag data from file and return as numpy array.
train
true
2,044
def protocol_from_http(protocol_str):
    return (int(protocol_str[5]), int(protocol_str[7]))
return a protocol tuple from the given HTTP/x.y string.
train
false
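The function reads the fixed positions of the major and minor digits, so it only handles single-digit versions; for example:

print(protocol_from_http('HTTP/1.1'))  # (1, 1): characters at indices 5 and 7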
2,047
def get_temper_devices():
    from temperusb.temper import TemperHandler
    return TemperHandler().get_devices()
scan the temper devices from temperusb.
train
false
2,048
def _move_to_next(fid, byte=8):
    now = fid.tell()
    if ((now % byte) != 0):
        now = ((now - (now % byte)) + byte)
        fid.seek(now, 0)
move to next byte boundary.
train
false
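A small demonstration with an in-memory file (io.BytesIO supports tell/seek like a real file):

import io

fid = io.BytesIO(b'0123456789abcdef')
fid.seek(3)
_move_to_next(fid, byte=8)  # 3 is not on an 8-byte boundary, so jump ahead
print(fid.tell())           # 8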
2,049
def _align(sequenceA, sequenceB, match_fn, gap_A_fn, gap_B_fn, penalize_extend_when_opening, penalize_end_gaps, align_globally, gap_char, force_generic, score_only, one_alignment_only):
    if ((not sequenceA) or (not sequenceB)):
        return []
    try:
        (sequenceA + gap_char)
        (sequenceB + gap_char)
    except TypeError:
        raise TypeError((('both sequences must be of the same type, either ' + 'string/sequence object or list. Gap character must ') + 'fit the sequence type (string or list)'))
    if (not isinstance(sequenceA, list)):
        sequenceA = str(sequenceA)
    if (not isinstance(sequenceB, list)):
        sequenceB = str(sequenceB)
    if ((not force_generic) and isinstance(gap_A_fn, affine_penalty) and isinstance(gap_B_fn, affine_penalty)):
        (open_A, extend_A) = (gap_A_fn.open, gap_A_fn.extend)
        (open_B, extend_B) = (gap_B_fn.open, gap_B_fn.extend)
        x = _make_score_matrix_fast(sequenceA, sequenceB, match_fn, open_A, extend_A, open_B, extend_B, penalize_extend_when_opening, penalize_end_gaps, align_globally, score_only)
    else:
        x = _make_score_matrix_generic(sequenceA, sequenceB, match_fn, gap_A_fn, gap_B_fn, penalize_end_gaps, align_globally, score_only)
    (score_matrix, trace_matrix) = x
    starts = _find_start(score_matrix, align_globally)
    best_score = max([x[0] for x in starts])
    if score_only:
        return best_score
    tolerance = 0
    starts = [(score, pos) for (score, pos) in starts if (rint(abs((score - best_score))) <= rint(tolerance))]
    return _recover_alignments(sequenceA, sequenceB, starts, score_matrix, trace_matrix, align_globally, gap_char, one_alignment_only, gap_A_fn, gap_B_fn)
align two sequences.
train
false
2,050
def a_product(x, y, z=1):
    return ((x * y) * z)
simple function that returns the product of three numbers.
train
false
2,051
def owner(*paths):
    return __salt__['lowpkg.owner'](*paths)
return the name of the package that owns the file.
train
false
2,053
def Enabled():
    return (not GlobalProcess().IsDefault())
indicates whether the dev_appserver is running in multiprocess mode.
train
false
2,054
@pytest.mark.network
def test_requirements_file(script):
    (other_lib_name, other_lib_version) = ('anyjson', '0.3')
    script.scratch_path.join('initools-req.txt').write(textwrap.dedent((' INITools==0.2\n # and something else to test out:\n %s<=%s\n ' % (other_lib_name, other_lib_version))))
    result = script.pip('install', '-r', (script.scratch_path / 'initools-req.txt'))
    assert (((script.site_packages / 'INITools-0.2-py%s.egg-info') % pyversion) in result.files_created)
    assert ((script.site_packages / 'initools') in result.files_created)
    assert result.files_created[(script.site_packages / other_lib_name)].dir
    fn = ('%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion))
    assert result.files_created[(script.site_packages / fn)].dir
test installing from a requirements file.
train
false
2,055
def cnn_pool(pool_dim, convolved_features):
    num_images = convolved_features.shape[1]
    num_features = convolved_features.shape[0]
    convolved_dim = convolved_features.shape[2]
    assert ((convolved_dim % pool_dim) == 0), 'Pooling dimension is not an exact multiple of convolved dimension'
    pool_size = (convolved_dim / pool_dim)
    pooled_features = np.zeros(shape=(num_features, num_images, pool_size, pool_size), dtype=np.float64)
    for i in range(pool_size):
        for j in range(pool_size):
            pool = convolved_features[:, :, (i * pool_dim):((i + 1) * pool_dim), (j * pool_dim):((j + 1) * pool_dim)]
            pooled_features[:, :, i, j] = np.mean(np.mean(pool, 2), 2)
    return pooled_features
pools the given convolved features.
train
false
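The snippet relies on Python 2 integer division for pool_size; with that computed as convolved_dim // pool_dim it behaves the same under Python 3. A shape check under that assumption:

import numpy as np

# 2 feature maps, 3 images, 4x4 convolved outputs; pool_dim=2 gives 2x2 pooled maps
convolved = np.arange(2 * 3 * 4 * 4, dtype=np.float64).reshape(2, 3, 4, 4)
pooled = cnn_pool(2, convolved)
print(pooled.shape)  # (2, 3, 2, 2): each entry is the mean of one 2x2 block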
2,056
def build_output_stream(args, env, request, response, output_options):
    req_h = (OUT_REQ_HEAD in output_options)
    req_b = (OUT_REQ_BODY in output_options)
    resp_h = (OUT_RESP_HEAD in output_options)
    resp_b = (OUT_RESP_BODY in output_options)
    req = (req_h or req_b)
    resp = (resp_h or resp_b)
    output = []
    Stream = get_stream_type(env, args)
    if req:
        output.append(Stream(msg=HTTPRequest(request), with_headers=req_h, with_body=req_b))
    if (req_b and resp):
        output.append(['\n\n'])
    if resp:
        output.append(Stream(msg=HTTPResponse(response), with_headers=resp_h, with_body=resp_b))
    if (env.stdout_isatty and resp_b):
        output.append(['\n\n'])
    return chain(*output)
build and return a chain of iterators over the request-response exchange, each of which yields chunks of bytes.
train
false
2,057
def lanl_graph():
    import networkx as nx
    try:
        fh = open('lanl_routes.edgelist', 'r')
    except IOError:
        print 'lanl.edges not found'
        raise
    G = nx.Graph()
    time = {}
    time[0] = 0
    for line in fh.readlines():
        (head, tail, rtt) = line.split()
        G.add_edge(int(head), int(tail))
        time[int(head)] = float(rtt)
    G0 = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)[0]
    G0.rtt = {}
    for n in G0:
        G0.rtt[n] = time[n]
    return G0
return the lanl internet view graph.
train
false
2,058
def api_access_enabled_or_404(view_func):
    @wraps(view_func)
    def wrapped_view(view_obj, *args, **kwargs):
        'Wrapper for the view function.'
        if ApiAccessConfig.current().enabled:
            return view_func(view_obj, *args, **kwargs)
        return HttpResponseNotFound()
    return wrapped_view
return a 404 if the api access management feature is not enabled.
train
false
2,059
def eventlog(request, event=0):
    if (not test_user_authenticated(request)):
        return login(request, next=('/cobbler_web/eventlog/%s' % str(event)), expired=True)
    event_info = remote.get_events()
    if (event not in event_info):
        return HttpResponse('event not found')
    data = event_info[event]
    eventname = data[0]
    eventtime = data[1]
    eventstate = data[2]
    eventlog = remote.get_event_log(event)
    t = get_template('eventlog.tmpl')
    vars = {'eventlog': eventlog, 'eventname': eventname, 'eventstate': eventstate, 'eventid': event, 'eventtime': eventtime, 'version': remote.extended_version(request.session['token'])['version'], 'username': username}
    html = t.render(RequestContext(request, vars))
    return HttpResponse(html)
shows the log for a given event.
train
false
2,060
def ipv4_to_bin(ip):
    return addrconv.ipv4.text_to_bin(ip)
converts human readable ipv4 string to binary representation.
train
false
2,061
def delete_network(context, net_id):
    session = context.session
    with session.begin(subtransactions=True):
        net = session.query(BrocadeNetwork).filter_by(id=net_id).first()
        if (net is not None):
            session.delete(net)
permanently delete a network.
train
false
2,062
def int_to_bin(i):
    i1 = (i % 256)
    i2 = int((i / 256))
    return (chr(i1) + chr(i2))
integer to two bytes.
train
true
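The low byte comes first, i.e. little-endian; for example:

packed = int_to_bin(515)            # 515 = 3 + 2*256
print([ord(c) for c in packed])     # [3, 2]: low byte, then high byte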
2,064
def create_tcp_socket(module):
    type_ = module.SOCK_STREAM
    if hasattr(module, 'SOCK_CLOEXEC'):
        type_ |= module.SOCK_CLOEXEC
    sock = module.socket(module.AF_INET, type_)
    _set_default_tcpsock_options(module, sock)
    return sock
create a tcp socket with the cloexec flag set where available.
train
false
2,065
def get_output_ids(ids_bcs_added_field, corrected_bc, num_errors, added_field, max_bc_errors=1.5, enum_val=1):
    bc_corrected_flag = None
    if (added_field is None):
        curr_added_field = ''
    else:
        curr_added_field = added_field
    if (corrected_bc is None):
        curr_bc = ''
    else:
        curr_bc = corrected_bc
    log_id = ''
    if (num_errors > max_bc_errors):
        sample_id = ('Unassigned_%d' % enum_val)
        bc_corrected_flag = 'not_corrected'
    else:
        try:
            base_sample_id = ids_bcs_added_field[(curr_bc, curr_added_field)]
            sample_id = ('%s_%d' % (base_sample_id, enum_val))
            if corrected_bc:
                log_id += ('%s' % corrected_bc)
            if (corrected_bc and added_field):
                log_id += ','
            if added_field:
                log_id += ('%s' % added_field)
            if log_id:
                log_id += ','
            log_id += ('%s' % base_sample_id)
            if (num_errors > 0):
                bc_corrected_flag = 'corrected'
        except KeyError:
            sample_id = ('Unassigned_%d' % enum_val)
    return (sample_id, log_id, bc_corrected_flag)
returns the sample id to write to output fasta/qual files.
train
false
2,066
def fix_local_scheme(home_dir, symlink=True):
    try:
        import sysconfig
    except ImportError:
        pass
    else:
        if (sysconfig._get_default_scheme() == 'posix_local'):
            local_path = os.path.join(home_dir, 'local')
            if (not os.path.exists(local_path)):
                os.mkdir(local_path)
                for subdir_name in os.listdir(home_dir):
                    if (subdir_name == 'local'):
                        continue
                    copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)), os.path.join(local_path, subdir_name), symlink)
platforms that use the "posix_local" install scheme need to be given an additional "local" location.
train
true
2,068
def _get_filename(fd):
    if hasattr(fd, 'name'):
        return fd.name
    return fd
transform the absolute test filenames to relative ones.
train
false
2,069
def __generate_crc16_table():
    result = []
    for byte in range(256):
        crc = 0
        for _ in range(8):
            if ((byte ^ crc) & 1):
                crc = ((crc >> 1) ^ 40961)
            else:
                crc >>= 1
            byte >>= 1
        result.append(crc)
    return result
generates a crc16 lookup table.
train
false
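40961 is 0xA001, the reflected CRC-16/Modbus polynomial. A sketch of the usual table-driven lookup loop built on this table (the surrounding library may organize it differently):

table = __generate_crc16_table()

def crc16(data, crc=0xFFFF):
    # standard reflected table-driven CRC-16 with initial value 0xFFFF
    for byte in data:
        crc = (crc >> 8) ^ table[(crc ^ byte) & 0xFF]
    return crc & 0xFFFF

print(hex(crc16(b'123456789')))  # 0x4b37, the CRC-16/MODBUS check value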
2,070
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print(('benchmarking with %d features' % n))
        (X_train, y_train, X_test, y_test) = generate_dataset(n_train, n_test, n)
        for (cls_name, estimator) in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = (1000000.0 * scoreatpercentile(runtimes, percentile))
    return percentiles
estimate influence of the number of features on prediction time.
train
false
2,071
def model_query(context, model, *args, **kwargs):
    session = (kwargs.get('session') or get_session())
    read_deleted = (kwargs.get('read_deleted') or context.read_deleted)
    project_only = kwargs.get('project_only', False)

    def issubclassof_nova_base(obj):
        return (isinstance(obj, type) and issubclass(obj, models.NovaBase))

    base_model = model
    if (not issubclassof_nova_base(base_model)):
        base_model = kwargs.get('base_model', None)
        if (not issubclassof_nova_base(base_model)):
            raise Exception(_('model or base_model parameter should be subclass of NovaBase'))
    query = session.query(model, *args)
    default_deleted_value = base_model.__mapper__.c.deleted.default.arg
    if (read_deleted == 'no'):
        query = query.filter((base_model.deleted == default_deleted_value))
    elif (read_deleted == 'yes'):
        pass
    elif (read_deleted == 'only'):
        query = query.filter((base_model.deleted != default_deleted_value))
    else:
        raise Exception((_("Unrecognized read_deleted value '%s'") % read_deleted))
    if (nova.context.is_user_context(context) and project_only):
        if (project_only == 'allow_none'):
            query = query.filter(or_((base_model.project_id == context.project_id), (base_model.project_id == None)))
        else:
            query = query.filter_by(project_id=context.project_id)
    return query
query helper that accounts for the context's read_deleted field.
train
false
2,072
def bptrs(a):
    return pycuda.gpuarray.arange(a.ptr, (a.ptr + (a.shape[0] * a.strides[0])), a.strides[0], dtype=cublas.ctypes.c_void_p)
pointer array when input represents a batch of matrices.
train
false
2,074
def clear_feature(dev, feature, recipient=None):
    if (feature == ENDPOINT_HALT):
        dev.clear_halt(recipient)
    else:
        (bmRequestType, wIndex) = _parse_recipient(recipient, util.CTRL_OUT)
        dev.ctrl_transfer(bmRequestType=bmRequestType, bRequest=1, wIndex=wIndex, wValue=feature)
clear/disable a specific feature.
train
true
2,076
def lvresize(size, lvpath):
    ret = {}
    cmd = ['lvresize', '-L', str(size), lvpath]
    cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if (cmd_ret['retcode'] != 0):
        return {}
    return ret
resize a logical volume.
train
false
2,078
@depends(HAS_PYVMOMI)
def service_restart(host, username, password, service_name, protocol=None, port=None, host_names=None):
    service_instance = salt.utils.vmware.get_service_instance(host=host, username=username, password=password, protocol=protocol, port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond', 'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
    ret = {}
    if ((service_name == 'SSH') or (service_name == 'ssh')):
        temp_service_name = 'TSM-SSH'
    else:
        temp_service_name = service_name
    for host_name in host_names:
        if (service_name not in valid_services):
            ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
            return ret
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        service_manager = _get_service_manager(host_ref)
        log.debug("Restarting the '{0}' service on {1}.".format(service_name, host_name))
        try:
            service_manager.RestartService(id=temp_service_name)
        except vim.fault.HostConfigFault as err:
            msg = "'vsphere.service_restart' failed for host {0}: {1}".format(host_name, err)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
            continue
        except vim.fault.RestrictedVersion as err:
            log.debug(err)
            ret.update({host_name: {'Error': err}})
            continue
        ret.update({host_name: {'Service Restarted': True}})
    return ret
restart a service on the given host(s).
train
true
2,079
def _compare_io(inv_op, out_file_ext='.fif'):
    tempdir = _TempDir()
    if (out_file_ext == '.fif'):
        out_file = op.join(tempdir, 'test-inv.fif')
    elif (out_file_ext == '.gz'):
        out_file = op.join(tempdir, 'test-inv.fif.gz')
    else:
        raise ValueError('IO test could not complete')
    inv_init = copy.deepcopy(inv_op)
    write_inverse_operator(out_file, inv_op)
    read_inv_op = read_inverse_operator(out_file)
    _compare(inv_init, read_inv_op)
    _compare(inv_init, inv_op)
compare inverse io.
train
false
2,080
def rand_alnum(length=0):
    jibber = ''.join([letters, digits])
    return ''.join((choice(jibber) for _ in xrange((length or randint(10, 30)))))
create a random string with random length. :return: a random string with length > 10 and length < 30.
train
false
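The snippet is Python 2 (letters, xrange); an equivalent Python 3 sketch, assuming the names come from the string and random modules:

from random import choice, randint
from string import ascii_letters as letters, digits

def rand_alnum(length=0):
    jibber = ''.join([letters, digits])
    return ''.join(choice(jibber) for _ in range(length or randint(10, 30)))

print(rand_alnum(12))  # e.g. 'a8Kp0QzL3mXv': 12 random alphanumeric characters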
2,081
def _get_impl():
    global _RPCIMPL
    if (_RPCIMPL is None):
        try:
            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
        except ImportError:
            impl = CONF.rpc_backend.replace('nova.rpc', 'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
delay import of rpc_backend until configuration is loaded.
train
false
2,084
def p_field_type(p):
    p[0] = p[1]
field_type : ref_type | definition_type
train
false
2,085
def _reverse_cmap_spec(spec):
    if (u'listed' in spec):
        return {u'listed': spec[u'listed'][::(-1)]}
    if (u'red' in spec):
        return revcmap(spec)
    else:
        revspec = list(reversed(spec))
        if (len(revspec[0]) == 2):
            revspec = [((1.0 - a), b) for (a, b) in revspec]
        return revspec
reverses cmap specification *spec*.
train
false
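For a list-style spec of (position, value) pairs, reversing flips each position x to 1.0 - x; for example:

spec = [(0.0, 'blue'), (0.5, 'white'), (1.0, 'red')]
print(_reverse_cmap_spec(spec))  # [(0.0, 'red'), (0.5, 'white'), (1.0, 'blue')]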
2,087
def to_class_path(cls):
    return ':'.join([cls.__module__, cls.__name__])
takes a class and returns its class path, which is composed of the module plus the class name.
train
false
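A minimal example (the module part depends on where the class is defined; it is '__main__' when run as a script):

class Widget(object):
    pass

print(to_class_path(Widget))  # '__main__:Widget'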
2,089
def check_partitioners(partitioners, keys):
    if (partitioners is None):
        return {}
    keys = set(keys)
    if (not (set(partitioners) <= keys)):
        extra_keys = (set(partitioners) - keys)
        raise KeyError('Invalid partitioner keys {}, partitioners can only be provided for {}'.format(', '.join(("'{}'".format(key) for key in extra_keys)), ', '.join(("'{}'".format(key) for key in keys))))

    def check_nested_callables(dictionary):
        for (key, entry) in dictionary.iteritems():
            if isinstance(entry, dict):
                check_nested_callables(entry)
            elif (not callable(entry)):
                raise TypeError("Partitioner for '{}' is not a callable function or dictionary".format(key))

    check_nested_callables(partitioners)
    return partitioners
[ "def", "check_partitioners", "(", "partitioners", ",", "keys", ")", ":", "if", "(", "partitioners", "is", "None", ")", ":", "return", "{", "}", "keys", "=", "set", "(", "keys", ")", "if", "(", "not", "(", "set", "(", "partitioners", ")", "<=", "keys", ")", ")", ":", "extra_keys", "=", "(", "set", "(", "partitioners", ")", "-", "keys", ")", "raise", "KeyError", "(", "'Invalid partitioner keys {}, partitioners can only be provided for {}'", ".", "format", "(", "', '", ".", "join", "(", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "extra_keys", ")", ")", ",", "', '", ".", "join", "(", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "keys", ")", ")", ")", ")", "def", "check_nested_callables", "(", "dictionary", ")", ":", "for", "(", "key", ",", "entry", ")", "in", "dictionary", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "entry", ",", "dict", ")", ":", "check_nested_callables", "(", "entry", ")", "elif", "(", "not", "callable", "(", "entry", ")", ")", ":", "raise", "TypeError", "(", "\"Partitioner for '{}' is not a callable function or dictionary\"", ".", "format", "(", "key", ")", ")", "check_nested_callables", "(", "partitioners", ")", "return", "partitioners" ]
checks the given partitioners .
train
false
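A short sketch of the validator above, assuming check_partitioners is in scope; keys whitelists which entries may carry a partitioner and every leaf must be callable:

check_partitioners({'w': lambda shape, dtype: None}, keys=('w', 'b'))
# -> returns the dict unchanged: 'w' is an allowed key and the leaf is callable
check_partitioners(None, keys=('w', 'b'))   # -> {}
# {'w': 1} would raise TypeError (leaf not callable), and a 'bias' key
# outside ('w', 'b') would raise KeyError.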
2,090
def inv(a): return (~ a)
[ "def", "inv", "(", "a", ")", ":", "return", "(", "~", "a", ")" ]
computes the inverse of a square matrix by delegating to the ~ operator .
train
false
2,091
def create_api_request(rf, method='get', url='/', data='', user=None, encode_as_json=True): content_type = 'application/x-www-form-urlencoded' if (data and encode_as_json): from pootle.core.utils.json import jsonify content_type = 'application/json' data = jsonify(data) request_method = getattr(rf, method.lower()) request = request_method(url, data=data, content_type=content_type) request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest' if (user is not None): request.user = user return request
[ "def", "create_api_request", "(", "rf", ",", "method", "=", "'get'", ",", "url", "=", "'/'", ",", "data", "=", "''", ",", "user", "=", "None", ",", "encode_as_json", "=", "True", ")", ":", "content_type", "=", "'application/x-www-form-urlencoded'", "if", "(", "data", "and", "encode_as_json", ")", ":", "from", "pootle", ".", "core", ".", "utils", ".", "json", "import", "jsonify", "content_type", "=", "'application/json'", "data", "=", "jsonify", "(", "data", ")", "request_method", "=", "getattr", "(", "rf", ",", "method", ".", "lower", "(", ")", ")", "request", "=", "request_method", "(", "url", ",", "data", "=", "data", ",", "content_type", "=", "content_type", ")", "request", ".", "META", "[", "'HTTP_X_REQUESTED_WITH'", "]", "=", "'XMLHttpRequest'", "if", "(", "user", "is", "not", "None", ")", ":", "request", ".", "user", "=", "user", "return", "request" ]
convenience function to create and set up fake requests .
train
false
2,093
def add_permission(user, model, permission_codename): content_type = ContentType.objects.get_for_model(model) (permission, created) = Permission.objects.get_or_create(codename=permission_codename, content_type=content_type, defaults={'name': permission_codename}) user.user_permissions.add(permission)
[ "def", "add_permission", "(", "user", ",", "model", ",", "permission_codename", ")", ":", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "model", ")", "(", "permission", ",", "created", ")", "=", "Permission", ".", "objects", ".", "get_or_create", "(", "codename", "=", "permission_codename", ",", "content_type", "=", "content_type", ",", "defaults", "=", "{", "'name'", ":", "permission_codename", "}", ")", "user", ".", "user_permissions", ".", "add", "(", "permission", ")" ]
add a permission for the given model to a user .
train
false
2,094
def profile_(profile, names, vm_overrides=None, opts=None, **kwargs): client = _get_client() if isinstance(opts, dict): client.opts.update(opts) info = client.profile(profile, names, vm_overrides=vm_overrides, **kwargs) return info
[ "def", "profile_", "(", "profile", ",", "names", ",", "vm_overrides", "=", "None", ",", "opts", "=", "None", ",", "**", "kwargs", ")", ":", "client", "=", "_get_client", "(", ")", "if", "isinstance", "(", "opts", ",", "dict", ")", ":", "client", ".", "opts", ".", "update", "(", "opts", ")", "info", "=", "client", ".", "profile", "(", "profile", ",", "names", ",", "vm_overrides", "=", "vm_overrides", ",", "**", "kwargs", ")", "return", "info" ]
spin up an instance using a salt cloud profile .
train
true
2,095
def fake_pgettext(translations): def _pgettext(context, text): return translations.get((context, text), text) return _pgettext
[ "def", "fake_pgettext", "(", "translations", ")", ":", "def", "_pgettext", "(", "context", ",", "text", ")", ":", "return", "translations", ".", "get", "(", "(", "context", ",", "text", ")", ",", "text", ")", "return", "_pgettext" ]
create a fake implementation of pgettext .
train
false
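A quick sketch of the factory above, assuming fake_pgettext is in scope; lookups key on (context, text) pairs and fall back to the text itself:

pgettext = fake_pgettext({('month name', 'May'): 'mai'})
pgettext('month name', 'May')   # -> 'mai', the (context, text) pair matched
pgettext('verb', 'May')         # -> 'May', unmatched pairs fall through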
2,097
def _decode_address_list_field(address_list): if (not address_list): return None if (len(address_list) == 1): return _decode_and_join_header(address_list[0]) else: return map(_decode_and_join_header, address_list)
[ "def", "_decode_address_list_field", "(", "address_list", ")", ":", "if", "(", "not", "address_list", ")", ":", "return", "None", "if", "(", "len", "(", "address_list", ")", "==", "1", ")", ":", "return", "_decode_and_join_header", "(", "address_list", "[", "0", "]", ")", "else", ":", "return", "map", "(", "_decode_and_join_header", ",", "address_list", ")" ]
helper function to decode address lists .
train
false
2,098
def _check_params(X, metric, p, metric_params): params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params]) est_params = X.get_params() for (param_name, func_param) in params: if (func_param != est_params[param_name]): raise ValueError(('Got %s for %s, while the estimator has %s for the same parameter.' % (func_param, param_name, est_params[param_name])))
[ "def", "_check_params", "(", "X", ",", "metric", ",", "p", ",", "metric_params", ")", ":", "params", "=", "zip", "(", "[", "'metric'", ",", "'p'", ",", "'metric_params'", "]", ",", "[", "metric", ",", "p", ",", "metric_params", "]", ")", "est_params", "=", "X", ".", "get_params", "(", ")", "for", "(", "param_name", ",", "func_param", ")", "in", "params", ":", "if", "(", "func_param", "!=", "est_params", "[", "param_name", "]", ")", ":", "raise", "ValueError", "(", "(", "'Got %s for %s, while the estimator has %s for the same parameter.'", "%", "(", "func_param", ",", "param_name", ",", "est_params", "[", "param_name", "]", ")", ")", ")" ]
helper to validate params .
train
false
2,099
def luhnCheck(value): arr = [] for c in value: if c.isdigit(): arr.append(int(c)) arr.reverse() for idx in [i for i in range(len(arr)) if (i % 2)]: d = (arr[idx] * 2) if (d > 9): d = ((d / 10) + (d % 10)) arr[idx] = d sm = sum(arr) return (not (sm % 10))
[ "def", "luhnCheck", "(", "value", ")", ":", "arr", "=", "[", "]", "for", "c", "in", "value", ":", "if", "c", ".", "isdigit", "(", ")", ":", "arr", ".", "append", "(", "int", "(", "c", ")", ")", "arr", ".", "reverse", "(", ")", "for", "idx", "in", "[", "i", "for", "i", "in", "range", "(", "len", "(", "arr", ")", ")", "if", "(", "i", "%", "2", ")", "]", ":", "d", "=", "(", "arr", "[", "idx", "]", "*", "2", ")", "if", "(", "d", ">", "9", ")", ":", "d", "=", "(", "(", "d", "/", "10", ")", "+", "(", "d", "%", "10", ")", ")", "arr", "[", "idx", "]", "=", "d", "sm", "=", "sum", "(", "arr", ")", "return", "(", "not", "(", "sm", "%", "10", ")", ")" ]
perform the luhn check against the value, which can be a string of digits .
train
false
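A worked example for the check above, assuming luhnCheck is in scope, using the classic valid test number:

# Reversed digits of '79927398713' with every second one doubled (folding
# two-digit results back to one) sum to 70, which is divisible by 10.
assert luhnCheck('79927398713')
assert not luhnCheck('79927398714')  # a changed check digit breaks the sum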
2,100
def establish_connection(ip, username='', password='', delay=1): remote_conn = telnetlib.Telnet(ip, TELNET_PORT, TELNET_TIMEOUT) output = remote_conn.read_until('sername:', READ_TIMEOUT) remote_conn.write((username + '\n')) output = remote_conn.read_until('ssword:', READ_TIMEOUT) remote_conn.write((password + '\n')) time.sleep(delay) return remote_conn
[ "def", "establish_connection", "(", "ip", ",", "username", "=", "''", ",", "password", "=", "''", ",", "delay", "=", "1", ")", ":", "remote_conn", "=", "telnetlib", ".", "Telnet", "(", "ip", ",", "TELNET_PORT", ",", "TELNET_TIMEOUT", ")", "output", "=", "remote_conn", ".", "read_until", "(", "'sername:'", ",", "READ_TIMEOUT", ")", "remote_conn", ".", "write", "(", "(", "username", "+", "'\\n'", ")", ")", "output", "=", "remote_conn", ".", "read_until", "(", "'ssword:'", ",", "READ_TIMEOUT", ")", "remote_conn", ".", "write", "(", "(", "password", "+", "'\\n'", ")", ")", "time", ".", "sleep", "(", "delay", ")", "return", "remote_conn" ]
use telnetlib to establish a telnet connection to the device, log in with the supplied credentials, and return the connection object .
train
false
2,101
def interpolate(value_from, value_to, step=10): if (type(value_from) in (list, tuple)): out = [] for (x, y) in zip(value_from, value_to): out.append(interpolate(x, y, step)) return out else: return (value_from + ((value_to - value_from) / float(step)))
[ "def", "interpolate", "(", "value_from", ",", "value_to", ",", "step", "=", "10", ")", ":", "if", "(", "type", "(", "value_from", ")", "in", "(", "list", ",", "tuple", ")", ")", ":", "out", "=", "[", "]", "for", "(", "x", ",", "y", ")", "in", "zip", "(", "value_from", ",", "value_to", ")", ":", "out", ".", "append", "(", "interpolate", "(", "x", ",", "y", ",", "step", ")", ")", "return", "out", "else", ":", "return", "(", "value_from", "+", "(", "(", "value_to", "-", "value_from", ")", "/", "float", "(", "step", ")", ")", ")" ]
take a single interpolation step of size 1/step from value_from towards value_to, recursing element-wise over lists and tuples .
train
false
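A small sketch of the stepwise behaviour above, assuming interpolate is in scope; each call moves 1/step of the remaining distance, element-wise for sequences:

interpolate(0.0, 10.0)          # -> 1.0, one tenth of the way with the default step=10
interpolate((0, 0), (10, 20))   # -> [1.0, 2.0], applied per component
interpolate(5.0, 6.0, step=2)   # -> 5.5, halfway when step=2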
2,104
def assert_student_view(block, fragment): try: html = lxml.html.fragment_fromstring(fragment.content) except lxml.etree.ParserError: assert_student_view_invalid_html(block, fragment.content) else: assert_student_view_valid_html(block, html)
[ "def", "assert_student_view", "(", "block", ",", "fragment", ")", ":", "try", ":", "html", "=", "lxml", ".", "html", ".", "fragment_fromstring", "(", "fragment", ".", "content", ")", "except", "lxml", ".", "etree", ".", "ParserError", ":", "assert_student_view_invalid_html", "(", "block", ",", "fragment", ".", "content", ")", "else", ":", "assert_student_view_valid_html", "(", "block", ",", "html", ")" ]
helper function to assert that the fragment is valid output for the specified block's student_view .
train
false
2,105
def list_nodes(call=None): if (call == 'action'): raise SaltCloudSystemExit('The list_nodes function must be called with -f or --function.') nodes = list_nodes_full() ret = {} for (instance_id, full_node) in nodes.items(): ret[instance_id] = {'id': full_node['id'], 'image': full_node['image'], 'size': full_node['size'], 'state': full_node['state'], 'public_ips': full_node['public_ips'], 'private_ips': full_node['private_ips']} return ret
[ "def", "list_nodes", "(", "call", "=", "None", ")", ":", "if", "(", "call", "==", "'action'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The list_nodes function must be called with -f or --function.'", ")", "nodes", "=", "list_nodes_full", "(", ")", "ret", "=", "{", "}", "for", "(", "instance_id", ",", "full_node", ")", "in", "nodes", ".", "items", "(", ")", ":", "ret", "[", "instance_id", "]", "=", "{", "'id'", ":", "full_node", "[", "'id'", "]", ",", "'image'", ":", "full_node", "[", "'image'", "]", ",", "'size'", ":", "full_node", "[", "'size'", "]", ",", "'state'", ":", "full_node", "[", "'state'", "]", ",", "'public_ips'", ":", "full_node", "[", "'public_ips'", "]", ",", "'private_ips'", ":", "full_node", "[", "'private_ips'", "]", "}", "return", "ret" ]
return a list of nodes with their basic fields . this module is not specific to any cloud provider .
train
true
2,107
def multiply_timedelta(interval, number): return timedelta(seconds=(timedelta_total_seconds(interval) * number))
[ "def", "multiply_timedelta", "(", "interval", ",", "number", ")", ":", "return", "timedelta", "(", "seconds", "=", "(", "timedelta_total_seconds", "(", "interval", ")", "*", "number", ")", ")" ]
timedeltas cannot normally be multiplied by floating-point numbers, so scale via total seconds instead .
train
false
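A minimal example of the workaround above, assuming multiply_timedelta and its timedelta_total_seconds helper are in scope; on Python 2, timedelta * 1.5 raises TypeError, hence the detour through seconds:

from datetime import timedelta

interval = timedelta(minutes=1, seconds=30)   # 90 seconds
multiply_timedelta(interval, 1.5)             # -> timedelta(seconds=135)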
2,108
def splint(a, b, tck, full_output=0): if isinstance(tck, BSpline): if (tck.c.ndim > 1): mesg = 'Calling splint() with BSpline objects with c.ndim > 1 is not recommended. Use BSpline.integrate() instead.' warnings.warn(mesg, DeprecationWarning) if (full_output != 0): mesg = ('full_output = %s is not supported. Proceeding as if full_output = 0' % full_output) return tck.integrate(a, b, extrapolate=False) else: return _impl.splint(a, b, tck, full_output)
[ "def", "splint", "(", "a", ",", "b", ",", "tck", ",", "full_output", "=", "0", ")", ":", "if", "isinstance", "(", "tck", ",", "BSpline", ")", ":", "if", "(", "tck", ".", "c", ".", "ndim", ">", "1", ")", ":", "mesg", "=", "'Calling splint() with BSpline objects with c.ndim > 1 is not recommended. Use BSpline.integrate() instead.'", "warnings", ".", "warn", "(", "mesg", ",", "DeprecationWarning", ")", "if", "(", "full_output", "!=", "0", ")", ":", "mesg", "=", "(", "'full_output = %s is not supported. Proceeding as if full_output = 0'", "%", "full_output", ")", "return", "tck", ".", "integrate", "(", "a", ",", "b", ",", "extrapolate", "=", "False", ")", "else", ":", "return", "_impl", ".", "splint", "(", "a", ",", "b", ",", "tck", ",", "full_output", ")" ]
evaluate the definite integral of a b-spline .
train
false
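A short usage sketch for the scipy wrapper above; splrep builds the (t, c, k) spline representation that splint then integrates:

import numpy as np
from scipy.interpolate import splrep, splint

x = np.linspace(0, np.pi, 50)
tck = splrep(x, np.sin(x))   # cubic spline through sin(x)
splint(0, np.pi, tck)        # ~= 2.0, the exact integral of sin over [0, pi]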
2,109
def _a_generator(foo): (yield 42) (yield foo)
[ "def", "_a_generator", "(", "foo", ")", ":", "(", "yield", "42", ")", "(", "yield", "foo", ")" ]
used to have an object to return for generators .
train
false
2,110
def freq_to_period(freq): if (not isinstance(freq, offsets.DateOffset)): freq = to_offset(freq) freq = freq.rule_code.upper() if ((freq == 'A') or freq.startswith(('A-', 'AS-'))): return 1 elif ((freq == 'Q') or freq.startswith(('Q-', 'QS-'))): return 4 elif ((freq == 'M') or freq.startswith(('M-', 'MS'))): return 12 elif ((freq == 'W') or freq.startswith('W-')): return 52 elif (freq == 'D'): return 7 elif (freq == 'B'): return 5 elif (freq == 'H'): return 24 else: raise ValueError('freq {} not understood. Please report if you think this is in error.'.format(freq))
[ "def", "freq_to_period", "(", "freq", ")", ":", "if", "(", "not", "isinstance", "(", "freq", ",", "offsets", ".", "DateOffset", ")", ")", ":", "freq", "=", "to_offset", "(", "freq", ")", "freq", "=", "freq", ".", "rule_code", ".", "upper", "(", ")", "if", "(", "(", "freq", "==", "'A'", ")", "or", "freq", ".", "startswith", "(", "(", "'A-'", ",", "'AS-'", ")", ")", ")", ":", "return", "1", "elif", "(", "(", "freq", "==", "'Q'", ")", "or", "freq", ".", "startswith", "(", "(", "'Q-'", ",", "'QS-'", ")", ")", ")", ":", "return", "4", "elif", "(", "(", "freq", "==", "'M'", ")", "or", "freq", ".", "startswith", "(", "(", "'M-'", ",", "'MS'", ")", ")", ")", ":", "return", "12", "elif", "(", "(", "freq", "==", "'W'", ")", "or", "freq", ".", "startswith", "(", "'W-'", ")", ")", ":", "return", "52", "elif", "(", "freq", "==", "'D'", ")", ":", "return", "7", "elif", "(", "freq", "==", "'B'", ")", ":", "return", "5", "elif", "(", "freq", "==", "'H'", ")", ":", "return", "24", "else", ":", "raise", "ValueError", "(", "'freq {} not understood. Please report if you think this is in error.'", ".", "format", "(", "freq", ")", ")" ]
convert a pandas frequency to a periodicity . parameters: freq (str or offset) - frequency to convert . returns: period (int) - periodicity of freq . notes: annual maps to 1 .
train
false
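Quick sanity checks for the mapping above, assuming freq_to_period is in scope, using standard pandas offset aliases:

freq_to_period('A')       # -> 1, annual
freq_to_period('Q-DEC')   # -> 4, anchored quarterly aliases also match
freq_to_period('M')       # -> 12, monthly
freq_to_period('W-SUN')   # -> 52, weekly
freq_to_period('B')       # -> 5, business daily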
2,112
def getdate(string_date=None): if (not string_date): return get_datetime().date() if isinstance(string_date, datetime.datetime): return string_date.date() elif isinstance(string_date, datetime.date): return string_date if ((not string_date) or (string_date == u'0000-00-00')): return None return parser.parse(string_date).date()
[ "def", "getdate", "(", "string_date", "=", "None", ")", ":", "if", "(", "not", "string_date", ")", ":", "return", "get_datetime", "(", ")", ".", "date", "(", ")", "if", "isinstance", "(", "string_date", ",", "datetime", ".", "datetime", ")", ":", "return", "string_date", ".", "date", "(", ")", "elif", "isinstance", "(", "string_date", ",", "datetime", ".", "date", ")", ":", "return", "string_date", "if", "(", "(", "not", "string_date", ")", "or", "(", "string_date", "==", "u'0000-00-00'", ")", ")", ":", "return", "None", "return", "parser", ".", "parse", "(", "string_date", ")", ".", "date", "(", ")" ]
converts a string date to a datetime.date object .
train
false
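A small sketch of the coercions above, assuming getdate is in scope:

import datetime

getdate('2015-03-14')                        # -> datetime.date(2015, 3, 14)
getdate(datetime.datetime(2015, 3, 14, 9))   # -> datetime.date(2015, 3, 14)
getdate(None)                                # -> today's date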
2,113
def _tanh(p, x, prec): R = p.ring p1 = R(0) for precx in _giant_steps(prec): tmp = (p - rs_atanh(p1, x, precx)) tmp = rs_mul(tmp, (1 - rs_square(p1, x, prec)), x, precx) p1 += tmp return p1
[ "def", "_tanh", "(", "p", ",", "x", ",", "prec", ")", ":", "R", "=", "p", ".", "ring", "p1", "=", "R", "(", "0", ")", "for", "precx", "in", "_giant_steps", "(", "prec", ")", ":", "tmp", "=", "(", "p", "-", "rs_atanh", "(", "p1", ",", "x", ",", "precx", ")", ")", "tmp", "=", "rs_mul", "(", "tmp", ",", "(", "1", "-", "rs_square", "(", "p1", ",", "x", ",", "prec", ")", ")", ",", "x", ",", "precx", ")", "p1", "+=", "tmp", "return", "p1" ]
helper function of rs_tanh . returns the series expansion of tanh of a univariate series using newton's method .
train
false
2,114
def BuildAdGroupAdOperations(adgroup_operations): adgroup_ad_operations = [{'xsi_type': 'AdGroupAdOperation', 'operand': {'adGroupId': adgroup_operation['operand']['id'], 'ad': {'xsi_type': 'TextAd', 'headline': 'Luxury Cruise to Mars', 'description1': 'Visit the Red Planet in style.', 'description2': 'Low-gravity fun for everyone!', 'displayUrl': 'www.example.com', 'finalUrls': ['http://www.example.com/1']}}, 'operator': 'ADD'} for adgroup_operation in adgroup_operations] return adgroup_ad_operations
[ "def", "BuildAdGroupAdOperations", "(", "adgroup_operations", ")", ":", "adgroup_ad_operations", "=", "[", "{", "'xsi_type'", ":", "'AdGroupAdOperation'", ",", "'operand'", ":", "{", "'adGroupId'", ":", "adgroup_operation", "[", "'operand'", "]", "[", "'id'", "]", ",", "'ad'", ":", "{", "'xsi_type'", ":", "'TextAd'", ",", "'headline'", ":", "'Luxury Cruise to Mars'", ",", "'description1'", ":", "'Visit the Red Planet in style.'", ",", "'description2'", ":", "'Low-gravity fun for everyone!'", ",", "'displayUrl'", ":", "'www.example.com'", ",", "'finalUrls'", ":", "[", "'http://www.example.com/1'", "]", "}", "}", ",", "'operator'", ":", "'ADD'", "}", "for", "adgroup_operation", "in", "adgroup_operations", "]", "return", "adgroup_ad_operations" ]
builds the operations adding a textad to each adgroup .
train
false
2,116
def set_course_cohort_settings(course_key, **kwargs): fields = {'is_cohorted': bool, 'always_cohort_inline_discussions': bool, 'cohorted_discussions': list} course_cohort_settings = get_course_cohort_settings(course_key) for (field, field_type) in fields.items(): if (field in kwargs): if (not isinstance(kwargs[field], field_type)): raise ValueError('Incorrect field type for `{}`. Type must be `{}`'.format(field, field_type.__name__)) setattr(course_cohort_settings, field, kwargs[field]) course_cohort_settings.save() return course_cohort_settings
[ "def", "set_course_cohort_settings", "(", "course_key", ",", "**", "kwargs", ")", ":", "fields", "=", "{", "'is_cohorted'", ":", "bool", ",", "'always_cohort_inline_discussions'", ":", "bool", ",", "'cohorted_discussions'", ":", "list", "}", "course_cohort_settings", "=", "get_course_cohort_settings", "(", "course_key", ")", "for", "(", "field", ",", "field_type", ")", "in", "fields", ".", "items", "(", ")", ":", "if", "(", "field", "in", "kwargs", ")", ":", "if", "(", "not", "isinstance", "(", "kwargs", "[", "field", "]", ",", "field_type", ")", ")", ":", "raise", "ValueError", "(", "'Incorrect field type for `{}`. Type must be `{}`'", ".", "format", "(", "field", ",", "field_type", ".", "__name__", ")", ")", "setattr", "(", "course_cohort_settings", ",", "field", ",", "kwargs", "[", "field", "]", ")", "course_cohort_settings", ".", "save", "(", ")", "return", "course_cohort_settings" ]
set cohort settings for a course .
train
false
2,117
def send_event(event, users): queue_json_publish('notify_tornado', dict(event=event, users=users), send_notification_http)
[ "def", "send_event", "(", "event", ",", "users", ")", ":", "queue_json_publish", "(", "'notify_tornado'", ",", "dict", "(", "event", "=", "event", ",", "users", "=", "users", ")", ",", "send_notification_http", ")" ]
users is a list of user ids .
train
false
2,118
def test_config_alterations_class(): class LineConfig(Config, ): no_prefix = True show_legend = False fill = True pretty_print = True x_labels = ['a', 'b', 'c'] line1 = Line(LineConfig) line1.add('_', [1, 2, 3]) l1 = line1.render() LineConfig.stroke = False line2 = Line(LineConfig) line2.add('_', [1, 2, 3]) l2 = line2.render() assert (l1 != l2) l1bis = line1.render() assert (l1 == l1bis)
[ "def", "test_config_alterations_class", "(", ")", ":", "class", "LineConfig", "(", "Config", ",", ")", ":", "no_prefix", "=", "True", "show_legend", "=", "False", "fill", "=", "True", "pretty_print", "=", "True", "x_labels", "=", "[", "'a'", ",", "'b'", ",", "'c'", "]", "line1", "=", "Line", "(", "LineConfig", ")", "line1", ".", "add", "(", "'_'", ",", "[", "1", ",", "2", ",", "3", "]", ")", "l1", "=", "line1", ".", "render", "(", ")", "LineConfig", ".", "stroke", "=", "False", "line2", "=", "Line", "(", "LineConfig", ")", "line2", ".", "add", "(", "'_'", ",", "[", "1", ",", "2", ",", "3", "]", ")", "l2", "=", "line2", ".", "render", "(", ")", "assert", "(", "l1", "!=", "l2", ")", "l1bis", "=", "line1", ".", "render", "(", ")", "assert", "(", "l1", "==", "l1bis", ")" ]
assert a config can be changed on the config class .
train
false
2,119
def assoc_laguerre(x, n, k=0.0): return orthogonal.eval_genlaguerre(n, k, x)
[ "def", "assoc_laguerre", "(", "x", ",", "n", ",", "k", "=", "0.0", ")", ":", "return", "orthogonal", ".", "eval_genlaguerre", "(", "n", ",", "k", ",", "x", ")" ]
compute the generalized laguerre polynomial of degree n and order k .
train
false
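A quick check of the wrapper above against known closed forms, L_0^k(x) = 1 and L_1^k(x) = 1 + k - x; the import assumes the scipy.special namespace where this wrapper lives:

from scipy.special import assoc_laguerre

assoc_laguerre(0.5, 0)          # -> 1.0, the degree-0 polynomial is constant
assoc_laguerre(1.0, 1, k=1.0)   # -> 1.0, since 1 + 1 - 1 = 1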
2,120
def handleFileCollision(fileName, fileCollisionMethod): if (fileCollisionMethod == 'overwrite'): logging.warning(('Data file, %s, will be overwritten' % fileName)) elif (fileCollisionMethod == 'fail'): msg = 'Data file %s already exists. Set argument fileCollisionMethod to overwrite.' raise IOError((msg % fileName)) elif (fileCollisionMethod == 'rename'): (rootName, extension) = os.path.splitext(fileName) matchingFiles = glob.glob(('%s*%s' % (rootName, extension))) if (not matchingFiles): fileName = ('%s%s' % (rootName, extension)) else: fileName = ('%s_%d%s' % (rootName, len(matchingFiles), extension)) if os.path.exists(fileName): msg = 'New fileName %s has already been taken. Something is wrong with the append counter.' raise IOError((msg % fileName)) else: msg = 'Argument fileCollisionMethod was invalid: %s' raise ValueError((msg % str(fileCollisionMethod))) return fileName
[ "def", "handleFileCollision", "(", "fileName", ",", "fileCollisionMethod", ")", ":", "if", "(", "fileCollisionMethod", "==", "'overwrite'", ")", ":", "logging", ".", "warning", "(", "(", "'Data file, %s, will be overwritten'", "%", "fileName", ")", ")", "elif", "(", "fileCollisionMethod", "==", "'fail'", ")", ":", "msg", "=", "'Data file %s already exists. Set argument fileCollisionMethod to overwrite.'", "raise", "IOError", "(", "(", "msg", "%", "fileName", ")", ")", "elif", "(", "fileCollisionMethod", "==", "'rename'", ")", ":", "(", "rootName", ",", "extension", ")", "=", "os", ".", "path", ".", "splitext", "(", "fileName", ")", "matchingFiles", "=", "glob", ".", "glob", "(", "(", "'%s*%s'", "%", "(", "rootName", ",", "extension", ")", ")", ")", "if", "(", "not", "matchingFiles", ")", ":", "fileName", "=", "(", "'%s%s'", "%", "(", "rootName", ",", "extension", ")", ")", "else", ":", "fileName", "=", "(", "'%s_%d%s'", "%", "(", "rootName", ",", "len", "(", "matchingFiles", ")", ",", "extension", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "fileName", ")", ":", "msg", "=", "'New fileName %s has already been taken. Something is wrong with the append counter.'", "raise", "IOError", "(", "(", "msg", "%", "fileName", ")", ")", "else", ":", "msg", "=", "'Argument fileCollisionMethod was invalid: %s'", "raise", "ValueError", "(", "(", "msg", "%", "str", "(", "fileCollisionMethod", ")", ")", ")", "return", "fileName" ]
handle filename collisions by overwriting, failing, or renaming, according to filecollisionmethod .
train
false
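A usage sketch for the three strategies above, assuming handleFileCollision is in scope; only 'rename' can return a different name:

handleFileCollision('data.csv', 'overwrite')   # logs a warning, returns 'data.csv'
handleFileCollision('data.csv', 'rename')      # -> 'data_1.csv' when data.csv is the only match
# 'fail' raises IOError; callers are expected to invoke this helper
# only when a collision has actually occurred.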
2,121
def unserializeObject(value): return (base64unpickle(value) if value else None)
[ "def", "unserializeObject", "(", "value", ")", ":", "return", "(", "base64unpickle", "(", "value", ")", "if", "value", "else", "None", ")" ]
unserializes object from given serialized form .
train
false
2,123
def set_fs_home(): fs_home = get_fs_home() if (fs_home is None): return False else: os.environ['FREESURFER_HOME'] = fs_home return True
[ "def", "set_fs_home", "(", ")", ":", "fs_home", "=", "get_fs_home", "(", ")", "if", "(", "fs_home", "is", "None", ")", ":", "return", "False", "else", ":", "os", ".", "environ", "[", "'FREESURFER_HOME'", "]", "=", "fs_home", "return", "True" ]
set the freesurfer_home environment variable .
train
false
2,124
def child_fd_list_add(fd): global child_fd_list child_fd_list.append(fd)
[ "def", "child_fd_list_add", "(", "fd", ")", ":", "global", "child_fd_list", "child_fd_list", ".", "append", "(", "fd", ")" ]
add a file descriptor to list to be closed in child processes .
train
false
2,125
def bucketize(point, bucket_size): return (bucket_size * math.floor((point / bucket_size)))
[ "def", "bucketize", "(", "point", ",", "bucket_size", ")", ":", "return", "(", "bucket_size", "*", "math", ".", "floor", "(", "(", "point", "/", "bucket_size", ")", ")", ")" ]
floor the point to the next lower multiple of bucket_size .
train
false
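Two worked values for the helper above, assuming bucketize is in scope; math.floor also pulls negatives down to the lower bucket edge:

bucketize(23.7, 5)   # -> 20.0, since floor(23.7 / 5) * 5 = 20
bucketize(-0.1, 5)   # -> -5.0, negatives floor towards -infinity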
2,127
def getAlongWayHexadecimalPrimary(beginBrightness, beginRatio, colorWidth, endBrightness, endRatio): brightness = ((beginRatio * float(beginBrightness)) + (endRatio * float(endBrightness))) return getWidthHex(int(round(brightness)), colorWidth)
[ "def", "getAlongWayHexadecimalPrimary", "(", "beginBrightness", ",", "beginRatio", ",", "colorWidth", ",", "endBrightness", ",", "endRatio", ")", ":", "brightness", "=", "(", "(", "beginRatio", "*", "float", "(", "beginBrightness", ")", ")", "+", "(", "endRatio", "*", "float", "(", "endBrightness", ")", ")", ")", "return", "getWidthHex", "(", "int", "(", "round", "(", "brightness", ")", ")", ",", "colorWidth", ")" ]
get a primary color along the way from grey to the end color .
train
false
2,128
def _RetainVerticalSpacingBeforeComments(uwline): prev_token = None for tok in uwline.tokens: if (tok.is_comment and prev_token): if (((tok.lineno - tok.value.count(u'\n')) - prev_token.lineno) > 1): tok.AdjustNewlinesBefore(ONE_BLANK_LINE) prev_token = tok
[ "def", "_RetainVerticalSpacingBeforeComments", "(", "uwline", ")", ":", "prev_token", "=", "None", "for", "tok", "in", "uwline", ".", "tokens", ":", "if", "(", "tok", ".", "is_comment", "and", "prev_token", ")", ":", "if", "(", "(", "(", "tok", ".", "lineno", "-", "tok", ".", "value", ".", "count", "(", "u'\\n'", ")", ")", "-", "prev_token", ".", "lineno", ")", ">", "1", ")", ":", "tok", ".", "AdjustNewlinesBefore", "(", "ONE_BLANK_LINE", ")", "prev_token", "=", "tok" ]
retain vertical spacing before comments .
train
false
2,129
def _iter_unit_summary(namespace): from . import core units = [] has_prefixes = set() for (key, val) in six.iteritems(namespace): if (not isinstance(val, core.UnitBase)): continue if (key != val.name): continue if isinstance(val, core.PrefixUnit): has_prefixes.add(val._represents.bases[0].name) else: units.append(val) units.sort(key=(lambda x: x.name.lower())) for unit in units: doc = _get_first_sentence(unit.__doc__).strip() represents = u'' if isinstance(unit, core.Unit): represents = u':math:`{0}`'.format(unit._represents.to_string(u'latex')[1:(-1)]) aliases = u', '.join((u'``{0}``'.format(x) for x in unit.aliases)) (yield (unit, doc, represents, aliases, (unit.name in has_prefixes)))
[ "def", "_iter_unit_summary", "(", "namespace", ")", ":", "from", ".", "import", "core", "units", "=", "[", "]", "has_prefixes", "=", "set", "(", ")", "for", "(", "key", ",", "val", ")", "in", "six", ".", "iteritems", "(", "namespace", ")", ":", "if", "(", "not", "isinstance", "(", "val", ",", "core", ".", "UnitBase", ")", ")", ":", "continue", "if", "(", "key", "!=", "val", ".", "name", ")", ":", "continue", "if", "isinstance", "(", "val", ",", "core", ".", "PrefixUnit", ")", ":", "has_prefixes", ".", "add", "(", "val", ".", "_represents", ".", "bases", "[", "0", "]", ".", "name", ")", "else", ":", "units", ".", "append", "(", "val", ")", "units", ".", "sort", "(", "key", "=", "(", "lambda", "x", ":", "x", ".", "name", ".", "lower", "(", ")", ")", ")", "for", "unit", "in", "units", ":", "doc", "=", "_get_first_sentence", "(", "unit", ".", "__doc__", ")", ".", "strip", "(", ")", "represents", "=", "u''", "if", "isinstance", "(", "unit", ",", "core", ".", "Unit", ")", ":", "represents", "=", "u':math:`{0}`'", ".", "format", "(", "unit", ".", "_represents", ".", "to_string", "(", "u'latex'", ")", "[", "1", ":", "(", "-", "1", ")", "]", ")", "aliases", "=", "u', '", ".", "join", "(", "(", "u'``{0}``'", ".", "format", "(", "x", ")", "for", "x", "in", "unit", ".", "aliases", ")", ")", "(", "yield", "(", "unit", ",", "doc", ",", "represents", ",", "aliases", ",", "(", "unit", ".", "name", "in", "has_prefixes", ")", ")", ")" ]
generates the tuple used to format the unit summary docs in generate_unit_summary .
train
false
2,130
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30, callback=None): retries = 0 interval_range = fxrange(interval_start, (interval_max + interval_start), interval_step, repeatlast=True) for retries in count(): try: return fun(*args, **kwargs) except catch as exc: if (max_retries and (retries >= max_retries)): raise if callback: callback() tts = float((errback(exc, interval_range, retries) if errback else next(interval_range))) if tts: for _ in range(int(tts)): if callback: callback() sleep(1.0) sleep(abs((int(tts) - tts)))
[ "def", "retry_over_time", "(", "fun", ",", "catch", ",", "args", "=", "[", "]", ",", "kwargs", "=", "{", "}", ",", "errback", "=", "None", ",", "max_retries", "=", "None", ",", "interval_start", "=", "2", ",", "interval_step", "=", "2", ",", "interval_max", "=", "30", ",", "callback", "=", "None", ")", ":", "retries", "=", "0", "interval_range", "=", "fxrange", "(", "interval_start", ",", "(", "interval_max", "+", "interval_start", ")", ",", "interval_step", ",", "repeatlast", "=", "True", ")", "for", "retries", "in", "count", "(", ")", ":", "try", ":", "return", "fun", "(", "*", "args", ",", "**", "kwargs", ")", "except", "catch", "as", "exc", ":", "if", "(", "max_retries", "and", "(", "retries", ">=", "max_retries", ")", ")", ":", "raise", "if", "callback", ":", "callback", "(", ")", "tts", "=", "float", "(", "(", "errback", "(", "exc", ",", "interval_range", ",", "retries", ")", "if", "errback", "else", "next", "(", "interval_range", ")", ")", ")", "if", "tts", ":", "for", "_", "in", "range", "(", "int", "(", "tts", ")", ")", ":", "if", "callback", ":", "callback", "(", ")", "sleep", "(", "1.0", ")", "sleep", "(", "abs", "(", "(", "int", "(", "tts", ")", "-", "tts", ")", ")", ")" ]
retry the function over and over until it succeeds or max retries is exceeded .
train
false
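A hypothetical usage sketch of the retry helper above; flaky_fetch stands in for any callable that may raise IOError:

# Retry on IOError, sleeping 2s, 4s, 6s, ... capped at 30s between
# attempts, and re-raise after 5 failed retries.
result = retry_over_time(flaky_fetch, IOError, args=('http://example.com',), max_retries=5)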
2,132
def binarySearch(seq, cmp_func): lower = 0 upper = len(seq) while (lower < upper): index = ((lower + upper) >> 1) diff = cmp_func(seq[index]) if (diff < 0): upper = index elif (diff > 0): lower = (index + 1) else: return index return None
[ "def", "binarySearch", "(", "seq", ",", "cmp_func", ")", ":", "lower", "=", "0", "upper", "=", "len", "(", "seq", ")", "while", "(", "lower", "<", "upper", ")", ":", "index", "=", "(", "(", "lower", "+", "upper", ")", ">>", "1", ")", "diff", "=", "cmp_func", "(", "seq", "[", "index", "]", ")", "if", "(", "diff", "<", "0", ")", ":", "upper", "=", "index", "elif", "(", "diff", ">", "0", ")", ":", "lower", "=", "(", "index", "+", "1", ")", "else", ":", "return", "index", "return", "None" ]
search a value in a sequence using binary search .
train
false
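A usage sketch for the search above, assuming binarySearch is in scope; cmp_func follows the old cmp protocol, returning negative, zero, or positive relative to the target:

seq = [2, 5, 8, 13, 21]
idx = binarySearch(seq, lambda x: 13 - x)   # positive while x < 13, so search right
assert idx == 3                             # seq[3] == 13
binarySearch(seq, lambda x: 14 - x)         # -> None, 14 is absent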
2,134
def verify_python_version(): (major, minor, micro, release_level, serial) = sys.version_info if (major == 2): if (minor != 7): msg = 'Error: Python 2.%s found but Python 2.7 required.' print (msg % minor) elif (major > 2): msg = 'It seems that you are running w3af using Python3, which is not officially supported by the w3af team.\nTo force w3af to be run using python2.7 run it as follows (depending on your OS):\n\n * python2.7 w3af_console\n * python2 w3af_console\n\nTo make this change permanent modify the shebang line in the w3af_console, w3af_gui and w3af_api scripts.' print msg sys.exit(1)
[ "def", "verify_python_version", "(", ")", ":", "(", "major", ",", "minor", ",", "micro", ",", "release_level", ",", "serial", ")", "=", "sys", ".", "version_info", "if", "(", "major", "==", "2", ")", ":", "if", "(", "minor", "!=", "7", ")", ":", "msg", "=", "'Error: Python 2.%s found but Python 2.7 required.'", "print", "(", "msg", "%", "minor", ")", "elif", "(", "major", ">", "2", ")", ":", "msg", "=", "'It seems that you are running w3af using Python3, which is not officially supported by the w3af team.\\nTo force w3af to be run using python2.7 run it as follows (depending on your OS):\\n\\n * python2.7 w3af_console\\n * python2 w3af_console\\n\\nTo make this change permanent modify the shebang line in the w3af_console, w3af_gui and w3af_api scripts.'", "print", "msg", "sys", ".", "exit", "(", "1", ")" ]
check that the python version is 2.7, and exit with a message if python 3 is detected .
train
false
2,136
def get_dataset_filename(name, ext, hid): base = ''.join(((((c in FILENAME_VALID_CHARS) and c) or '_') for c in name)) return (base + ('_%s.%s' % (hid, ext)))
[ "def", "get_dataset_filename", "(", "name", ",", "ext", ",", "hid", ")", ":", "base", "=", "''", ".", "join", "(", "(", "(", "(", "(", "c", "in", "FILENAME_VALID_CHARS", ")", "and", "c", ")", "or", "'_'", ")", "for", "c", "in", "name", ")", ")", "return", "(", "base", "+", "(", "'_%s.%s'", "%", "(", "hid", ",", "ext", ")", ")", ")" ]
builds a filename for a dataset using its name, an extension, and its hid .
train
false
2,139
def merge_adjacent(gen): gen = iter(gen) last = next(gen) for this in gen: if (this.merge_key == last.merge_key): last.merge(this) elif (last < this): (yield last) last = this else: raise AssertionError(('Bad order, %s > %s' % (last, this))) (yield last)
[ "def", "merge_adjacent", "(", "gen", ")", ":", "gen", "=", "iter", "(", "gen", ")", "last", "=", "next", "(", "gen", ")", "for", "this", "in", "gen", ":", "if", "(", "this", ".", "merge_key", "==", "last", ".", "merge_key", ")", ":", "last", ".", "merge", "(", "this", ")", "elif", "(", "last", "<", "this", ")", ":", "(", "yield", "last", ")", "last", "=", "this", "else", ":", "raise", "AssertionError", "(", "(", "'Bad order, %s > %s'", "%", "(", "last", ",", "this", ")", ")", ")", "(", "yield", "last", ")" ]
merge adjacent messages that compare equal .
train
false
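A toy sketch of the contract above, assuming merge_adjacent is in scope: items need a merge_key, a merge() method, and an ordering, and runs of equal keys collapse into one item:

class Msg(object):
    def __init__(self, key, count):
        self.merge_key, self.count = key, count
    def merge(self, other):
        self.count += other.count    # fold the adjacent duplicate in
    def __lt__(self, other):
        return self.merge_key < other.merge_key

merged = list(merge_adjacent([Msg('a', 1), Msg('a', 2), Msg('b', 1)]))
# -> two messages: ('a', count=3) then ('b', count=1); out-of-order input
# would trigger the AssertionError instead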
2,140
def test_decorator_string_issue(): source = dedent(' """\n @"""\n def bla():\n pass\n\n bla.') s = jedi.Script(source) assert s.completions() assert (s._get_module().get_code() == source)
[ "def", "test_decorator_string_issue", "(", ")", ":", "source", "=", "dedent", "(", "' \"\"\"\\n @\"\"\"\\n def bla():\\n pass\\n\\n bla.'", ")", "s", "=", "jedi", ".", "Script", "(", "source", ")", "assert", "s", ".", "completions", "(", ")", "assert", "(", "s", ".", "_get_module", "(", ")", ".", "get_code", "(", ")", "==", "source", ")" ]
test case from #589 .
train
false
2,141
def pportInSelected(): if (port.DlPortReadPortUchar(statusRegAdrs) & 16): return 1 else: return 0
[ "def", "pportInSelected", "(", ")", ":", "if", "(", "port", ".", "DlPortReadPortUchar", "(", "statusRegAdrs", ")", "&", "16", ")", ":", "return", "1", "else", ":", "return", "0" ]
input from select pin .
train
false
2,142
def _attempt_YYYYMMDD(arg, errors): def calc(carg): carg = carg.astype(object) parsed = lib.try_parse_year_month_day((carg / 10000), ((carg / 100) % 100), (carg % 100)) return tslib.array_to_datetime(parsed, errors=errors) def calc_with_mask(carg, mask): result = np.empty(carg.shape, dtype='M8[ns]') iresult = result.view('i8') iresult[(~ mask)] = tslib.iNaT result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).astype('M8[ns]') return result try: return calc(arg.astype(np.int64)) except: pass try: carg = arg.astype(np.float64) return calc_with_mask(carg, notnull(carg)) except: pass try: mask = (~ lib.ismember(arg, tslib._nat_strings)) return calc_with_mask(arg, mask) except: pass return None
[ "def", "_attempt_YYYYMMDD", "(", "arg", ",", "errors", ")", ":", "def", "calc", "(", "carg", ")", ":", "carg", "=", "carg", ".", "astype", "(", "object", ")", "parsed", "=", "lib", ".", "try_parse_year_month_day", "(", "(", "carg", "/", "10000", ")", ",", "(", "(", "carg", "/", "100", ")", "%", "100", ")", ",", "(", "carg", "%", "100", ")", ")", "return", "tslib", ".", "array_to_datetime", "(", "parsed", ",", "errors", "=", "errors", ")", "def", "calc_with_mask", "(", "carg", ",", "mask", ")", ":", "result", "=", "np", ".", "empty", "(", "carg", ".", "shape", ",", "dtype", "=", "'M8[ns]'", ")", "iresult", "=", "result", ".", "view", "(", "'i8'", ")", "iresult", "[", "(", "~", "mask", ")", "]", "=", "tslib", ".", "iNaT", "result", "[", "mask", "]", "=", "calc", "(", "carg", "[", "mask", "]", ".", "astype", "(", "np", ".", "float64", ")", ".", "astype", "(", "np", ".", "int64", ")", ")", ".", "astype", "(", "'M8[ns]'", ")", "return", "result", "try", ":", "return", "calc", "(", "arg", ".", "astype", "(", "np", ".", "int64", ")", ")", "except", ":", "pass", "try", ":", "carg", "=", "arg", ".", "astype", "(", "np", ".", "float64", ")", "return", "calc_with_mask", "(", "carg", ",", "notnull", "(", "carg", ")", ")", "except", ":", "pass", "try", ":", "mask", "=", "(", "~", "lib", ".", "ismember", "(", "arg", ",", "tslib", ".", "_nat_strings", ")", ")", "return", "calc_with_mask", "(", "arg", ",", "mask", ")", "except", ":", "pass", "return", "None" ]
try to parse the yyyymmdd/%y%m%d format .
train
false
2,143
def disallow(nodes): def disallowed(cls): cls.unsupported_nodes = () for node in nodes: new_method = _node_not_implemented(node, cls) name = 'visit_{0}'.format(node) cls.unsupported_nodes += (name,) setattr(cls, name, new_method) return cls return disallowed
[ "def", "disallow", "(", "nodes", ")", ":", "def", "disallowed", "(", "cls", ")", ":", "cls", ".", "unsupported_nodes", "=", "(", ")", "for", "node", "in", "nodes", ":", "new_method", "=", "_node_not_implemented", "(", "node", ",", "cls", ")", "name", "=", "'visit_{0}'", ".", "format", "(", "node", ")", "cls", ".", "unsupported_nodes", "+=", "(", "name", ",", ")", "setattr", "(", "cls", ",", "name", ",", "new_method", ")", "return", "cls", "return", "disallowed" ]
decorator to disallow certain nodes from parsing .
train
true
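A hypothetical sketch of the decorator above; it relies on the companion _node_not_implemented helper (not shown in this record) to generate visit_<Node> methods that raise for banned AST nodes:

@disallow(('Lambda', 'GeneratorExp'))
class RestrictedVisitor(object):
    pass

RestrictedVisitor.unsupported_nodes   # -> ('visit_Lambda', 'visit_GeneratorExp')
# Calling RestrictedVisitor().visit_Lambda(node) would then raise.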
2,144
def get_translated_storefile(store, pootle_path=None): storeclass = store.syncer.file_class filestore = store.syncer.convert(storeclass) for unit in filestore.units: if (not unit.istranslated()): unit.target = ('Translation of %s' % unit.source) path = (pootle_path if (pootle_path is not None) else store.pootle_path) filestore.updateheader(add=True, X_Pootle_Path=path) filestore.updateheader(add=True, X_Pootle_Revision=store.get_max_unit_revision()) return filestore
[ "def", "get_translated_storefile", "(", "store", ",", "pootle_path", "=", "None", ")", ":", "storeclass", "=", "store", ".", "syncer", ".", "file_class", "filestore", "=", "store", ".", "syncer", ".", "convert", "(", "storeclass", ")", "for", "unit", "in", "filestore", ".", "units", ":", "if", "(", "not", "unit", ".", "istranslated", "(", ")", ")", ":", "unit", ".", "target", "=", "(", "'Translation of %s'", "%", "unit", ".", "source", ")", "path", "=", "(", "pootle_path", "if", "(", "pootle_path", "is", "not", "None", ")", "else", "store", ".", "pootle_path", ")", "filestore", ".", "updateheader", "(", "add", "=", "True", ",", "X_Pootle_Path", "=", "path", ")", "filestore", ".", "updateheader", "(", "add", "=", "True", ",", "X_Pootle_Revision", "=", "store", ".", "get_max_unit_revision", "(", ")", ")", "return", "filestore" ]
returns file store with added translations for untranslated units .
train
false
2,148
def interdiffs_with_comments(review, current_pair): if (not review): return diffsets = DiffSet.objects.filter(files__comments__review=review) diffsets = diffsets.filter(files__comments__interfilediff__isnull=False) diffsets = diffsets.distinct() for diffset in diffsets: interdiffs = DiffSet.objects.filter(files__interdiff_comments__filediff__diffset=diffset).distinct() for interdiff in interdiffs: (yield {u'diffset': diffset, u'interdiff': interdiff, u'is_current': ((current_pair[0] == diffset) and (current_pair[1] == interdiff))})
[ "def", "interdiffs_with_comments", "(", "review", ",", "current_pair", ")", ":", "if", "(", "not", "review", ")", ":", "return", "diffsets", "=", "DiffSet", ".", "objects", ".", "filter", "(", "files__comments__review", "=", "review", ")", "diffsets", "=", "diffsets", ".", "filter", "(", "files__comments__interfilediff__isnull", "=", "False", ")", "diffsets", "=", "diffsets", ".", "distinct", "(", ")", "for", "diffset", "in", "diffsets", ":", "interdiffs", "=", "DiffSet", ".", "objects", ".", "filter", "(", "files__interdiff_comments__filediff__diffset", "=", "diffset", ")", ".", "distinct", "(", ")", "for", "interdiff", "in", "interdiffs", ":", "(", "yield", "{", "u'diffset'", ":", "diffset", ",", "u'interdiff'", ":", "interdiff", ",", "u'is_current'", ":", "(", "(", "current_pair", "[", "0", "]", "==", "diffset", ")", "and", "(", "current_pair", "[", "1", "]", "==", "interdiff", ")", ")", "}", ")" ]
get a list of interdiffs in the review that contain draft comments .
train
false
2,149
@task() @timeit def send_group_email(announcement_id): try: announcement = Announcement.objects.get(pk=announcement_id) except Announcement.DoesNotExist: return group = announcement.group users = User.objects.filter(groups__in=[group]) plain_content = bleach.clean(announcement.content_parsed, tags=[], strip=True).strip() email_kwargs = {'content': plain_content, 'content_html': announcement.content_parsed, 'domain': Site.objects.get_current().domain} text_template = 'announcements/email/announcement.ltxt' html_template = 'announcements/email/announcement.html' @safe_translation def _make_mail(locale, user): subject = _('New announcement for {group}').format(group=group.name) mail = make_mail(subject=subject, text_template=text_template, html_template=html_template, context_vars=email_kwargs, from_email=settings.TIDINGS_FROM_ADDRESS, to_email=user.email) return mail messages = [] for u in users: locale = (u.profile.locale or settings.LANGUAGE_CODE) messages.append(_make_mail(locale, u)) send_messages(messages)
[ "@", "task", "(", ")", "@", "timeit", "def", "send_group_email", "(", "announcement_id", ")", ":", "try", ":", "announcement", "=", "Announcement", ".", "objects", ".", "get", "(", "pk", "=", "announcement_id", ")", "except", "Announcement", ".", "DoesNotExist", ":", "return", "group", "=", "announcement", ".", "group", "users", "=", "User", ".", "objects", ".", "filter", "(", "groups__in", "=", "[", "group", "]", ")", "plain_content", "=", "bleach", ".", "clean", "(", "announcement", ".", "content_parsed", ",", "tags", "=", "[", "]", ",", "strip", "=", "True", ")", ".", "strip", "(", ")", "email_kwargs", "=", "{", "'content'", ":", "plain_content", ",", "'content_html'", ":", "announcement", ".", "content_parsed", ",", "'domain'", ":", "Site", ".", "objects", ".", "get_current", "(", ")", ".", "domain", "}", "text_template", "=", "'announcements/email/announcement.ltxt'", "html_template", "=", "'announcements/email/announcement.html'", "@", "safe_translation", "def", "_make_mail", "(", "locale", ",", "user", ")", ":", "subject", "=", "_", "(", "'New announcement for {group}'", ")", ".", "format", "(", "group", "=", "group", ".", "name", ")", "mail", "=", "make_mail", "(", "subject", "=", "subject", ",", "text_template", "=", "text_template", ",", "html_template", "=", "html_template", ",", "context_vars", "=", "email_kwargs", ",", "from_email", "=", "settings", ".", "TIDINGS_FROM_ADDRESS", ",", "to_email", "=", "user", ".", "email", ")", "return", "mail", "messages", "=", "[", "]", "for", "u", "in", "users", ":", "locale", "=", "(", "u", ".", "profile", ".", "locale", "or", "settings", ".", "LANGUAGE_CODE", ")", "messages", ".", "append", "(", "_make_mail", "(", "locale", ",", "u", ")", ")", "send_messages", "(", "messages", ")" ]
build and send the announcement emails to a group .
train
false
2,150
def assertUrisEqual(testcase, expected, actual): expected = urlparse(expected) actual = urlparse(actual) testcase.assertEqual(expected.scheme, actual.scheme) testcase.assertEqual(expected.netloc, actual.netloc) testcase.assertEqual(expected.path, actual.path) testcase.assertEqual(expected.params, actual.params) testcase.assertEqual(expected.fragment, actual.fragment) expected_query = parse_qs(expected.query) actual_query = parse_qs(actual.query) for name in list(expected_query.keys()): testcase.assertEqual(expected_query[name], actual_query[name]) for name in list(actual_query.keys()): testcase.assertEqual(expected_query[name], actual_query[name])
[ "def", "assertUrisEqual", "(", "testcase", ",", "expected", ",", "actual", ")", ":", "expected", "=", "urlparse", "(", "expected", ")", "actual", "=", "urlparse", "(", "actual", ")", "testcase", ".", "assertEqual", "(", "expected", ".", "scheme", ",", "actual", ".", "scheme", ")", "testcase", ".", "assertEqual", "(", "expected", ".", "netloc", ",", "actual", ".", "netloc", ")", "testcase", ".", "assertEqual", "(", "expected", ".", "path", ",", "actual", ".", "path", ")", "testcase", ".", "assertEqual", "(", "expected", ".", "params", ",", "actual", ".", "params", ")", "testcase", ".", "assertEqual", "(", "expected", ".", "fragment", ",", "actual", ".", "fragment", ")", "expected_query", "=", "parse_qs", "(", "expected", ".", "query", ")", "actual_query", "=", "parse_qs", "(", "actual", ".", "query", ")", "for", "name", "in", "list", "(", "expected_query", ".", "keys", "(", ")", ")", ":", "testcase", ".", "assertEqual", "(", "expected_query", "[", "name", "]", ",", "actual_query", "[", "name", "]", ")", "for", "name", "in", "list", "(", "actual_query", ".", "keys", "(", ")", ")", ":", "testcase", ".", "assertEqual", "(", "expected_query", "[", "name", "]", ",", "actual_query", "[", "name", "]", ")" ]
test that uris are the same .
train
false
2,151
def makeMimi(upid): strSeed = 'gGddgPfeaf_gzyr' prehash = ((upid + '_') + strSeed) return md5(prehash.encode('utf-8')).hexdigest()
[ "def", "makeMimi", "(", "upid", ")", ":", "strSeed", "=", "'gGddgPfeaf_gzyr'", "prehash", "=", "(", "(", "upid", "+", "'_'", ")", "+", "strSeed", ")", "return", "md5", "(", "prehash", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
compute the md5 mimi hash from the upid and a fixed seed string .
train
true
2,152
def is_multigraphical(sequence): deg_sequence = list(sequence) if (not nx.utils.is_list_of_ints(deg_sequence)): return False (dsum, dmax) = (0, 0) for d in deg_sequence: if (d < 0): return False (dsum, dmax) = ((dsum + d), max(dmax, d)) if ((dsum % 2) or (dsum < (2 * dmax))): return False return True
[ "def", "is_multigraphical", "(", "sequence", ")", ":", "deg_sequence", "=", "list", "(", "sequence", ")", "if", "(", "not", "nx", ".", "utils", ".", "is_list_of_ints", "(", "deg_sequence", ")", ")", ":", "return", "False", "(", "dsum", ",", "dmax", ")", "=", "(", "0", ",", "0", ")", "for", "d", "in", "deg_sequence", ":", "if", "(", "d", "<", "0", ")", ":", "return", "False", "(", "dsum", ",", "dmax", ")", "=", "(", "(", "dsum", "+", "d", ")", ",", "max", "(", "dmax", ",", "d", ")", ")", "if", "(", "(", "dsum", "%", "2", ")", "or", "(", "dsum", "<", "(", "2", "*", "dmax", ")", ")", ")", ":", "return", "False", "return", "True" ]
returns true if some multigraph can realize the sequence .
train
false
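Two worked checks of the condition above, assuming is_multigraphical is in scope: the degree sum must be even and at least twice the maximum degree:

is_multigraphical([3, 3, 2])   # -> True:  sum 8 is even and 8 >= 2 * 3
is_multigraphical([4, 1, 1])   # -> False: sum 6 is even but 6 < 2 * 4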
2,153
def add_ssh_public_keys(name, filenames): from fabtools.require.files import directory as _require_directory, file as _require_file ssh_dir = posixpath.join(home_directory(name), '.ssh') _require_directory(ssh_dir, mode='700', owner=name, use_sudo=True) authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys') _require_file(authorized_keys_filename, mode='600', owner=name, use_sudo=True) for filename in filenames: with open(filename) as public_key_file: public_keys = public_key_file.read().strip().split('\n') for public_key in public_keys: if (public_key not in authorized_keys(name)): sudo(('echo %s >>%s' % (quote(public_key), quote(authorized_keys_filename))))
[ "def", "add_ssh_public_keys", "(", "name", ",", "filenames", ")", ":", "from", "fabtools", ".", "require", ".", "files", "import", "directory", "as", "_require_directory", ",", "file", "as", "_require_file", "ssh_dir", "=", "posixpath", ".", "join", "(", "home_directory", "(", "name", ")", ",", "'.ssh'", ")", "_require_directory", "(", "ssh_dir", ",", "mode", "=", "'700'", ",", "owner", "=", "name", ",", "use_sudo", "=", "True", ")", "authorized_keys_filename", "=", "posixpath", ".", "join", "(", "ssh_dir", ",", "'authorized_keys'", ")", "_require_file", "(", "authorized_keys_filename", ",", "mode", "=", "'600'", ",", "owner", "=", "name", ",", "use_sudo", "=", "True", ")", "for", "filename", "in", "filenames", ":", "with", "open", "(", "filename", ")", "as", "public_key_file", ":", "public_keys", "=", "public_key_file", ".", "read", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "for", "public_key", "in", "public_keys", ":", "if", "(", "public_key", "not", "in", "authorized_keys", "(", "name", ")", ")", ":", "sudo", "(", "(", "'echo %s >>%s'", "%", "(", "quote", "(", "public_key", ")", ",", "quote", "(", "authorized_keys_filename", ")", ")", ")", ")" ]
add multiple public keys to the users authorized ssh keys .
train
false
2,155
def discretize_bilinear_2D(model, x_range, y_range): x = np.arange((x_range[0] - 0.5), (x_range[1] + 0.5)) y = np.arange((y_range[0] - 0.5), (y_range[1] + 0.5)) (x, y) = np.meshgrid(x, y) values_intermediate_grid = model(x, y) values = (0.5 * (values_intermediate_grid[1:, :] + values_intermediate_grid[:(-1), :])) values = (0.5 * (values[:, 1:] + values[:, :(-1)])) return values
[ "def", "discretize_bilinear_2D", "(", "model", ",", "x_range", ",", "y_range", ")", ":", "x", "=", "np", ".", "arange", "(", "(", "x_range", "[", "0", "]", "-", "0.5", ")", ",", "(", "x_range", "[", "1", "]", "+", "0.5", ")", ")", "y", "=", "np", ".", "arange", "(", "(", "y_range", "[", "0", "]", "-", "0.5", ")", ",", "(", "y_range", "[", "1", "]", "+", "0.5", ")", ")", "(", "x", ",", "y", ")", "=", "np", ".", "meshgrid", "(", "x", ",", "y", ")", "values_intermediate_grid", "=", "model", "(", "x", ",", "y", ")", "values", "=", "(", "0.5", "*", "(", "values_intermediate_grid", "[", "1", ":", ",", ":", "]", "+", "values_intermediate_grid", "[", ":", "(", "-", "1", ")", ",", ":", "]", ")", ")", "values", "=", "(", "0.5", "*", "(", "values", "[", ":", ",", "1", ":", "]", "+", "values", "[", ":", ",", ":", "(", "-", "1", ")", "]", ")", ")", "return", "values" ]
discretize model by performing a bilinear interpolation .
train
false
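A small sketch of the averaging scheme above, assuming discretize_bilinear_2D is in scope; for a linear model the half-pixel mesh plus bilinear averaging reproduces the cell-centre values exactly:

values = discretize_bilinear_2D(lambda x, y: x + y, (0, 3), (0, 3))
# values is a 3x3 array with values[j, i] == i + j, exact for linear models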