Dataset columns (with observed value ranges):

id_within_dataset: int64, 1 to 55.5k
snippet: stringlengths, 19 to 14.2k
tokens: listlengths, 6 to 1.63k
nl: stringlengths, 6 to 352
split_within_dataset: stringclasses, 1 value
is_duplicated: bool, 2 classes
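Each record below lists these six fields in order. As a minimal loading sketch (assuming the dump is exported as JSON Lines with exactly these field names; the path snippets.jsonl is hypothetical):

    import json

    # Minimal sketch: stream records matching the schema above and keep
    # the non-duplicated rows. 'snippets.jsonl' is a hypothetical path.
    def load_records(path='snippets.jsonl'):
        records = []
        with open(path) as fh:
            for line in fh:
                rec = json.loads(line)
                if not rec['is_duplicated']:
                    records.append(rec)
        return records

    for rec in load_records()[:3]:
        # Each record pairs a flattened code snippet and its token list
        # with a short natural-language description (nl).
        print(rec['id_within_dataset'], rec['nl'], len(rec['tokens']))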
50,752
def copy_current_request_context(f):
    top = _request_ctx_stack.top
    if top is None:
        raise RuntimeError('This decorator can only be used at local scopes when a request context is on the stack. For instance within view functions.')
    reqctx = top.copy()
    def wrapper(*args, **kwargs):
        with reqctx:
            return f(*args, **kwargs)
    return update_wrapper(wrapper, f)
[ "def", "copy_current_request_context", "(", "f", ")", ":", "top", "=", "_request_ctx_stack", ".", "top", "if", "(", "top", "is", "None", ")", ":", "raise", "RuntimeError", "(", "'This decorator can only be used at local scopes when a request context is on the stack. For instance within view functions.'", ")", "reqctx", "=", "top", ".", "copy", "(", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "with", "reqctx", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "update_wrapper", "(", "wrapper", ",", "f", ")" ]
a helper function that decorates a function to retain the current request context .
train
true
50,753
def DecodeControlTuples(ldapControlTuples):
    return [
        knownLDAPControls.get(controlType, LDAPControl)(controlType, criticality, encodedControlValue=encodedControlValue)
        for (controlType, criticality, encodedControlValue) in (ldapControlTuples or [])
    ]
[ "def", "DecodeControlTuples", "(", "ldapControlTuples", ")", ":", "return", "[", "knownLDAPControls", ".", "get", "(", "controlType", ",", "LDAPControl", ")", "(", "controlType", ",", "criticality", ",", "encodedControlValue", "=", "encodedControlValue", ")", "for", "(", "controlType", ",", "criticality", ",", "encodedControlValue", ")", "in", "(", "ldapControlTuples", "or", "[", "]", ")", "]" ]
return list of readily encoded 3-tuples which can be directly passed to c module _ldap .
train
false
50,755
def _new_mods(pre_mods, post_mods):
    pre = set()
    post = set()
    for mod in pre_mods:
        pre.add(mod['module'])
    for mod in post_mods:
        post.add(mod['module'])
    return post - pre
[ "def", "_new_mods", "(", "pre_mods", ",", "post_mods", ")", ":", "pre", "=", "set", "(", ")", "post", "=", "set", "(", ")", "for", "mod", "in", "pre_mods", ":", "pre", ".", "add", "(", "mod", "[", "'module'", "]", ")", "for", "mod", "in", "post_mods", ":", "post", ".", "add", "(", "mod", "[", "'module'", "]", ")", "return", "(", "post", "-", "pre", ")" ]
return a list of the new modules .
train
true
50,756
def parse_constantlike(environment, parser):
    expr = parser.parse_expression()
    if isinstance(expr, Name):
        return expr.name
    try:
        return expr.as_const(EvalContext(environment))
    except Impossible:
        raise NonConstant(u'Not constant: %r' % expr)
[ "def", "parse_constantlike", "(", "environment", ",", "parser", ")", ":", "expr", "=", "parser", ".", "parse_expression", "(", ")", "if", "isinstance", "(", "expr", ",", "Name", ")", ":", "return", "expr", ".", "name", "try", ":", "return", "expr", ".", "as_const", "(", "EvalContext", "(", "environment", ")", ")", "except", "Impossible", ":", "raise", "NonConstant", "(", "(", "u'Not constant: %r'", "%", "expr", ")", ")" ]
parse the next expression as a "constantlike" expression .
train
false
50,757
def get_spec(func):
    if inspect.isfunction(func) or inspect.ismethod(func):
        spec = inspect.getargspec(func)
    elif hasattr(func, '__call__'):
        spec = inspect.getargspec(func.__call__)
    else:
        raise TypeError('%s is not callable' % type(func))
    defaults = spec.defaults or []
    firstdefault = len(spec.args) - len(defaults)
    args = spec.args[:firstdefault]
    kwargs = dict(zip(spec.args[firstdefault:], defaults))
    return (args, kwargs)
[ "def", "get_spec", "(", "func", ")", ":", "if", "(", "inspect", ".", "isfunction", "(", "func", ")", "or", "inspect", ".", "ismethod", "(", "func", ")", ")", ":", "spec", "=", "inspect", ".", "getargspec", "(", "func", ")", "elif", "hasattr", "(", "func", ",", "'__call__'", ")", ":", "spec", "=", "inspect", ".", "getargspec", "(", "func", ".", "__call__", ")", "else", ":", "raise", "TypeError", "(", "(", "'%s is not callable'", "%", "type", "(", "func", ")", ")", ")", "defaults", "=", "(", "spec", ".", "defaults", "or", "[", "]", ")", "firstdefault", "=", "(", "len", "(", "spec", ".", "args", ")", "-", "len", "(", "defaults", ")", ")", "args", "=", "spec", ".", "args", "[", ":", "firstdefault", "]", "kwargs", "=", "dict", "(", "zip", "(", "spec", ".", "args", "[", "firstdefault", ":", "]", ",", "defaults", ")", ")", "return", "(", "args", ",", "kwargs", ")" ]
returns tuple for a function .
train
false
50,758
def verify_signed_jwt_with_certs(jwt, certs, audience=None):
    jwt = _helpers._to_bytes(jwt)
    if jwt.count('.') != 2:
        raise AppIdentityError('Wrong number of segments in token: {0}'.format(jwt))
    (header, payload, signature) = jwt.split('.')
    message_to_sign = (header + '.') + payload
    signature = _helpers._urlsafe_b64decode(signature)
    payload_bytes = _helpers._urlsafe_b64decode(payload)
    try:
        payload_dict = json.loads(_helpers._from_bytes(payload_bytes))
    except:
        raise AppIdentityError("Can't parse token: {0}".format(payload_bytes))
    _verify_signature(message_to_sign, signature, certs.values())
    _verify_time_range(payload_dict)
    _check_audience(payload_dict, audience)
    return payload_dict
[ "def", "verify_signed_jwt_with_certs", "(", "jwt", ",", "certs", ",", "audience", "=", "None", ")", ":", "jwt", "=", "_helpers", ".", "_to_bytes", "(", "jwt", ")", "if", "(", "jwt", ".", "count", "(", "'.'", ")", "!=", "2", ")", ":", "raise", "AppIdentityError", "(", "'Wrong number of segments in token: {0}'", ".", "format", "(", "jwt", ")", ")", "(", "header", ",", "payload", ",", "signature", ")", "=", "jwt", ".", "split", "(", "'.'", ")", "message_to_sign", "=", "(", "(", "header", "+", "'.'", ")", "+", "payload", ")", "signature", "=", "_helpers", ".", "_urlsafe_b64decode", "(", "signature", ")", "payload_bytes", "=", "_helpers", ".", "_urlsafe_b64decode", "(", "payload", ")", "try", ":", "payload_dict", "=", "json", ".", "loads", "(", "_helpers", ".", "_from_bytes", "(", "payload_bytes", ")", ")", "except", ":", "raise", "AppIdentityError", "(", "\"Can't parse token: {0}\"", ".", "format", "(", "payload_bytes", ")", ")", "_verify_signature", "(", "message_to_sign", ",", "signature", ",", "certs", ".", "values", "(", ")", ")", "_verify_time_range", "(", "payload_dict", ")", "_check_audience", "(", "payload_dict", ",", "audience", ")", "return", "payload_dict" ]
verify a jwt against public certs .
train
true
50,759
def stack_sparse_frame(frame):
    lengths = [s.sp_index.npoints for (_, s) in compat.iteritems(frame)]
    nobs = sum(lengths)
    minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
    inds_to_concat = []
    vals_to_concat = []
    for (_, series) in compat.iteritems(frame):
        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')
        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)
    major_labels = np.concatenate(inds_to_concat)
    stacked_values = np.concatenate(vals_to_concat)
    index = MultiIndex(levels=[frame.index, frame.columns], labels=[major_labels, minor_labels], verify_integrity=False)
    lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo'])
    return lp.sort_index(level=0)
[ "def", "stack_sparse_frame", "(", "frame", ")", ":", "lengths", "=", "[", "s", ".", "sp_index", ".", "npoints", "for", "(", "_", ",", "s", ")", "in", "compat", ".", "iteritems", "(", "frame", ")", "]", "nobs", "=", "sum", "(", "lengths", ")", "minor_labels", "=", "np", ".", "repeat", "(", "np", ".", "arange", "(", "len", "(", "frame", ".", "columns", ")", ")", ",", "lengths", ")", "inds_to_concat", "=", "[", "]", "vals_to_concat", "=", "[", "]", "for", "(", "_", ",", "series", ")", "in", "compat", ".", "iteritems", "(", "frame", ")", ":", "if", "(", "not", "np", ".", "isnan", "(", "series", ".", "fill_value", ")", ")", ":", "raise", "TypeError", "(", "'This routine assumes NaN fill value'", ")", "int_index", "=", "series", ".", "sp_index", ".", "to_int_index", "(", ")", "inds_to_concat", ".", "append", "(", "int_index", ".", "indices", ")", "vals_to_concat", ".", "append", "(", "series", ".", "sp_values", ")", "major_labels", "=", "np", ".", "concatenate", "(", "inds_to_concat", ")", "stacked_values", "=", "np", ".", "concatenate", "(", "vals_to_concat", ")", "index", "=", "MultiIndex", "(", "levels", "=", "[", "frame", ".", "index", ",", "frame", ".", "columns", "]", ",", "labels", "=", "[", "major_labels", ",", "minor_labels", "]", ",", "verify_integrity", "=", "False", ")", "lp", "=", "DataFrame", "(", "stacked_values", ".", "reshape", "(", "(", "nobs", ",", "1", ")", ")", ",", "index", "=", "index", ",", "columns", "=", "[", "'foo'", "]", ")", "return", "lp", ".", "sort_index", "(", "level", "=", "0", ")" ]
only makes sense when fill_value is nan .
train
true
50,760
def update_prediction_data():
    min_daily_by_sr = _min_daily_pageviews_by_sr(NDAYS_TO_QUERY)
    if '' in min_daily_by_sr:
        fp = DefaultSR.name.lower()
        min_daily_by_sr[fp] = min_daily_by_sr.get(fp, 0) + min_daily_by_sr['']
        del min_daily_by_sr['']
    filtered = {sr_name: num for (sr_name, num) in min_daily_by_sr.iteritems() if num > 100}
    PromoMetrics.set(MIN_DAILY_CASS_KEY, filtered)
[ "def", "update_prediction_data", "(", ")", ":", "min_daily_by_sr", "=", "_min_daily_pageviews_by_sr", "(", "NDAYS_TO_QUERY", ")", "if", "(", "''", "in", "min_daily_by_sr", ")", ":", "fp", "=", "DefaultSR", ".", "name", ".", "lower", "(", ")", "min_daily_by_sr", "[", "fp", "]", "=", "(", "min_daily_by_sr", ".", "get", "(", "fp", ",", "0", ")", "+", "min_daily_by_sr", "[", "''", "]", ")", "del", "min_daily_by_sr", "[", "''", "]", "filtered", "=", "{", "sr_name", ":", "num", "for", "(", "sr_name", ",", "num", ")", "in", "min_daily_by_sr", ".", "iteritems", "(", ")", "if", "(", "num", ">", "100", ")", "}", "PromoMetrics", ".", "set", "(", "MIN_DAILY_CASS_KEY", ",", "filtered", ")" ]
fetch prediction data and write it to cassandra .
train
false
50,761
def read_int32(fid):
    return _unpack_simple(fid, '>i4', np.int32)
[ "def", "read_int32", "(", "fid", ")", ":", "return", "_unpack_simple", "(", "fid", ",", "'>i4'", ",", "np", ".", "int32", ")" ]
read 32bit integer from bti file .
train
false
50,762
def xssescape(text):
    # NOTE: as extracted, the second argument was a literal ':' making the
    # replace a no-op; the original very likely used the HTML entity
    # '&#58;', which was unescaped during scraping. Restored here.
    return escape(text, quote=True).replace(':', '&#58;')
[ "def", "xssescape", "(", "text", ")", ":", "return", "escape", "(", "text", ",", "quote", "=", "True", ")", ".", "replace", "(", "':'", ",", "':'", ")" ]
gets rid of < and > and & and : .
train
false
50,763
def _is_x11():
    return 'DISPLAY' in os.environ
[ "def", "_is_x11", "(", ")", ":", "return", "(", "'DISPLAY'", "in", "os", ".", "environ", ")" ]
return whether the x window system is in use .
train
false
50,765
def test_complete_graph_global_efficiency():
    for n in range(10):
        G = nx.complete_graph(5)
        assert_equal(nx.global_efficiency(G), 1)
[ "def", "test_complete_graph_global_efficiency", "(", ")", ":", "for", "n", "in", "range", "(", "10", ")", ":", "G", "=", "nx", ".", "complete_graph", "(", "5", ")", "assert_equal", "(", "nx", ".", "global_efficiency", "(", "G", ")", ",", "1", ")" ]
tests that the average global efficiency of the complete graph is one .
train
false
50,766
def guess_format(filename):
    last_period = filename.rfind('.')
    if last_period == -1:
        return 'fixed'
    extension = filename[last_period + 1:].lower()
    if extension in ('csv', 'dbf', 'fixed', 'xls', 'xlsx'):
        return extension
    elif extension in ['json', 'js']:
        return 'json'
    return None
[ "def", "guess_format", "(", "filename", ")", ":", "last_period", "=", "filename", ".", "rfind", "(", "'.'", ")", "if", "(", "last_period", "==", "(", "-", "1", ")", ")", ":", "return", "'fixed'", "extension", "=", "filename", "[", "(", "last_period", "+", "1", ")", ":", "]", ".", "lower", "(", ")", "if", "(", "extension", "in", "(", "'csv'", ",", "'dbf'", ",", "'fixed'", ",", "'xls'", ",", "'xlsx'", ")", ")", ":", "return", "extension", "elif", "(", "extension", "in", "[", "'json'", ",", "'js'", "]", ")", ":", "return", "'json'", "return", "None" ]
try to guess a files format based on its extension .
train
false
50,768
def Search(pattern, s):
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
    return _regexp_compile_cache[pattern].search(s)
[ "def", "Search", "(", "pattern", ",", "s", ")", ":", "if", "(", "pattern", "not", "in", "_regexp_compile_cache", ")", ":", "_regexp_compile_cache", "[", "pattern", "]", "=", "sre_compile", ".", "compile", "(", "pattern", ")", "return", "_regexp_compile_cache", "[", "pattern", "]", ".", "search", "(", "s", ")" ]
searches the string for the pattern .
train
true
50,769
def def_physical_type(unit, name):
    r = unit._get_physical_type_id()
    if r in _physical_unit_mapping:
        raise ValueError(u'{0!r} ({1!r}) already defined as {2!r}'.format(r, name, _physical_unit_mapping[r]))
    _physical_unit_mapping[r] = name
[ "def", "def_physical_type", "(", "unit", ",", "name", ")", ":", "r", "=", "unit", ".", "_get_physical_type_id", "(", ")", "if", "(", "r", "in", "_physical_unit_mapping", ")", ":", "raise", "ValueError", "(", "u'{0!r} ({1!r}) already defined as {2!r}'", ".", "format", "(", "r", ",", "name", ",", "_physical_unit_mapping", "[", "r", "]", ")", ")", "_physical_unit_mapping", "[", "r", "]", "=", "name" ]
adds a new physical unit mapping .
train
false
50,771
def init_viewbox():
    global win, vb
    win = pg.GraphicsWindow()
    win.ci.layout.setContentsMargins(0, 0, 0, 0)
    win.resize(200, 200)
    win.show()
    vb = win.addViewBox()
    vb.setRange(xRange=[0, 10], yRange=[0, 10], padding=0)
    qtest.qWaitForWindowShown(win)
    g = pg.GridItem()
    vb.addItem(g)
    app.processEvents()
[ "def", "init_viewbox", "(", ")", ":", "global", "win", ",", "vb", "win", "=", "pg", ".", "GraphicsWindow", "(", ")", "win", ".", "ci", ".", "layout", ".", "setContentsMargins", "(", "0", ",", "0", ",", "0", ",", "0", ")", "win", ".", "resize", "(", "200", ",", "200", ")", "win", ".", "show", "(", ")", "vb", "=", "win", ".", "addViewBox", "(", ")", "vb", ".", "setRange", "(", "xRange", "=", "[", "0", ",", "10", "]", ",", "yRange", "=", "[", "0", ",", "10", "]", ",", "padding", "=", "0", ")", "qtest", ".", "qWaitForWindowShown", "(", "win", ")", "g", "=", "pg", ".", "GridItem", "(", ")", "vb", ".", "addItem", "(", "g", ")", "app", ".", "processEvents", "(", ")" ]
helper function to init the viewbox .
train
false
50,772
def getXNormalizedVector3Path(path):
    if len(path) < 1:
        return path
    minimumX = path[0].x
    for point in path[1:]:
        minimumX = min(minimumX, point.x)
    for point in path:
        point.x -= minimumX
    maximumX = path[0].x
    for point in path[1:]:
        maximumX = max(maximumX, point.x)
    for point in path:
        point.x /= maximumX
    return path
[ "def", "getXNormalizedVector3Path", "(", "path", ")", ":", "if", "(", "len", "(", "path", ")", "<", "1", ")", ":", "return", "path", "minimumX", "=", "path", "[", "0", "]", ".", "x", "for", "point", "in", "path", "[", "1", ":", "]", ":", "minimumX", "=", "min", "(", "minimumX", ",", "point", ".", "x", ")", "for", "point", "in", "path", ":", "point", ".", "x", "-=", "minimumX", "maximumX", "=", "path", "[", "0", "]", ".", "x", "for", "point", "in", "path", "[", "1", ":", "]", ":", "maximumX", "=", "max", "(", "maximumX", ",", "point", ".", "x", ")", "for", "point", "in", "path", ":", "point", ".", "x", "/=", "maximumX", "return", "path" ]
get path where the x ranges from 0 to 1 .
train
false
50,774
def _to_gapic_feature(feature):
    return image_annotator_pb2.Feature(type=getattr(image_annotator_pb2.Feature, feature.feature_type), max_results=feature.max_results)
[ "def", "_to_gapic_feature", "(", "feature", ")", ":", "return", "image_annotator_pb2", ".", "Feature", "(", "type", "=", "getattr", "(", "image_annotator_pb2", ".", "Feature", ",", "feature", ".", "feature_type", ")", ",", "max_results", "=", "feature", ".", "max_results", ")" ]
helper function to convert a feature to a grpc feature .
train
false
50,775
def deep_format(obj, paramdict, allow_empty=False):
    if hasattr(obj, 'format'):
        try:
            ret = CustomFormatter(allow_empty).format(obj, **paramdict)
        except KeyError as exc:
            missing_key = exc.args[0]
            desc = ('%s parameter missing to format %s\nGiven:\n%s' % (missing_key, obj, pformat(paramdict)))
            raise JenkinsJobsException(desc)
    elif isinstance(obj, list):
        ret = type(obj)()
        for item in obj:
            ret.append(deep_format(item, paramdict, allow_empty))
    elif isinstance(obj, dict):
        ret = type(obj)()
        for item in obj:
            try:
                ret[CustomFormatter(allow_empty).format(item, **paramdict)] = deep_format(obj[item], paramdict, allow_empty)
            except KeyError as exc:
                missing_key = exc.args[0]
                desc = ('%s parameter missing to format %s\nGiven:\n%s' % (missing_key, obj, pformat(paramdict)))
                raise JenkinsJobsException(desc)
    else:
        ret = obj
    return ret
[ "def", "deep_format", "(", "obj", ",", "paramdict", ",", "allow_empty", "=", "False", ")", ":", "if", "hasattr", "(", "obj", ",", "'format'", ")", ":", "try", ":", "ret", "=", "CustomFormatter", "(", "allow_empty", ")", ".", "format", "(", "obj", ",", "**", "paramdict", ")", "except", "KeyError", "as", "exc", ":", "missing_key", "=", "exc", ".", "args", "[", "0", "]", "desc", "=", "(", "'%s parameter missing to format %s\\nGiven:\\n%s'", "%", "(", "missing_key", ",", "obj", ",", "pformat", "(", "paramdict", ")", ")", ")", "raise", "JenkinsJobsException", "(", "desc", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "ret", "=", "type", "(", "obj", ")", "(", ")", "for", "item", "in", "obj", ":", "ret", ".", "append", "(", "deep_format", "(", "item", ",", "paramdict", ",", "allow_empty", ")", ")", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "ret", "=", "type", "(", "obj", ")", "(", ")", "for", "item", "in", "obj", ":", "try", ":", "ret", "[", "CustomFormatter", "(", "allow_empty", ")", ".", "format", "(", "item", ",", "**", "paramdict", ")", "]", "=", "deep_format", "(", "obj", "[", "item", "]", ",", "paramdict", ",", "allow_empty", ")", "except", "KeyError", "as", "exc", ":", "missing_key", "=", "exc", ".", "args", "[", "0", "]", "desc", "=", "(", "'%s parameter missing to format %s\\nGiven:\\n%s'", "%", "(", "missing_key", ",", "obj", ",", "pformat", "(", "paramdict", ")", ")", ")", "raise", "JenkinsJobsException", "(", "desc", ")", "else", ":", "ret", "=", "obj", "return", "ret" ]
apply the paramdict via str .
train
false
50,776
def quaternion_imag(quaternion):
    return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
[ "def", "quaternion_imag", "(", "quaternion", ")", ":", "return", "numpy", ".", "array", "(", "quaternion", "[", "1", ":", "4", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "True", ")" ]
return imaginary part of quaternion .
train
true
50,777
def _delete_folder(folder_path, warn=False):
    try:
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    except WindowsError:
        if warn:
            warnings.warn('Could not delete temporary folder %s' % folder_path)
[ "def", "_delete_folder", "(", "folder_path", ",", "warn", "=", "False", ")", ":", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "folder_path", ")", ":", "shutil", ".", "rmtree", "(", "folder_path", ")", "except", "WindowsError", ":", "if", "warn", ":", "warnings", ".", "warn", "(", "(", "'Could not delete temporary folder %s'", "%", "folder_path", ")", ")" ]
utility function to cleanup a temporary folder if still existing .
train
false
50,778
def _show_receipt_json(order):
    order_info = {
        'orderNum': order.id,
        'currency': order.currency,
        'status': order.status,
        'purchase_datetime': (get_default_time_display(order.purchase_time) if order.purchase_time else None),
        'billed_to': {
            'first_name': order.bill_to_first,
            'last_name': order.bill_to_last,
            'street1': order.bill_to_street1,
            'street2': order.bill_to_street2,
            'city': order.bill_to_city,
            'state': order.bill_to_state,
            'postal_code': order.bill_to_postalcode,
            'country': order.bill_to_country,
        },
        'total_cost': order.total_cost,
        'items': [
            {
                'quantity': item.qty,
                'unit_cost': item.unit_cost,
                'line_cost': item.line_cost,
                'line_desc': item.line_desc,
                'course_key': unicode(item.course_id),
            }
            for item in OrderItem.objects.filter(order=order).select_subclasses()
        ],
    }
    return JsonResponse(order_info)
[ "def", "_show_receipt_json", "(", "order", ")", ":", "order_info", "=", "{", "'orderNum'", ":", "order", ".", "id", ",", "'currency'", ":", "order", ".", "currency", ",", "'status'", ":", "order", ".", "status", ",", "'purchase_datetime'", ":", "(", "get_default_time_display", "(", "order", ".", "purchase_time", ")", "if", "order", ".", "purchase_time", "else", "None", ")", ",", "'billed_to'", ":", "{", "'first_name'", ":", "order", ".", "bill_to_first", ",", "'last_name'", ":", "order", ".", "bill_to_last", ",", "'street1'", ":", "order", ".", "bill_to_street1", ",", "'street2'", ":", "order", ".", "bill_to_street2", ",", "'city'", ":", "order", ".", "bill_to_city", ",", "'state'", ":", "order", ".", "bill_to_state", ",", "'postal_code'", ":", "order", ".", "bill_to_postalcode", ",", "'country'", ":", "order", ".", "bill_to_country", "}", ",", "'total_cost'", ":", "order", ".", "total_cost", ",", "'items'", ":", "[", "{", "'quantity'", ":", "item", ".", "qty", ",", "'unit_cost'", ":", "item", ".", "unit_cost", ",", "'line_cost'", ":", "item", ".", "line_cost", ",", "'line_desc'", ":", "item", ".", "line_desc", ",", "'course_key'", ":", "unicode", "(", "item", ".", "course_id", ")", "}", "for", "item", "in", "OrderItem", ".", "objects", ".", "filter", "(", "order", "=", "order", ")", ".", "select_subclasses", "(", ")", "]", "}", "return", "JsonResponse", "(", "order_info", ")" ]
render the receipt page as json .
train
false
50,780
def dist_in_usersite(dist):
    if user_site:
        return normalize_path(dist_location(dist)).startswith(normalize_path(user_site))
    else:
        return False
[ "def", "dist_in_usersite", "(", "dist", ")", ":", "if", "user_site", ":", "return", "normalize_path", "(", "dist_location", "(", "dist", ")", ")", ".", "startswith", "(", "normalize_path", "(", "user_site", ")", ")", "else", ":", "return", "False" ]
return true if given distribution is installed in user site .
train
true
50,782
def test_property_always_set_descriptor():
    class C(object):
        x = property(lambda self: self._x)
        def __init__(self):
            self._x = 42
    c = C()
    c.__dict__['x'] = 43
    AreEqual(c.x, 42)

    class MyDescriptor(object):
        def __get__(self, *args):
            return 42
    class C(object):
        x = MyDescriptor()
    c = C()
    c.__dict__['x'] = 43
    AreEqual(c.x, 43)

    class MyDescriptor(object):
        def __get__(self, *args):
            return 42
        def __set__(self, *args):
            pass
    class C(object):
        x = MyDescriptor()
    c = C()
    c.__dict__['x'] = 43
    AreEqual(c.x, 42)
[ "def", "test_property_always_set_descriptor", "(", ")", ":", "class", "C", "(", "object", ",", ")", ":", "x", "=", "property", "(", "(", "lambda", "self", ":", "self", ".", "_x", ")", ")", "def", "__init__", "(", "self", ")", ":", "self", ".", "_x", "=", "42", "c", "=", "C", "(", ")", "c", ".", "__dict__", "[", "'x'", "]", "=", "43", "AreEqual", "(", "c", ".", "x", ",", "42", ")", "class", "MyDescriptor", "(", "object", ",", ")", ":", "def", "__get__", "(", "self", ",", "*", "args", ")", ":", "return", "42", "class", "C", "(", "object", ",", ")", ":", "x", "=", "MyDescriptor", "(", ")", "c", "=", "C", "(", ")", "c", ".", "__dict__", "[", "'x'", "]", "=", "43", "AreEqual", "(", "c", ".", "x", ",", "43", ")", "class", "MyDescriptor", "(", "object", ",", ")", ":", "def", "__get__", "(", "self", ",", "*", "args", ")", ":", "return", "42", "def", "__set__", "(", "self", ",", "*", "args", ")", ":", "pass", "class", "C", "(", "object", ",", ")", ":", "x", "=", "MyDescriptor", "(", ")", "c", "=", "C", "(", ")", "c", ".", "__dict__", "[", "'x'", "]", "=", "43", "AreEqual", "(", "c", ".", "x", ",", "42", ")" ]
verifies that set descriptors take precedence over dictionary entries and properties are always treated as set descriptors .
train
false
50,784
def _torational_factor_list(p, x):
    from sympy.simplify.simplify import simplify
    p1 = Poly(p, x, domain='EX')
    n = p1.degree()
    res = to_rational_coeffs(p1)
    if not res:
        return None
    (lc, r, t, g) = res
    factors = factor_list(g.as_expr())
    if lc:
        c = simplify((factors[0] * lc) * (r ** n))
        r1 = simplify(1 / r)
        a = []
        for z in factors[1:][0]:
            a.append((simplify(z[0].subs({x: (x * r1)})), z[1]))
    else:
        c = factors[0]
        a = []
        for z in factors[1:][0]:
            a.append((z[0].subs({x: (x - t)}), z[1]))
    return (c, a)
[ "def", "_torational_factor_list", "(", "p", ",", "x", ")", ":", "from", "sympy", ".", "simplify", ".", "simplify", "import", "simplify", "p1", "=", "Poly", "(", "p", ",", "x", ",", "domain", "=", "'EX'", ")", "n", "=", "p1", ".", "degree", "(", ")", "res", "=", "to_rational_coeffs", "(", "p1", ")", "if", "(", "not", "res", ")", ":", "return", "None", "(", "lc", ",", "r", ",", "t", ",", "g", ")", "=", "res", "factors", "=", "factor_list", "(", "g", ".", "as_expr", "(", ")", ")", "if", "lc", ":", "c", "=", "simplify", "(", "(", "(", "factors", "[", "0", "]", "*", "lc", ")", "*", "(", "r", "**", "n", ")", ")", ")", "r1", "=", "simplify", "(", "(", "1", "/", "r", ")", ")", "a", "=", "[", "]", "for", "z", "in", "factors", "[", "1", ":", "]", "[", "0", "]", ":", "a", ".", "append", "(", "(", "simplify", "(", "z", "[", "0", "]", ".", "subs", "(", "{", "x", ":", "(", "x", "*", "r1", ")", "}", ")", ")", ",", "z", "[", "1", "]", ")", ")", "else", ":", "c", "=", "factors", "[", "0", "]", "a", "=", "[", "]", "for", "z", "in", "factors", "[", "1", ":", "]", "[", "0", "]", ":", "a", ".", "append", "(", "(", "z", "[", "0", "]", ".", "subs", "(", "{", "x", ":", "(", "x", "-", "t", ")", "}", ")", ",", "z", "[", "1", "]", ")", ")", "return", "(", "c", ",", "a", ")" ]
helper function to factor polynomial using to_rational_coeffs examples .
train
false
50,786
def get_sum_dtype(dtype):
    if np.issubdtype(dtype, np.float_):
        return np.float_
    if (dtype.kind == 'u') and np.can_cast(dtype, np.uint):
        return np.uint
    if np.can_cast(dtype, np.int_):
        return np.int_
    return dtype
[ "def", "get_sum_dtype", "(", "dtype", ")", ":", "if", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "float_", ")", ":", "return", "np", ".", "float_", "if", "(", "(", "dtype", ".", "kind", "==", "'u'", ")", "and", "np", ".", "can_cast", "(", "dtype", ",", "np", ".", "uint", ")", ")", ":", "return", "np", ".", "uint", "if", "np", ".", "can_cast", "(", "dtype", ",", "np", ".", "int_", ")", ":", "return", "np", ".", "int_", "return", "dtype" ]
mimic numpys casting for np .
train
false
50,788
def get_client_class(version):
    warnings.warn(_LW("'get_client_class' is deprecated. Please use `novaclient.client.Client` instead."))
    (_api_version, client_class) = _get_client_class_and_version(version)
    return client_class
[ "def", "get_client_class", "(", "version", ")", ":", "warnings", ".", "warn", "(", "_LW", "(", "\"'get_client_class' is deprecated. Please use `novaclient.client.Client` instead.\"", ")", ")", "(", "_api_version", ",", "client_class", ")", "=", "_get_client_class_and_version", "(", "version", ")", "return", "client_class" ]
returns client class based on given version .
train
false
50,789
def _normalize_diallable_chars_only(number):
    return _normalize_helper(number, _DIALLABLE_CHAR_MAPPINGS, True)
[ "def", "_normalize_diallable_chars_only", "(", "number", ")", ":", "return", "_normalize_helper", "(", "number", ",", "_DIALLABLE_CHAR_MAPPINGS", ",", "True", ")" ]
normalizes a string of characters representing a phone number .
train
false
50,790
def get_dependency_graph(package_list):
    result = {package: set() for package in package_list}
    for package in package_list:
        setup_file = os.path.join(PROJECT_ROOT, package, 'setup.py')
        with open(setup_file, 'r') as file_obj:
            file_contents = file_obj.read()
        requirements = get_required_packages(file_contents)
        for requirement in requirements:
            if not requirement.startswith(PACKAGE_PREFIX):
                continue
            (_, req_package) = requirement.split(PACKAGE_PREFIX)
            req_package = req_package.replace('-', '_')
            result[req_package].add(package)
    return result
[ "def", "get_dependency_graph", "(", "package_list", ")", ":", "result", "=", "{", "package", ":", "set", "(", ")", "for", "package", "in", "package_list", "}", "for", "package", "in", "package_list", ":", "setup_file", "=", "os", ".", "path", ".", "join", "(", "PROJECT_ROOT", ",", "package", ",", "'setup.py'", ")", "with", "open", "(", "setup_file", ",", "'r'", ")", "as", "file_obj", ":", "file_contents", "=", "file_obj", ".", "read", "(", ")", "requirements", "=", "get_required_packages", "(", "file_contents", ")", "for", "requirement", "in", "requirements", ":", "if", "(", "not", "requirement", ".", "startswith", "(", "PACKAGE_PREFIX", ")", ")", ":", "continue", "(", "_", ",", "req_package", ")", "=", "requirement", ".", "split", "(", "PACKAGE_PREFIX", ")", "req_package", "=", "req_package", ".", "replace", "(", "'-'", ",", "'_'", ")", "result", "[", "req_package", "]", ".", "add", "(", "package", ")", "return", "result" ]
get a directed graph of package dependencies .
train
false
50,791
def is_installed_by_package_control():
    settings = sublime.load_settings('Package Control.sublime-settings')
    return str(__pc_name__ in set(settings.get('installed_packages', [])))
[ "def", "is_installed_by_package_control", "(", ")", ":", "settings", "=", "sublime", ".", "load_settings", "(", "'Package Control.sublime-settings'", ")", "return", "str", "(", "(", "__pc_name__", "in", "set", "(", "settings", ".", "get", "(", "'installed_packages'", ",", "[", "]", ")", ")", ")", ")" ]
check if installed by package control .
train
false
50,796
def parseoptions(module, options):
    options_dict = keydict()
    if options:
        regex = re.compile('((?:[^,"\']|"[^"]*"|\'[^\']*\')+)')
        parts = regex.split(options)[1:-1]
        for part in parts:
            if '=' in part:
                (key, value) = part.split('=', 1)
                options_dict[key] = value
            elif part != ',':
                options_dict[part] = None
    return options_dict
[ "def", "parseoptions", "(", "module", ",", "options", ")", ":", "options_dict", "=", "keydict", "(", ")", "if", "options", ":", "regex", "=", "re", ".", "compile", "(", "'((?:[^,\"\\']|\"[^\"]*\"|\\'[^\\']*\\')+)'", ")", "parts", "=", "regex", ".", "split", "(", "options", ")", "[", "1", ":", "(", "-", "1", ")", "]", "for", "part", "in", "parts", ":", "if", "(", "'='", "in", "part", ")", ":", "(", "key", ",", "value", ")", "=", "part", ".", "split", "(", "'='", ",", "1", ")", "options_dict", "[", "key", "]", "=", "value", "elif", "(", "part", "!=", "','", ")", ":", "options_dict", "[", "part", "]", "=", "None", "return", "options_dict" ]
reads a string containing ssh-key options and returns a dictionary of those options .
train
false
50,797
def test_coreg():
    assert_true(hasattr(mne_coreg, 'run'))
[ "def", "test_coreg", "(", ")", ":", "assert_true", "(", "hasattr", "(", "mne_coreg", ",", "'run'", ")", ")" ]
test mne coreg .
train
false
50,798
def _get_stage_variables(stage_variables):
    ret = dict()
    if stage_variables is None:
        return ret
    if isinstance(stage_variables, six.string_types):
        if stage_variables in __opts__:
            ret = __opts__[stage_variables]
        master_opts = __pillar__.get('master', {})
        if stage_variables in master_opts:
            ret = master_opts[stage_variables]
        if stage_variables in __pillar__:
            ret = __pillar__[stage_variables]
    elif isinstance(stage_variables, dict):
        ret = stage_variables
    if not isinstance(ret, dict):
        ret = dict()
    return ret
[ "def", "_get_stage_variables", "(", "stage_variables", ")", ":", "ret", "=", "dict", "(", ")", "if", "(", "stage_variables", "is", "None", ")", ":", "return", "ret", "if", "isinstance", "(", "stage_variables", ",", "six", ".", "string_types", ")", ":", "if", "(", "stage_variables", "in", "__opts__", ")", ":", "ret", "=", "__opts__", "[", "stage_variables", "]", "master_opts", "=", "__pillar__", ".", "get", "(", "'master'", ",", "{", "}", ")", "if", "(", "stage_variables", "in", "master_opts", ")", ":", "ret", "=", "master_opts", "[", "stage_variables", "]", "if", "(", "stage_variables", "in", "__pillar__", ")", ":", "ret", "=", "__pillar__", "[", "stage_variables", "]", "elif", "isinstance", "(", "stage_variables", ",", "dict", ")", ":", "ret", "=", "stage_variables", "if", "(", "not", "isinstance", "(", "ret", ",", "dict", ")", ")", ":", "ret", "=", "dict", "(", ")", "return", "ret" ]
helper function to retrieve stage variables from pillars/options .
train
true
50,799
def parse_response_start_line(line):
    line = native_str(line)
    match = re.match('(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)', line)
    if not match:
        raise HTTPInputError('Error parsing response start line')
    return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3))
[ "def", "parse_response_start_line", "(", "line", ")", ":", "line", "=", "native_str", "(", "line", ")", "match", "=", "re", ".", "match", "(", "'(HTTP/1.[0-9]) ([0-9]+) ([^\\r]*)'", ",", "line", ")", "if", "(", "not", "match", ")", ":", "raise", "HTTPInputError", "(", "'Error parsing response start line'", ")", "return", "ResponseStartLine", "(", "match", ".", "group", "(", "1", ")", ",", "int", "(", "match", ".", "group", "(", "2", ")", ")", ",", "match", ".", "group", "(", "3", ")", ")" ]
returns a tuple for an http 1 .
train
true
50,800
def drop_field(expr, field, *fields):
    to_remove = set((field,)).union(fields)
    new_fields = []
    for field in expr.fields:
        if field not in to_remove:
            new_fields.append(field)
        else:
            to_remove.remove(field)
    if to_remove:
        raise ValueError('fields %r were not in the fields of expr (%r)' % (sorted(to_remove), expr.fields))
    return expr[new_fields]
[ "def", "drop_field", "(", "expr", ",", "field", ",", "*", "fields", ")", ":", "to_remove", "=", "set", "(", "(", "field", ",", ")", ")", ".", "union", "(", "fields", ")", "new_fields", "=", "[", "]", "for", "field", "in", "expr", ".", "fields", ":", "if", "(", "field", "not", "in", "to_remove", ")", ":", "new_fields", ".", "append", "(", "field", ")", "else", ":", "to_remove", ".", "remove", "(", "field", ")", "if", "to_remove", ":", "raise", "ValueError", "(", "(", "'fields %r were not in the fields of expr (%r)'", "%", "(", "sorted", "(", "to_remove", ")", ",", "expr", ".", "fields", ")", ")", ")", "return", "expr", "[", "new_fields", "]" ]
drop a field or fields from a tabular expression .
train
false
50,801
def server_group_list(request):
    try:
        return api.nova.server_group_list(request)
    except Exception:
        exceptions.handle(request, _('Unable to retrieve Nova server groups.'))
        return []
[ "def", "server_group_list", "(", "request", ")", ":", "try", ":", "return", "api", ".", "nova", ".", "server_group_list", "(", "request", ")", "except", "Exception", ":", "exceptions", ".", "handle", "(", "request", ",", "_", "(", "'Unable to retrieve Nova server groups.'", ")", ")", "return", "[", "]" ]
utility method to retrieve a list of server groups .
train
true
50,802
def make_iter(obj):
    return ((not hasattr(obj, '__iter__')) and [obj]) or obj
[ "def", "make_iter", "(", "obj", ")", ":", "return", "(", "(", "(", "not", "hasattr", "(", "obj", ",", "'__iter__'", ")", ")", "and", "[", "obj", "]", ")", "or", "obj", ")" ]
makes sure that the object is always iterable .
train
false
50,804
def get_build_dir(*append):
    return __get_root('build', *append)
[ "def", "get_build_dir", "(", "*", "append", ")", ":", "return", "__get_root", "(", "'build'", ",", "*", "append", ")" ]
returns build directory for desktop .
train
false
50,805
def text_error_template(lookup=None):
    import mako.template
    return mako.template.Template('\n<%page args="error=None, traceback=None"/>\n<%!\n from mako.exceptions import RichTraceback\n%>\\\n<%\n tback = RichTraceback(error=error, traceback=traceback)\n%>\\\nTraceback (most recent call last):\n% for (filename, lineno, function, line) in tback.traceback:\n File "${filename}", line ${lineno}, in ${function or \'?\'}\n ${line | trim}\n% endfor\n${tback.errorname}: ${tback.message}\n')
[ "def", "text_error_template", "(", "lookup", "=", "None", ")", ":", "import", "mako", ".", "template", "return", "mako", ".", "template", ".", "Template", "(", "'\\n<%page args=\"error=None, traceback=None\"/>\\n<%!\\n from mako.exceptions import RichTraceback\\n%>\\\\\\n<%\\n tback = RichTraceback(error=error, traceback=traceback)\\n%>\\\\\\nTraceback (most recent call last):\\n% for (filename, lineno, function, line) in tback.traceback:\\n File \"${filename}\", line ${lineno}, in ${function or \\'?\\'}\\n ${line | trim}\\n% endfor\\n${tback.errorname}: ${tback.message}\\n'", ")" ]
provides a template that renders a stack trace in a similar format to the python interpreter .
train
false
50,806
def walk_egg(egg_dir):
    walker = os.walk(egg_dir)
    (base, dirs, files) = walker.next()
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield (base, dirs, files)
    for bdf in walker:
        yield bdf
[ "def", "walk_egg", "(", "egg_dir", ")", ":", "walker", "=", "os", ".", "walk", "(", "egg_dir", ")", "(", "base", ",", "dirs", ",", "files", ")", "=", "walker", ".", "next", "(", ")", "if", "(", "'EGG-INFO'", "in", "dirs", ")", ":", "dirs", ".", "remove", "(", "'EGG-INFO'", ")", "(", "yield", "(", "base", ",", "dirs", ",", "files", ")", ")", "for", "bdf", "in", "walker", ":", "(", "yield", "bdf", ")" ]
walk an unpacked eggs contents .
train
true
50,807
def _min_daily_pageviews_by_sr(ndays=NDAYS_TO_QUERY, end_date=None):
    if not end_date:
        last_modified = traffic.get_traffic_last_modified()
        end_date = last_modified - timedelta(days=1)
    stop = end_date
    start = stop - timedelta(ndays)
    time_points = traffic.get_time_points('day', start, stop)
    cls = traffic.PageviewsBySubredditAndPath
    q = traffic.Session.query(cls.srpath, func.min(cls.pageview_count)).filter(cls.interval == 'day').filter(cls.date.in_(time_points)).filter(cls.srpath.like('%-GET_listing')).group_by(cls.srpath)
    retval = {}
    for row in q:
        m = PAGEVIEWS_REGEXP.match(row[0])
        if m:
            retval[m.group(1)] = row[1]
    return retval
[ "def", "_min_daily_pageviews_by_sr", "(", "ndays", "=", "NDAYS_TO_QUERY", ",", "end_date", "=", "None", ")", ":", "if", "(", "not", "end_date", ")", ":", "last_modified", "=", "traffic", ".", "get_traffic_last_modified", "(", ")", "end_date", "=", "(", "last_modified", "-", "timedelta", "(", "days", "=", "1", ")", ")", "stop", "=", "end_date", "start", "=", "(", "stop", "-", "timedelta", "(", "ndays", ")", ")", "time_points", "=", "traffic", ".", "get_time_points", "(", "'day'", ",", "start", ",", "stop", ")", "cls", "=", "traffic", ".", "PageviewsBySubredditAndPath", "q", "=", "traffic", ".", "Session", ".", "query", "(", "cls", ".", "srpath", ",", "func", ".", "min", "(", "cls", ".", "pageview_count", ")", ")", ".", "filter", "(", "(", "cls", ".", "interval", "==", "'day'", ")", ")", ".", "filter", "(", "cls", ".", "date", ".", "in_", "(", "time_points", ")", ")", ".", "filter", "(", "cls", ".", "srpath", ".", "like", "(", "'%-GET_listing'", ")", ")", ".", "group_by", "(", "cls", ".", "srpath", ")", "retval", "=", "{", "}", "for", "row", "in", "q", ":", "m", "=", "PAGEVIEWS_REGEXP", ".", "match", "(", "row", "[", "0", "]", ")", "if", "m", ":", "retval", "[", "m", ".", "group", "(", "1", ")", "]", "=", "row", "[", "1", "]", "return", "retval" ]
return dict mapping sr_name to min_pageviews over the last ndays .
train
false
50,809
def itermulti(seqn):
    return chain(imap(lambda x: x, iterfunc(IterGen(Sequence(seqn)))))
[ "def", "itermulti", "(", "seqn", ")", ":", "return", "chain", "(", "imap", "(", "(", "lambda", "x", ":", "x", ")", ",", "iterfunc", "(", "IterGen", "(", "Sequence", "(", "seqn", ")", ")", ")", ")", ")" ]
test multiple tiers of iterators .
train
false
50,811
def boot_session(bootinfo):
    import frappe
    bootinfo.custom_css = frappe.db.get_value(u'Style Settings', None, u'custom_css') or u''
    bootinfo.website_settings = frappe.get_doc(u'Website Settings')
    if frappe.session[u'user'] != u'Guest':
        update_page_info(bootinfo)
        load_country_and_currency(bootinfo)
        bootinfo.notification_settings = frappe.get_doc(u'Notification Control', u'Notification Control')
        bootinfo.customer_count = frappe.db.sql(u'select count(*) from tabCustomer')[0][0]
        if not bootinfo.customer_count:
            # 'DCTB' in the extracted text encodes tab characters; restored as \t.
            bootinfo.setup_complete = (frappe.db.sql(u'select name from\n\t\t\t\ttabCompany limit 1') and u'Yes') or u'No'
        bootinfo.docs += frappe.db.sql(u'select name, default_currency, cost_center,\n\t\t\tdefault_terms, default_letter_head, default_bank_account from `tabCompany`', as_dict=1, update={u'doctype': u':Company'})
[ "def", "boot_session", "(", "bootinfo", ")", ":", "import", "frappe", "bootinfo", ".", "custom_css", "=", "(", "frappe", ".", "db", ".", "get_value", "(", "u'Style Settings'", ",", "None", ",", "u'custom_css'", ")", "or", "u''", ")", "bootinfo", ".", "website_settings", "=", "frappe", ".", "get_doc", "(", "u'Website Settings'", ")", "if", "(", "frappe", ".", "session", "[", "u'user'", "]", "!=", "u'Guest'", ")", ":", "update_page_info", "(", "bootinfo", ")", "load_country_and_currency", "(", "bootinfo", ")", "bootinfo", ".", "notification_settings", "=", "frappe", ".", "get_doc", "(", "u'Notification Control'", ",", "u'Notification Control'", ")", "bootinfo", ".", "customer_count", "=", "frappe", ".", "db", ".", "sql", "(", "u'select count(*) from tabCustomer'", ")", "[", "0", "]", "[", "0", "]", "if", "(", "not", "bootinfo", ".", "customer_count", ")", ":", "bootinfo", ".", "setup_complete", "=", "(", "(", "frappe", ".", "db", ".", "sql", "(", "u'select name from\\n DCTB DCTB DCTB DCTB tabCompany limit 1'", ")", "and", "u'Yes'", ")", "or", "u'No'", ")", "bootinfo", ".", "docs", "+=", "frappe", ".", "db", ".", "sql", "(", "u'select name, default_currency, cost_center,\\n DCTB DCTB DCTB default_terms, default_letter_head, default_bank_account from `tabCompany`'", ",", "as_dict", "=", "1", ",", "update", "=", "{", "u'doctype'", ":", "u':Company'", "}", ")" ]
boot session - send website info if guest .
train
false
50,813
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        mu = gmm.means_[c]
        diff = X - mu
        with np.errstate(under='ignore'):
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        cv[c] = avg_cv + min_covar * np.eye(n_features)
    return cv
[ "def", "_covar_mstep_full", "(", "gmm", ",", "X", ",", "responsibilities", ",", "weighted_X_sum", ",", "norm", ",", "min_covar", ")", ":", "n_features", "=", "X", ".", "shape", "[", "1", "]", "cv", "=", "np", ".", "empty", "(", "(", "gmm", ".", "n_components", ",", "n_features", ",", "n_features", ")", ")", "for", "c", "in", "range", "(", "gmm", ".", "n_components", ")", ":", "post", "=", "responsibilities", "[", ":", ",", "c", "]", "mu", "=", "gmm", ".", "means_", "[", "c", "]", "diff", "=", "(", "X", "-", "mu", ")", "with", "np", ".", "errstate", "(", "under", "=", "'ignore'", ")", ":", "avg_cv", "=", "(", "np", ".", "dot", "(", "(", "post", "*", "diff", ".", "T", ")", ",", "diff", ")", "/", "(", "post", ".", "sum", "(", ")", "+", "(", "10", "*", "EPS", ")", ")", ")", "cv", "[", "c", "]", "=", "(", "avg_cv", "+", "(", "min_covar", "*", "np", ".", "eye", "(", "n_features", ")", ")", ")", "return", "cv" ]
perform the covariance m step for full cases .
train
true
50,814
def groupupdate(group, options=None):
    manager = MANAGER
    if options is None:
        options = []
    elif isinstance(options, str):
        options = [options]
    options = ' '.join(options)
    run_as_root('%(manager)s %(options)s groupupdate "%(group)s"' % locals())
[ "def", "groupupdate", "(", "group", ",", "options", "=", "None", ")", ":", "manager", "=", "MANAGER", "if", "(", "options", "is", "None", ")", ":", "options", "=", "[", "]", "elif", "isinstance", "(", "options", ",", "str", ")", ":", "options", "=", "[", "options", "]", "options", "=", "' '", ".", "join", "(", "options", ")", "run_as_root", "(", "(", "'%(manager)s %(options)s groupupdate \"%(group)s\"'", "%", "locals", "(", ")", ")", ")" ]
update an existing software group .
train
true
50,815
def _default_gs_bucket_name():
    request = file_service_pb.GetDefaultGsBucketNameRequest()
    response = file_service_pb.GetDefaultGsBucketNameResponse()
    _make_call('GetDefaultGsBucketName', request, response)
    return response.default_gs_bucket_name()
[ "def", "_default_gs_bucket_name", "(", ")", ":", "request", "=", "file_service_pb", ".", "GetDefaultGsBucketNameRequest", "(", ")", "response", "=", "file_service_pb", ".", "GetDefaultGsBucketNameResponse", "(", ")", "_make_call", "(", "'GetDefaultGsBucketName'", ",", "request", ",", "response", ")", "return", "response", ".", "default_gs_bucket_name", "(", ")" ]
return the default google storage bucket name for the application .
train
false
50,816
def libvlc_media_list_remove_index(p_ml, i_pos):
    f = (_Cfunctions.get('libvlc_media_list_remove_index', None) or
         _Cfunction('libvlc_media_list_remove_index', ((1,), (1,)), None,
                    ctypes.c_int, MediaList, ctypes.c_int))
    return f(p_ml, i_pos)
[ "def", "libvlc_media_list_remove_index", "(", "p_ml", ",", "i_pos", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_remove_index'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_remove_index'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaList", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_ml", ",", "i_pos", ")" ]
remove media instance from media list on a position the l{libvlc_media_list_lock} should be held upon entering this function .
train
true
50,817
@pytest.fixture(scope='session')
def qnam(qapp):
    from PyQt5.QtNetwork import QNetworkAccessManager
    nam = QNetworkAccessManager()
    nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
    return nam
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'session'", ")", "def", "qnam", "(", "qapp", ")", ":", "from", "PyQt5", ".", "QtNetwork", "import", "QNetworkAccessManager", "nam", "=", "QNetworkAccessManager", "(", ")", "nam", ".", "setNetworkAccessible", "(", "QNetworkAccessManager", ".", "NotAccessible", ")", "return", "nam" ]
session-wide qnetworkaccessmanager .
train
false
50,818
def reset_ramsey(res, degree=5):
    order = degree + 1
    k_vars = res.model.exog.shape[1]
    y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2]
    exog = np.column_stack((res.model.exog, y_fitted_vander))
    res_aux = OLS(res.model.endog, exog).fit()
    r_matrix = np.eye(degree - 1, exog.shape[1], k_vars)
    return res_aux.f_test(r_matrix)
[ "def", "reset_ramsey", "(", "res", ",", "degree", "=", "5", ")", ":", "order", "=", "(", "degree", "+", "1", ")", "k_vars", "=", "res", ".", "model", ".", "exog", ".", "shape", "[", "1", "]", "y_fitted_vander", "=", "np", ".", "vander", "(", "res", ".", "fittedvalues", ",", "order", ")", "[", ":", ",", ":", "(", "-", "2", ")", "]", "exog", "=", "np", ".", "column_stack", "(", "(", "res", ".", "model", ".", "exog", ",", "y_fitted_vander", ")", ")", "res_aux", "=", "OLS", "(", "res", ".", "model", ".", "endog", ",", "exog", ")", ".", "fit", "(", ")", "r_matrix", "=", "np", ".", "eye", "(", "(", "degree", "-", "1", ")", ",", "exog", ".", "shape", "[", "1", "]", ",", "k_vars", ")", "return", "res_aux", ".", "f_test", "(", "r_matrix", ")" ]
ramseys reset specification test for linear models this is a general specification test .
train
false
50,819
def write_key_value_file(csvfile, dictionary, append=False):
    writer = csv.writer(csvfile, delimiter=',')
    if not append:
        writer.writerow(['key', 'value'])
    for (key, val) in dictionary.items():
        writer.writerow([key, val])
[ "def", "write_key_value_file", "(", "csvfile", ",", "dictionary", ",", "append", "=", "False", ")", ":", "writer", "=", "csv", ".", "writer", "(", "csvfile", ",", "delimiter", "=", "','", ")", "if", "(", "not", "append", ")", ":", "writer", ".", "writerow", "(", "[", "'key'", ",", "'value'", "]", ")", "for", "(", "key", ",", "val", ")", "in", "dictionary", ".", "items", "(", ")", ":", "writer", ".", "writerow", "(", "[", "key", ",", "val", "]", ")" ]
writes a dictionary to a writable file in a csv format args: csvfile : writable file dictionary : dictionary containing key-value pairs append : writes key .
train
false
50,820
def get_services(profile='pagerduty', subdomain=None, api_key=None):
    return _list_items('services', 'id', profile=profile, subdomain=subdomain, api_key=api_key)
[ "def", "get_services", "(", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ")", ":", "return", "_list_items", "(", "'services'", ",", "'id'", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ",", "api_key", "=", "api_key", ")" ]
list services belonging to this account cli example: salt myminion pagerduty .
train
true
50,821
def metadef_object_delete_namespace_content(context, namespace_name, session=None):
    session = session or get_session()
    return metadef_object_api.delete_by_namespace_name(context, namespace_name, session)
[ "def", "metadef_object_delete_namespace_content", "(", "context", ",", "namespace_name", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "return", "metadef_object_api", ".", "delete_by_namespace_name", "(", "context", ",", "namespace_name", ",", "session", ")" ]
delete an object or raise if namespace or object doesnt exist .
train
false
50,822
def matchBrackets(string):
    rest = string[1:]
    inside = u'('
    while (rest != u'') and (not rest.startswith(u')')):
        if rest.startswith(u'('):
            (part, rest) = matchBrackets(rest)
            inside = inside + part
        else:
            inside = inside + rest[0]
            rest = rest[1:]
    if rest.startswith(u')'):
        return (inside + u')', rest[1:])
    raise AssertionError(u"Unmatched bracket in string '" + string + u"'")
[ "def", "matchBrackets", "(", "string", ")", ":", "rest", "=", "string", "[", "1", ":", "]", "inside", "=", "u'('", "while", "(", "(", "rest", "!=", "u''", ")", "and", "(", "not", "rest", ".", "startswith", "(", "u')'", ")", ")", ")", ":", "if", "rest", ".", "startswith", "(", "u'('", ")", ":", "(", "part", ",", "rest", ")", "=", "matchBrackets", "(", "rest", ")", "inside", "=", "(", "inside", "+", "part", ")", "else", ":", "inside", "=", "(", "inside", "+", "rest", "[", "0", "]", ")", "rest", "=", "rest", "[", "1", ":", "]", "if", "rest", ".", "startswith", "(", "u')'", ")", ":", "return", "(", "(", "inside", "+", "u')'", ")", ",", "rest", "[", "1", ":", "]", ")", "raise", "AssertionError", "(", "(", "(", "u\"Unmatched bracket in string '\"", "+", "string", ")", "+", "u\"'\"", ")", ")" ]
separate the contents matching the first set of brackets from the rest of the input .
train
false
50,823
def test_rgb_to_hsl_part_4():
    assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
    assert rgb_to_hsl(255, 51, 0) == (12, 100, 50)
    assert rgb_to_hsl(255, 102, 0) == (24, 100, 50)
    assert rgb_to_hsl(255, 153, 0) == (36, 100, 50)
    assert rgb_to_hsl(255, 204, 0) == (48, 100, 50)
    assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
    assert rgb_to_hsl(204, 255, 0) == (72, 100, 50)
    assert rgb_to_hsl(153, 255, 0) == (84, 100, 50)
    assert rgb_to_hsl(102, 255, 0) == (96, 100, 50)
    assert rgb_to_hsl(51, 255, 0) == (108, 100, 50)
    assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
[ "def", "test_rgb_to_hsl_part_4", "(", ")", ":", "assert", "(", "rgb_to_hsl", "(", "255", ",", "0", ",", "0", ")", "==", "(", "0", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "255", ",", "51", ",", "0", ")", "==", "(", "12", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "255", ",", "102", ",", "0", ")", "==", "(", "24", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "255", ",", "153", ",", "0", ")", "==", "(", "36", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "255", ",", "204", ",", "0", ")", "==", "(", "48", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "255", ",", "255", ",", "0", ")", "==", "(", "60", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "204", ",", "255", ",", "0", ")", "==", "(", "72", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "153", ",", "255", ",", "0", ")", "==", "(", "84", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "102", ",", "255", ",", "0", ")", "==", "(", "96", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "51", ",", "255", ",", "0", ")", "==", "(", "108", ",", "100", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "0", ",", "255", ",", "0", ")", "==", "(", "120", ",", "100", ",", "50", ")", ")" ]
test rgb to hsl color function .
train
false
50,825
@core_helper
def urls_for_resource(resource):
    r = getattr(fanstatic_resources, resource)
    resources = list(r.resources)
    core = fanstatic_resources.fanstatic_extensions.core
    f = core.get_needed()
    lib = r.library
    root_path = f.library_url(lib)
    resources = core.sort_resources(resources)
    if f._bundle:
        resources = core.bundle_resources(resources)
    out = []
    for resource in resources:
        if isinstance(resource, core.Bundle):
            paths = [resource.relpath for resource in resource.resources()]
            relpath = ';'.join(paths)
            relpath = core.BUNDLE_PREFIX + relpath
        else:
            relpath = resource.relpath
        out.append('%s/%s' % (root_path, relpath))
    return out
[ "@", "core_helper", "def", "urls_for_resource", "(", "resource", ")", ":", "r", "=", "getattr", "(", "fanstatic_resources", ",", "resource", ")", "resources", "=", "list", "(", "r", ".", "resources", ")", "core", "=", "fanstatic_resources", ".", "fanstatic_extensions", ".", "core", "f", "=", "core", ".", "get_needed", "(", ")", "lib", "=", "r", ".", "library", "root_path", "=", "f", ".", "library_url", "(", "lib", ")", "resources", "=", "core", ".", "sort_resources", "(", "resources", ")", "if", "f", ".", "_bundle", ":", "resources", "=", "core", ".", "bundle_resources", "(", "resources", ")", "out", "=", "[", "]", "for", "resource", "in", "resources", ":", "if", "isinstance", "(", "resource", ",", "core", ".", "Bundle", ")", ":", "paths", "=", "[", "resource", ".", "relpath", "for", "resource", "in", "resource", ".", "resources", "(", ")", "]", "relpath", "=", "';'", ".", "join", "(", "paths", ")", "relpath", "=", "(", "core", ".", "BUNDLE_PREFIX", "+", "relpath", ")", "else", ":", "relpath", "=", "resource", ".", "relpath", "out", ".", "append", "(", "(", "'%s/%s'", "%", "(", "root_path", ",", "relpath", ")", ")", ")", "return", "out" ]
returns a list of urls for the resource specified .
train
false
50,826
def test_swap_gate():
    swap_gate_matrix = Matrix(((1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 1)))
    assert represent(SwapGate(1, 0).decompose(), nqubits=2) == swap_gate_matrix
    assert qapply(SwapGate(1, 3) * Qubit('0010')) == Qubit('1000')
    nqubits = 4
    for i in range(nqubits):
        for j in range(i):
            assert represent(SwapGate(i, j), nqubits=nqubits) == represent(SwapGate(i, j).decompose(), nqubits=nqubits)
[ "def", "test_swap_gate", "(", ")", ":", "swap_gate_matrix", "=", "Matrix", "(", "(", "(", "1", ",", "0", ",", "0", ",", "0", ")", ",", "(", "0", ",", "0", ",", "1", ",", "0", ")", ",", "(", "0", ",", "1", ",", "0", ",", "0", ")", ",", "(", "0", ",", "0", ",", "0", ",", "1", ")", ")", ")", "assert", "(", "represent", "(", "SwapGate", "(", "1", ",", "0", ")", ".", "decompose", "(", ")", ",", "nqubits", "=", "2", ")", "==", "swap_gate_matrix", ")", "assert", "(", "qapply", "(", "(", "SwapGate", "(", "1", ",", "3", ")", "*", "Qubit", "(", "'0010'", ")", ")", ")", "==", "Qubit", "(", "'1000'", ")", ")", "nqubits", "=", "4", "for", "i", "in", "range", "(", "nqubits", ")", ":", "for", "j", "in", "range", "(", "i", ")", ":", "assert", "(", "represent", "(", "SwapGate", "(", "i", ",", "j", ")", ",", "nqubits", "=", "nqubits", ")", "==", "represent", "(", "SwapGate", "(", "i", ",", "j", ")", ".", "decompose", "(", ")", ",", "nqubits", "=", "nqubits", ")", ")" ]
test the swap gate .
train
false
50,827
def split_semicolon(line, maxsplit=None):
    splitted_line = line.split(';')
    splitted_line_size = len(splitted_line)
    if (maxsplit is None) or (0 > maxsplit):
        maxsplit = splitted_line_size
    i = 0
    while i < (splitted_line_size - 1):
        ends = splitted_line[i].endswith('\\')
        if ends:
            splitted_line[i] = splitted_line[i][:-1]
        if (ends or (i >= maxsplit)) and (i < (splitted_line_size - 1)):
            splitted_line[i] = ';'.join([splitted_line[i], splitted_line[i + 1]])
            del splitted_line[i + 1]
            splitted_line_size -= 1
        else:
            i += 1
    return splitted_line
[ "def", "split_semicolon", "(", "line", ",", "maxsplit", "=", "None", ")", ":", "splitted_line", "=", "line", ".", "split", "(", "';'", ")", "splitted_line_size", "=", "len", "(", "splitted_line", ")", "if", "(", "(", "maxsplit", "is", "None", ")", "or", "(", "0", ">", "maxsplit", ")", ")", ":", "maxsplit", "=", "splitted_line_size", "i", "=", "0", "while", "(", "i", "<", "(", "splitted_line_size", "-", "1", ")", ")", ":", "ends", "=", "splitted_line", "[", "i", "]", ".", "endswith", "(", "'\\\\'", ")", "if", "ends", ":", "splitted_line", "[", "i", "]", "=", "splitted_line", "[", "i", "]", "[", ":", "(", "-", "1", ")", "]", "if", "(", "(", "ends", "or", "(", "i", ">=", "maxsplit", ")", ")", "and", "(", "i", "<", "(", "splitted_line_size", "-", "1", ")", ")", ")", ":", "splitted_line", "[", "i", "]", "=", "';'", ".", "join", "(", "[", "splitted_line", "[", "i", "]", ",", "splitted_line", "[", "(", "i", "+", "1", ")", "]", "]", ")", "del", "splitted_line", "[", "(", "i", "+", "1", ")", "]", "splitted_line_size", "-=", "1", "else", ":", "i", "+=", "1", "return", "splitted_line" ]
split a line on semicolon characters but not on escaped semicolons .
train
false
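For illustration, a minimal usage sketch of split_semicolon as defined above; the input strings are made up for the example:

# a backslash-escaped semicolon is kept inside its field
parts = split_semicolon('a;b\\;c;d')
assert parts == ['a', 'b;c', 'd']
# maxsplit caps the number of splits, counted from the left
assert split_semicolon('a;b;c', maxsplit=1) == ['a', 'b;c']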
50,828
def get_item(x, slices): return GetItem(slices)(x)
[ "def", "get_item", "(", "x", ",", "slices", ")", ":", "return", "GetItem", "(", "slices", ")", "(", "x", ")" ]
extract elements from array with specified shape .
train
false
50,829
def read_images(path, sz=None): c = 0 (X, y) = ([], []) for (dirname, dirnames, filenames) in os.walk(path): for subdirname in dirnames: subject_path = os.path.join(dirname, subdirname) for filename in os.listdir(subject_path): try: im = Image.open(os.path.join(subject_path, filename)) im = im.convert('L') if (sz is not None): im = im.resize(sz, Image.ANTIALIAS) X.append(np.asarray(im, dtype=np.uint8)) y.append(c) except IOError as e: print 'I/O error: {0}'.format(e) raise e except: print 'Unexpected error: {0}'.format(sys.exc_info()[0]) raise c = (c + 1) return [X, y]
[ "def", "read_images", "(", "path", ",", "sz", "=", "None", ")", ":", "c", "=", "0", "(", "X", ",", "y", ")", "=", "(", "[", "]", ",", "[", "]", ")", "for", "(", "dirname", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "subdirname", "in", "dirnames", ":", "subject_path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "subdirname", ")", "for", "filename", "in", "os", ".", "listdir", "(", "subject_path", ")", ":", "try", ":", "im", "=", "Image", ".", "open", "(", "os", ".", "path", ".", "join", "(", "subject_path", ",", "filename", ")", ")", "im", "=", "im", ".", "convert", "(", "'L'", ")", "if", "(", "sz", "is", "not", "None", ")", ":", "im", "=", "im", ".", "resize", "(", "sz", ",", "Image", ".", "ANTIALIAS", ")", "X", ".", "append", "(", "np", ".", "asarray", "(", "im", ",", "dtype", "=", "np", ".", "uint8", ")", ")", "y", ".", "append", "(", "c", ")", "except", "IOError", "as", "e", ":", "print", "'I/O error: {0}'", ".", "format", "(", "e", ")", "raise", "e", "except", ":", "print", "'Unexpected error: {0}'", ".", "format", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", "raise", "c", "=", "(", "c", "+", "1", ")", "return", "[", "X", ",", "y", "]" ]
reads the images in a given folder .
train
false
50,830
def degree_mixing_dict(G, x='out', y='in', weight=None, nodes=None, normalized=False): xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) return mixing_dict(xy_iter, normalized=normalized)
[ "def", "degree_mixing_dict", "(", "G", ",", "x", "=", "'out'", ",", "y", "=", "'in'", ",", "weight", "=", "None", ",", "nodes", "=", "None", ",", "normalized", "=", "False", ")", ":", "xy_iter", "=", "node_degree_xy", "(", "G", ",", "x", "=", "x", ",", "y", "=", "y", ",", "nodes", "=", "nodes", ",", "weight", "=", "weight", ")", "return", "mixing_dict", "(", "xy_iter", ",", "normalized", "=", "normalized", ")" ]
return dictionary representation of mixing matrix for degree .
train
false
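A small illustrative run, assuming this is the networkx degree_mixing_dict (node_degree_xy and mixing_dict come from the same package); the graph is a toy example:

import networkx as nx
G = nx.path_graph(3)  # degrees are 1, 2, 1
# each undirected edge contributes both (d(u), d(v)) and (d(v), d(u))
assert nx.degree_mixing_dict(G) == {1: {2: 2}, 2: {1: 2}}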
50,831
def _getTempFileName(): handle = tempfile.NamedTemporaryFile(prefix='test', suffix='.txt', dir='.') filename = handle.name handle.close() return filename
[ "def", "_getTempFileName", "(", ")", ":", "handle", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "'test'", ",", "suffix", "=", "'.txt'", ",", "dir", "=", "'.'", ")", "filename", "=", "handle", ".", "name", "handle", ".", "close", "(", ")", "return", "filename" ]
creates a unique file name that starts with test and ends with .txt .
train
false
50,833
def shutdown_signal(signum, frame): LOG.warning('shutting down, got signal %d', signum) shutdown()
[ "def", "shutdown_signal", "(", "signum", ",", "frame", ")", ":", "LOG", ".", "warning", "(", "'shutting down, got signal %d'", ",", "signum", ")", "shutdown", "(", ")" ]
called when we get a signal and need to terminate .
train
false
50,835
def load_countgraph(filename, small=False): if small: countgraph = _SmallCountgraph(1, [1]) countgraph.load(filename) else: countgraph = _Countgraph(1, [1]) countgraph.load(filename) return countgraph
[ "def", "load_countgraph", "(", "filename", ",", "small", "=", "False", ")", ":", "if", "small", ":", "countgraph", "=", "_SmallCountgraph", "(", "1", ",", "[", "1", "]", ")", "countgraph", ".", "load", "(", "filename", ")", "else", ":", "countgraph", "=", "_Countgraph", "(", "1", ",", "[", "1", "]", ")", "countgraph", ".", "load", "(", "filename", ")", "return", "countgraph" ]
load a countgraph object from the given filename and return it .
train
false
50,836
def askinteger(title, prompt, **kw): d = _QueryInteger(title, prompt, **kw) return d.result
[ "def", "askinteger", "(", "title", ",", "prompt", ",", "**", "kw", ")", ":", "d", "=", "_QueryInteger", "(", "title", ",", "prompt", ",", "**", "kw", ")", "return", "d", ".", "result" ]
get an integer from the user . arguments: title -- the dialog title ; prompt -- the label text ; **kw -- see simpledialog class . return value is an integer .
train
false
50,837
def _process_worker(call_queue, result_queue): while True: call_item = call_queue.get(block=True) if (call_item is None): result_queue.put(os.getpid()) return try: r = call_item.fn(*call_item.args, **call_item.kwargs) except BaseException as e: result_queue.put(_ResultItem(call_item.work_id, exception=e)) else: result_queue.put(_ResultItem(call_item.work_id, result=r))
[ "def", "_process_worker", "(", "call_queue", ",", "result_queue", ")", ":", "while", "True", ":", "call_item", "=", "call_queue", ".", "get", "(", "block", "=", "True", ")", "if", "(", "call_item", "is", "None", ")", ":", "result_queue", ".", "put", "(", "os", ".", "getpid", "(", ")", ")", "return", "try", ":", "r", "=", "call_item", ".", "fn", "(", "*", "call_item", ".", "args", ",", "**", "call_item", ".", "kwargs", ")", "except", "BaseException", "as", "e", ":", "result_queue", ".", "put", "(", "_ResultItem", "(", "call_item", ".", "work_id", ",", "exception", "=", "e", ")", ")", "else", ":", "result_queue", ".", "put", "(", "_ResultItem", "(", "call_item", ".", "work_id", ",", "result", "=", "r", ")", ")" ]
evaluates calls from call_queue and places the results in result_queue .
train
true
50,838
def _netstat_route_netbsd(): ret = [] cmd = 'netstat -f inet -rn | tail -n+5' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({'addr_family': 'inet', 'destination': comps[0], 'gateway': comps[1], 'netmask': '', 'flags': comps[3], 'interface': comps[6]}) cmd = 'netstat -f inet6 -rn | tail -n+5' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({'addr_family': 'inet6', 'destination': comps[0], 'gateway': comps[1], 'netmask': '', 'flags': comps[3], 'interface': comps[6]}) return ret
[ "def", "_netstat_route_netbsd", "(", ")", ":", "ret", "=", "[", "]", "cmd", "=", "'netstat -f inet -rn | tail -n+5'", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "True", ")", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "comps", "=", "line", ".", "split", "(", ")", "ret", ".", "append", "(", "{", "'addr_family'", ":", "'inet'", ",", "'destination'", ":", "comps", "[", "0", "]", ",", "'gateway'", ":", "comps", "[", "1", "]", ",", "'netmask'", ":", "''", ",", "'flags'", ":", "comps", "[", "3", "]", ",", "'interface'", ":", "comps", "[", "6", "]", "}", ")", "cmd", "=", "'netstat -f inet6 -rn | tail -n+5'", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "True", ")", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "comps", "=", "line", ".", "split", "(", ")", "ret", ".", "append", "(", "{", "'addr_family'", ":", "'inet6'", ",", "'destination'", ":", "comps", "[", "0", "]", ",", "'gateway'", ":", "comps", "[", "1", "]", ",", "'netmask'", ":", "''", ",", "'flags'", ":", "comps", "[", "3", "]", ",", "'interface'", ":", "comps", "[", "6", "]", "}", ")", "return", "ret" ]
return netstat routing information for netbsd .
train
true
50,842
def jackknife_resampling(data): n = data.shape[0] assert (n > 0), u'data must contain at least one measurement' resamples = np.empty([n, (n - 1)]) for i in range(n): resamples[i] = np.delete(data, i) return resamples
[ "def", "jackknife_resampling", "(", "data", ")", ":", "n", "=", "data", ".", "shape", "[", "0", "]", "assert", "(", "n", ">", "0", ")", ",", "u'data must contain at least one measurement'", "resamples", "=", "np", ".", "empty", "(", "[", "n", ",", "(", "n", "-", "1", ")", "]", ")", "for", "i", "in", "range", "(", "n", ")", ":", "resamples", "[", "i", "]", "=", "np", ".", "delete", "(", "data", ",", "i", ")", "return", "resamples" ]
performs jackknife resampling on numpy arrays .
train
false
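A quick sketch of the output shape, using a toy array:

import numpy as np
data = np.array([1.0, 2.0, 3.0])
resamples = jackknife_resampling(data)
# n resamples, each leaving exactly one observation out
assert resamples.shape == (3, 2)
assert (resamples == [[2.0, 3.0], [1.0, 3.0], [1.0, 2.0]]).all()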
50,843
def _retrieve_location_from_ssdp(response): parsed_headers = re.findall('(?P<name>.*?): (?P<value>.*?)\\r\\n', response) header_locations = [header[1] for header in parsed_headers if (header[0].lower() == 'location')] if (len(header_locations) < 1): raise IGDError('IGD response does not contain a "location" header.') return urlparse(header_locations[0])
[ "def", "_retrieve_location_from_ssdp", "(", "response", ")", ":", "parsed_headers", "=", "re", ".", "findall", "(", "'(?P<name>.*?): (?P<value>.*?)\\\\r\\\\n'", ",", "response", ")", "header_locations", "=", "[", "header", "[", "1", "]", "for", "header", "in", "parsed_headers", "if", "(", "header", "[", "0", "]", ".", "lower", "(", ")", "==", "'location'", ")", "]", "if", "(", "len", "(", "header_locations", ")", "<", "1", ")", ":", "raise", "IGDError", "(", "'IGD response does not contain a \"location\" header.'", ")", "return", "urlparse", "(", "header_locations", "[", "0", "]", ")" ]
parse raw http response to retrieve the upnp location header and return a parseresult object .
train
false
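A usage sketch with a hand-written ssdp response; the address and url are hypothetical:

response = ('HTTP/1.1 200 OK\r\n'
            'LOCATION: http://192.168.1.1:5000/rootDesc.xml\r\n'
            '\r\n')
parsed = _retrieve_location_from_ssdp(response)
assert parsed.hostname == '192.168.1.1' and parsed.port == 5000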
50,844
def isfinite(arr): return (np.isfinite(np.max(arr)) and np.isfinite(np.min(arr)))
[ "def", "isfinite", "(", "arr", ")", ":", "return", "(", "np", ".", "isfinite", "(", "np", ".", "max", "(", "arr", ")", ")", "and", "np", ".", "isfinite", "(", "np", ".", "min", "(", "arr", ")", ")", ")" ]
test whether a numpy array is entirely finite .
train
false
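Behavior sketch on toy arrays (both nan and inf propagate through max/min, so the two reductions cover the whole array):

import numpy as np
assert isfinite(np.array([0.5, 1.5]))
assert not isfinite(np.array([0.5, np.nan]))
assert not isfinite(np.array([0.5, np.inf]))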
50,846
def set_dhcp_ip(iface): cmd = ['netsh', 'interface', 'ip', 'set', 'address', iface, 'dhcp'] __salt__['cmd.run'](cmd, python_shell=False) return {'Interface': iface, 'DHCP enabled': 'Yes'}
[ "def", "set_dhcp_ip", "(", "iface", ")", ":", "cmd", "=", "[", "'netsh'", ",", "'interface'", ",", "'ip'", ",", "'set'", ",", "'address'", ",", "iface", ",", "'dhcp'", "]", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "return", "{", "'Interface'", ":", "iface", ",", "'DHCP enabled'", ":", "'Yes'", "}" ]
set windows nic to get ip from dhcp .
train
false
50,847
def rev_list(ref, count=None, repo_dir=None): assert (not ref.startswith('-')) opts = [] if count: opts += ['-n', str(atoi(count))] argv = ((['git', 'rev-list', '--pretty=format:%at'] + opts) + [ref, '--']) p = subprocess.Popen(argv, preexec_fn=_gitenv(repo_dir), stdout=subprocess.PIPE) commit = None for row in p.stdout: s = row.strip() if s.startswith('commit '): commit = s[7:].decode('hex') else: date = int(s) (yield (date, commit)) rv = p.wait() if rv: raise GitError, ('git rev-list returned error %d' % rv)
[ "def", "rev_list", "(", "ref", ",", "count", "=", "None", ",", "repo_dir", "=", "None", ")", ":", "assert", "(", "not", "ref", ".", "startswith", "(", "'-'", ")", ")", "opts", "=", "[", "]", "if", "count", ":", "opts", "+=", "[", "'-n'", ",", "str", "(", "atoi", "(", "count", ")", ")", "]", "argv", "=", "(", "(", "[", "'git'", ",", "'rev-list'", ",", "'--pretty=format:%at'", "]", "+", "opts", ")", "+", "[", "ref", ",", "'--'", "]", ")", "p", "=", "subprocess", ".", "Popen", "(", "argv", ",", "preexec_fn", "=", "_gitenv", "(", "repo_dir", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "commit", "=", "None", "for", "row", "in", "p", ".", "stdout", ":", "s", "=", "row", ".", "strip", "(", ")", "if", "s", ".", "startswith", "(", "'commit '", ")", ":", "commit", "=", "s", "[", "7", ":", "]", ".", "decode", "(", "'hex'", ")", "else", ":", "date", "=", "int", "(", "s", ")", "(", "yield", "(", "date", ",", "commit", ")", ")", "rv", "=", "p", ".", "wait", "(", ")", "if", "rv", ":", "raise", "GitError", ",", "(", "'git rev-list returned error %d'", "%", "rv", ")" ]
generate a list of reachable commits in reverse chronological order .
train
false
50,851
def _get_events(): return read_events(event_name)
[ "def", "_get_events", "(", ")", ":", "return", "read_events", "(", "event_name", ")" ]
get events .
train
false
50,852
def parse_body_arguments(content_type, body, arguments, files, headers=None): if (headers and ('Content-Encoding' in headers)): gen_log.warning('Unsupported Content-Encoding: %s', headers['Content-Encoding']) return if content_type.startswith('application/x-www-form-urlencoded'): try: uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) except Exception as e: gen_log.warning('Invalid x-www-form-urlencoded body: %s', e) uri_arguments = {} for (name, values) in uri_arguments.items(): if values: arguments.setdefault(name, []).extend(values) elif content_type.startswith('multipart/form-data'): try: fields = content_type.split(';') for field in fields: (k, sep, v) = field.strip().partition('=') if ((k == 'boundary') and v): parse_multipart_form_data(utf8(v), body, arguments, files) break else: raise ValueError('multipart boundary not found') except Exception as e: gen_log.warning('Invalid multipart/form-data: %s', e)
[ "def", "parse_body_arguments", "(", "content_type", ",", "body", ",", "arguments", ",", "files", ",", "headers", "=", "None", ")", ":", "if", "(", "headers", "and", "(", "'Content-Encoding'", "in", "headers", ")", ")", ":", "gen_log", ".", "warning", "(", "'Unsupported Content-Encoding: %s'", ",", "headers", "[", "'Content-Encoding'", "]", ")", "return", "if", "content_type", ".", "startswith", "(", "'application/x-www-form-urlencoded'", ")", ":", "try", ":", "uri_arguments", "=", "parse_qs_bytes", "(", "native_str", "(", "body", ")", ",", "keep_blank_values", "=", "True", ")", "except", "Exception", "as", "e", ":", "gen_log", ".", "warning", "(", "'Invalid x-www-form-urlencoded body: %s'", ",", "e", ")", "uri_arguments", "=", "{", "}", "for", "(", "name", ",", "values", ")", "in", "uri_arguments", ".", "items", "(", ")", ":", "if", "values", ":", "arguments", ".", "setdefault", "(", "name", ",", "[", "]", ")", ".", "extend", "(", "values", ")", "elif", "content_type", ".", "startswith", "(", "'multipart/form-data'", ")", ":", "try", ":", "fields", "=", "content_type", ".", "split", "(", "';'", ")", "for", "field", "in", "fields", ":", "(", "k", ",", "sep", ",", "v", ")", "=", "field", ".", "strip", "(", ")", ".", "partition", "(", "'='", ")", "if", "(", "(", "k", "==", "'boundary'", ")", "and", "v", ")", ":", "parse_multipart_form_data", "(", "utf8", "(", "v", ")", ",", "body", ",", "arguments", ",", "files", ")", "break", "else", ":", "raise", "ValueError", "(", "'multipart boundary not found'", ")", "except", "Exception", "as", "e", ":", "gen_log", ".", "warning", "(", "'Invalid multipart/form-data: %s'", ",", "e", ")" ]
parses a form request body .
train
true
50,855
def _bytes_for_block(content): content = ''.join(content.split('\n')[1:(-1)]) return base64.b64decode(stem.util.str_tools._to_bytes(content))
[ "def", "_bytes_for_block", "(", "content", ")", ":", "content", "=", "''", ".", "join", "(", "content", ".", "split", "(", "'\\n'", ")", "[", "1", ":", "(", "-", "1", ")", "]", ")", "return", "base64", ".", "b64decode", "(", "stem", ".", "util", ".", "str_tools", ".", "_to_bytes", "(", "content", ")", ")" ]
provides the base64 decoded content of a pgp-style block .
train
false
50,856
def build_filename(instance, filename): now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') name = hashlib.md5(('%s' % now)).hexdigest() ext = os.path.splitext(filename) return os.path.join(('%s/%s' % (instance._meta.app_label, instance._meta.module_name)), ('%s%s' % (name, ext[1])))
[ "def", "build_filename", "(", "instance", ",", "filename", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "name", "=", "hashlib", ".", "md5", "(", "(", "'%s'", "%", "now", ")", ")", ".", "hexdigest", "(", ")", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "return", "os", ".", "path", ".", "join", "(", "(", "'%s/%s'", "%", "(", "instance", ".", "_meta", ".", "app_label", ",", "instance", ".", "_meta", ".", "module_name", ")", ")", ",", "(", "'%s%s'", "%", "(", "name", ",", "ext", "[", "1", "]", ")", ")", ")" ]
converts an image filename to a hash .
train
false
50,858
def __binskim(name, bin_an_dic, run_local=False, app_dir=None): print '[INFO] Running binskim.' if run_local: bin_path = os.path.join(app_dir, bin_an_dic['bin']) if platform.machine().endswith('64'): binskim_path = config['binskim']['file_x64'] else: binskim_path = config['binskim']['file_x86'] command = 'analyze' path = bin_path output_p = '-o' output_d = (bin_path + '_binskim') policy_p = '--config' policy_d = 'default' params = [binskim_path, command, path, output_p, output_d, policy_p, policy_d] pipe = subprocess.Popen(subprocess.list2cmdline(params)) pipe.wait() out_file = open(output_d) output = json.loads(out_file.read()) else: response = proxy.binskim(name, _get_token()) output = json.loads(response) bin_an_dic = __parse_binskim(bin_an_dic, output) return bin_an_dic
[ "def", "__binskim", "(", "name", ",", "bin_an_dic", ",", "run_local", "=", "False", ",", "app_dir", "=", "None", ")", ":", "print", "'[INFO] Running binskim.'", "if", "run_local", ":", "bin_path", "=", "os", ".", "path", ".", "join", "(", "app_dir", ",", "bin_an_dic", "[", "'bin'", "]", ")", "if", "platform", ".", "machine", "(", ")", ".", "endswith", "(", "'64'", ")", ":", "binskim_path", "=", "config", "[", "'binskim'", "]", "[", "'file_x64'", "]", "else", ":", "binskim_path", "=", "config", "[", "'binskim'", "]", "[", "'file_x86'", "]", "command", "=", "'analyze'", "path", "=", "bin_path", "output_p", "=", "'-o'", "output_d", "=", "(", "bin_path", "+", "'_binskim'", ")", "policy_p", "=", "'--config'", "policy_d", "=", "'default'", "params", "=", "[", "binskim_path", ",", "command", ",", "path", ",", "output_p", ",", "output_d", ",", "policy_p", ",", "policy_d", "]", "pipe", "=", "subprocess", ".", "Popen", "(", "subprocess", ".", "list2cmdline", "(", "params", ")", ")", "pipe", ".", "wait", "(", ")", "out_file", "=", "open", "(", "output_d", ")", "output", "=", "json", ".", "loads", "(", "out_file", ".", "read", "(", ")", ")", "else", ":", "response", "=", "proxy", ".", "binskim", "(", "name", ",", "_get_token", "(", ")", ")", "output", "=", "json", ".", "loads", "(", "response", ")", "bin_an_dic", "=", "__parse_binskim", "(", "bin_an_dic", ",", "output", ")", "return", "bin_an_dic" ]
run the binskim analysis .
train
false
50,859
def scrubStr(str): groups = re.findall(GROUP_RGX, str) for group in groups: if group[0]: sub = group[0] else: sub = group[1].split(':')[1] sub = sub.replace('-', ' ') str = re.sub(SUB_RGX, sub, str, 1) return str
[ "def", "scrubStr", "(", "str", ")", ":", "groups", "=", "re", ".", "findall", "(", "GROUP_RGX", ",", "str", ")", "for", "group", "in", "groups", ":", "if", "group", "[", "0", "]", ":", "sub", "=", "group", "[", "0", "]", "else", ":", "sub", "=", "group", "[", "1", "]", ".", "split", "(", "':'", ")", "[", "1", "]", "sub", "=", "sub", ".", "replace", "(", "'-'", ",", "' '", ")", "str", "=", "re", ".", "sub", "(", "SUB_RGX", ",", "sub", ",", "str", ",", "1", ")", "return", "str" ]
the purpose of this function is to scrub the weird template mark-up out of strings that veekun is using for their pokedex .
train
false
50,860
def attacks(pos): row = filter((lambda r: (pos in r)), posrows) col = filter((lambda c: (pos in c)), poscols) dia = filter((lambda d: (pos in d)), posdiag) gai = filter((lambda g: (pos in g)), posgaid) assert (len(row) == len(col) == len(dia) == len(gai) == 1) return (frozenset(row[0]), frozenset(col[0]), frozenset(dia[0]), frozenset(gai[0]))
[ "def", "attacks", "(", "pos", ")", ":", "row", "=", "filter", "(", "(", "lambda", "r", ":", "(", "pos", "in", "r", ")", ")", ",", "posrows", ")", "col", "=", "filter", "(", "(", "lambda", "c", ":", "(", "pos", "in", "c", ")", ")", ",", "poscols", ")", "dia", "=", "filter", "(", "(", "lambda", "d", ":", "(", "pos", "in", "d", ")", ")", ",", "posdiag", ")", "gai", "=", "filter", "(", "(", "lambda", "g", ":", "(", "pos", "in", "g", ")", ")", ",", "posgaid", ")", "assert", "(", "len", "(", "row", ")", "==", "len", "(", "col", ")", "==", "len", "(", "dia", ")", "==", "len", "(", "gai", ")", "==", "1", ")", "return", "(", "frozenset", "(", "row", "[", "0", "]", ")", ",", "frozenset", "(", "col", "[", "0", "]", ")", ",", "frozenset", "(", "dia", "[", "0", "]", ")", ",", "frozenset", "(", "gai", "[", "0", "]", ")", ")" ]
all attacked positions .
train
false
50,861
@Profiler.profile def test_orm_commit(n): for i in range(n): session = Session(bind=engine) session.add(Customer(name=('customer name %d' % i), description=('customer description %d' % i))) session.commit()
[ "@", "Profiler", ".", "profile", "def", "test_orm_commit", "(", "n", ")", ":", "for", "i", "in", "range", "(", "n", ")", ":", "session", "=", "Session", "(", "bind", "=", "engine", ")", "session", ".", "add", "(", "Customer", "(", "name", "=", "(", "'customer name %d'", "%", "i", ")", ",", "description", "=", "(", "'customer description %d'", "%", "i", ")", ")", ")", "session", ".", "commit", "(", ")" ]
individual insert/commit pairs via the orm .
train
false
50,863
def list_problem_responses(course_key, problem_location): problem_key = UsageKey.from_string(problem_location) run = problem_key.run if (not run): problem_key = course_key.make_usage_key_from_deprecated_string(problem_location) if (problem_key.course_key != course_key): return [] smdat = StudentModule.objects.filter(course_id=course_key, module_state_key=problem_key) smdat = smdat.order_by('student') return [{'username': response.student.username, 'state': response.state} for response in smdat]
[ "def", "list_problem_responses", "(", "course_key", ",", "problem_location", ")", ":", "problem_key", "=", "UsageKey", ".", "from_string", "(", "problem_location", ")", "run", "=", "problem_key", ".", "run", "if", "(", "not", "run", ")", ":", "problem_key", "=", "course_key", ".", "make_usage_key_from_deprecated_string", "(", "problem_location", ")", "if", "(", "problem_key", ".", "course_key", "!=", "course_key", ")", ":", "return", "[", "]", "smdat", "=", "StudentModule", ".", "objects", ".", "filter", "(", "course_id", "=", "course_key", ",", "module_state_key", "=", "problem_key", ")", "smdat", "=", "smdat", ".", "order_by", "(", "'student'", ")", "return", "[", "{", "'username'", ":", "response", ".", "student", ".", "username", ",", "'state'", ":", "response", ".", "state", "}", "for", "response", "in", "smdat", "]" ]
return responses to a given problem as a list of dicts with username and state .
train
false
50,864
def grab_tree(api_handle, item): settings = api_handle.settings() results = [item] parent = item.get_parent() while (parent is not None): results.append(parent) parent = parent.get_parent() results.append(settings) return results
[ "def", "grab_tree", "(", "api_handle", ",", "item", ")", ":", "settings", "=", "api_handle", ".", "settings", "(", ")", "results", "=", "[", "item", "]", "parent", "=", "item", ".", "get_parent", "(", ")", "while", "(", "parent", "is", "not", "None", ")", ":", "results", ".", "append", "(", "parent", ")", "parent", "=", "parent", ".", "get_parent", "(", ")", "results", ".", "append", "(", "settings", ")", "return", "results" ]
climb the tree and get every node .
train
false
50,866
def lowest(logging=logging): return _set_priority('Lowest', logging=logging)
[ "def", "lowest", "(", "logging", "=", "logging", ")", ":", "return", "_set_priority", "(", "'Lowest'", ",", "logging", "=", "logging", ")" ]
process will only execute when system is idle .
train
false
50,867
def _elem_from_scoperef(scoperef): elem = scoperef[0] for lname in scoperef[1]: try: elem = elem.names[lname] except KeyError: return None return elem
[ "def", "_elem_from_scoperef", "(", "scoperef", ")", ":", "elem", "=", "scoperef", "[", "0", "]", "for", "lname", "in", "scoperef", "[", "1", "]", ":", "try", ":", "elem", "=", "elem", ".", "names", "[", "lname", "]", "except", "KeyError", ":", "return", "None", "return", "elem" ]
a scoperef is .
train
false
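A minimal sketch of the implied data shape, using stand-in objects (SimpleNamespace here is purely illustrative, not the real element type):

from types import SimpleNamespace
leaf = SimpleNamespace(names={})
root = SimpleNamespace(names={'foo': SimpleNamespace(names={'bar': leaf})})
# a scoperef pairs a root element with a list of child-name lookups
assert _elem_from_scoperef((root, ['foo', 'bar'])) is leaf
assert _elem_from_scoperef((root, ['missing'])) is None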
50,868
def _find_cached_image(session, image_id, sr_ref): name_label = _get_image_vdi_label(image_id) recs = session.call_xenapi('VDI.get_all_records_where', ('field "name__label"="%s"' % name_label)) number_found = len(recs) if (number_found > 0): if (number_found > 1): LOG.warning(_LW('Multiple base images for image: %s'), image_id) return list(recs.keys())[0]
[ "def", "_find_cached_image", "(", "session", ",", "image_id", ",", "sr_ref", ")", ":", "name_label", "=", "_get_image_vdi_label", "(", "image_id", ")", "recs", "=", "session", ".", "call_xenapi", "(", "'VDI.get_all_records_where'", ",", "(", "'field \"name__label\"=\"%s\"'", "%", "name_label", ")", ")", "number_found", "=", "len", "(", "recs", ")", "if", "(", "number_found", ">", "0", ")", ":", "if", "(", "number_found", ">", "1", ")", ":", "LOG", ".", "warning", "(", "_LW", "(", "'Multiple base images for image: %s'", ")", ",", "image_id", ")", "return", "list", "(", "recs", ".", "keys", "(", ")", ")", "[", "0", "]" ]
returns the vdi-ref of the cached image .
train
false
50,869
def gegenbauer(n, alpha, monic=False): base = jacobi(n, (alpha - 0.5), (alpha - 0.5), monic=monic) if monic: return base factor = (((_gam(((2 * alpha) + n)) * _gam((alpha + 0.5))) / _gam((2 * alpha))) / _gam(((alpha + 0.5) + n))) base._scale(factor) base.__dict__['_eval_func'] = (lambda x: eval_gegenbauer(float(n), alpha, x)) return base
[ "def", "gegenbauer", "(", "n", ",", "alpha", ",", "monic", "=", "False", ")", ":", "base", "=", "jacobi", "(", "n", ",", "(", "alpha", "-", "0.5", ")", ",", "(", "alpha", "-", "0.5", ")", ",", "monic", "=", "monic", ")", "if", "monic", ":", "return", "base", "factor", "=", "(", "(", "(", "_gam", "(", "(", "(", "2", "*", "alpha", ")", "+", "n", ")", ")", "*", "_gam", "(", "(", "alpha", "+", "0.5", ")", ")", ")", "/", "_gam", "(", "(", "2", "*", "alpha", ")", ")", ")", "/", "_gam", "(", "(", "(", "alpha", "+", "0.5", ")", "+", "n", ")", ")", ")", "base", ".", "_scale", "(", "factor", ")", "base", ".", "__dict__", "[", "'_eval_func'", "]", "=", "(", "lambda", "x", ":", "eval_gegenbauer", "(", "float", "(", "n", ")", ",", "alpha", ",", "x", ")", ")", "return", "base" ]
gegenbauer polynomial .
train
false
50,872
def _iptables_cmd(family='ipv4'): if (family == 'ipv6'): return salt.utils.which('ip6tables') else: return salt.utils.which('iptables')
[ "def", "_iptables_cmd", "(", "family", "=", "'ipv4'", ")", ":", "if", "(", "family", "==", "'ipv6'", ")", ":", "return", "salt", ".", "utils", ".", "which", "(", "'ip6tables'", ")", "else", ":", "return", "salt", ".", "utils", ".", "which", "(", "'iptables'", ")" ]
return correct command based on the family .
train
true
50,873
def check_version_info(conn, version_table, expected_version): version_from_table = conn.execute(sa.select((version_table.c.version,))).scalar() if (version_from_table is None): version_from_table = 0 if (version_from_table != expected_version): raise AssetDBVersionError(db_version=version_from_table, expected_version=expected_version)
[ "def", "check_version_info", "(", "conn", ",", "version_table", ",", "expected_version", ")", ":", "version_from_table", "=", "conn", ".", "execute", "(", "sa", ".", "select", "(", "(", "version_table", ".", "c", ".", "version", ",", ")", ")", ")", ".", "scalar", "(", ")", "if", "(", "version_from_table", "is", "None", ")", ":", "version_from_table", "=", "0", "if", "(", "version_from_table", "!=", "expected_version", ")", ":", "raise", "AssetDBVersionError", "(", "db_version", "=", "version_from_table", ",", "expected_version", "=", "expected_version", ")" ]
checks for a version value in the version table .
train
true
50,876
def get_scanner(hass, config): scanner = AsusWrtDeviceScanner(config[DOMAIN]) return (scanner if scanner.success_init else None)
[ "def", "get_scanner", "(", "hass", ",", "config", ")", ":", "scanner", "=", "AsusWrtDeviceScanner", "(", "config", "[", "DOMAIN", "]", ")", "return", "(", "scanner", "if", "scanner", ".", "success_init", "else", "None", ")" ]
validate the configuration and return an asuswrt scanner .
train
false
50,877
def asquote(astr): astr = astr.replace('"', '" & quote & "') return '"{}"'.format(astr)
[ "def", "asquote", "(", "astr", ")", ":", "astr", "=", "astr", ".", "replace", "(", "'\"'", ",", "'\" & quote & \"'", ")", "return", "'\"{}\"'", ".", "format", "(", "astr", ")" ]
return the applescript equivalent of the given string .
train
false
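Two illustrative calls (input strings are made up):

assert asquote('hello') == '"hello"'
# embedded double quotes are spliced in via applescript's quote constant
assert asquote('say "hi"') == '"say " & quote & "hi" & quote & ""'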
50,878
@control_command() def disable_events(state): dispatcher = state.consumer.event_dispatcher if (u'task' in dispatcher.groups): dispatcher.groups.discard(u'task') logger.info(u'Events of group {task} disabled by remote.') return ok(u'task events disabled') return ok(u'task events already disabled')
[ "@", "control_command", "(", ")", "def", "disable_events", "(", "state", ")", ":", "dispatcher", "=", "state", ".", "consumer", ".", "event_dispatcher", "if", "(", "u'task'", "in", "dispatcher", ".", "groups", ")", ":", "dispatcher", ".", "groups", ".", "discard", "(", "u'task'", ")", "logger", ".", "info", "(", "u'Events of group {task} disabled by remote.'", ")", "return", "ok", "(", "u'task events disabled'", ")", "return", "ok", "(", "u'task events already disabled'", ")" ]
tell worker(s) to stop sending task-related events .
train
false
50,879
def to_tuple(x): if (not isinstance(x, (tuple, list))): return x return tuple(map(to_tuple, x))
[ "def", "to_tuple", "(", "x", ")", ":", "if", "(", "not", "isinstance", "(", "x", ",", "(", "tuple", ",", "list", ")", ")", ")", ":", "return", "x", "return", "tuple", "(", "map", "(", "to_tuple", ",", "x", ")", ")" ]
converts lists to tuples .
train
false
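Behavior sketch on a nested input:

# nesting converts recursively; non-list/tuple leaves pass through as-is
assert to_tuple([1, [2, 3], (4, [5])]) == (1, (2, 3), (4, (5,)))
assert to_tuple('abc') == 'abc'  # strings are not treated as sequences here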
50,880
def vm_detach(name, kwargs=None, call=None): if (call != 'action'): raise SaltCloudSystemExit('The vm_detach action must be called with -a or --action.') if (kwargs is None): kwargs = {} disk_id = kwargs.get('disk_id', None) if (disk_id is None): raise SaltCloudSystemExit("The vm_detach function requires a 'disk_id' to be provided.") (server, user, password) = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.detach(auth, vm_id, int(disk_id)) data = {'action': 'vm.detach', 'detached': response[0], 'vm_id': response[1], 'error_code': response[2]} return data
[ "def", "vm_detach", "(", "name", ",", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "!=", "'action'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The vm_detach action must be called with -a or --action.'", ")", "if", "(", "kwargs", "is", "None", ")", ":", "kwargs", "=", "{", "}", "disk_id", "=", "kwargs", ".", "get", "(", "'disk_id'", ",", "None", ")", "if", "(", "disk_id", "is", "None", ")", ":", "raise", "SaltCloudSystemExit", "(", "\"The vm_detach function requires a 'disk_id' to be provided.\"", ")", "(", "server", ",", "user", ",", "password", ")", "=", "_get_xml_rpc", "(", ")", "auth", "=", "':'", ".", "join", "(", "[", "user", ",", "password", "]", ")", "vm_id", "=", "int", "(", "get_vm_id", "(", "kwargs", "=", "{", "'name'", ":", "name", "}", ")", ")", "response", "=", "server", ".", "one", ".", "vm", ".", "detach", "(", "auth", ",", "vm_id", ",", "int", "(", "disk_id", ")", ")", "data", "=", "{", "'action'", ":", "'vm.detach'", ",", "'detached'", ":", "response", "[", "0", "]", ",", "'vm_id'", ":", "response", "[", "1", "]", ",", "'error_code'", ":", "response", "[", "2", "]", "}", "return", "data" ]
detaches a disk from a virtual machine .
train
true
50,881
def send_mail_raise_smtp(messages): raise SMTPRecipientsRefused(recipients=messages[0].recipients())
[ "def", "send_mail_raise_smtp", "(", "messages", ")", ":", "raise", "SMTPRecipientsRefused", "(", "recipients", "=", "messages", "[", "0", "]", ".", "recipients", "(", ")", ")" ]
patch email_utils .
train
false
50,884
def fullmatch(pattern, string, flags=0): return _compile(pattern, flags).fullmatch(string)
[ "def", "fullmatch", "(", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "return", "_compile", "(", "pattern", ",", "flags", ")", ".", "fullmatch", "(", "string", ")" ]
try to apply the pattern against all of the string .
train
false
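Equivalent behavior is visible through the public re module (python 3.4+):

import re
# unlike re.match, the whole string must be consumed
assert re.fullmatch('ab*', 'abbb') is not None
assert re.fullmatch('ab*', 'abbbc') is None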
50,885
def Column(*args, **kw): test_opts = dict([(k, kw.pop(k)) for k in list(kw) if k.startswith('test_')]) if config.requirements.foreign_key_ddl.predicate(config): args = [arg for arg in args if (not isinstance(arg, schema.ForeignKey))] col = schema.Column(*args, **kw) if (('test_needs_autoincrement' in test_opts) and kw.get('primary_key', False)): col.info['test_needs_autoincrement'] = True if exclusions.against(config._current, 'firebird', 'oracle'): def add_seq(c, tbl): c._init_items(schema.Sequence(_truncate_name(config.db.dialect, (((tbl.name + '_') + c.name) + '_seq')), optional=True)) event.listen(col, 'after_parent_attach', add_seq, propagate=True) return col
[ "def", "Column", "(", "*", "args", ",", "**", "kw", ")", ":", "test_opts", "=", "dict", "(", "[", "(", "k", ",", "kw", ".", "pop", "(", "k", ")", ")", "for", "k", "in", "list", "(", "kw", ")", "if", "k", ".", "startswith", "(", "'test_'", ")", "]", ")", "if", "config", ".", "requirements", ".", "foreign_key_ddl", ".", "predicate", "(", "config", ")", ":", "args", "=", "[", "arg", "for", "arg", "in", "args", "if", "(", "not", "isinstance", "(", "arg", ",", "schema", ".", "ForeignKey", ")", ")", "]", "col", "=", "schema", ".", "Column", "(", "*", "args", ",", "**", "kw", ")", "if", "(", "(", "'test_needs_autoincrement'", "in", "test_opts", ")", "and", "kw", ".", "get", "(", "'primary_key'", ",", "False", ")", ")", ":", "col", ".", "info", "[", "'test_needs_autoincrement'", "]", "=", "True", "if", "exclusions", ".", "against", "(", "config", ".", "_current", ",", "'firebird'", ",", "'oracle'", ")", ":", "def", "add_seq", "(", "c", ",", "tbl", ")", ":", "c", ".", "_init_items", "(", "schema", ".", "Sequence", "(", "_truncate_name", "(", "config", ".", "db", ".", "dialect", ",", "(", "(", "(", "tbl", ".", "name", "+", "'_'", ")", "+", "c", ".", "name", ")", "+", "'_seq'", ")", ")", ",", "optional", "=", "True", ")", ")", "event", ".", "listen", "(", "col", ",", "'after_parent_attach'", ",", "add_seq", ",", "propagate", "=", "True", ")", "return", "col" ]
a schema .
train
false
50,886
def get_model_and_form_class(model, form_class): if form_class: return (form_class._meta.model, form_class) if model: tmp_model = model class Meta: model = tmp_model class_name = (model.__name__ + 'Form') form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta}) return (model, form_class) raise GenericViewError('Generic view must be called with either a model or form_class argument.')
[ "def", "get_model_and_form_class", "(", "model", ",", "form_class", ")", ":", "if", "form_class", ":", "return", "(", "form_class", ".", "_meta", ".", "model", ",", "form_class", ")", "if", "model", ":", "tmp_model", "=", "model", "class", "Meta", ":", "model", "=", "tmp_model", "class_name", "=", "(", "model", ".", "__name__", "+", "'Form'", ")", "form_class", "=", "ModelFormMetaclass", "(", "class_name", ",", "(", "ModelForm", ",", ")", ",", "{", "'Meta'", ":", "Meta", "}", ")", "return", "(", "model", ",", "form_class", ")", "raise", "GenericViewError", "(", "'Generic view must be called with either a model or form_class argument.'", ")" ]
returns a model and form class based on the model and form_class parameters that were passed to the generic view .
train
true
50,887
def validate_campaign(campaign): return (campaign and (campaign in campaigns.get_campaigns()))
[ "def", "validate_campaign", "(", "campaign", ")", ":", "return", "(", "campaign", "and", "(", "campaign", "in", "campaigns", ".", "get_campaigns", "(", ")", ")", ")" ]
non-view helper function that validates campaign .
train
false
50,888
def _encode_quopri_mhtml(msg): orig = msg.get_payload(decode=True) encdata = quopri.encodestring(orig, quotetabs=False) msg.set_payload(encdata) msg['Content-Transfer-Encoding'] = 'quoted-printable'
[ "def", "_encode_quopri_mhtml", "(", "msg", ")", ":", "orig", "=", "msg", ".", "get_payload", "(", "decode", "=", "True", ")", "encdata", "=", "quopri", ".", "encodestring", "(", "orig", ",", "quotetabs", "=", "False", ")", "msg", ".", "set_payload", "(", "encdata", ")", "msg", "[", "'Content-Transfer-Encoding'", "]", "=", "'quoted-printable'" ]
encode the message's payload in quoted-printable .
train
false
50,893
def heartbeat_expires(timestamp, freq=60, expire_window=HEARTBEAT_EXPIRE_WINDOW, Decimal=Decimal, float=float, isinstance=isinstance): freq = (float(freq) if isinstance(freq, Decimal) else freq) if isinstance(timestamp, Decimal): timestamp = float(timestamp) return (timestamp + (freq * (expire_window / 100.0)))
[ "def", "heartbeat_expires", "(", "timestamp", ",", "freq", "=", "60", ",", "expire_window", "=", "HEARTBEAT_EXPIRE_WINDOW", ",", "Decimal", "=", "Decimal", ",", "float", "=", "float", ",", "isinstance", "=", "isinstance", ")", ":", "freq", "=", "(", "float", "(", "freq", ")", "if", "isinstance", "(", "freq", ",", "Decimal", ")", "else", "freq", ")", "if", "isinstance", "(", "timestamp", ",", "Decimal", ")", ":", "timestamp", "=", "float", "(", "timestamp", ")", "return", "(", "timestamp", "+", "(", "freq", "*", "(", "expire_window", "/", "100.0", ")", ")", ")" ]
return time when heartbeat expires .
train
false
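A worked example of the arithmetic, passing expire_window explicitly rather than relying on the module-level default:

from decimal import Decimal
# expire_window is a percentage of freq: 200 means two full periods
assert heartbeat_expires(100.0, freq=60, expire_window=200) == 220.0
# Decimal inputs are coerced to float before the arithmetic
assert heartbeat_expires(Decimal('100'), freq=Decimal('60'), expire_window=200) == 220.0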
50,894
def _children_with_tags(element, tags): return itertools.chain(*(_children_with_tag(element, tag) for tag in tags))
[ "def", "_children_with_tags", "(", "element", ",", "tags", ")", ":", "return", "itertools", ".", "chain", "(", "*", "(", "_children_with_tag", "(", "element", ",", "tag", ")", "for", "tag", "in", "tags", ")", ")" ]
returns child elements of the given element whose tag is in a given list .
train
false