id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
38,168
def upload_file_to_s3_by_job_id(file_path, content_type='text/html', extra_message=''):
    """Upload a local file to the S3 bucket under the current job_id folder.

    The S3 key is <job_id>/<file_path>; delegates to upload_file_to_s3.
    """
    return upload_file_to_s3(file_path, join(job_id, file_path),
                             content_type, extra_message)
[ "def", "upload_file_to_s3_by_job_id", "(", "file_path", ",", "content_type", "=", "'text/html'", ",", "extra_message", "=", "''", ")", ":", "s3_filename", "=", "join", "(", "job_id", ",", "file_path", ")", "return", "upload_file_to_s3", "(", "file_path", ",", "s3_filename", ",", "content_type", ",", "extra_message", ")" ]
uploads a file to bokeh-travis s3 bucket under a job_id folder .
train
false
38,170
def remote_pm(conn):
    """Run a local pdb post-mortem session on the remote connection's
    most recent traceback (a remote-aware version of pdb.pm())."""
    from pdb import post_mortem
    post_mortem(conn.modules.sys.last_traceback)
[ "def", "remote_pm", "(", "conn", ")", ":", "import", "pdb", "pdb", ".", "post_mortem", "(", "conn", ".", "modules", ".", "sys", ".", "last_traceback", ")" ]
a version of pdb .
train
false
38,172
@task(ignore_result=True)
def member_removed_email(group_pk, user_pk):
    """Email a user to notify them they were removed from a group.

    Celery task (result ignored).  Looks up the Group and User by
    primary key, renders the plain-text template under the 'en-us'
    locale, and sends from the no-reply address.

    :param group_pk: primary key of the Group the user was removed from
    :param user_pk: primary key of the removed User
    """
    # Imported inside the task, presumably to avoid an import cycle at
    # module load time -- confirm against the module layout.
    from mozillians.groups.models import Group
    group = Group.objects.get(pk=group_pk)
    user = User.objects.get(pk=user_pk)
    # Force a known locale so the translated subject is deterministic.
    activate('en-us')
    template_name = 'groups/email/member_removed.txt'
    subject = (_('Removed from Mozillians group "%s"') % group.name)
    template = get_template(template_name)
    context = {'group': group, 'user': user}
    body = template.render(context)
    send_mail(subject, body, settings.FROM_NOREPLY, [user.email],
              fail_silently=False)
[ "@", "task", "(", "ignore_result", "=", "True", ")", "def", "member_removed_email", "(", "group_pk", ",", "user_pk", ")", ":", "from", "mozillians", ".", "groups", ".", "models", "import", "Group", "group", "=", "Group", ".", "objects", ".", "get", "(", "pk", "=", "group_pk", ")", "user", "=", "User", ".", "objects", ".", "get", "(", "pk", "=", "user_pk", ")", "activate", "(", "'en-us'", ")", "template_name", "=", "'groups/email/member_removed.txt'", "subject", "=", "(", "_", "(", "'Removed from Mozillians group \"%s\"'", ")", "%", "group", ".", "name", ")", "template", "=", "get_template", "(", "template_name", ")", "context", "=", "{", "'group'", ":", "group", ",", "'user'", ":", "user", "}", "body", "=", "template", ".", "render", "(", "context", ")", "send_mail", "(", "subject", ",", "body", ",", "settings", ".", "FROM_NOREPLY", ",", "[", "user", ".", "email", "]", ",", "fail_silently", "=", "False", ")" ]
email to member when he is removed from group .
train
false
38,173
def incident_report():
    """RESTful CRUD controller for incident reports.

    Installs a prep hook that, for GET create/create.popup requests,
    pre-creates a gis_location from lat/lon or wkt query variables and
    uses it as the default for the record's location_id field.

    Fixes over the original: the validate-insert-default sequence was
    duplicated verbatim for the lat/lon and wkt cases (now a shared
    helper), and the local `id` shadowed the builtin.
    """
    def _default_location_from(form_vars, field):
        # Validate the location vars, insert a gis_location row, and use
        # the new record's id as the form default for `field`.
        form = Storage(vars=form_vars)
        s3db.gis_location_onvalidation(form)
        location_id = s3db.gis_location.insert(**form_vars)
        field.default = location_id

    def prep(r):
        if r.http == 'GET' and r.method in ('create', 'create.popup'):
            field = r.table.location_id
            lat = get_vars.get('lat', None)
            if lat is not None:
                lon = get_vars.get('lon', None)
                if lon is not None:
                    _default_location_from(
                        Storage(lat=float(lat), lon=float(lon)), field)
            wkt = get_vars.get('wkt', None)
            if wkt is not None:
                _default_location_from(Storage(wkt=wkt), field)
        return True

    s3.prep = prep
    return s3_rest_controller()
[ "def", "incident_report", "(", ")", ":", "def", "prep", "(", "r", ")", ":", "if", "(", "r", ".", "http", "==", "'GET'", ")", ":", "if", "(", "r", ".", "method", "in", "(", "'create'", ",", "'create.popup'", ")", ")", ":", "field", "=", "r", ".", "table", ".", "location_id", "lat", "=", "get_vars", ".", "get", "(", "'lat'", ",", "None", ")", "if", "(", "lat", "is", "not", "None", ")", ":", "lon", "=", "get_vars", ".", "get", "(", "'lon'", ",", "None", ")", "if", "(", "lon", "is", "not", "None", ")", ":", "form_vars", "=", "Storage", "(", "lat", "=", "float", "(", "lat", ")", ",", "lon", "=", "float", "(", "lon", ")", ")", "form", "=", "Storage", "(", "vars", "=", "form_vars", ")", "s3db", ".", "gis_location_onvalidation", "(", "form", ")", "id", "=", "s3db", ".", "gis_location", ".", "insert", "(", "**", "form_vars", ")", "field", ".", "default", "=", "id", "wkt", "=", "get_vars", ".", "get", "(", "'wkt'", ",", "None", ")", "if", "(", "wkt", "is", "not", "None", ")", ":", "form_vars", "=", "Storage", "(", "wkt", "=", "wkt", ")", "form", "=", "Storage", "(", "vars", "=", "form_vars", ")", "s3db", ".", "gis_location_onvalidation", "(", "form", ")", "id", "=", "s3db", ".", "gis_location", ".", "insert", "(", "**", "form_vars", ")", "field", ".", "default", "=", "id", "return", "True", "s3", ".", "prep", "=", "prep", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
38,174
def _cplxpair(z, tol=None):
    """Sort values into pairs of complex conjugates, reals last.

    Real or empty input is simply sorted.  Otherwise _cplxreal splits z
    into complex-conjugate representatives and reals, and each complex
    representative is interleaved with its conjugate.
    """
    z = atleast_1d(z)
    if z.size == 0 or np.isrealobj(z):
        return np.sort(z)
    if z.ndim != 1:
        raise ValueError('z must be 1-dimensional')
    zc, zr = _cplxreal(z, tol)
    # Interleave conjugate with representative: [a*, a, b*, b, ...]
    paired = np.dstack((zc.conj(), zc)).flatten()
    return np.append(paired, zr)
[ "def", "_cplxpair", "(", "z", ",", "tol", "=", "None", ")", ":", "z", "=", "atleast_1d", "(", "z", ")", "if", "(", "(", "z", ".", "size", "==", "0", ")", "or", "np", ".", "isrealobj", "(", "z", ")", ")", ":", "return", "np", ".", "sort", "(", "z", ")", "if", "(", "z", ".", "ndim", "!=", "1", ")", ":", "raise", "ValueError", "(", "'z must be 1-dimensional'", ")", "(", "zc", ",", "zr", ")", "=", "_cplxreal", "(", "z", ",", "tol", ")", "zc", "=", "np", ".", "dstack", "(", "(", "zc", ".", "conj", "(", ")", ",", "zc", ")", ")", ".", "flatten", "(", ")", "z", "=", "np", ".", "append", "(", "zc", ",", "zr", ")", "return", "z" ]
sort into pairs of complex conjugates .
train
false
38,175
def dataSources():
    """Return a dict mapping ODBC data source names (DSNs) to their
    driver descriptions, as enumerated by SQLDataSources.

    Lazily allocates the shared ODBC environment handle (under the
    module lock) on first use.
    """
    dsn = create_buffer(1024)
    desc = create_buffer(1024)
    dsn_len = c_short()
    desc_len = c_short()
    dsn_list = {}
    try:
        lock.acquire()
        if (shared_env_h is None):
            AllocateEnv()
    finally:
        lock.release()
    while 1:
        ret = ODBC_API.SQLDataSources(shared_env_h, SQL_FETCH_NEXT,
                                      dsn, len(dsn), ADDR(dsn_len),
                                      desc, len(desc), ADDR(desc_len))
        if (ret == SQL_NO_DATA_FOUND):
            # Enumeration exhausted.
            break
        elif (not (ret in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO))):
            # Surface the ODBC diagnostic for the environment handle.
            ctrl_err(SQL_HANDLE_ENV, shared_env_h, ret)
        else:
            dsn_list[dsn.value] = desc.value
    return dsn_list
[ "def", "dataSources", "(", ")", ":", "dsn", "=", "create_buffer", "(", "1024", ")", "desc", "=", "create_buffer", "(", "1024", ")", "dsn_len", "=", "c_short", "(", ")", "desc_len", "=", "c_short", "(", ")", "dsn_list", "=", "{", "}", "try", ":", "lock", ".", "acquire", "(", ")", "if", "(", "shared_env_h", "is", "None", ")", ":", "AllocateEnv", "(", ")", "finally", ":", "lock", ".", "release", "(", ")", "while", "1", ":", "ret", "=", "ODBC_API", ".", "SQLDataSources", "(", "shared_env_h", ",", "SQL_FETCH_NEXT", ",", "dsn", ",", "len", "(", "dsn", ")", ",", "ADDR", "(", "dsn_len", ")", ",", "desc", ",", "len", "(", "desc", ")", ",", "ADDR", "(", "desc_len", ")", ")", "if", "(", "ret", "==", "SQL_NO_DATA_FOUND", ")", ":", "break", "elif", "(", "not", "(", "ret", "in", "(", "SQL_SUCCESS", ",", "SQL_SUCCESS_WITH_INFO", ")", ")", ")", ":", "ctrl_err", "(", "SQL_HANDLE_ENV", ",", "shared_env_h", ",", "ret", ")", "else", ":", "dsn_list", "[", "dsn", ".", "value", "]", "=", "desc", ".", "value", "return", "dsn_list" ]
return a list with [name .
train
false
38,177
def _default_key_normalizer(key_class, request_context): context = {} for key in key_class._fields: context[key] = request_context.get(key) context['scheme'] = context['scheme'].lower() context['host'] = context['host'].lower() return key_class(**context)
[ "def", "_default_key_normalizer", "(", "key_class", ",", "request_context", ")", ":", "context", "=", "{", "}", "for", "key", "in", "key_class", ".", "_fields", ":", "context", "[", "key", "]", "=", "request_context", ".", "get", "(", "key", ")", "context", "[", "'scheme'", "]", "=", "context", "[", "'scheme'", "]", ".", "lower", "(", ")", "context", "[", "'host'", "]", "=", "context", "[", "'host'", "]", ".", "lower", "(", ")", "return", "key_class", "(", "**", "context", ")" ]
create a pool key of type key_class for a request .
train
false
38,179
def printfile(aFileName):
    """Print a mission file line by line (demonstrates "round trip")."""
    print ('\nMission file: %s' % aFileName)
    with open(aFileName) as mission_file:
        for raw_line in mission_file:
            print (' %s' % raw_line.strip())
[ "def", "printfile", "(", "aFileName", ")", ":", "print", "(", "'\\nMission file: %s'", "%", "aFileName", ")", "with", "open", "(", "aFileName", ")", "as", "f", ":", "for", "line", "in", "f", ":", "print", "(", "' %s'", "%", "line", ".", "strip", "(", ")", ")" ]
print a mission file to demonstrate "round trip" .
train
true
38,181
def construct_relative_path(current_template_name, relative_name):
    """Resolve a quoted relative template path ('./x' or '../x') against
    current_template_name, returning a double-quoted full template name.

    Non-relative names are returned untouched.  Raises
    TemplateSyntaxError when the result escapes the file hierarchy or
    names the current template itself.
    """
    has_relative_prefix = any(
        relative_name.startswith(prefix)
        for prefix in ("'./", "'../", '"./', '"../'))
    if not has_relative_prefix:
        return relative_name
    base_dir = posixpath.dirname(current_template_name.lstrip('/'))
    new_name = posixpath.normpath(
        posixpath.join(base_dir, relative_name.strip('\'"')))
    if new_name.startswith('../'):
        raise TemplateSyntaxError((
            "The relative path '%s' points outside the file hierarchy that template '%s' is in."
            % (relative_name, current_template_name)))
    if current_template_name.lstrip('/') == new_name:
        raise TemplateSyntaxError((
            "The relative path '%s' was translated to template name '%s', the same template in which the tag appears."
            % (relative_name, current_template_name)))
    return ('"%s"' % new_name)
[ "def", "construct_relative_path", "(", "current_template_name", ",", "relative_name", ")", ":", "if", "(", "not", "any", "(", "(", "relative_name", ".", "startswith", "(", "x", ")", "for", "x", "in", "[", "\"'./\"", ",", "\"'../\"", ",", "'\"./'", ",", "'\"../'", "]", ")", ")", ")", ":", "return", "relative_name", "new_name", "=", "posixpath", ".", "normpath", "(", "posixpath", ".", "join", "(", "posixpath", ".", "dirname", "(", "current_template_name", ".", "lstrip", "(", "'/'", ")", ")", ",", "relative_name", ".", "strip", "(", "'\\'\"'", ")", ")", ")", "if", "new_name", ".", "startswith", "(", "'../'", ")", ":", "raise", "TemplateSyntaxError", "(", "(", "\"The relative path '%s' points outside the file hierarchy that template '%s' is in.\"", "%", "(", "relative_name", ",", "current_template_name", ")", ")", ")", "if", "(", "current_template_name", ".", "lstrip", "(", "'/'", ")", "==", "new_name", ")", ":", "raise", "TemplateSyntaxError", "(", "(", "\"The relative path '%s' was translated to template name '%s', the same template in which the tag appears.\"", "%", "(", "relative_name", ",", "current_template_name", ")", ")", ")", "return", "(", "'\"%s\"'", "%", "new_name", ")" ]
convert a relative path to the full template name based on the current_template_name .
train
false
38,182
def list_package_resources(package, include_depends, subdir, rfilter=os.path.isfile):
    """List resources in a package within a particular subdirectory.

    Resolves the package directory via roslib and delegates to
    list_package_resources_by_dir.
    """
    return list_package_resources_by_dir(
        roslib.packages.get_pkg_dir(package),
        include_depends, subdir, rfilter)
[ "def", "list_package_resources", "(", "package", ",", "include_depends", ",", "subdir", ",", "rfilter", "=", "os", ".", "path", ".", "isfile", ")", ":", "package_dir", "=", "roslib", ".", "packages", ".", "get_pkg_dir", "(", "package", ")", "return", "list_package_resources_by_dir", "(", "package_dir", ",", "include_depends", ",", "subdir", ",", "rfilter", ")" ]
list resources in a package within a particular subdirectory .
train
false
38,183
def getImportPluginFileNames():
    """Get the interpret plugin filenames from the plugins directory."""
    plugins_directory = getPluginsDirectoryPath()
    return archive.getPluginFileNamesFromDirectoryPath(plugins_directory)
[ "def", "getImportPluginFileNames", "(", ")", ":", "return", "archive", ".", "getPluginFileNamesFromDirectoryPath", "(", "getPluginsDirectoryPath", "(", ")", ")" ]
get interpret plugin filenames .
train
false
38,185
def test_uninstallpathset_no_paths(caplog):
    """UninstallPathSet.remove() logs a notification when the
    distribution has no files to uninstall."""
    from pip.req.req_uninstall import UninstallPathSet
    from pkg_resources import get_distribution
    test_dist = get_distribution('pip')
    uninstall_set = UninstallPathSet(test_dist)
    uninstall_set.remove()
    # NOTE(review): caplog.text() is called here (old pytest-capturelog
    # API); on modern pytest `caplog.text` is a property -- confirm the
    # pinned pytest version before changing.
    assert ("Can't uninstall 'pip'. No files were found to uninstall."
            in caplog.text())
[ "def", "test_uninstallpathset_no_paths", "(", "caplog", ")", ":", "from", "pip", ".", "req", ".", "req_uninstall", "import", "UninstallPathSet", "from", "pkg_resources", "import", "get_distribution", "test_dist", "=", "get_distribution", "(", "'pip'", ")", "uninstall_set", "=", "UninstallPathSet", "(", "test_dist", ")", "uninstall_set", ".", "remove", "(", ")", "assert", "(", "\"Can't uninstall 'pip'. No files were found to uninstall.\"", "in", "caplog", ".", "text", "(", ")", ")" ]
test uninstallpathset logs notification when there are no paths to uninstall .
train
false
38,186
@contextmanager
def no_handlers_for_logger(name=None):
    """Temporarily replace all handlers on a logger with a NullHandler.

    While the context is active the logger emits nothing; on exit the
    original handlers and propagate flag are restored.

    :param name: logger name (None for the root logger)

    Fix: the original only restored ``handlers`` when the logger
    already had handlers, so a logger that started with none was left
    with the NullHandler permanently installed (and ``propagate`` was
    pointlessly re-assigned instead).  Both attributes are now always
    restored.
    """
    log = logging.getLogger(name)
    old_handlers = log.handlers
    old_propagate = log.propagate
    log.handlers = [logging.NullHandler()]
    try:
        yield
    finally:
        log.handlers = old_handlers
        log.propagate = old_propagate
[ "@", "contextmanager", "def", "no_handlers_for_logger", "(", "name", "=", "None", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "name", ")", "old_handlers", "=", "log", ".", "handlers", "old_propagate", "=", "log", ".", "propagate", "log", ".", "handlers", "=", "[", "NullHandler", "(", ")", "]", "try", ":", "(", "yield", ")", "finally", ":", "if", "old_handlers", ":", "log", ".", "handlers", "=", "old_handlers", "else", ":", "log", ".", "propagate", "=", "old_propagate" ]
temporarily remove handlers all handlers from a logger .
train
false
38,187
def _position_is_bracketed(string, position):
    """Return True if the char at string[position] sits inside a
    complete bracketed set (``[...]``), False otherwise.

    Relies on _end_of_set_index() to locate the index of the matching
    closing bracket for a set opened at a given index.
    """
    # Clamp position to len(string): slicing never over-runs, so an
    # out-of-range position behaves like the string end.
    position = len(string[:position])
    (index, length) = (0, len(string))
    while (index < position):
        char = string[index]
        index += 1
        if (char == '['):
            closing_index = _end_of_set_index(string, index)
            if (closing_index < length):
                # Complete set: is the queried position inside it?
                if (index <= position < closing_index):
                    return True
                # Skip past the closing bracket and keep scanning.
                index = (closing_index + 1)
            else:
                # Unterminated '[' -- nothing after it is bracketed.
                return False
    return False
[ "def", "_position_is_bracketed", "(", "string", ",", "position", ")", ":", "position", "=", "len", "(", "string", "[", ":", "position", "]", ")", "(", "index", ",", "length", ")", "=", "(", "0", ",", "len", "(", "string", ")", ")", "while", "(", "index", "<", "position", ")", ":", "char", "=", "string", "[", "index", "]", "index", "+=", "1", "if", "(", "char", "==", "'['", ")", ":", "closing_index", "=", "_end_of_set_index", "(", "string", ",", "index", ")", "if", "(", "closing_index", "<", "length", ")", ":", "if", "(", "index", "<=", "position", "<", "closing_index", ")", ":", "return", "True", "index", "=", "(", "closing_index", "+", "1", ")", "else", ":", "return", "False", "return", "False" ]
tests whether the char at string[position] is inside a valid pair of brackets .
train
false
38,190
def _is_mri_subject(subject, subjects_dir=None):
    """Check whether a directory in subjects_dir is an MRI subject
    directory, i.e. has a head BEM (standard or high-resolution)."""
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # Short-circuit: only look for the high-res BEM if no standard one.
    if _find_head_bem(subject, subjects_dir):
        return True
    return bool(_find_head_bem(subject, subjects_dir, high_res=True))
[ "def", "_is_mri_subject", "(", "subject", ",", "subjects_dir", "=", "None", ")", ":", "subjects_dir", "=", "get_subjects_dir", "(", "subjects_dir", ",", "raise_error", "=", "True", ")", "return", "bool", "(", "(", "_find_head_bem", "(", "subject", ",", "subjects_dir", ")", "or", "_find_head_bem", "(", "subject", ",", "subjects_dir", ",", "high_res", "=", "True", ")", ")", ")" ]
check whether a directory in subjects_dir is an mri subject directory .
train
false
38,191
def sign_hmac(secret, payload):
    """Return a urlsafe-base64-encoded HMAC-SHA1 signature of `payload`.

    `secret` is a urlsafe-base64-encoded key string; both arguments must
    be ASCII-encodable.
    """
    key = base64.urlsafe_b64decode(secret.encode('ascii', 'strict'))
    mac = hmac.new(key, payload.encode('ascii', 'strict'), hashlib.sha1)
    return base64.urlsafe_b64encode(mac.digest()).decode('utf-8')
[ "def", "sign_hmac", "(", "secret", ",", "payload", ")", ":", "payload", "=", "payload", ".", "encode", "(", "'ascii'", ",", "'strict'", ")", "secret", "=", "secret", ".", "encode", "(", "'ascii'", ",", "'strict'", ")", "sig", "=", "hmac", ".", "new", "(", "base64", ".", "urlsafe_b64decode", "(", "secret", ")", ",", "payload", ",", "hashlib", ".", "sha1", ")", "out", "=", "base64", ".", "urlsafe_b64encode", "(", "sig", ".", "digest", "(", ")", ")", "return", "out", ".", "decode", "(", "'utf-8'", ")" ]
returns a base64-encoded hmac-sha1 signature of a given string .
train
true
38,192
def argrelmin(data, axis=0, order=1, mode='clip'):
    """Calculate the relative minima of `data`.

    Thin wrapper around argrelextrema using np.less as the comparator.
    """
    return argrelextrema(data, comparator=np.less, axis=axis,
                         order=order, mode=mode)
[ "def", "argrelmin", "(", "data", ",", "axis", "=", "0", ",", "order", "=", "1", ",", "mode", "=", "'clip'", ")", ":", "return", "argrelextrema", "(", "data", ",", "np", ".", "less", ",", "axis", ",", "order", ",", "mode", ")" ]
calculate the relative minima of data .
train
true
38,193
def force_html():
    """Force the current request context to render as plain HTML.

    Because URIs like /s/URL can be taken, this guarantees the toolbar
    is never used with a non-HTML render style: the extension-derived
    render style is cleared and the content type pinned to HTML/UTF-8.
    """
    c.render_style = 'html'
    c.extension = None
    c.content_type = 'text/html; charset=UTF-8'
[ "def", "force_html", "(", ")", ":", "c", ".", "render_style", "=", "'html'", "c", ".", "extension", "=", "None", "c", ".", "content_type", "=", "'text/html; charset=UTF-8'" ]
because we can take uris like /s/URL and we can guarantee that the toolbar will never be used with a non-html render style .
train
false
38,194
def get_best_cpu_topology(flavor, image_meta, allow_threads=True, numa_topology=None):
    """Identify the best CPU topology for the given constraints.

    Returns the first (most desirable) entry of the ranked topology
    list produced by _get_desirable_cpu_topologies.
    """
    ranked = _get_desirable_cpu_topologies(flavor, image_meta,
                                           allow_threads, numa_topology)
    return ranked[0]
[ "def", "get_best_cpu_topology", "(", "flavor", ",", "image_meta", ",", "allow_threads", "=", "True", ",", "numa_topology", "=", "None", ")", ":", "return", "_get_desirable_cpu_topologies", "(", "flavor", ",", "image_meta", ",", "allow_threads", ",", "numa_topology", ")", "[", "0", "]" ]
identify best cpu topology for given constraints .
train
false
38,195
def convert_colorspace(arr, fromspace, tospace):
    """Convert an image array to a new color space, routing via RGB.

    :param arr: image array in `fromspace`
    :param fromspace: source space name (case-insensitive): RGB, HSV,
        RGB CIE, XYZ, YUV, YIQ, YPbPr, YCbCr
    :param tospace: target space name (same choices)
    :raises ValueError: for an unrecognized space name

    Fix: lookup is now genuinely case-insensitive -- the original
    upper-cased both arguments but kept the mixed-case dict keys
    'YPbPr' and 'YCbCr', so those two spaces always raised ValueError.
    """
    # Decoders (named space -> RGB); keys are upper-cased to match the
    # normalized arguments.
    fromdict = {'RGB': (lambda im: im), 'HSV': hsv2rgb,
                'RGB CIE': rgbcie2rgb, 'XYZ': xyz2rgb, 'YUV': yuv2rgb,
                'YIQ': yiq2rgb, 'YPBPR': ypbpr2rgb, 'YCBCR': ycbcr2rgb}
    # Encoders (RGB -> named space).
    todict = {'RGB': (lambda im: im), 'HSV': rgb2hsv,
              'RGB CIE': rgb2rgbcie, 'XYZ': rgb2xyz, 'YUV': rgb2yuv,
              'YIQ': rgb2yiq, 'YPBPR': rgb2ypbpr, 'YCBCR': rgb2ycbcr}
    fromspace = fromspace.upper()
    tospace = tospace.upper()
    if fromspace not in fromdict:
        raise ValueError(('fromspace needs to be one of %s' % fromdict.keys()))
    if tospace not in todict:
        raise ValueError(('tospace needs to be one of %s' % todict.keys()))
    return todict[tospace](fromdict[fromspace](arr))
[ "def", "convert_colorspace", "(", "arr", ",", "fromspace", ",", "tospace", ")", ":", "fromdict", "=", "{", "'RGB'", ":", "(", "lambda", "im", ":", "im", ")", ",", "'HSV'", ":", "hsv2rgb", ",", "'RGB CIE'", ":", "rgbcie2rgb", ",", "'XYZ'", ":", "xyz2rgb", ",", "'YUV'", ":", "yuv2rgb", ",", "'YIQ'", ":", "yiq2rgb", ",", "'YPbPr'", ":", "ypbpr2rgb", ",", "'YCbCr'", ":", "ycbcr2rgb", "}", "todict", "=", "{", "'RGB'", ":", "(", "lambda", "im", ":", "im", ")", ",", "'HSV'", ":", "rgb2hsv", ",", "'RGB CIE'", ":", "rgb2rgbcie", ",", "'XYZ'", ":", "rgb2xyz", ",", "'YUV'", ":", "rgb2yuv", ",", "'YIQ'", ":", "rgb2yiq", ",", "'YPbPr'", ":", "rgb2ypbpr", ",", "'YCbCr'", ":", "rgb2ycbcr", "}", "fromspace", "=", "fromspace", ".", "upper", "(", ")", "tospace", "=", "tospace", ".", "upper", "(", ")", "if", "(", "fromspace", "not", "in", "fromdict", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "(", "'fromspace needs to be one of %s'", "%", "fromdict", ".", "keys", "(", ")", ")", ")", "if", "(", "tospace", "not", "in", "todict", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "(", "'tospace needs to be one of %s'", "%", "todict", ".", "keys", "(", ")", ")", ")", "return", "todict", "[", "tospace", "]", "(", "fromdict", "[", "fromspace", "]", "(", "arr", ")", ")" ]
convert an image array to a new color space .
train
false
38,196
@require_context
def snapshot_get_all_by_project(context, project_id, filters=None,
                                marker=None, limit=None, sort_keys=None,
                                sort_dirs=None, offset=None):
    """Get all snapshots belonging to a project, with optional
    filtering, sorting and pagination.

    Returns [] immediately when `filters` references columns that do
    not exist on the Snapshot model.
    """
    if (filters and (not is_valid_model_filters(models.Snapshot, filters))):
        return []
    authorize_project_context(context, project_id)
    # Copy before mutating so the caller's dict is left untouched.
    filters = (filters.copy() if filters else {})
    filters['project_id'] = project_id
    session = get_session()
    with session.begin():
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.Snapshot)
        if (not query):
            return []
        # Eager-load metadata so accessing it later avoids extra queries.
        query = query.options(joinedload('snapshot_metadata'))
        return query.all()
[ "@", "require_context", "def", "snapshot_get_all_by_project", "(", "context", ",", "project_id", ",", "filters", "=", "None", ",", "marker", "=", "None", ",", "limit", "=", "None", ",", "sort_keys", "=", "None", ",", "sort_dirs", "=", "None", ",", "offset", "=", "None", ")", ":", "if", "(", "filters", "and", "(", "not", "is_valid_model_filters", "(", "models", ".", "Snapshot", ",", "filters", ")", ")", ")", ":", "return", "[", "]", "authorize_project_context", "(", "context", ",", "project_id", ")", "filters", "=", "(", "filters", ".", "copy", "(", ")", "if", "filters", "else", "{", "}", ")", "filters", "[", "'project_id'", "]", "=", "project_id", "session", "=", "get_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "query", "=", "_generate_paginate_query", "(", "context", ",", "session", ",", "marker", ",", "limit", ",", "sort_keys", ",", "sort_dirs", ",", "filters", ",", "offset", ",", "models", ".", "Snapshot", ")", "if", "(", "not", "query", ")", ":", "return", "[", "]", "query", "=", "query", ".", "options", "(", "joinedload", "(", "'snapshot_metadata'", ")", ")", "return", "query", ".", "all", "(", ")" ]
get all snapshots belonging to a project .
train
false
38,197
def _bits_to_bytes_len(length_in_bits): return ((length_in_bits + 7) // 8)
[ "def", "_bits_to_bytes_len", "(", "length_in_bits", ")", ":", "return", "(", "(", "length_in_bits", "+", "7", ")", "//", "8", ")" ]
helper function that returns the numbers of bytes necessary to store the given number of bits .
train
false
38,199
@task
def remove_incomplete_accounts(days=INCOMPLETE_ACC_MAX_DAYS):
    """Delete UserProfiles with an empty full_name whose user joined
    more than `days` days ago.

    Celery task; `days` defaults to INCOMPLETE_ACC_MAX_DAYS.
    """
    # Imported inside the task, presumably to avoid an import cycle at
    # module load time -- confirm against the module layout.
    from mozillians.users.models import UserProfile
    # Cutoff timestamp: accounts created before this are candidates.
    now = (datetime.now() - timedelta(days=days))
    UserProfile.objects.filter(full_name='').filter(user__date_joined__lt=now).delete()
[ "@", "task", "def", "remove_incomplete_accounts", "(", "days", "=", "INCOMPLETE_ACC_MAX_DAYS", ")", ":", "from", "mozillians", ".", "users", ".", "models", "import", "UserProfile", "now", "=", "(", "datetime", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "days", ")", ")", "UserProfile", ".", "objects", ".", "filter", "(", "full_name", "=", "''", ")", ".", "filter", "(", "user__date_joined__lt", "=", "now", ")", ".", "delete", "(", ")" ]
remove incomplete accounts older than incomplete_acc_max_days old .
train
false
38,200
def check_yn(option, opt, value):
    """optparse 'yn' type checker: True for y/yes, False for n/no.

    Integer values are truth-tested directly; anything else raises
    OptionValueError.
    """
    if isinstance(value, int):
        return bool(value)
    yn_map = {'y': True, 'yes': True, 'n': False, 'no': False}
    try:
        return yn_map[value]
    except (KeyError, TypeError):
        msg = 'option %s: invalid yn value %r, should be in (y, yes, n, no)'
        raise OptionValueError((msg % (opt, value)))
[ "def", "check_yn", "(", "option", ",", "opt", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "bool", "(", "value", ")", "if", "(", "value", "in", "(", "'y'", ",", "'yes'", ")", ")", ":", "return", "True", "if", "(", "value", "in", "(", "'n'", ",", "'no'", ")", ")", ":", "return", "False", "msg", "=", "'option %s: invalid yn value %r, should be in (y, yes, n, no)'", "raise", "OptionValueError", "(", "(", "msg", "%", "(", "opt", ",", "value", ")", ")", ")" ]
check a yn value return true for yes and false for no .
train
false
38,202
def test_stoch_matrix():
    """Test with stochastic matrices.

    nose-style generator test: yields (check, *args) tuples, each run
    as a separate test case against every stochastic-matrix fixture.
    """
    print ((__name__ + '.') + test_stoch_matrix.__name__)
    matrices = Matrices()
    for matrix_dict in matrices.stoch_matrix_dicts:
        # x is gth_solve's solution for 'A' -- presumably the stationary
        # distribution; the checks below assert its properties.
        x = gth_solve(matrix_dict['A'])
        (yield (StationaryDistSumOne(), x))
        (yield (StationaryDistNonnegative(), x))
        (yield (StationaryDistEqualToKnown(),
                matrix_dict['stationary_dist'], x))
[ "def", "test_stoch_matrix", "(", ")", ":", "print", "(", "(", "__name__", "+", "'.'", ")", "+", "test_stoch_matrix", ".", "__name__", ")", "matrices", "=", "Matrices", "(", ")", "for", "matrix_dict", "in", "matrices", ".", "stoch_matrix_dicts", ":", "x", "=", "gth_solve", "(", "matrix_dict", "[", "'A'", "]", ")", "(", "yield", "(", "StationaryDistSumOne", "(", ")", ",", "x", ")", ")", "(", "yield", "(", "StationaryDistNonnegative", "(", ")", ",", "x", ")", ")", "(", "yield", "(", "StationaryDistEqualToKnown", "(", ")", ",", "matrix_dict", "[", "'stationary_dist'", "]", ",", "x", ")", ")" ]
test with stochastic matrices .
train
false
38,205
def get_download_link(cookie, tokens, path):
    """Resolve the direct download URL for the file at `path`
    (path - 一个文件的绝对路径: an absolute file path).

    Fetches the file metadata, extracts its 'dlink', appends the cflag
    cookie value, and issues a non-redirecting request so the final
    Location header can be returned.

    Returns None when the metadata request fails or does not describe
    exactly one file.
    """
    metas = get_metas(cookie, tokens, path)
    if ((not metas) or (metas.get('errno', (-1)) != 0) or
            ('info' not in metas) or (len(metas['info']) != 1)):
        logger.error(('pcs.get_download_link(): %s' % metas))
        return None
    dlink = metas['info'][0]['dlink']
    url = '{0}&cflg={1}'.format(dlink, cookie.get('cflag').value)
    req = net.urlopen_without_redirect(url, headers={
        'Cookie': cookie.sub_output('BAIDUID', 'BDUSS', 'cflag'),
        'Accept': const.ACCEPT_HTML})
    if (not req):
        # Request failed; fall back to the un-redirected URL.
        return url
    else:
        # Prefer the redirect target; default to `url` if none given.
        return req.getheader('Location', url)
[ "def", "get_download_link", "(", "cookie", ",", "tokens", ",", "path", ")", ":", "metas", "=", "get_metas", "(", "cookie", ",", "tokens", ",", "path", ")", "if", "(", "(", "not", "metas", ")", "or", "(", "metas", ".", "get", "(", "'errno'", ",", "(", "-", "1", ")", ")", "!=", "0", ")", "or", "(", "'info'", "not", "in", "metas", ")", "or", "(", "len", "(", "metas", "[", "'info'", "]", ")", "!=", "1", ")", ")", ":", "logger", ".", "error", "(", "(", "'pcs.get_download_link(): %s'", "%", "metas", ")", ")", "return", "None", "dlink", "=", "metas", "[", "'info'", "]", "[", "0", "]", "[", "'dlink'", "]", "url", "=", "'{0}&cflg={1}'", ".", "format", "(", "dlink", ",", "cookie", ".", "get", "(", "'cflag'", ")", ".", "value", ")", "req", "=", "net", ".", "urlopen_without_redirect", "(", "url", ",", "headers", "=", "{", "'Cookie'", ":", "cookie", ".", "sub_output", "(", "'BAIDUID'", ",", "'BDUSS'", ",", "'cflag'", ")", ",", "'Accept'", ":", "const", ".", "ACCEPT_HTML", "}", ")", "if", "(", "not", "req", ")", ":", "return", "url", "else", ":", "return", "req", ".", "getheader", "(", "'Location'", ",", "url", ")" ]
path - 一个文件的绝对路径 .
train
true
38,206
@step(u'the directory "{directory}" exists')
def step_directory_exists(context, directory):
    """Behave step: verify that `directory` exists (delegates to the
    'should exist' step implementation)."""
    step_the_directory_should_exist(context, directory)
[ "@", "step", "(", "u'the directory \"{directory}\" exists'", ")", "def", "step_directory_exists", "(", "context", ",", "directory", ")", ":", "step_the_directory_should_exist", "(", "context", ",", "directory", ")" ]
verifies that a directory exists .
train
false
38,207
def test_type_error_if_not_dict_context(replay_test_dir, template_name):
    """replay.dump() must raise TypeError when the context argument is
    not a dict."""
    with pytest.raises(TypeError):
        replay.dump(replay_test_dir, template_name, 'not_a_dict')
[ "def", "test_type_error_if_not_dict_context", "(", "replay_test_dir", ",", "template_name", ")", ":", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "replay", ".", "dump", "(", "replay_test_dir", ",", "template_name", ",", "'not_a_dict'", ")" ]
test that replay .
train
false
38,208
@command('user\\s+(.+)')
def usersearch(q_user, identify='forUsername'):
    """Fetch uploads by a YouTube user.

    `q_user` has the form "<user>[/<search term>]".  When `identify` is
    'forUsername' the user part is resolved to a channel id via
    channelfromname(); otherwise it is treated as a channel id already.
    """
    (user, _, term) = (x.strip() for x in q_user.partition('/'))
    if (identify == 'forUsername'):
        ret = channelfromname(user)
        if (not ret):
            # Lookup failed -- presumably channelfromname reported the
            # problem to the user; TODO confirm.
            return
        (user, channel_id) = ret
    else:
        channel_id = user
    usersearch_id(user, channel_id, term)
[ "@", "command", "(", "'user\\\\s+(.+)'", ")", "def", "usersearch", "(", "q_user", ",", "identify", "=", "'forUsername'", ")", ":", "(", "user", ",", "_", ",", "term", ")", "=", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "q_user", ".", "partition", "(", "'/'", ")", ")", "if", "(", "identify", "==", "'forUsername'", ")", ":", "ret", "=", "channelfromname", "(", "user", ")", "if", "(", "not", "ret", ")", ":", "return", "(", "user", ",", "channel_id", ")", "=", "ret", "else", ":", "channel_id", "=", "user", "usersearch_id", "(", "user", ",", "channel_id", ",", "term", ")" ]
fetch uploads by a youtube user .
train
false
38,209
def CDLUPSIDEGAP2CROWS(barDs, count):
    """Upside Gap Two Crows candlestick pattern.

    Thin wrapper running TA-Lib's CDLUPSIDEGAP2CROWS over the OHLC
    series in `barDs`, considering `count` bars.
    """
    return call_talib_with_ohlc(barDs, count, talib.CDLUPSIDEGAP2CROWS)
[ "def", "CDLUPSIDEGAP2CROWS", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLUPSIDEGAP2CROWS", ")" ]
upside gap two crows .
train
false
38,211
def attribute_mapped_collection(attr_name):
    """A dictionary-based collection type with attribute-based keying.

    Returns a zero-argument factory producing MappedCollection
    instances keyed on `attr_name` via a serializable getter.
    """
    getter = _SerializableAttrGetter(attr_name)
    def factory():
        return MappedCollection(getter)
    return factory
[ "def", "attribute_mapped_collection", "(", "attr_name", ")", ":", "getter", "=", "_SerializableAttrGetter", "(", "attr_name", ")", "return", "(", "lambda", ":", "MappedCollection", "(", "getter", ")", ")" ]
a dictionary-based collection type with attribute-based keying .
train
false
38,212
def _error(name, msg): return {'name': name, 'result': False, 'comment': msg, 'changes': {}}
[ "def", "_error", "(", "name", ",", "msg", ")", ":", "return", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "msg", ",", "'changes'", ":", "{", "}", "}" ]
print msg and optionally exit with return code exit_ .
train
false
38,213
def get_component_by_name(app, name):
    """Return the first Component row whose name matches `name`, or
    None when no such component exists."""
    sa_session = app.model.context.current
    return sa_session.query(app.model.Component).filter(
        (app.model.Component.table.c.name == name)).first()
[ "def", "get_component_by_name", "(", "app", ",", "name", ")", ":", "sa_session", "=", "app", ".", "model", ".", "context", ".", "current", "return", "sa_session", ".", "query", "(", "app", ".", "model", ".", "Component", ")", ".", "filter", "(", "(", "app", ".", "model", ".", "Component", ".", "table", ".", "c", ".", "name", "==", "name", ")", ")", ".", "first", "(", ")" ]
get a component from the database via a name .
train
false
38,214
def getRCWFromProgID(prog_id):
    """Return a COM runtime-callable wrapper instance for `prog_id`:
    via .NET Activator under IronPython (is_cli), via win32com
    Dispatch otherwise."""
    if is_cli:
        return Activator.CreateInstance(getTypeFromProgID(prog_id))
    else:
        return win32com.client.Dispatch(prog_id)
[ "def", "getRCWFromProgID", "(", "prog_id", ")", ":", "if", "is_cli", ":", "return", "Activator", ".", "CreateInstance", "(", "getTypeFromProgID", "(", "prog_id", ")", ")", "else", ":", "return", "win32com", ".", "client", ".", "Dispatch", "(", "prog_id", ")" ]
returns an instance of prog_id .
train
false
38,217
def _string_to_rgb(color): if (not color.startswith('#')): if (color.lower() not in _color_dict): raise ValueError(('Color "%s" unknown' % color)) color = _color_dict[color] assert (color[0] == '#') color = color[1:] lc = len(color) if (lc in (3, 4)): color = ''.join(((c + c) for c in color)) lc = len(color) if (lc not in (6, 8)): raise ValueError('Hex color must have exactly six or eight elements following the # sign') color = np.array([(int(color[i:(i + 2)], 16) / 255.0) for i in range(0, lc, 2)]) return color
[ "def", "_string_to_rgb", "(", "color", ")", ":", "if", "(", "not", "color", ".", "startswith", "(", "'#'", ")", ")", ":", "if", "(", "color", ".", "lower", "(", ")", "not", "in", "_color_dict", ")", ":", "raise", "ValueError", "(", "(", "'Color \"%s\" unknown'", "%", "color", ")", ")", "color", "=", "_color_dict", "[", "color", "]", "assert", "(", "color", "[", "0", "]", "==", "'#'", ")", "color", "=", "color", "[", "1", ":", "]", "lc", "=", "len", "(", "color", ")", "if", "(", "lc", "in", "(", "3", ",", "4", ")", ")", ":", "color", "=", "''", ".", "join", "(", "(", "(", "c", "+", "c", ")", "for", "c", "in", "color", ")", ")", "lc", "=", "len", "(", "color", ")", "if", "(", "lc", "not", "in", "(", "6", ",", "8", ")", ")", ":", "raise", "ValueError", "(", "'Hex color must have exactly six or eight elements following the # sign'", ")", "color", "=", "np", ".", "array", "(", "[", "(", "int", "(", "color", "[", "i", ":", "(", "i", "+", "2", ")", "]", ",", "16", ")", "/", "255.0", ")", "for", "i", "in", "range", "(", "0", ",", "lc", ",", "2", ")", "]", ")", "return", "color" ]
convert user string or hex color to color array .
train
true
38,220
def lvremove(lvname, vgname): cmd = ['lvremove', '-f', '{0}/{1}'.format(vgname, lvname)] out = __salt__['cmd.run'](cmd, python_shell=False) return out.strip()
[ "def", "lvremove", "(", "lvname", ",", "vgname", ")", ":", "cmd", "=", "[", "'lvremove'", ",", "'-f'", ",", "'{0}/{1}'", ".", "format", "(", "vgname", ",", "lvname", ")", "]", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "return", "out", ".", "strip", "(", ")" ]
remove a given existing logical volume from a named existing volume group cli example: .
train
true
38,221
def to_time(wmi_time): def int_or_none(s, start, end): try: return int(s[start:end]) except ValueError: return None year = int_or_none(wmi_time, 0, 4) month = int_or_none(wmi_time, 4, 6) day = int_or_none(wmi_time, 6, 8) hours = int_or_none(wmi_time, 8, 10) minutes = int_or_none(wmi_time, 10, 12) seconds = int_or_none(wmi_time, 12, 14) microseconds = int_or_none(wmi_time, 15, 21) timezone = wmi_time[22:] if (timezone == '***'): timezone = None return (year, month, day, hours, minutes, seconds, microseconds, timezone)
[ "def", "to_time", "(", "wmi_time", ")", ":", "def", "int_or_none", "(", "s", ",", "start", ",", "end", ")", ":", "try", ":", "return", "int", "(", "s", "[", "start", ":", "end", "]", ")", "except", "ValueError", ":", "return", "None", "year", "=", "int_or_none", "(", "wmi_time", ",", "0", ",", "4", ")", "month", "=", "int_or_none", "(", "wmi_time", ",", "4", ",", "6", ")", "day", "=", "int_or_none", "(", "wmi_time", ",", "6", ",", "8", ")", "hours", "=", "int_or_none", "(", "wmi_time", ",", "8", ",", "10", ")", "minutes", "=", "int_or_none", "(", "wmi_time", ",", "10", ",", "12", ")", "seconds", "=", "int_or_none", "(", "wmi_time", ",", "12", ",", "14", ")", "microseconds", "=", "int_or_none", "(", "wmi_time", ",", "15", ",", "21", ")", "timezone", "=", "wmi_time", "[", "22", ":", "]", "if", "(", "timezone", "==", "'***'", ")", ":", "timezone", "=", "None", "return", "(", "year", ",", "month", ",", "day", ",", "hours", ",", "minutes", ",", "seconds", ",", "microseconds", ",", "timezone", ")" ]
just return any time struct .
train
true
38,222
def direct_to_user_template(request, username, template_name, extra_context=None): user = get_object_or_404(get_user_model(), username__iexact=username) if (not extra_context): extra_context = dict() extra_context['viewed_user'] = user extra_context['profile'] = get_user_profile(user=user) return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
[ "def", "direct_to_user_template", "(", "request", ",", "username", ",", "template_name", ",", "extra_context", "=", "None", ")", ":", "user", "=", "get_object_or_404", "(", "get_user_model", "(", ")", ",", "username__iexact", "=", "username", ")", "if", "(", "not", "extra_context", ")", ":", "extra_context", "=", "dict", "(", ")", "extra_context", "[", "'viewed_user'", "]", "=", "user", "extra_context", "[", "'profile'", "]", "=", "get_user_profile", "(", "user", "=", "user", ")", "return", "ExtraContextTemplateView", ".", "as_view", "(", "template_name", "=", "template_name", ",", "extra_context", "=", "extra_context", ")", "(", "request", ")" ]
simple wrapper for djangos :func:direct_to_template view .
train
true
38,223
def getModule(moduleName): return theSystemPath[moduleName]
[ "def", "getModule", "(", "moduleName", ")", ":", "return", "theSystemPath", "[", "moduleName", "]" ]
retrieve a module from the system path .
train
false
38,224
def refactor_with_2to3(source_text, fixer_names, filename=u''): from lib2to3.refactor import RefactoringTool fixers = [(u'lib2to3.fixes.fix_' + name) for name in fixer_names] tool = RefactoringTool(fixer_names=fixers, explicit=fixers) from lib2to3.pgen2 import tokenize as lib2to3_tokenize try: return unicode(tool.refactor_string(source_text, name=filename)) except lib2to3_tokenize.TokenError: return source_text
[ "def", "refactor_with_2to3", "(", "source_text", ",", "fixer_names", ",", "filename", "=", "u''", ")", ":", "from", "lib2to3", ".", "refactor", "import", "RefactoringTool", "fixers", "=", "[", "(", "u'lib2to3.fixes.fix_'", "+", "name", ")", "for", "name", "in", "fixer_names", "]", "tool", "=", "RefactoringTool", "(", "fixer_names", "=", "fixers", ",", "explicit", "=", "fixers", ")", "from", "lib2to3", ".", "pgen2", "import", "tokenize", "as", "lib2to3_tokenize", "try", ":", "return", "unicode", "(", "tool", ".", "refactor_string", "(", "source_text", ",", "name", "=", "filename", ")", ")", "except", "lib2to3_tokenize", ".", "TokenError", ":", "return", "source_text" ]
use lib2to3 to refactor the source .
train
true
38,225
def m_quadratic_sum(A, B, max_it=50): gamma1 = solve_discrete_lyapunov(A, B, max_it) return gamma1
[ "def", "m_quadratic_sum", "(", "A", ",", "B", ",", "max_it", "=", "50", ")", ":", "gamma1", "=", "solve_discrete_lyapunov", "(", "A", ",", "B", ",", "max_it", ")", "return", "gamma1" ]
computes the quadratic sum .
train
true
38,228
def spawn(coro): if (not isinstance(coro, types.GeneratorType)): raise ValueError((u'%s is not a coroutine' % coro)) return SpawnEvent(coro)
[ "def", "spawn", "(", "coro", ")", ":", "if", "(", "not", "isinstance", "(", "coro", ",", "types", ".", "GeneratorType", ")", ")", ":", "raise", "ValueError", "(", "(", "u'%s is not a coroutine'", "%", "coro", ")", ")", "return", "SpawnEvent", "(", "coro", ")" ]
create a spawned process .
train
false
38,230
def random_rainbow(s): colors_shuffle = [(globals()[i.encode('utf8')] if (not str(i).isdigit()) else term_color(int(i))) for i in c['CYCLE_COLOR']] colored = [random.choice(colors_shuffle)(i) for i in s] return ''.join(colored)
[ "def", "random_rainbow", "(", "s", ")", ":", "colors_shuffle", "=", "[", "(", "globals", "(", ")", "[", "i", ".", "encode", "(", "'utf8'", ")", "]", "if", "(", "not", "str", "(", "i", ")", ".", "isdigit", "(", ")", ")", "else", "term_color", "(", "int", "(", "i", ")", ")", ")", "for", "i", "in", "c", "[", "'CYCLE_COLOR'", "]", "]", "colored", "=", "[", "random", ".", "choice", "(", "colors_shuffle", ")", "(", "i", ")", "for", "i", "in", "s", "]", "return", "''", ".", "join", "(", "colored", ")" ]
print a string with random color with each character .
train
false
38,231
def report(): t = Twitter(auth=authen()) screen_name = g['stuff'].split()[0] if screen_name.startswith('@'): t.users.report_spam(screen_name=screen_name[1:]) printNicely(green((('You reported ' + screen_name) + '.'))) else: printNicely(red("Sorry I can't understand."))
[ "def", "report", "(", ")", ":", "t", "=", "Twitter", "(", "auth", "=", "authen", "(", ")", ")", "screen_name", "=", "g", "[", "'stuff'", "]", ".", "split", "(", ")", "[", "0", "]", "if", "screen_name", ".", "startswith", "(", "'@'", ")", ":", "t", ".", "users", ".", "report_spam", "(", "screen_name", "=", "screen_name", "[", "1", ":", "]", ")", "printNicely", "(", "green", "(", "(", "(", "'You reported '", "+", "screen_name", ")", "+", "'.'", ")", ")", ")", "else", ":", "printNicely", "(", "red", "(", "\"Sorry I can't understand.\"", ")", ")" ]
provides report about git status of all repos .
train
false
38,232
def entropy(pk, qk=None, base=None): pk = asarray(pk) pk = ((1.0 * pk) / np.sum(pk, axis=0)) if (qk is None): vec = entr(pk) else: qk = asarray(qk) if (len(qk) != len(pk)): raise ValueError('qk and pk must have same length.') qk = ((1.0 * qk) / np.sum(qk, axis=0)) vec = rel_entr(pk, qk) S = np.sum(vec, axis=0) if (base is not None): S /= log(base) return S
[ "def", "entropy", "(", "pk", ",", "qk", "=", "None", ",", "base", "=", "None", ")", ":", "pk", "=", "asarray", "(", "pk", ")", "pk", "=", "(", "(", "1.0", "*", "pk", ")", "/", "np", ".", "sum", "(", "pk", ",", "axis", "=", "0", ")", ")", "if", "(", "qk", "is", "None", ")", ":", "vec", "=", "entr", "(", "pk", ")", "else", ":", "qk", "=", "asarray", "(", "qk", ")", "if", "(", "len", "(", "qk", ")", "!=", "len", "(", "pk", ")", ")", ":", "raise", "ValueError", "(", "'qk and pk must have same length.'", ")", "qk", "=", "(", "(", "1.0", "*", "qk", ")", "/", "np", ".", "sum", "(", "qk", ",", "axis", "=", "0", ")", ")", "vec", "=", "rel_entr", "(", "pk", ",", "qk", ")", "S", "=", "np", ".", "sum", "(", "vec", ",", "axis", "=", "0", ")", "if", "(", "base", "is", "not", "None", ")", ":", "S", "/=", "log", "(", "base", ")", "return", "S" ]
given a list of class probabilities .
train
false
38,233
def _check_pillar(kwargs): if kwargs.get('force'): return True if ('_errors' in __pillar__): return False return True
[ "def", "_check_pillar", "(", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'force'", ")", ":", "return", "True", "if", "(", "'_errors'", "in", "__pillar__", ")", ":", "return", "False", "return", "True" ]
check the pillar for errors .
train
false
38,235
def _parse_snippet_file(content, full_filename): filename = full_filename[:(- len('.snippet'))] segments = _splitall(filename) segments = segments[(segments.index('snippets') + 1):] assert (len(segments) in (2, 3)) trigger = segments[1] description = (segments[2] if (2 < len(segments)) else '') if (content and content.endswith(os.linesep)): content = content[:(- len(os.linesep))] (yield ('snippet', (SnipMateSnippetDefinition(trigger, content, description, full_filename),)))
[ "def", "_parse_snippet_file", "(", "content", ",", "full_filename", ")", ":", "filename", "=", "full_filename", "[", ":", "(", "-", "len", "(", "'.snippet'", ")", ")", "]", "segments", "=", "_splitall", "(", "filename", ")", "segments", "=", "segments", "[", "(", "segments", ".", "index", "(", "'snippets'", ")", "+", "1", ")", ":", "]", "assert", "(", "len", "(", "segments", ")", "in", "(", "2", ",", "3", ")", ")", "trigger", "=", "segments", "[", "1", "]", "description", "=", "(", "segments", "[", "2", "]", "if", "(", "2", "<", "len", "(", "segments", ")", ")", "else", "''", ")", "if", "(", "content", "and", "content", ".", "endswith", "(", "os", ".", "linesep", ")", ")", ":", "content", "=", "content", "[", ":", "(", "-", "len", "(", "os", ".", "linesep", ")", ")", "]", "(", "yield", "(", "'snippet'", ",", "(", "SnipMateSnippetDefinition", "(", "trigger", ",", "content", ",", "description", ",", "full_filename", ")", ",", ")", ")", ")" ]
parses content assuming it is a .
train
false
38,236
def make_enum(enum_type='enum', base_classes=None, methods=None, **attrs): def __init__(instance, *args, **kwargs): raise RuntimeError(('%s types can not be initialized.' % enum_type)) if (base_classes is None): base_classes = () if (methods is None): methods = {} base_classes = (base_classes + (object,)) for (k, v) in methods.iteritems(): methods[k] = classmethod(v) attrs['enums'] = attrs.copy() methods.update(attrs) methods['__init__'] = __init__ return type(enum_type, base_classes, methods)
[ "def", "make_enum", "(", "enum_type", "=", "'enum'", ",", "base_classes", "=", "None", ",", "methods", "=", "None", ",", "**", "attrs", ")", ":", "def", "__init__", "(", "instance", ",", "*", "args", ",", "**", "kwargs", ")", ":", "raise", "RuntimeError", "(", "(", "'%s types can not be initialized.'", "%", "enum_type", ")", ")", "if", "(", "base_classes", "is", "None", ")", ":", "base_classes", "=", "(", ")", "if", "(", "methods", "is", "None", ")", ":", "methods", "=", "{", "}", "base_classes", "=", "(", "base_classes", "+", "(", "object", ",", ")", ")", "for", "(", "k", ",", "v", ")", "in", "methods", ".", "iteritems", "(", ")", ":", "methods", "[", "k", "]", "=", "classmethod", "(", "v", ")", "attrs", "[", "'enums'", "]", "=", "attrs", ".", "copy", "(", ")", "methods", ".", "update", "(", "attrs", ")", "methods", "[", "'__init__'", "]", "=", "__init__", "return", "type", "(", "enum_type", ",", "base_classes", ",", "methods", ")" ]
generates a enumeration with the given attributes .
train
false
38,240
@register.filter(name='rule_member_count') def rule_member_count(instance, member): member = getattr(instance, member) counts = member.all().count() return str(counts)
[ "@", "register", ".", "filter", "(", "name", "=", "'rule_member_count'", ")", "def", "rule_member_count", "(", "instance", ",", "member", ")", ":", "member", "=", "getattr", "(", "instance", ",", "member", ")", "counts", "=", "member", ".", "all", "(", ")", ".", "count", "(", ")", "return", "str", "(", "counts", ")" ]
instance is a rule object .
train
false
38,244
def autoload_filenode(must_be=None, default_root=False): def _autoload_filenode(func): @handle_odm_errors @must_have_addon('osfstorage', 'node') @functools.wraps(func) def wrapped(*args, **kwargs): node = kwargs['node'] if (('fid' not in kwargs) and default_root): file_node = kwargs['node_addon'].get_root() else: file_node = models.OsfStorageFileNode.get(kwargs.get('fid'), node) if (must_be and (file_node.kind != must_be)): raise HTTPError(httplib.BAD_REQUEST, data={'message_short': 'incorrect type', 'message_long': 'FileNode must be of type {} not {}'.format(must_be, file_node.kind)}) kwargs['file_node'] = file_node return func(*args, **kwargs) return wrapped return _autoload_filenode
[ "def", "autoload_filenode", "(", "must_be", "=", "None", ",", "default_root", "=", "False", ")", ":", "def", "_autoload_filenode", "(", "func", ")", ":", "@", "handle_odm_errors", "@", "must_have_addon", "(", "'osfstorage'", ",", "'node'", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "**", "kwargs", ")", ":", "node", "=", "kwargs", "[", "'node'", "]", "if", "(", "(", "'fid'", "not", "in", "kwargs", ")", "and", "default_root", ")", ":", "file_node", "=", "kwargs", "[", "'node_addon'", "]", ".", "get_root", "(", ")", "else", ":", "file_node", "=", "models", ".", "OsfStorageFileNode", ".", "get", "(", "kwargs", ".", "get", "(", "'fid'", ")", ",", "node", ")", "if", "(", "must_be", "and", "(", "file_node", ".", "kind", "!=", "must_be", ")", ")", ":", "raise", "HTTPError", "(", "httplib", ".", "BAD_REQUEST", ",", "data", "=", "{", "'message_short'", ":", "'incorrect type'", ",", "'message_long'", ":", "'FileNode must be of type {} not {}'", ".", "format", "(", "must_be", ",", "file_node", ".", "kind", ")", "}", ")", "kwargs", "[", "'file_node'", "]", "=", "file_node", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapped", "return", "_autoload_filenode" ]
implies both must_have_addon osfstorage node and handle_odm_errors attempts to load fid as a osfstoragefilenode with viable constraints .
train
false
38,245
def max_call_gas(gas): return (gas - (gas // opcodes.CALL_CHILD_LIMIT_DENOM))
[ "def", "max_call_gas", "(", "gas", ")", ":", "return", "(", "gas", "-", "(", "gas", "//", "opcodes", ".", "CALL_CHILD_LIMIT_DENOM", ")", ")" ]
since eip150 calls will send only all but 1/64th of the available gas .
train
false
38,247
def user_can_edit_snippet_type(user, model): for action in (u'add', u'change', u'delete'): if user.has_perm(get_permission_name(action, model)): return True return False
[ "def", "user_can_edit_snippet_type", "(", "user", ",", "model", ")", ":", "for", "action", "in", "(", "u'add'", ",", "u'change'", ",", "u'delete'", ")", ":", "if", "user", ".", "has_perm", "(", "get_permission_name", "(", "action", ",", "model", ")", ")", ":", "return", "True", "return", "False" ]
true if user has add .
train
false
38,248
def _update_first_contribution_msec(user_id, first_contribution_msec): user_settings = get_user_settings(user_id, strict=True) user_settings.first_contribution_msec = first_contribution_msec _save_user_settings(user_settings)
[ "def", "_update_first_contribution_msec", "(", "user_id", ",", "first_contribution_msec", ")", ":", "user_settings", "=", "get_user_settings", "(", "user_id", ",", "strict", "=", "True", ")", "user_settings", ".", "first_contribution_msec", "=", "first_contribution_msec", "_save_user_settings", "(", "user_settings", ")" ]
updates first_contribution_msec of user with given user_id .
train
false
38,250
def _api_get_files(name, output, kwargs): value = kwargs.get('value') if value: return report(output, keyword='files', data=build_file_list(value)) else: return report(output, _MSG_NO_VALUE)
[ "def", "_api_get_files", "(", "name", ",", "output", ",", "kwargs", ")", ":", "value", "=", "kwargs", ".", "get", "(", "'value'", ")", "if", "value", ":", "return", "report", "(", "output", ",", "keyword", "=", "'files'", ",", "data", "=", "build_file_list", "(", "value", ")", ")", "else", ":", "return", "report", "(", "output", ",", "_MSG_NO_VALUE", ")" ]
api: accepts output .
train
false
38,252
def basename_from_filename(filename): mimetype = mimetypes.guess_type(filename)[0] if (mimetype is not None): mimetype = mimetype.lower() for (filetype, icon_name) in KNOWN_FILE_MIME_TYPES: if (filetype in mimetype): return icon_name extension = os.path.splitext(filename)[1] return KNOWN_FILE_EXTENSIONS.get(extension.lower(), u'file-text.svg')
[ "def", "basename_from_filename", "(", "filename", ")", ":", "mimetype", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "[", "0", "]", "if", "(", "mimetype", "is", "not", "None", ")", ":", "mimetype", "=", "mimetype", ".", "lower", "(", ")", "for", "(", "filetype", ",", "icon_name", ")", "in", "KNOWN_FILE_MIME_TYPES", ":", "if", "(", "filetype", "in", "mimetype", ")", ":", "return", "icon_name", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "return", "KNOWN_FILE_EXTENSIONS", ".", "get", "(", "extension", ".", "lower", "(", ")", ",", "u'file-text.svg'", ")" ]
returns an icon name based on the filename .
train
false
38,253
def jbig2Encode(stream, parameters): encodedStream = '' return ((-1), 'Jbig2Encode not supported yet')
[ "def", "jbig2Encode", "(", "stream", ",", "parameters", ")", ":", "encodedStream", "=", "''", "return", "(", "(", "-", "1", ")", ",", "'Jbig2Encode not supported yet'", ")" ]
method to encode streams using the jbig2 standard .
train
false
38,254
def _upstart_is_enabled(name): return (not _upstart_is_disabled(name))
[ "def", "_upstart_is_enabled", "(", "name", ")", ":", "return", "(", "not", "_upstart_is_disabled", "(", "name", ")", ")" ]
assume that if an upstart service is not disabled then it must be enabled .
train
false
38,258
def sparse2cvxopt(value): import cvxopt if isinstance(value, (np.ndarray, np.matrix)): return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d') elif sp.issparse(value): value = value.tocoo() return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(), value.col.tolist(), size=value.shape, tc='d')
[ "def", "sparse2cvxopt", "(", "value", ")", ":", "import", "cvxopt", "if", "isinstance", "(", "value", ",", "(", "np", ".", "ndarray", ",", "np", ".", "matrix", ")", ")", ":", "return", "cvxopt", ".", "sparse", "(", "cvxopt", ".", "matrix", "(", "value", ".", "astype", "(", "'float64'", ")", ")", ",", "tc", "=", "'d'", ")", "elif", "sp", ".", "issparse", "(", "value", ")", ":", "value", "=", "value", ".", "tocoo", "(", ")", "return", "cvxopt", ".", "spmatrix", "(", "value", ".", "data", ".", "tolist", "(", ")", ",", "value", ".", "row", ".", "tolist", "(", ")", ",", "value", ".", "col", ".", "tolist", "(", ")", ",", "size", "=", "value", ".", "shape", ",", "tc", "=", "'d'", ")" ]
converts a scipy sparse matrix to a cvxopt sparse matrix .
train
false
38,259
def compareAreaDescending(loopArea, otherLoopArea): if (loopArea.area > otherLoopArea.area): return (-1) return int((loopArea.area < otherLoopArea.area))
[ "def", "compareAreaDescending", "(", "loopArea", ",", "otherLoopArea", ")", ":", "if", "(", "loopArea", ".", "area", ">", "otherLoopArea", ".", "area", ")", ":", "return", "(", "-", "1", ")", "return", "int", "(", "(", "loopArea", ".", "area", "<", "otherLoopArea", ".", "area", ")", ")" ]
get comparison in order to sort loop areas in descending order of area .
train
false
38,260
def _load_yaml_with_clear_tag(stream): loader = yaml.SafeLoader(stream) loader.add_constructor('!clear', _cleared_value_constructor) try: return loader.get_single_data() finally: if hasattr(loader, 'dispose'): loader.dispose()
[ "def", "_load_yaml_with_clear_tag", "(", "stream", ")", ":", "loader", "=", "yaml", ".", "SafeLoader", "(", "stream", ")", "loader", ".", "add_constructor", "(", "'!clear'", ",", "_cleared_value_constructor", ")", "try", ":", "return", "loader", ".", "get_single_data", "(", ")", "finally", ":", "if", "hasattr", "(", "loader", ",", "'dispose'", ")", ":", "loader", ".", "dispose", "(", ")" ]
like yaml .
train
false
38,261
def has_application(backend=None, has=(), capable=()): from ..app.backends import BACKEND_NAMES if (backend is None): for backend in BACKEND_NAMES: if has_backend(backend, has=has, capable=capable): good = True msg = backend break else: good = False msg = 'Requires application backend' else: (good, why) = has_backend(backend, has=has, capable=capable, out=['why_not']) if (not good): msg = ('Requires %s: %s' % (backend, why)) else: msg = backend return (good, msg)
[ "def", "has_application", "(", "backend", "=", "None", ",", "has", "=", "(", ")", ",", "capable", "=", "(", ")", ")", ":", "from", ".", ".", "app", ".", "backends", "import", "BACKEND_NAMES", "if", "(", "backend", "is", "None", ")", ":", "for", "backend", "in", "BACKEND_NAMES", ":", "if", "has_backend", "(", "backend", ",", "has", "=", "has", ",", "capable", "=", "capable", ")", ":", "good", "=", "True", "msg", "=", "backend", "break", "else", ":", "good", "=", "False", "msg", "=", "'Requires application backend'", "else", ":", "(", "good", ",", "why", ")", "=", "has_backend", "(", "backend", ",", "has", "=", "has", ",", "capable", "=", "capable", ",", "out", "=", "[", "'why_not'", "]", ")", "if", "(", "not", "good", ")", ":", "msg", "=", "(", "'Requires %s: %s'", "%", "(", "backend", ",", "why", ")", ")", "else", ":", "msg", "=", "backend", "return", "(", "good", ",", "msg", ")" ]
determine if a suitable app backend exists .
train
false
38,262
def dict_contains(superset, subset): for (key, value) in subset.iteritems(): ok_((key in superset)) eq_(superset[key], value)
[ "def", "dict_contains", "(", "superset", ",", "subset", ")", ":", "for", "(", "key", ",", "value", ")", "in", "subset", ".", "iteritems", "(", ")", ":", "ok_", "(", "(", "key", "in", "superset", ")", ")", "eq_", "(", "superset", "[", "key", "]", ",", "value", ")" ]
assert that all key/val pairs in dict subset also exist in superset .
train
false
38,263
def commutation_matrix(p, q): K = np.eye((p * q)) indices = np.arange((p * q)).reshape((p, q), order='F') return K.take(indices.ravel(), axis=0)
[ "def", "commutation_matrix", "(", "p", ",", "q", ")", ":", "K", "=", "np", ".", "eye", "(", "(", "p", "*", "q", ")", ")", "indices", "=", "np", ".", "arange", "(", "(", "p", "*", "q", ")", ")", ".", "reshape", "(", "(", "p", ",", "q", ")", ",", "order", "=", "'F'", ")", "return", "K", ".", "take", "(", "indices", ".", "ravel", "(", ")", ",", "axis", "=", "0", ")" ]
create the commutation matrix k_{p .
train
false
38,264
def lc_random(lower, upper, stepsize): nstep = int(((upper - lower) / (1.0 * stepsize))) choices = [(lower + (x * stepsize)) for x in range(nstep)] return random.choice(choices)
[ "def", "lc_random", "(", "lower", ",", "upper", ",", "stepsize", ")", ":", "nstep", "=", "int", "(", "(", "(", "upper", "-", "lower", ")", "/", "(", "1.0", "*", "stepsize", ")", ")", ")", "choices", "=", "[", "(", "lower", "+", "(", "x", "*", "stepsize", ")", ")", "for", "x", "in", "range", "(", "nstep", ")", "]", "return", "random", ".", "choice", "(", "choices", ")" ]
like random .
train
false
38,265
def new_host_state(self, host, node, capabilities=None, service=None): if (capabilities is None): capabilities = {} cap = capabilities.get('compute', {}) if bool(cap.get('baremetal_driver')): return BaremetalNodeState(host, node, capabilities, service) else: return host_manager.HostState(host, node, capabilities, service)
[ "def", "new_host_state", "(", "self", ",", "host", ",", "node", ",", "capabilities", "=", "None", ",", "service", "=", "None", ")", ":", "if", "(", "capabilities", "is", "None", ")", ":", "capabilities", "=", "{", "}", "cap", "=", "capabilities", ".", "get", "(", "'compute'", ",", "{", "}", ")", "if", "bool", "(", "cap", ".", "get", "(", "'baremetal_driver'", ")", ")", ":", "return", "BaremetalNodeState", "(", "host", ",", "node", ",", "capabilities", ",", "service", ")", "else", ":", "return", "host_manager", ".", "HostState", "(", "host", ",", "node", ",", "capabilities", ",", "service", ")" ]
returns an instance of baremetalhoststate or hoststate according to capabilities .
train
false
38,267
def print_debug_info(qs, file=None): opts = qs.model._mptt_meta writer = csv.writer((sys.stdout if (file is None) else file)) header = (u'pk', opts.level_attr, (u'%s_id' % opts.parent_attr), opts.tree_id_attr, opts.left_attr, opts.right_attr, u'pretty') writer.writerow(header) for n in qs.order_by(u'tree_id', u'lft'): level = getattr(n, opts.level_attr) row = [] for field in header[:(-1)]: row.append(getattr(n, field)) row.append((u'%s%s' % ((u'- ' * level), text_type(n).encode(u'utf-8')))) writer.writerow(row)
[ "def", "print_debug_info", "(", "qs", ",", "file", "=", "None", ")", ":", "opts", "=", "qs", ".", "model", ".", "_mptt_meta", "writer", "=", "csv", ".", "writer", "(", "(", "sys", ".", "stdout", "if", "(", "file", "is", "None", ")", "else", "file", ")", ")", "header", "=", "(", "u'pk'", ",", "opts", ".", "level_attr", ",", "(", "u'%s_id'", "%", "opts", ".", "parent_attr", ")", ",", "opts", ".", "tree_id_attr", ",", "opts", ".", "left_attr", ",", "opts", ".", "right_attr", ",", "u'pretty'", ")", "writer", ".", "writerow", "(", "header", ")", "for", "n", "in", "qs", ".", "order_by", "(", "u'tree_id'", ",", "u'lft'", ")", ":", "level", "=", "getattr", "(", "n", ",", "opts", ".", "level_attr", ")", "row", "=", "[", "]", "for", "field", "in", "header", "[", ":", "(", "-", "1", ")", "]", ":", "row", ".", "append", "(", "getattr", "(", "n", ",", "field", ")", ")", "row", ".", "append", "(", "(", "u'%s%s'", "%", "(", "(", "u'- '", "*", "level", ")", ",", "text_type", "(", "n", ")", ".", "encode", "(", "u'utf-8'", ")", ")", ")", ")", "writer", ".", "writerow", "(", "row", ")" ]
given an mptt queryset .
train
false
38,269
def custom_layer(incoming, custom_fn, **kwargs): name = 'CustomLayer' if ('name' in kwargs): name = kwargs['name'] with tf.name_scope(name): inference = custom_fn(incoming, **kwargs) return inference
[ "def", "custom_layer", "(", "incoming", ",", "custom_fn", ",", "**", "kwargs", ")", ":", "name", "=", "'CustomLayer'", "if", "(", "'name'", "in", "kwargs", ")", ":", "name", "=", "kwargs", "[", "'name'", "]", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "inference", "=", "custom_fn", "(", "incoming", ",", "**", "kwargs", ")", "return", "inference" ]
custom layer .
train
false
38,270
def compression_matrix(data, q, n_power_iter=0, seed=None): n = data.shape[1] comp_level = compression_level(n, q) state = RandomState(seed) omega = state.standard_normal(size=(n, comp_level), chunks=(data.chunks[1], (comp_level,))) mat_h = data.dot(omega) for j in range(n_power_iter): mat_h = data.dot(data.T.dot(mat_h)) (q, _) = tsqr(mat_h) return q.T
[ "def", "compression_matrix", "(", "data", ",", "q", ",", "n_power_iter", "=", "0", ",", "seed", "=", "None", ")", ":", "n", "=", "data", ".", "shape", "[", "1", "]", "comp_level", "=", "compression_level", "(", "n", ",", "q", ")", "state", "=", "RandomState", "(", "seed", ")", "omega", "=", "state", ".", "standard_normal", "(", "size", "=", "(", "n", ",", "comp_level", ")", ",", "chunks", "=", "(", "data", ".", "chunks", "[", "1", "]", ",", "(", "comp_level", ",", ")", ")", ")", "mat_h", "=", "data", ".", "dot", "(", "omega", ")", "for", "j", "in", "range", "(", "n_power_iter", ")", ":", "mat_h", "=", "data", ".", "dot", "(", "data", ".", "T", ".", "dot", "(", "mat_h", ")", ")", "(", "q", ",", "_", ")", "=", "tsqr", "(", "mat_h", ")", "return", "q", ".", "T" ]
randomly sample matrix to find most active subspace this compression matrix returned by this algorithm can be used to compute both the qr decomposition and the singular value decomposition .
train
false
38,271
def check_palette(palette): if (palette is None): return None p = list(palette) if (not (0 < len(p) <= 256)): raise ValueError('a palette must have between 1 and 256 entries') seen_triple = False for (i, t) in enumerate(p): if (len(t) not in (3, 4)): raise ValueError(('palette entry %d: entries must be 3- or 4-tuples.' % i)) if (len(t) == 3): seen_triple = True if (seen_triple and (len(t) == 4)): raise ValueError(('palette entry %d: all 4-tuples must precede all 3-tuples' % i)) for x in t: if ((int(x) != x) or (not (0 <= x <= 255))): raise ValueError(('palette entry %d: values must be integer: 0 <= x <= 255' % i)) return p
[ "def", "check_palette", "(", "palette", ")", ":", "if", "(", "palette", "is", "None", ")", ":", "return", "None", "p", "=", "list", "(", "palette", ")", "if", "(", "not", "(", "0", "<", "len", "(", "p", ")", "<=", "256", ")", ")", ":", "raise", "ValueError", "(", "'a palette must have between 1 and 256 entries'", ")", "seen_triple", "=", "False", "for", "(", "i", ",", "t", ")", "in", "enumerate", "(", "p", ")", ":", "if", "(", "len", "(", "t", ")", "not", "in", "(", "3", ",", "4", ")", ")", ":", "raise", "ValueError", "(", "(", "'palette entry %d: entries must be 3- or 4-tuples.'", "%", "i", ")", ")", "if", "(", "len", "(", "t", ")", "==", "3", ")", ":", "seen_triple", "=", "True", "if", "(", "seen_triple", "and", "(", "len", "(", "t", ")", "==", "4", ")", ")", ":", "raise", "ValueError", "(", "(", "'palette entry %d: all 4-tuples must precede all 3-tuples'", "%", "i", ")", ")", "for", "x", "in", "t", ":", "if", "(", "(", "int", "(", "x", ")", "!=", "x", ")", "or", "(", "not", "(", "0", "<=", "x", "<=", "255", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "'palette entry %d: values must be integer: 0 <= x <= 255'", "%", "i", ")", ")", "return", "p" ]
check a palette argument for validity .
train
true
38,272
def test_messages(client): login(client, flaskr.app.config['USERNAME'], flaskr.app.config['PASSWORD']) rv = client.post('/add', data=dict(title='<Hello>', text='<strong>HTML</strong> allowed here'), follow_redirects=True) assert ('No entries here so far' not in rv.data) assert ('&lt;Hello&gt;' in rv.data) assert ('<strong>HTML</strong> allowed here' in rv.data)
[ "def", "test_messages", "(", "client", ")", ":", "login", "(", "client", ",", "flaskr", ".", "app", ".", "config", "[", "'USERNAME'", "]", ",", "flaskr", ".", "app", ".", "config", "[", "'PASSWORD'", "]", ")", "rv", "=", "client", ".", "post", "(", "'/add'", ",", "data", "=", "dict", "(", "title", "=", "'<Hello>'", ",", "text", "=", "'<strong>HTML</strong> allowed here'", ")", ",", "follow_redirects", "=", "True", ")", "assert", "(", "'No entries here so far'", "not", "in", "rv", ".", "data", ")", "assert", "(", "'&lt;Hello&gt;'", "in", "rv", ".", "data", ")", "assert", "(", "'<strong>HTML</strong> allowed here'", "in", "rv", ".", "data", ")" ]
test that messages work .
train
false
38,274
def is_focused_on_element(browser, selector): return browser.execute_script("return $('{}').is(':focus')".format(selector))
[ "def", "is_focused_on_element", "(", "browser", ",", "selector", ")", ":", "return", "browser", ".", "execute_script", "(", "\"return $('{}').is(':focus')\"", ".", "format", "(", "selector", ")", ")" ]
check if the focus is on the element that matches the selector .
train
false
38,276
def square_n_sort(L): L_square = [] L_sorted = [] count = len(L) if (L[0] >= 0): for i in L: L_square.append((i ** 2)) return L_square while (count > 0): if (abs(L[0]) >= abs(L[(-1)])): L_square.append((L[0] ** 2)) L.remove(L[0]) else: L_square.append((L[(-1)] ** 2)) L.remove(L[(-1)]) count -= 1 L_sorted = L_square[::(-1)] return L_sorted
[ "def", "square_n_sort", "(", "L", ")", ":", "L_square", "=", "[", "]", "L_sorted", "=", "[", "]", "count", "=", "len", "(", "L", ")", "if", "(", "L", "[", "0", "]", ">=", "0", ")", ":", "for", "i", "in", "L", ":", "L_square", ".", "append", "(", "(", "i", "**", "2", ")", ")", "return", "L_square", "while", "(", "count", ">", "0", ")", ":", "if", "(", "abs", "(", "L", "[", "0", "]", ")", ">=", "abs", "(", "L", "[", "(", "-", "1", ")", "]", ")", ")", ":", "L_square", ".", "append", "(", "(", "L", "[", "0", "]", "**", "2", ")", ")", "L", ".", "remove", "(", "L", "[", "0", "]", ")", "else", ":", "L_square", ".", "append", "(", "(", "L", "[", "(", "-", "1", ")", "]", "**", "2", ")", ")", "L", ".", "remove", "(", "L", "[", "(", "-", "1", ")", "]", ")", "count", "-=", "1", "L_sorted", "=", "L_square", "[", ":", ":", "(", "-", "1", ")", "]", "return", "L_sorted" ]
get an ordered list of ints and square the values .
train
false
38,277
def register_builtin_transform(transform, builtin_name): def _transform_wrapper(node, context=None): result = transform(node, context=context) if result: result.parent = node result.lineno = node.lineno result.col_offset = node.col_offset return iter([result]) MANAGER.register_transform(nodes.CallFunc, inference_tip(_transform_wrapper), (lambda n: (isinstance(n.func, nodes.Name) and (n.func.name == builtin_name))))
[ "def", "register_builtin_transform", "(", "transform", ",", "builtin_name", ")", ":", "def", "_transform_wrapper", "(", "node", ",", "context", "=", "None", ")", ":", "result", "=", "transform", "(", "node", ",", "context", "=", "context", ")", "if", "result", ":", "result", ".", "parent", "=", "node", "result", ".", "lineno", "=", "node", ".", "lineno", "result", ".", "col_offset", "=", "node", ".", "col_offset", "return", "iter", "(", "[", "result", "]", ")", "MANAGER", ".", "register_transform", "(", "nodes", ".", "CallFunc", ",", "inference_tip", "(", "_transform_wrapper", ")", ",", "(", "lambda", "n", ":", "(", "isinstance", "(", "n", ".", "func", ",", "nodes", ".", "Name", ")", "and", "(", "n", ".", "func", ".", "name", "==", "builtin_name", ")", ")", ")", ")" ]
register a new transform function for the given *builtin_name* .
train
false
38,278
def format_satoshis_plain(x, decimal_point=8): scale_factor = pow(10, decimal_point) return '{:.8f}'.format((Decimal(x) / scale_factor)).rstrip('0').rstrip('.')
[ "def", "format_satoshis_plain", "(", "x", ",", "decimal_point", "=", "8", ")", ":", "scale_factor", "=", "pow", "(", "10", ",", "decimal_point", ")", "return", "'{:.8f}'", ".", "format", "(", "(", "Decimal", "(", "x", ")", "/", "scale_factor", ")", ")", ".", "rstrip", "(", "'0'", ")", ".", "rstrip", "(", "'.'", ")" ]
display a satoshi amount scaled .
train
false
38,282
def vocabulary_create(context, data_dict): model = context['model'] schema = (context.get('schema') or ckan.logic.schema.default_create_vocabulary_schema()) _check_access('vocabulary_create', context, data_dict) (data, errors) = _validate(data_dict, schema, context) if errors: model.Session.rollback() raise ValidationError(errors) vocabulary = model_save.vocabulary_dict_save(data, context) if (not context.get('defer_commit')): model.repo.commit() log.debug(('Created Vocabulary %s' % vocabulary.name)) return model_dictize.vocabulary_dictize(vocabulary, context)
[ "def", "vocabulary_create", "(", "context", ",", "data_dict", ")", ":", "model", "=", "context", "[", "'model'", "]", "schema", "=", "(", "context", ".", "get", "(", "'schema'", ")", "or", "ckan", ".", "logic", ".", "schema", ".", "default_create_vocabulary_schema", "(", ")", ")", "_check_access", "(", "'vocabulary_create'", ",", "context", ",", "data_dict", ")", "(", "data", ",", "errors", ")", "=", "_validate", "(", "data_dict", ",", "schema", ",", "context", ")", "if", "errors", ":", "model", ".", "Session", ".", "rollback", "(", ")", "raise", "ValidationError", "(", "errors", ")", "vocabulary", "=", "model_save", ".", "vocabulary_dict_save", "(", "data", ",", "context", ")", "if", "(", "not", "context", ".", "get", "(", "'defer_commit'", ")", ")", ":", "model", ".", "repo", ".", "commit", "(", ")", "log", ".", "debug", "(", "(", "'Created Vocabulary %s'", "%", "vocabulary", ".", "name", ")", ")", "return", "model_dictize", ".", "vocabulary_dictize", "(", "vocabulary", ",", "context", ")" ]
create a new tag vocabulary .
train
false
38,284
@not_implemented_for('directed') def biconnected_components(G): for comp in _biconnected_dfs(G, components=True): (yield set(chain.from_iterable(comp)))
[ "@", "not_implemented_for", "(", "'directed'", ")", "def", "biconnected_components", "(", "G", ")", ":", "for", "comp", "in", "_biconnected_dfs", "(", "G", ",", "components", "=", "True", ")", ":", "(", "yield", "set", "(", "chain", ".", "from_iterable", "(", "comp", ")", ")", ")" ]
return a generator of sets of nodes .
train
false
38,285
def normalize_series_name(name): name = name.lower() name = name.replace(u'&amp;', u' and ') name = name.translate(TRANSLATE_MAP) name = u' '.join(name.split()) return name
[ "def", "normalize_series_name", "(", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "name", "=", "name", ".", "replace", "(", "u'&amp;'", ",", "u' and '", ")", "name", "=", "name", ".", "translate", "(", "TRANSLATE_MAP", ")", "name", "=", "u' '", ".", "join", "(", "name", ".", "split", "(", ")", ")", "return", "name" ]
returns a normalized version of the series name .
train
false
38,286
def install_as_gi(): import sys if ('gi.repository' in const.PREFIX): return for mod in iterkeys(sys.modules): if ((mod == 'gi') or mod.startswith('gi.')): raise AssertionError('pgi has to be imported before gi') import pgi import pgi.repository sys.modules['gi'] = pgi sys.modules['gi.repository'] = pgi.repository const.PREFIX.append('gi.repository')
[ "def", "install_as_gi", "(", ")", ":", "import", "sys", "if", "(", "'gi.repository'", "in", "const", ".", "PREFIX", ")", ":", "return", "for", "mod", "in", "iterkeys", "(", "sys", ".", "modules", ")", ":", "if", "(", "(", "mod", "==", "'gi'", ")", "or", "mod", ".", "startswith", "(", "'gi.'", ")", ")", ":", "raise", "AssertionError", "(", "'pgi has to be imported before gi'", ")", "import", "pgi", "import", "pgi", ".", "repository", "sys", ".", "modules", "[", "'gi'", "]", "=", "pgi", "sys", ".", "modules", "[", "'gi.repository'", "]", "=", "pgi", ".", "repository", "const", ".", "PREFIX", ".", "append", "(", "'gi.repository'", ")" ]
call before the first gi import to redirect gi imports to pgi .
train
true
38,287
def _version_from_file(lines): is_version_line = (lambda line: line.lower().startswith('version:')) version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') (_, _, value) = line.partition(':') return (safe_version(value.strip()) or None)
[ "def", "_version_from_file", "(", "lines", ")", ":", "is_version_line", "=", "(", "lambda", "line", ":", "line", ".", "lower", "(", ")", ".", "startswith", "(", "'version:'", ")", ")", "version_lines", "=", "filter", "(", "is_version_line", ",", "lines", ")", "line", "=", "next", "(", "iter", "(", "version_lines", ")", ",", "''", ")", "(", "_", ",", "_", ",", "value", ")", "=", "line", ".", "partition", "(", "':'", ")", "return", "(", "safe_version", "(", "value", ".", "strip", "(", ")", ")", "or", "None", ")" ]
given an iterable of lines from a metadata file .
train
true
38,289
@pytest.mark.skipif(no_fsl(), reason=u'fsl is not installed') def test_fast_list_outputs(setup_infile): def _run_and_test(opts, output_base): outputs = fsl.FAST(**opts)._list_outputs() for output in outputs.values(): if output: for filename in filename_to_list(output): assert os.path.realpath(filename).startswith(os.path.realpath(output_base)) (tmp_infile, indir) = setup_infile cwd = tempfile.mkdtemp() os.chdir(cwd) assert (indir != cwd) out_basename = u'a_basename' opts = {u'in_files': tmp_infile} (input_path, input_filename, input_ext) = split_filename(tmp_infile) _run_and_test(opts, os.path.join(input_path, input_filename)) opts[u'out_basename'] = out_basename _run_and_test(opts, os.path.join(cwd, out_basename))
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "no_fsl", "(", ")", ",", "reason", "=", "u'fsl is not installed'", ")", "def", "test_fast_list_outputs", "(", "setup_infile", ")", ":", "def", "_run_and_test", "(", "opts", ",", "output_base", ")", ":", "outputs", "=", "fsl", ".", "FAST", "(", "**", "opts", ")", ".", "_list_outputs", "(", ")", "for", "output", "in", "outputs", ".", "values", "(", ")", ":", "if", "output", ":", "for", "filename", "in", "filename_to_list", "(", "output", ")", ":", "assert", "os", ".", "path", ".", "realpath", "(", "filename", ")", ".", "startswith", "(", "os", ".", "path", ".", "realpath", "(", "output_base", ")", ")", "(", "tmp_infile", ",", "indir", ")", "=", "setup_infile", "cwd", "=", "tempfile", ".", "mkdtemp", "(", ")", "os", ".", "chdir", "(", "cwd", ")", "assert", "(", "indir", "!=", "cwd", ")", "out_basename", "=", "u'a_basename'", "opts", "=", "{", "u'in_files'", ":", "tmp_infile", "}", "(", "input_path", ",", "input_filename", ",", "input_ext", ")", "=", "split_filename", "(", "tmp_infile", ")", "_run_and_test", "(", "opts", ",", "os", ".", "path", ".", "join", "(", "input_path", ",", "input_filename", ")", ")", "opts", "[", "u'out_basename'", "]", "=", "out_basename", "_run_and_test", "(", "opts", ",", "os", ".", "path", ".", "join", "(", "cwd", ",", "out_basename", ")", ")" ]
by default .
train
false
38,290
def ishashable(x): try: hash(x) return True except TypeError: return False
[ "def", "ishashable", "(", "x", ")", ":", "try", ":", "hash", "(", "x", ")", "return", "True", "except", "TypeError", ":", "return", "False" ]
is x hashable? examples .
train
false
38,292
def set_user_lang(user, user_language=None): from frappe.translate import get_user_lang local.lang = get_user_lang(user)
[ "def", "set_user_lang", "(", "user", ",", "user_language", "=", "None", ")", ":", "from", "frappe", ".", "translate", "import", "get_user_lang", "local", ".", "lang", "=", "get_user_lang", "(", "user", ")" ]
guess and set user language for the session .
train
false
38,293
def length_of_indexer(indexer, target=None): if ((target is not None) and isinstance(indexer, slice)): l = len(target) start = indexer.start stop = indexer.stop step = indexer.step if (start is None): start = 0 elif (start < 0): start += l if ((stop is None) or (stop > l)): stop = l elif (stop < 0): stop += l if (step is None): step = 1 elif (step < 0): step = (- step) return ((((stop - start) + step) - 1) // step) elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)): return len(indexer) elif (not is_list_like_indexer(indexer)): return 1 raise AssertionError('cannot find the length of the indexer')
[ "def", "length_of_indexer", "(", "indexer", ",", "target", "=", "None", ")", ":", "if", "(", "(", "target", "is", "not", "None", ")", "and", "isinstance", "(", "indexer", ",", "slice", ")", ")", ":", "l", "=", "len", "(", "target", ")", "start", "=", "indexer", ".", "start", "stop", "=", "indexer", ".", "stop", "step", "=", "indexer", ".", "step", "if", "(", "start", "is", "None", ")", ":", "start", "=", "0", "elif", "(", "start", "<", "0", ")", ":", "start", "+=", "l", "if", "(", "(", "stop", "is", "None", ")", "or", "(", "stop", ">", "l", ")", ")", ":", "stop", "=", "l", "elif", "(", "stop", "<", "0", ")", ":", "stop", "+=", "l", "if", "(", "step", "is", "None", ")", ":", "step", "=", "1", "elif", "(", "step", "<", "0", ")", ":", "step", "=", "(", "-", "step", ")", "return", "(", "(", "(", "(", "stop", "-", "start", ")", "+", "step", ")", "-", "1", ")", "//", "step", ")", "elif", "isinstance", "(", "indexer", ",", "(", "ABCSeries", ",", "Index", ",", "np", ".", "ndarray", ",", "list", ")", ")", ":", "return", "len", "(", "indexer", ")", "elif", "(", "not", "is_list_like_indexer", "(", "indexer", ")", ")", ":", "return", "1", "raise", "AssertionError", "(", "'cannot find the length of the indexer'", ")" ]
return the length of a single non-tuple indexer which could be a slice .
train
true
38,297
def replace(s, old, new, maxsplit=0): return s.replace(old, new, maxsplit)
[ "def", "replace", "(", "s", ",", "old", ",", "new", ",", "maxsplit", "=", "0", ")", ":", "return", "s", ".", "replace", "(", "old", ",", "new", ",", "maxsplit", ")" ]
return x .
train
false
38,298
def is_running(proxyname): return {'result': _is_proxy_running(proxyname)}
[ "def", "is_running", "(", "proxyname", ")", ":", "return", "{", "'result'", ":", "_is_proxy_running", "(", "proxyname", ")", "}" ]
return true if an inspected container is in a state we consider "running .
train
false
38,299
def output_file(filename, title='Bokeh Plot', mode='cdn', root_dir=None): _state.output_file(filename, title=title, mode=mode, root_dir=root_dir)
[ "def", "output_file", "(", "filename", ",", "title", "=", "'Bokeh Plot'", ",", "mode", "=", "'cdn'", ",", "root_dir", "=", "None", ")", ":", "_state", ".", "output_file", "(", "filename", ",", "title", "=", "title", ",", "mode", "=", "mode", ",", "root_dir", "=", "root_dir", ")" ]
configure the default output state to generate output saved to a file when :func:show is called .
train
false
38,300
def tiny2zero(x, eps=1e-15): mask = (np.abs(x.copy()) < eps) x[mask] = 0 return x
[ "def", "tiny2zero", "(", "x", ",", "eps", "=", "1e-15", ")", ":", "mask", "=", "(", "np", ".", "abs", "(", "x", ".", "copy", "(", ")", ")", "<", "eps", ")", "x", "[", "mask", "]", "=", "0", "return", "x" ]
replace abs values smaller than eps by zero .
train
false
38,301
@contextlib.contextmanager def override_config(name, value): old_value = getattr(config, name) setattr(config, name, value) try: (yield) finally: setattr(config, name, old_value)
[ "@", "contextlib", ".", "contextmanager", "def", "override_config", "(", "name", ",", "value", ")", ":", "old_value", "=", "getattr", "(", "config", ",", "name", ")", "setattr", "(", "config", ",", "name", ",", "value", ")", "try", ":", "(", "yield", ")", "finally", ":", "setattr", "(", "config", ",", "name", ",", "old_value", ")" ]
return a context manager that temporarily sets numba config variable *name* to *value* .
train
false
38,304
@image_comparison(baseline_images=[u'EventCollection_plot__extend_positions']) def test__EventCollection__extend_positions(): (splt, coll, props) = generate_EventCollection_plot() new_positions = np.hstack([props[u'positions'], props[u'extra_positions'][1:]]) coll.extend_positions(props[u'extra_positions'][1:]) np.testing.assert_array_equal(new_positions, coll.get_positions()) check_segments(coll, new_positions, props[u'linelength'], props[u'lineoffset'], props[u'orientation']) splt.set_title(u'EventCollection: extend_positions') splt.set_xlim((-1), 90)
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'EventCollection_plot__extend_positions'", "]", ")", "def", "test__EventCollection__extend_positions", "(", ")", ":", "(", "splt", ",", "coll", ",", "props", ")", "=", "generate_EventCollection_plot", "(", ")", "new_positions", "=", "np", ".", "hstack", "(", "[", "props", "[", "u'positions'", "]", ",", "props", "[", "u'extra_positions'", "]", "[", "1", ":", "]", "]", ")", "coll", ".", "extend_positions", "(", "props", "[", "u'extra_positions'", "]", "[", "1", ":", "]", ")", "np", ".", "testing", ".", "assert_array_equal", "(", "new_positions", ",", "coll", ".", "get_positions", "(", ")", ")", "check_segments", "(", "coll", ",", "new_positions", ",", "props", "[", "u'linelength'", "]", ",", "props", "[", "u'lineoffset'", "]", ",", "props", "[", "u'orientation'", "]", ")", "splt", ".", "set_title", "(", "u'EventCollection: extend_positions'", ")", "splt", ".", "set_xlim", "(", "(", "-", "1", ")", ",", "90", ")" ]
check to make sure extend_positions works properly .
train
false
38,305
def test_feature_representation_without_colors(): feature_file = ojoin('..', 'simple_features', '1st_feature_dir', 'some.feature') feature = Feature.from_file(feature_file) assert_lines(feature.represented(), 'Feature: Addition # tests/functional/simple_features/1st_feature_dir/some.feature:5\n In order to avoid silly mistakes # tests/functional/simple_features/1st_feature_dir/some.feature:6\n As a math idiot # tests/functional/simple_features/1st_feature_dir/some.feature:7\n I want to be told the sum of two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:8\n')
[ "def", "test_feature_representation_without_colors", "(", ")", ":", "feature_file", "=", "ojoin", "(", "'..'", ",", "'simple_features'", ",", "'1st_feature_dir'", ",", "'some.feature'", ")", "feature", "=", "Feature", ".", "from_file", "(", "feature_file", ")", "assert_lines", "(", "feature", ".", "represented", "(", ")", ",", "'Feature: Addition # tests/functional/simple_features/1st_feature_dir/some.feature:5\\n In order to avoid silly mistakes # tests/functional/simple_features/1st_feature_dir/some.feature:6\\n As a math idiot # tests/functional/simple_features/1st_feature_dir/some.feature:7\\n I want to be told the sum of two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:8\\n'", ")" ]
feature represented without colors .
train
false
38,306
def buildSubsamplingNetwork(): n = FeedForwardNetwork() n.addInputModule(LinearLayer(6, 'in')) n.addOutputModule(LinearLayer(1, 'out')) n.addConnection(SubsamplingConnection(n['in'], n['out'], inSliceTo=4)) n.addConnection(SubsamplingConnection(n['in'], n['out'], inSliceFrom=4)) n.sortModules() return n
[ "def", "buildSubsamplingNetwork", "(", ")", ":", "n", "=", "FeedForwardNetwork", "(", ")", "n", ".", "addInputModule", "(", "LinearLayer", "(", "6", ",", "'in'", ")", ")", "n", ".", "addOutputModule", "(", "LinearLayer", "(", "1", ",", "'out'", ")", ")", "n", ".", "addConnection", "(", "SubsamplingConnection", "(", "n", "[", "'in'", "]", ",", "n", "[", "'out'", "]", ",", "inSliceTo", "=", "4", ")", ")", "n", ".", "addConnection", "(", "SubsamplingConnection", "(", "n", "[", "'in'", "]", ",", "n", "[", "'out'", "]", ",", "inSliceFrom", "=", "4", ")", ")", "n", ".", "sortModules", "(", ")", "return", "n" ]
builds a network with subsampling connections .
train
false
38,307
@gen.coroutine def _Init(init_db=True, server_logging=True): httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient', max_clients=100) if options.options.devbox: metadata = ami_metadata.Metadata() else: metadata = (yield gen.Task(ami_metadata.Metadata)) if (metadata is None): raise Exception('failed to fetch AWS instance metadata; if running on dev box, use the --devbox option') ami_metadata.SetAMIMetadata(metadata) logging.info('AMI metadata initialized') ServerEnvironment.InitServerEnvironment() logging.info('server environment initialized') (yield gen.Task(secrets.InitSecrets, can_prompt=sys.stderr.isatty())) logging.info('secrets initialized') if init_db: (yield gen.Task(db_client.InitDB, vf_schema.SCHEMA)) logging.info('DB client initialized') object_store.InitObjectStore(temporary=False) logging.info('object store initialized') if server_logging: server_log.InitServerLog() logging.info('main.py initialization complete')
[ "@", "gen", ".", "coroutine", "def", "_Init", "(", "init_db", "=", "True", ",", "server_logging", "=", "True", ")", ":", "httpclient", ".", "AsyncHTTPClient", ".", "configure", "(", "'tornado.curl_httpclient.CurlAsyncHTTPClient'", ",", "max_clients", "=", "100", ")", "if", "options", ".", "options", ".", "devbox", ":", "metadata", "=", "ami_metadata", ".", "Metadata", "(", ")", "else", ":", "metadata", "=", "(", "yield", "gen", ".", "Task", "(", "ami_metadata", ".", "Metadata", ")", ")", "if", "(", "metadata", "is", "None", ")", ":", "raise", "Exception", "(", "'failed to fetch AWS instance metadata; if running on dev box, use the --devbox option'", ")", "ami_metadata", ".", "SetAMIMetadata", "(", "metadata", ")", "logging", ".", "info", "(", "'AMI metadata initialized'", ")", "ServerEnvironment", ".", "InitServerEnvironment", "(", ")", "logging", ".", "info", "(", "'server environment initialized'", ")", "(", "yield", "gen", ".", "Task", "(", "secrets", ".", "InitSecrets", ",", "can_prompt", "=", "sys", ".", "stderr", ".", "isatty", "(", ")", ")", ")", "logging", ".", "info", "(", "'secrets initialized'", ")", "if", "init_db", ":", "(", "yield", "gen", ".", "Task", "(", "db_client", ".", "InitDB", ",", "vf_schema", ".", "SCHEMA", ")", ")", "logging", ".", "info", "(", "'DB client initialized'", ")", "object_store", ".", "InitObjectStore", "(", "temporary", "=", "False", ")", "logging", ".", "info", "(", "'object store initialized'", ")", "if", "server_logging", ":", "server_log", ".", "InitServerLog", "(", ")", "logging", ".", "info", "(", "'main.py initialization complete'", ")" ]
completes viewfinder initialization .
train
false
38,312
def rpm_rebuilddb(): module.run_command(['/usr/bin/rpm', '--rebuilddb'])
[ "def", "rpm_rebuilddb", "(", ")", ":", "module", ".", "run_command", "(", "[", "'/usr/bin/rpm'", ",", "'--rebuilddb'", "]", ")" ]
runs rpm --rebuilddb to ensure the db is in good shape .
train
false
38,313
def subscribe_to_messages(observer_function): all_output_plugins = om.manager.get_output_plugin_inst() for plugin_inst in all_output_plugins: if isinstance(plugin_inst, GtkOutput): plugin_inst.subscribe(observer_function) break else: gtk_output = GtkOutput() om.manager.set_output_plugin_inst(gtk_output) gtk_output.subscribe(observer_function)
[ "def", "subscribe_to_messages", "(", "observer_function", ")", ":", "all_output_plugins", "=", "om", ".", "manager", ".", "get_output_plugin_inst", "(", ")", "for", "plugin_inst", "in", "all_output_plugins", ":", "if", "isinstance", "(", "plugin_inst", ",", "GtkOutput", ")", ":", "plugin_inst", ".", "subscribe", "(", "observer_function", ")", "break", "else", ":", "gtk_output", "=", "GtkOutput", "(", ")", "om", ".", "manager", ".", "set_output_plugin_inst", "(", "gtk_output", ")", "gtk_output", ".", "subscribe", "(", "observer_function", ")" ]
subscribe observer_function to the gtkoutput messages .
train
false
38,314
def test_language_portuguese(): lang = Language('pt-br') assert_equals(lang.code, u'pt-br') assert_equals(lang.name, u'Portuguese') assert_equals(lang.native, u'Portugu\xeas') assert_equals(lang.feature, u'Funcionalidade') assert_equals(lang.scenario, u'Cen\xe1rio|Cenario') assert_equals(lang.examples, u'Exemplos|Cen\xe1rios') assert_equals(lang.scenario_outline, u'Esquema do Cen\xe1rio|Esquema do Cenario')
[ "def", "test_language_portuguese", "(", ")", ":", "lang", "=", "Language", "(", "'pt-br'", ")", "assert_equals", "(", "lang", ".", "code", ",", "u'pt-br'", ")", "assert_equals", "(", "lang", ".", "name", ",", "u'Portuguese'", ")", "assert_equals", "(", "lang", ".", "native", ",", "u'Portugu\\xeas'", ")", "assert_equals", "(", "lang", ".", "feature", ",", "u'Funcionalidade'", ")", "assert_equals", "(", "lang", ".", "scenario", ",", "u'Cen\\xe1rio|Cenario'", ")", "assert_equals", "(", "lang", ".", "examples", ",", "u'Exemplos|Cen\\xe1rios'", ")", "assert_equals", "(", "lang", ".", "scenario_outline", ",", "u'Esquema do Cen\\xe1rio|Esquema do Cenario'", ")" ]
language: pt-br -> language class supports portuguese through code "pt-br" .
train
false
38,315
def get_swap_size(vm_): return config.get_cloud_config_value('swap', vm_, __opts__, default=128)
[ "def", "get_swap_size", "(", "vm_", ")", ":", "return", "config", ".", "get_cloud_config_value", "(", "'swap'", ",", "vm_", ",", "__opts__", ",", "default", "=", "128", ")" ]
returns the amoutn of swap space to be used in mb .
train
false
38,316
def _centos7_install_commands(version): installable_version = get_installable_version(flocker_version) return sequence([run(command='yum clean all'), run(command='yum install -y {}'.format(get_repository_url(distribution='centos-7', flocker_version=installable_version))), run_from_args(((['yum', 'install'] + get_repo_options(installable_version)) + ['-y', ('clusterhq-flocker-node' + version)]))])
[ "def", "_centos7_install_commands", "(", "version", ")", ":", "installable_version", "=", "get_installable_version", "(", "flocker_version", ")", "return", "sequence", "(", "[", "run", "(", "command", "=", "'yum clean all'", ")", ",", "run", "(", "command", "=", "'yum install -y {}'", ".", "format", "(", "get_repository_url", "(", "distribution", "=", "'centos-7'", ",", "flocker_version", "=", "installable_version", ")", ")", ")", ",", "run_from_args", "(", "(", "(", "[", "'yum'", ",", "'install'", "]", "+", "get_repo_options", "(", "installable_version", ")", ")", "+", "[", "'-y'", ",", "(", "'clusterhq-flocker-node'", "+", "version", ")", "]", ")", ")", "]", ")" ]
construct the command sequence expected for installing flocker on centos 7 .
train
false
38,317
def for_int_dtypes(name='dtype', no_bool=False): if no_bool: return for_dtypes(_int_dtypes, name=name) else: return for_dtypes(_int_bool_dtypes, name=name)
[ "def", "for_int_dtypes", "(", "name", "=", "'dtype'", ",", "no_bool", "=", "False", ")", ":", "if", "no_bool", ":", "return", "for_dtypes", "(", "_int_dtypes", ",", "name", "=", "name", ")", "else", ":", "return", "for_dtypes", "(", "_int_bool_dtypes", ",", "name", "=", "name", ")" ]
decorator that checks the fixture with integer and optionally bool dtypes .
train
false
38,319
def aic(llf, nobs, df_modelwc): return (((-2.0) * llf) + (2.0 * df_modelwc))
[ "def", "aic", "(", "llf", ",", "nobs", ",", "df_modelwc", ")", ":", "return", "(", "(", "(", "-", "2.0", ")", "*", "llf", ")", "+", "(", "2.0", "*", "df_modelwc", ")", ")" ]
akaike information criterion parameters llf : float value of the loglikelihood nobs : int number of observations df_modelwc : int number of parameters including constant returns aic : float information criterion references URL .
train
false
38,320
def _handle_voted_field(form_value, cc_content, api_content, request, context): signal = (thread_voted if (cc_content.type == 'thread') else comment_voted) signal.send(sender=None, user=context['request'].user, post=cc_content) if form_value: context['cc_requester'].vote(cc_content, 'up') api_content['vote_count'] += 1 else: context['cc_requester'].unvote(cc_content) api_content['vote_count'] -= 1 track_voted_event(request, context['course'], cc_content, vote_value='up', undo_vote=(False if form_value else True))
[ "def", "_handle_voted_field", "(", "form_value", ",", "cc_content", ",", "api_content", ",", "request", ",", "context", ")", ":", "signal", "=", "(", "thread_voted", "if", "(", "cc_content", ".", "type", "==", "'thread'", ")", "else", "comment_voted", ")", "signal", ".", "send", "(", "sender", "=", "None", ",", "user", "=", "context", "[", "'request'", "]", ".", "user", ",", "post", "=", "cc_content", ")", "if", "form_value", ":", "context", "[", "'cc_requester'", "]", ".", "vote", "(", "cc_content", ",", "'up'", ")", "api_content", "[", "'vote_count'", "]", "+=", "1", "else", ":", "context", "[", "'cc_requester'", "]", ".", "unvote", "(", "cc_content", ")", "api_content", "[", "'vote_count'", "]", "-=", "1", "track_voted_event", "(", "request", ",", "context", "[", "'course'", "]", ",", "cc_content", ",", "vote_value", "=", "'up'", ",", "undo_vote", "=", "(", "False", "if", "form_value", "else", "True", ")", ")" ]
vote or undo vote on thread/comment .
train
false