Dataset schema:

  column                 type     range / classes
  id_within_dataset      int64    1 - 55.5k
  snippet                string   lengths 19 - 14.2k
  tokens                 list     lengths 6 - 1.63k
  nl                     string   lengths 6 - 352
  split_within_dataset   string   1 value
  is_duplicated          bool     2 classes
5,799
def _ensure_unicode(text):
    if isinstance(text, bytes):
        text = text.decode(u'utf-8')
    return text
[ "def", "_ensure_unicode", "(", "text", ")", ":", "if", "isinstance", "(", "text", ",", "bytes", ")", ":", "text", "=", "text", ".", "decode", "(", "u'utf-8'", ")", "return", "text" ]
return a unicode object for the given text .
train
false
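A quick, self-contained check of the decode helper above; the u'' literal suggests it was written for Python 2, so this re-spelling with Python 3 literals is an illustration, not the original:

# Hypothetical re-spelling of the helper for a quick sanity check.
def _ensure_unicode(text):
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    return text

assert _ensure_unicode(b'caf\xc3\xa9') == u'café'  # bytes are decoded as UTF-8
assert _ensure_unicode(u'café') == u'café'         # text passes through unchanged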
5,801
def xml_name(p, keep_escape=False, encoding=None):
    if isinstance(p, unicode):
        pass
    elif isinstance(p, str):
        if (sabnzbd.DARWIN or (encoding == 'utf-8')):
            p = p.decode('utf-8', 'replace')
        elif gUTF:
            p = p.decode('utf-8', 'replace')
        else:
            p = p.decode(codepage, 'replace')
    else:
        p = str(p)
    if keep_escape:
        return p.encode('ascii', 'xmlcharrefreplace')
    else:
        return escape(p).encode('ascii', 'xmlcharrefreplace')
[ "def", "xml_name", "(", "p", ",", "keep_escape", "=", "False", ",", "encoding", "=", "None", ")", ":", "if", "isinstance", "(", "p", ",", "unicode", ")", ":", "pass", "elif", "isinstance", "(", "p", ",", "str", ")", ":", "if", "(", "sabnzbd", ".", "DARWIN", "or", "(", "encoding", "==", "'utf-8'", ")", ")", ":", "p", "=", "p", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", "elif", "gUTF", ":", "p", "=", "p", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", "else", ":", "p", "=", "p", ".", "decode", "(", "codepage", ",", "'replace'", ")", "else", ":", "p", "=", "str", "(", "p", ")", "if", "keep_escape", ":", "return", "p", ".", "encode", "(", "'ascii'", ",", "'xmlcharrefreplace'", ")", "else", ":", "return", "escape", "(", "p", ")", ".", "encode", "(", "'ascii'", ",", "'xmlcharrefreplace'", ")" ]
prepare name for use in html/xml context .
train
false
5,802
def download_UCLUST():
    status('Installing UCLUST...')
    if (sys.platform == 'darwin'):
        URL = 'http://www.drive5.com/uclust/uclustq1.2.22_i86darwin64'
    elif (sys.platform == 'linux2'):
        URL = 'http://www.drive5.com/uclust/uclustq1.2.22_i86linux64'
    else:
        status(('Platform %r not supported by UCLUST.\n' % sys.platform))
        return
    return_value = download_file(URL, 'scripts/', 'uclust')
    if (not return_value):
        chmod('scripts/uclust', (stat('scripts/uclust').st_mode | S_IEXEC))
        status('UCLUST installed.\n')
    else:
        status('UCLUST could not be installed.\n')
[ "def", "download_UCLUST", "(", ")", ":", "status", "(", "'Installing UCLUST...'", ")", "if", "(", "sys", ".", "platform", "==", "'darwin'", ")", ":", "URL", "=", "'http://www.drive5.com/uclust/uclustq1.2.22_i86darwin64'", "elif", "(", "sys", ".", "platform", "==", "'linux2'", ")", ":", "URL", "=", "'http://www.drive5.com/uclust/uclustq1.2.22_i86linux64'", "else", ":", "status", "(", "(", "'Platform %r not supported by UCLUST.\\n'", "%", "sys", ".", "platform", ")", ")", "return", "return_value", "=", "download_file", "(", "URL", ",", "'scripts/'", ",", "'uclust'", ")", "if", "(", "not", "return_value", ")", ":", "chmod", "(", "'scripts/uclust'", ",", "(", "stat", "(", "'scripts/uclust'", ")", ".", "st_mode", "|", "S_IEXEC", ")", ")", "status", "(", "'UCLUST installed.\\n'", ")", "else", ":", "status", "(", "'UCLUST could not be installed.\\n'", ")" ]
download the uclust executable into the scripts directory and mark it executable .
train
false
5,803
def make_pad_parameters(curve_value, threshold_value):
    threshold_range = (MAX_THRESHOLD_STEP - MIN_THRESHOLD_STEP)
    t = (float((threshold_value - MIN_THRESHOLD_STEP)) / float(threshold_range))
    return PadParameters(curve_value,
                         on_threshold=int((((1 - t) * MIN_ON_THRESHOLD) + (t * MAX_ON_THRESHOLD))),
                         off_threshold=int((((1 - t) * MIN_OFF_THRESHOLD) + (t * MAX_OFF_THRESHOLD))))
[ "def", "make_pad_parameters", "(", "curve_value", ",", "threshold_value", ")", ":", "threshold_range", "=", "(", "MAX_THRESHOLD_STEP", "-", "MIN_THRESHOLD_STEP", ")", "t", "=", "(", "float", "(", "(", "threshold_value", "-", "MIN_THRESHOLD_STEP", ")", ")", "/", "float", "(", "threshold_range", ")", ")", "return", "PadParameters", "(", "curve_value", ",", "on_threshold", "=", "int", "(", "(", "(", "(", "1", "-", "t", ")", "*", "MIN_ON_THRESHOLD", ")", "+", "(", "t", "*", "MAX_ON_THRESHOLD", ")", ")", ")", ",", "off_threshold", "=", "int", "(", "(", "(", "(", "1", "-", "t", ")", "*", "MIN_OFF_THRESHOLD", ")", "+", "(", "t", "*", "MAX_OFF_THRESHOLD", ")", ")", ")", ")" ]
creates a valid padparameters object merging the sensitivity curve and threshold settings .
train
false
5,804
def run_only_if_redis_is_available(func):
    try:
        import redis
    except ImportError:
        redis = None
    pred = (lambda: (redis is not None))
    return run_only(func, pred)
[ "def", "run_only_if_redis_is_available", "(", "func", ")", ":", "try", ":", "import", "redis", "except", "ImportError", ":", "redis", "=", "None", "pred", "=", "(", "lambda", ":", "(", "redis", "is", "not", "None", ")", ")", "return", "run_only", "(", "func", ",", "pred", ")" ]
decorator for checking if python-redis is available .
train
false
5,805
def scan_update(item):
    path = str(item.path())
    try:
        (title, desc, svg) = preview_parse(path)
    except SAXParseException as ex:
        log.error('%r is malformed (%r)', path, ex)
        item.setEnabled(False)
        item.setSelectable(False)
        return
    if (not svg):
        try:
            svg = scheme_svg_thumbnail(path)
        except Exception:
            log.error('Could not render scheme preview for %r', title, exc_info=True)
    if (item.name() != title):
        item.setName(title)
    if (item.description() != desc):
        item.setDescription(desc)
    if svg:
        item.setThumbnail(svg)
[ "def", "scan_update", "(", "item", ")", ":", "path", "=", "str", "(", "item", ".", "path", "(", ")", ")", "try", ":", "(", "title", ",", "desc", ",", "svg", ")", "=", "preview_parse", "(", "path", ")", "except", "SAXParseException", "as", "ex", ":", "log", ".", "error", "(", "'%r is malformed (%r)'", ",", "path", ",", "ex", ")", "item", ".", "setEnabled", "(", "False", ")", "item", ".", "setSelectable", "(", "False", ")", "return", "if", "(", "not", "svg", ")", ":", "try", ":", "svg", "=", "scheme_svg_thumbnail", "(", "path", ")", "except", "Exception", ":", "log", ".", "error", "(", "'Could not render scheme preview for %r'", ",", "title", ",", "exc_info", "=", "True", ")", "if", "(", "item", ".", "name", "(", ")", "!=", "title", ")", ":", "item", ".", "setName", "(", "title", ")", "if", "(", "item", ".", "description", "(", ")", "!=", "desc", ")", ":", "item", ".", "setDescription", "(", "desc", ")", "if", "svg", ":", "item", ".", "setThumbnail", "(", "svg", ")" ]
given a preview item , re-parse its scheme file and update the items title , description and thumbnail .
train
false
5,807
def test_escaped_task_arg_split():
    argstr = 'foo,bar\\,biz\\,baz,what comes after baz?'
    eq_(_escape_split(',', argstr), ['foo', 'bar,biz,baz', 'what comes after baz?'])
[ "def", "test_escaped_task_arg_split", "(", ")", ":", "argstr", "=", "'foo,bar\\\\,biz\\\\,baz,what comes after baz?'", "eq_", "(", "_escape_split", "(", "','", ",", "argstr", ")", ",", "[", "'foo'", ",", "'bar,biz,baz'", ",", "'what comes after baz?'", "]", ")" ]
allow backslashes to escape the task argument separator character .
train
false
5,809
def DumpClientYaml(client_urn, target_dir, token=None, overwrite=False):
    fd = aff4.FACTORY.Open(client_urn, aff4_grr.VFSGRRClient, token=token)
    dirpath = os.path.join(target_dir, fd.urn.Split()[0])
    try:
        os.makedirs(dirpath)
    except OSError:
        pass
    filepath = os.path.join(dirpath, 'client_info.yaml')
    if ((not os.path.isfile(filepath)) or overwrite):
        with open(filepath, 'wb') as out_file:
            out_file.write(serialize.YamlDumper(fd))
[ "def", "DumpClientYaml", "(", "client_urn", ",", "target_dir", ",", "token", "=", "None", ",", "overwrite", "=", "False", ")", ":", "fd", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "client_urn", ",", "aff4_grr", ".", "VFSGRRClient", ",", "token", "=", "token", ")", "dirpath", "=", "os", ".", "path", ".", "join", "(", "target_dir", ",", "fd", ".", "urn", ".", "Split", "(", ")", "[", "0", "]", ")", "try", ":", "os", ".", "makedirs", "(", "dirpath", ")", "except", "OSError", ":", "pass", "filepath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "'client_info.yaml'", ")", "if", "(", "(", "not", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ")", "or", "overwrite", ")", ":", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "out_file", ":", "out_file", ".", "write", "(", "serialize", ".", "YamlDumper", "(", "fd", ")", ")" ]
dump a yaml file containing client info .
train
true
5,811
def splitdrive(p):
    return ('', p)
[ "def", "splitdrive", "(", "p", ")", ":", "return", "(", "''", ",", "p", ")" ]
split a pathname into drive/unc sharepoint and relative path specifiers .
train
false
5,812
def _engine_builder(con):
    global _SQLALCHEMY_INSTALLED
    if isinstance(con, string_types):
        try:
            import sqlalchemy
        except ImportError:
            _SQLALCHEMY_INSTALLED = False
        else:
            con = sqlalchemy.create_engine(con)
            return con
    return con
[ "def", "_engine_builder", "(", "con", ")", ":", "global", "_SQLALCHEMY_INSTALLED", "if", "isinstance", "(", "con", ",", "string_types", ")", ":", "try", ":", "import", "sqlalchemy", "except", "ImportError", ":", "_SQLALCHEMY_INSTALLED", "=", "False", "else", ":", "con", "=", "sqlalchemy", ".", "create_engine", "(", "con", ")", "return", "con", "return", "con" ]
returns a sqlalchemy engine from a uri ; otherwise it just returns con without modifying it .
train
true
5,813
def create_merge_tree(func, keys, token):
    level = 0
    prev_width = len(keys)
    prev_keys = iter(keys)
    rv = {}
    while (prev_width > 1):
        width = tree_width(prev_width)
        groups = tree_groups(prev_width, width)
        keys = [(token, level, i) for i in range(width)]
        rv.update(((key, (func, list(take(num, prev_keys)))) for (num, key) in zip(groups, keys)))
        prev_width = width
        prev_keys = iter(keys)
        level += 1
    return rv
[ "def", "create_merge_tree", "(", "func", ",", "keys", ",", "token", ")", ":", "level", "=", "0", "prev_width", "=", "len", "(", "keys", ")", "prev_keys", "=", "iter", "(", "keys", ")", "rv", "=", "{", "}", "while", "(", "prev_width", ">", "1", ")", ":", "width", "=", "tree_width", "(", "prev_width", ")", "groups", "=", "tree_groups", "(", "prev_width", ",", "width", ")", "keys", "=", "[", "(", "token", ",", "level", ",", "i", ")", "for", "i", "in", "range", "(", "width", ")", "]", "rv", ".", "update", "(", "(", "(", "key", ",", "(", "func", ",", "list", "(", "take", "(", "num", ",", "prev_keys", ")", ")", ")", ")", "for", "(", "num", ",", "key", ")", "in", "zip", "(", "groups", ",", "keys", ")", ")", ")", "prev_width", "=", "width", "prev_keys", "=", "iter", "(", "keys", ")", "level", "+=", "1", "return", "rv" ]
create a task tree that merges all the keys with a reduction function .
train
false
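A toy run of the merge-tree builder above. tree_width, tree_groups, and take live alongside the real function, so the stand-ins below are assumptions for illustration only; the call requires create_merge_tree in scope:

from itertools import islice

def take(n, seq):
    return islice(seq, n)  # first n items of an iterator (assumed semantics)

def tree_width(n, factor=8):
    return max(1, -(-n // factor))  # shrink by roughly 8x per level (assumed)

def tree_groups(n, width):
    (base, extra) = divmod(n, width)
    return [(base + 1) if (i < extra) else base for i in range(width)]

graph = create_merge_tree(sum, ['k%d' % i for i in range(20)], 'tok')
# graph maps ('tok', level, i) -> (sum, [child keys]); level 0 has 3 nodes
# covering 7+7+6 inputs, and level 1 has the single root merging those 3.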
5,814
def set_servers(*servers):
    service_name = 'w32time'
    if (not __salt__['service.status'](service_name)):
        if (not __salt__['service.start'](service_name)):
            return False
    server_cmd = ['W32tm', '/config', '/syncfromflags:manual',
                  '/manualpeerlist:{0}'.format(' '.join(servers))]
    reliable_cmd = ['W32tm', '/config', '/reliable:yes']
    update_cmd = ['W32tm', '/config', '/update']
    for cmd in (server_cmd, reliable_cmd, update_cmd):
        ret = __salt__['cmd.run'](cmd, python_shell=False)
        if ('command completed successfully' not in ret):
            return False
    __salt__['service.restart'](service_name)
    return True
[ "def", "set_servers", "(", "*", "servers", ")", ":", "service_name", "=", "'w32time'", "if", "(", "not", "__salt__", "[", "'service.status'", "]", "(", "service_name", ")", ")", ":", "if", "(", "not", "__salt__", "[", "'service.start'", "]", "(", "service_name", ")", ")", ":", "return", "False", "server_cmd", "=", "[", "'W32tm'", ",", "'/config'", ",", "'/syncfromflags:manual'", ",", "'/manualpeerlist:{0}'", ".", "format", "(", "' '", ".", "join", "(", "servers", ")", ")", "]", "reliable_cmd", "=", "[", "'W32tm'", ",", "'/config'", ",", "'/reliable:yes'", "]", "update_cmd", "=", "[", "'W32tm'", ",", "'/config'", ",", "'/update'", "]", "for", "cmd", "in", "(", "server_cmd", ",", "reliable_cmd", ",", "update_cmd", ")", ":", "ret", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "(", "'command completed successfully'", "not", "in", "ret", ")", ":", "return", "False", "__salt__", "[", "'service.restart'", "]", "(", "service_name", ")", "return", "True" ]
configures a list of ntp servers on the device .
train
false
5,815
def get_config(config_file, repo, ctx, dir):
    config_file = basic_util.strip_path(config_file)
    for changeset in reversed_upper_bounded_changelog(repo, ctx):
        changeset_ctx = repo.changectx(changeset)
        for ctx_file in changeset_ctx.files():
            ctx_file_name = basic_util.strip_path(ctx_file)
            if (ctx_file_name == config_file):
                return get_named_tmpfile_from_ctx(changeset_ctx, ctx_file, dir)
    return None
[ "def", "get_config", "(", "config_file", ",", "repo", ",", "ctx", ",", "dir", ")", ":", "config_file", "=", "basic_util", ".", "strip_path", "(", "config_file", ")", "for", "changeset", "in", "reversed_upper_bounded_changelog", "(", "repo", ",", "ctx", ")", ":", "changeset_ctx", "=", "repo", ".", "changectx", "(", "changeset", ")", "for", "ctx_file", "in", "changeset_ctx", ".", "files", "(", ")", ":", "ctx_file_name", "=", "basic_util", ".", "strip_path", "(", "ctx_file", ")", "if", "(", "ctx_file_name", "==", "config_file", ")", ":", "return", "get_named_tmpfile_from_ctx", "(", "changeset_ctx", ",", "ctx_file", ",", "dir", ")", "return", "None" ]
load configuration from ini .
train
false
5,817
def get_student_from_identifier(unique_student_identifier):
    unique_student_identifier = strip_if_string(unique_student_identifier)
    if ('@' in unique_student_identifier):
        student = User.objects.get(email=unique_student_identifier)
    else:
        student = User.objects.get(username=unique_student_identifier)
    return student
[ "def", "get_student_from_identifier", "(", "unique_student_identifier", ")", ":", "unique_student_identifier", "=", "strip_if_string", "(", "unique_student_identifier", ")", "if", "(", "'@'", "in", "unique_student_identifier", ")", ":", "student", "=", "User", ".", "objects", ".", "get", "(", "email", "=", "unique_student_identifier", ")", "else", ":", "student", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "unique_student_identifier", ")", "return", "student" ]
gets a student object using either an email address or username .
train
false
5,818
def Rep(re):
    result = Opt(Rep1(re))
    result.str = ('Rep(%s)' % re)
    return result
[ "def", "Rep", "(", "re", ")", ":", "result", "=", "Opt", "(", "Rep1", "(", "re", ")", ")", "result", ".", "str", "=", "(", "'Rep(%s)'", "%", "re", ")", "return", "result" ]
rep is an re which matches zero or more repetitions of |re| .
train
false
5,819
def get_message_date(content, header='Date'):
    message = MailParser().parsestr(content, True)
    dateheader = message.get(header)
    datetuple = email.utils.parsedate_tz(dateheader)
    if (datetuple is None):
        return None
    return email.utils.mktime_tz(datetuple)
[ "def", "get_message_date", "(", "content", ",", "header", "=", "'Date'", ")", ":", "message", "=", "MailParser", "(", ")", ".", "parsestr", "(", "content", ",", "True", ")", "dateheader", "=", "message", ".", "get", "(", "header", ")", "datetuple", "=", "email", ".", "utils", ".", "parsedate_tz", "(", "dateheader", ")", "if", "(", "datetuple", "is", "None", ")", ":", "return", "None", "return", "email", ".", "utils", ".", "mktime_tz", "(", "datetuple", ")" ]
parses mail and returns resulting timestamp .
train
false
5,820
@as_op(itypes=[tt.lscalar, tt.dscalar, tt.dscalar], otypes=[tt.dvector])
def rateFunc(switchpoint, early_mean, late_mean):
    out = empty(years)
    out[:switchpoint] = early_mean
    out[switchpoint:] = late_mean
    return out
[ "@", "as_op", "(", "itypes", "=", "[", "tt", ".", "lscalar", ",", "tt", ".", "dscalar", ",", "tt", ".", "dscalar", "]", ",", "otypes", "=", "[", "tt", ".", "dvector", "]", ")", "def", "rateFunc", "(", "switchpoint", ",", "early_mean", ",", "late_mean", ")", ":", "out", "=", "empty", "(", "years", ")", "out", "[", ":", "switchpoint", "]", "=", "early_mean", "out", "[", "switchpoint", ":", "]", "=", "late_mean", "return", "out" ]
concatenate poisson means .
train
false
5,821
def find_contours(array, level, fully_connected='low', positive_orientation='low'):
    array = np.asarray(array, dtype=np.double)
    if (array.ndim != 2):
        raise ValueError('Only 2D arrays are supported.')
    level = float(level)
    if ((fully_connected not in _param_options) or (positive_orientation not in _param_options)):
        raise ValueError('Parameters "fully_connected" and "positive_orientation" must be either "high" or "low".')
    point_list = _find_contours_cy.iterate_and_store(array, level, (fully_connected == 'high'))
    contours = _assemble_contours(_take_2(point_list))
    if (positive_orientation == 'high'):
        contours = [c[::(-1)] for c in contours]
    return contours
[ "def", "find_contours", "(", "array", ",", "level", ",", "fully_connected", "=", "'low'", ",", "positive_orientation", "=", "'low'", ")", ":", "array", "=", "np", ".", "asarray", "(", "array", ",", "dtype", "=", "np", ".", "double", ")", "if", "(", "array", ".", "ndim", "!=", "2", ")", ":", "raise", "ValueError", "(", "'Only 2D arrays are supported.'", ")", "level", "=", "float", "(", "level", ")", "if", "(", "(", "fully_connected", "not", "in", "_param_options", ")", "or", "(", "positive_orientation", "not", "in", "_param_options", ")", ")", ":", "raise", "ValueError", "(", "'Parameters \"fully_connected\" and \"positive_orientation\" must be either \"high\" or \"low\".'", ")", "point_list", "=", "_find_contours_cy", ".", "iterate_and_store", "(", "array", ",", "level", ",", "(", "fully_connected", "==", "'high'", ")", ")", "contours", "=", "_assemble_contours", "(", "_take_2", "(", "point_list", ")", ")", "if", "(", "positive_orientation", "==", "'high'", ")", ":", "contours", "=", "[", "c", "[", ":", ":", "(", "-", "1", ")", "]", "for", "c", "in", "contours", "]", "return", "contours" ]
find iso-valued contours in a 2d array for a given level value .
train
true
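The signature matches scikit-image's measure.find_contours; a minimal usage sketch, assuming scikit-image and numpy are installed:

import numpy as np
from skimage import measure

field = np.zeros((8, 8))
field[2:6, 2:6] = 1.0
contours = measure.find_contours(field, 0.5)
for contour in contours:
    print(contour.shape)  # each contour is an (N, 2) array of (row, col) points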
5,823
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
                              parser=None, parser_name='restructuredtext',
                              writer=None, writer_name='pseudoxml',
                              settings=None, settings_spec=None,
                              settings_overrides=None, config_section=None,
                              enable_exit_status=True, argv=None,
                              usage=default_usage, description=default_description,
                              destination=None, destination_class=io.BinaryFileOutput):
    pub = Publisher(reader, parser, writer, settings=settings,
                    destination_class=destination_class)
    pub.set_components(reader_name, parser_name, writer_name)
    output = pub.publish(argv, usage, description, settings_spec,
                         settings_overrides, config_section=config_section,
                         enable_exit_status=enable_exit_status)
    return output
[ "def", "publish_cmdline_to_binary", "(", "reader", "=", "None", ",", "reader_name", "=", "'standalone'", ",", "parser", "=", "None", ",", "parser_name", "=", "'restructuredtext'", ",", "writer", "=", "None", ",", "writer_name", "=", "'pseudoxml'", ",", "settings", "=", "None", ",", "settings_spec", "=", "None", ",", "settings_overrides", "=", "None", ",", "config_section", "=", "None", ",", "enable_exit_status", "=", "True", ",", "argv", "=", "None", ",", "usage", "=", "default_usage", ",", "description", "=", "default_description", ",", "destination", "=", "None", ",", "destination_class", "=", "io", ".", "BinaryFileOutput", ")", ":", "pub", "=", "Publisher", "(", "reader", ",", "parser", ",", "writer", ",", "settings", "=", "settings", ",", "destination_class", "=", "destination_class", ")", "pub", ".", "set_components", "(", "reader_name", ",", "parser_name", ",", "writer_name", ")", "output", "=", "pub", ".", "publish", "(", "argv", ",", "usage", ",", "description", ",", "settings_spec", ",", "settings_overrides", ",", "config_section", "=", "config_section", ",", "enable_exit_status", "=", "enable_exit_status", ")", "return", "output" ]
set up & run a publisher for command-line-based file i/o .
train
false
5,824
def getTokenEnd(characterIndex, fileText, token):
    tokenIndex = fileText.find(token, characterIndex)
    if (tokenIndex == (-1)):
        return (-1)
    return (tokenIndex + len(token))
[ "def", "getTokenEnd", "(", "characterIndex", ",", "fileText", ",", "token", ")", ":", "tokenIndex", "=", "fileText", ".", "find", "(", "token", ",", "characterIndex", ")", "if", "(", "tokenIndex", "==", "(", "-", "1", ")", ")", ":", "return", "(", "-", "1", ")", "return", "(", "tokenIndex", "+", "len", "(", "token", ")", ")" ]
get the token end index for the file text and token .
train
false
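Since the helper is pure, its contract is easy to pin down with a self-contained check (the function body is repeated here so the block runs on its own):

def getTokenEnd(characterIndex, fileText, token):
    tokenIndex = fileText.find(token, characterIndex)
    if tokenIndex == -1:
        return -1
    return tokenIndex + len(token)

assert getTokenEnd(0, 'abc def abc', 'abc') == 3   # end index of the first match
assert getTokenEnd(4, 'abc def abc', 'abc') == 11  # search starts at index 4
assert getTokenEnd(0, 'abc def abc', 'xyz') == -1  # token absent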
5,825
def strip_filenames(descriptor):
    print 'strip filename from {desc}'.format(desc=descriptor.location.to_deprecated_string())
    if descriptor._field_data.has(descriptor, 'filename'):
        descriptor._field_data.delete(descriptor, 'filename')
    if hasattr(descriptor, 'xml_attributes'):
        if ('filename' in descriptor.xml_attributes):
            del descriptor.xml_attributes['filename']
    for child in descriptor.get_children():
        strip_filenames(child)
    descriptor.save()
[ "def", "strip_filenames", "(", "descriptor", ")", ":", "print", "'strip filename from {desc}'", ".", "format", "(", "desc", "=", "descriptor", ".", "location", ".", "to_deprecated_string", "(", ")", ")", "if", "descriptor", ".", "_field_data", ".", "has", "(", "descriptor", ",", "'filename'", ")", ":", "descriptor", ".", "_field_data", ".", "delete", "(", "descriptor", ",", "'filename'", ")", "if", "hasattr", "(", "descriptor", ",", "'xml_attributes'", ")", ":", "if", "(", "'filename'", "in", "descriptor", ".", "xml_attributes", ")", ":", "del", "descriptor", ".", "xml_attributes", "[", "'filename'", "]", "for", "child", "in", "descriptor", ".", "get_children", "(", ")", ":", "strip_filenames", "(", "child", ")", "descriptor", ".", "save", "(", ")" ]
recursively strips filename from all children's definitions .
train
false
5,827
def cert_base_path(cacert_path=None):
    if (not cacert_path):
        cacert_path = __context__.get('ca.contextual_cert_base_path',
                                      __salt__['config.option']('ca.contextual_cert_base_path'))
    if (not cacert_path):
        cacert_path = __context__.get('ca.cert_base_path',
                                      __salt__['config.option']('ca.cert_base_path'))
    return cacert_path
[ "def", "cert_base_path", "(", "cacert_path", "=", "None", ")", ":", "if", "(", "not", "cacert_path", ")", ":", "cacert_path", "=", "__context__", ".", "get", "(", "'ca.contextual_cert_base_path'", ",", "__salt__", "[", "'config.option'", "]", "(", "'ca.contextual_cert_base_path'", ")", ")", "if", "(", "not", "cacert_path", ")", ":", "cacert_path", "=", "__context__", ".", "get", "(", "'ca.cert_base_path'", ",", "__salt__", "[", "'config.option'", "]", "(", "'ca.cert_base_path'", ")", ")", "return", "cacert_path" ]
return the base path for certs , from cli or from options . cacert_path : absolute path to ca certificates root directory .
train
true
5,828
def get_expected_validation_developer_message(preference_key, preference_value):
    return u"Value '{preference_value}' not valid for preference '{preference_key}': {error}".format(
        preference_key=preference_key,
        preference_value=preference_value,
        error={'key': [u'Ensure this value has at most 255 characters (it has 256).']})
[ "def", "get_expected_validation_developer_message", "(", "preference_key", ",", "preference_value", ")", ":", "return", "u\"Value '{preference_value}' not valid for preference '{preference_key}': {error}\"", ".", "format", "(", "preference_key", "=", "preference_key", ",", "preference_value", "=", "preference_value", ",", "error", "=", "{", "'key'", ":", "[", "u'Ensure this value has at most 255 characters (it has 256).'", "]", "}", ")" ]
returns the expected dict of validation messages for the specified key .
train
false
5,829
def raisingResolverFactory(*args, **kwargs):
    raise ResolverFactoryArguments(args, kwargs)
[ "def", "raisingResolverFactory", "(", "*", "args", ",", "**", "kwargs", ")", ":", "raise", "ResolverFactoryArguments", "(", "args", ",", "kwargs", ")" ]
raise a l{resolverfactoryarguments} exception containing the positional and keyword arguments passed to resolverfactory .
train
false
5,831
def handle_ctrl_c():
    oldhook = sys.excepthook
    def newhook(exctype, value, traceback):
        if (exctype == KeyboardInterrupt):
            log('\nInterrupted.\n')
        else:
            return oldhook(exctype, value, traceback)
    sys.excepthook = newhook
[ "def", "handle_ctrl_c", "(", ")", ":", "oldhook", "=", "sys", ".", "excepthook", "def", "newhook", "(", "exctype", ",", "value", ",", "traceback", ")", ":", "if", "(", "exctype", "==", "KeyboardInterrupt", ")", ":", "log", "(", "'\\nInterrupted.\\n'", ")", "else", ":", "return", "oldhook", "(", "exctype", ",", "value", ",", "traceback", ")", "sys", ".", "excepthook", "=", "newhook" ]
replace the default exception handler for keyboardinterrupt .
train
false
5,833
def kruskal_MST(gr):
    sorted_edges = sorted(gr.get_edge_weights())
    uf = UnionFind()
    min_cost = 0
    for (w, (u, v)) in sorted_edges:
        if (((not uf.get_leader(u)) and (not uf.get_leader(v))) or (uf.get_leader(u) != uf.get_leader(v))):
            uf.insert(u, v)
            min_cost += w
    return min_cost
[ "def", "kruskal_MST", "(", "gr", ")", ":", "sorted_edges", "=", "sorted", "(", "gr", ".", "get_edge_weights", "(", ")", ")", "uf", "=", "UnionFind", "(", ")", "min_cost", "=", "0", "for", "(", "w", ",", "(", "u", ",", "v", ")", ")", "in", "sorted_edges", ":", "if", "(", "(", "(", "not", "uf", ".", "get_leader", "(", "u", ")", ")", "and", "(", "not", "uf", ".", "get_leader", "(", "v", ")", ")", ")", "or", "(", "uf", ".", "get_leader", "(", "u", ")", "!=", "uf", ".", "get_leader", "(", "v", ")", ")", ")", ":", "uf", ".", "insert", "(", "u", ",", "v", ")", "min_cost", "+=", "w", "return", "min_cost" ]
computes the minimum cost spanning tree in an undirected graph .
train
false
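The snippet assumes a UnionFind exposing get_leader/insert and a graph exposing get_edge_weights. A minimal sketch of both (the interface is inferred from the code above; this is not the original implementation), followed by a toy run with kruskal_MST in scope:

class UnionFind(object):
    def __init__(self):
        self.leader = {}  # node -> current group leader
        self.group = {}   # leader -> set of members

    def get_leader(self, node):
        return self.leader.get(node)

    def insert(self, a, b):
        for n in (a, b):  # register unseen nodes as their own leaders
            if n not in self.leader:
                self.leader[n] = n
                self.group[n] = {n}
        (la, lb) = (self.leader[a], self.leader[b])
        if la == lb:
            return
        if len(self.group[la]) < len(self.group[lb]):
            (la, lb) = (lb, la)  # merge the smaller group into the larger
        for n in self.group[lb]:
            self.leader[n] = la
        self.group[la] |= self.group.pop(lb)

class ToyGraph(object):
    def __init__(self, edges):
        self.edges = edges

    def get_edge_weights(self):
        return self.edges  # list of (weight, (u, v)) tuples

g = ToyGraph([(1, ('a', 'b')), (2, ('b', 'c')), (3, ('a', 'c'))])
print(kruskal_MST(g))  # 3 -- (a, b) and (b, c) are kept; (a, c) closes a cycle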
5,836
def activateAaPdpContextReject(ProtocolConfigurationOptions_presence=0):
    a = TpPd(pd=8)
    b = MessageType(mesType=82)
    c = SmCause()
    packet = ((a / b) / c)
    if (ProtocolConfigurationOptions_presence is 1):
        d = ProtocolConfigurationOptions(ieiPCO=39)
        packet = (packet / d)
    return packet
[ "def", "activateAaPdpContextReject", "(", "ProtocolConfigurationOptions_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "8", ")", "b", "=", "MessageType", "(", "mesType", "=", "82", ")", "c", "=", "SmCause", "(", ")", "packet", "=", "(", "(", "a", "/", "b", ")", "/", "c", ")", "if", "(", "ProtocolConfigurationOptions_presence", "is", "1", ")", ":", "d", "=", "ProtocolConfigurationOptions", "(", "ieiPCO", "=", "39", ")", "packet", "=", "(", "packet", "/", "d", ")", "return", "packet" ]
activate aa pdp context reject section 9 .
train
true
5,837
def makeKW(rowClass, args):
    kw = {}
    for i in range(0, len(args)):
        columnName = rowClass.dbColumns[i][0].lower()
        for attr in rowClass.rowColumns:
            if (attr.lower() == columnName):
                kw[attr] = args[i]
                break
    return kw
[ "def", "makeKW", "(", "rowClass", ",", "args", ")", ":", "kw", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "len", "(", "args", ")", ")", ":", "columnName", "=", "rowClass", ".", "dbColumns", "[", "i", "]", "[", "0", "]", ".", "lower", "(", ")", "for", "attr", "in", "rowClass", ".", "rowColumns", ":", "if", "(", "attr", ".", "lower", "(", ")", "==", "columnName", ")", ":", "kw", "[", "attr", "]", "=", "args", "[", "i", "]", "break", "return", "kw" ]
utility method to construct a dictionary for the attributes of an object from set of args .
train
false
5,838
def save_file_dialog(default_format='png'):
    filename = QtGui.QFileDialog.getSaveFileName()
    filename = _format_filename(filename)
    if (filename is None):
        return None
    (basename, ext) = os.path.splitext(filename)
    if (not ext):
        filename = ('%s.%s' % (filename, default_format))
    return filename
[ "def", "save_file_dialog", "(", "default_format", "=", "'png'", ")", ":", "filename", "=", "QtGui", ".", "QFileDialog", ".", "getSaveFileName", "(", ")", "filename", "=", "_format_filename", "(", "filename", ")", "if", "(", "filename", "is", "None", ")", ":", "return", "None", "(", "basename", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "(", "not", "ext", ")", ":", "filename", "=", "(", "'%s.%s'", "%", "(", "filename", ",", "default_format", ")", ")", "return", "filename" ]
return user-selected file path .
train
false
5,840
def find_device(ec2, module, device_id, isinstance=True):
    if isinstance:
        try:
            reservations = ec2.get_all_reservations(instance_ids=[device_id])
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg=str(e))
        if (len(reservations) == 1):
            instances = reservations[0].instances
            if (len(instances) == 1):
                return instances[0]
    else:
        try:
            interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id])
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg=str(e))
        if (len(interfaces) == 1):
            return interfaces[0]
    raise EIPException(('could not find instance' + device_id))
[ "def", "find_device", "(", "ec2", ",", "module", ",", "device_id", ",", "isinstance", "=", "True", ")", ":", "if", "isinstance", ":", "try", ":", "reservations", "=", "ec2", ".", "get_all_reservations", "(", "instance_ids", "=", "[", "device_id", "]", ")", "except", "boto", ".", "exception", ".", "EC2ResponseError", "as", "e", ":", "module", ".", "fail_json", "(", "msg", "=", "str", "(", "e", ")", ")", "if", "(", "len", "(", "reservations", ")", "==", "1", ")", ":", "instances", "=", "reservations", "[", "0", "]", ".", "instances", "if", "(", "len", "(", "instances", ")", "==", "1", ")", ":", "return", "instances", "[", "0", "]", "else", ":", "try", ":", "interfaces", "=", "ec2", ".", "get_all_network_interfaces", "(", "network_interface_ids", "=", "[", "device_id", "]", ")", "except", "boto", ".", "exception", ".", "EC2ResponseError", "as", "e", ":", "module", ".", "fail_json", "(", "msg", "=", "str", "(", "e", ")", ")", "if", "(", "len", "(", "interfaces", ")", "==", "1", ")", ":", "return", "interfaces", "[", "0", "]", "raise", "EIPException", "(", "(", "'could not find instance'", "+", "device_id", ")", ")" ]
find a device in zenoss .
train
false
5,841
def _check_cmd(call):
    if (call['retcode'] != 0):
        comment = ''
        std_err = call.get('stderr')
        std_out = call.get('stdout')
        if std_err:
            comment += std_err
        if std_out:
            comment += std_out
        raise CommandExecutionError('Error running command: {0}'.format(comment))
    return call
[ "def", "_check_cmd", "(", "call", ")", ":", "if", "(", "call", "[", "'retcode'", "]", "!=", "0", ")", ":", "comment", "=", "''", "std_err", "=", "call", ".", "get", "(", "'stderr'", ")", "std_out", "=", "call", ".", "get", "(", "'stdout'", ")", "if", "std_err", ":", "comment", "+=", "std_err", "if", "std_out", ":", "comment", "+=", "std_out", "raise", "CommandExecutionError", "(", "'Error running command: {0}'", ".", "format", "(", "comment", ")", ")", "return", "call" ]
check the output of the cmd .
train
true
5,844
def salt():
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/.'
    return '$6${0}'.format(''.join([random.choice(letters) for i in range(16)]))
[ "def", "salt", "(", ")", ":", "letters", "=", "'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/.'", "return", "'$6${0}'", ".", "format", "(", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "letters", ")", "for", "i", "in", "range", "(", "16", ")", "]", ")", ")" ]
returns a string of 2 random letters .
train
false
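Despite the docstring, the code builds a '$6$' (SHA-512 crypt) prefix followed by 16 random characters from a 64-character alphabet; a self-contained reproduction:

import random

letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/.'
s = '$6${0}'.format(''.join(random.choice(letters) for _ in range(16)))
assert s.startswith('$6$') and len(s) == 19  # 3-char prefix + 16-char salt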
5,845
def is_rarfile(xfile):
    rar_ver = _get_rar_version(xfile)
    if rar_ver:
        return ('RAR%d' % rar_ver)
    else:
        return None
[ "def", "is_rarfile", "(", "xfile", ")", ":", "rar_ver", "=", "_get_rar_version", "(", "xfile", ")", "if", "rar_ver", ":", "return", "(", "'RAR%d'", "%", "rar_ver", ")", "else", ":", "return", "None" ]
check quickly whether file is rar archive .
train
false
5,846
@testing.requires_testing_data
def test_edf_reduced():
    _test_raw_reader(read_raw_edf, input_fname=edf_reduced, stim_channel=None)
[ "@", "testing", ".", "requires_testing_data", "def", "test_edf_reduced", "(", ")", ":", "_test_raw_reader", "(", "read_raw_edf", ",", "input_fname", "=", "edf_reduced", ",", "stim_channel", "=", "None", ")" ]
test edf with various sampling rates .
train
false
5,847
def enable_nat(interface):
    run(settings.iptables, '-t', 'nat', '-A', 'POSTROUTING',
        '-o', interface, '-j', 'MASQUERADE')
[ "def", "enable_nat", "(", "interface", ")", ":", "run", "(", "settings", ".", "iptables", ",", "'-t'", ",", "'nat'", ",", "'-A'", ",", "'POSTROUTING'", ",", "'-o'", ",", "interface", ",", "'-j'", ",", "'MASQUERADE'", ")" ]
enable nat on this interface .
train
false
5,848
def get_official_service_name(service_model):
    official_name = service_model.metadata.get('serviceFullName')
    short_name = service_model.metadata.get('serviceAbbreviation', '')
    if short_name.startswith('Amazon'):
        short_name = short_name[7:]
    if short_name.startswith('AWS'):
        short_name = short_name[4:]
    if (short_name and (short_name.lower() not in official_name.lower())):
        official_name += ' ({0})'.format(short_name)
    return official_name
[ "def", "get_official_service_name", "(", "service_model", ")", ":", "official_name", "=", "service_model", ".", "metadata", ".", "get", "(", "'serviceFullName'", ")", "short_name", "=", "service_model", ".", "metadata", ".", "get", "(", "'serviceAbbreviation'", ",", "''", ")", "if", "short_name", ".", "startswith", "(", "'Amazon'", ")", ":", "short_name", "=", "short_name", "[", "7", ":", "]", "if", "short_name", ".", "startswith", "(", "'AWS'", ")", ":", "short_name", "=", "short_name", "[", "4", ":", "]", "if", "(", "short_name", "and", "(", "short_name", ".", "lower", "(", ")", "not", "in", "official_name", ".", "lower", "(", ")", ")", ")", ":", "official_name", "+=", "' ({0})'", ".", "format", "(", "short_name", ")", "return", "official_name" ]
generate the official name of an aws service .
train
false
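A toy check with a hypothetical stand-in for the service model object (only the metadata attribute is needed), with get_official_service_name in scope:

class FakeServiceModel(object):
    def __init__(self, metadata):
        self.metadata = metadata

model = FakeServiceModel({'serviceFullName': 'Amazon Simple Storage Service',
                          'serviceAbbreviation': 'Amazon S3'})
print(get_official_service_name(model))
# 'Amazon Simple Storage Service (S3)' -- the 'Amazon ' prefix is stripped
# from the abbreviation before it is appended in parentheses.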
5,849
def MessageSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    local_VarintSize = _VarintSize
    assert (not is_packed)
    if is_repeated:
        def RepeatedFieldSize(value):
            result = (tag_size * len(value))
            for element in value:
                l = element.ByteSize()
                result += (local_VarintSize(l) + l)
            return result
        return RepeatedFieldSize
    else:
        def FieldSize(value):
            l = value.ByteSize()
            return ((tag_size + local_VarintSize(l)) + l)
        return FieldSize
[ "def", "MessageSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "local_VarintSize", "=", "_VarintSize", "assert", "(", "not", "is_packed", ")", "if", "is_repeated", ":", "def", "RepeatedFieldSize", "(", "value", ")", ":", "result", "=", "(", "tag_size", "*", "len", "(", "value", ")", ")", "for", "element", "in", "value", ":", "l", "=", "element", ".", "ByteSize", "(", ")", "result", "+=", "(", "local_VarintSize", "(", "l", ")", "+", "l", ")", "return", "result", "return", "RepeatedFieldSize", "else", ":", "def", "FieldSize", "(", "value", ")", ":", "l", "=", "value", ".", "ByteSize", "(", ")", "return", "(", "(", "tag_size", "+", "local_VarintSize", "(", "l", ")", ")", "+", "l", ")", "return", "FieldSize" ]
returns a sizer for a message field .
train
true
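The sizer closes over two helpers; the stand-ins below follow the usual protobuf wire-format rules (assumptions for illustration, not the library's exact code):

def _VarintSize(value):
    size = 1
    while value > 0x7f:  # each varint byte carries 7 bits of payload
        value >>= 7
        size += 1
    return size

def _TagSize(field_number):
    return _VarintSize(field_number << 3)  # tag = (field_number << 3) | wire_type

assert _VarintSize(0) == 1 and _VarintSize(300) == 2
assert _TagSize(16) == 2  # field numbers 16 and up need a two-byte tag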
5,851
def all_permissions_for_user_in_course(user, course_id):
    course = modulestore().get_course(course_id)
    if (course is None):
        raise ItemNotFoundError(course_id)
    all_roles = {role.name for role in Role.objects.filter(users=user, course_id=course_id)}
    permissions = {permission.name
                   for permission in Permission.objects.filter(roles__users=user, roles__course_id=course_id)
                   if (not permission_blacked_out(course, all_roles, permission.name))}
    return permissions
[ "def", "all_permissions_for_user_in_course", "(", "user", ",", "course_id", ")", ":", "course", "=", "modulestore", "(", ")", ".", "get_course", "(", "course_id", ")", "if", "(", "course", "is", "None", ")", ":", "raise", "ItemNotFoundError", "(", "course_id", ")", "all_roles", "=", "{", "role", ".", "name", "for", "role", "in", "Role", ".", "objects", ".", "filter", "(", "users", "=", "user", ",", "course_id", "=", "course_id", ")", "}", "permissions", "=", "{", "permission", ".", "name", "for", "permission", "in", "Permission", ".", "objects", ".", "filter", "(", "roles__users", "=", "user", ",", "roles__course_id", "=", "course_id", ")", "if", "(", "not", "permission_blacked_out", "(", "course", ",", "all_roles", ",", "permission", ".", "name", ")", ")", "}", "return", "permissions" ]
returns all the permissions the user has in the given course .
train
false
5,852
def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
    if (len(idx_use_data) < e.shape[1]):
        if use_sparse:
            data = (e[:, idx_use_data] * data)
        else:
            (col, row) = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
            d_sparse = sparse.csr_matrix((data.ravel(), (row.ravel(), col.ravel())),
                                         shape=(e.shape[1], data.shape[1]))
            data = (e * d_sparse)
            data = np.asarray(data.todense())
    else:
        data = (e * data)
    if (idx_use_out is not None):
        data = data[idx_use_out]
    return data
[ "def", "_morph_mult", "(", "data", ",", "e", ",", "use_sparse", ",", "idx_use_data", ",", "idx_use_out", "=", "None", ")", ":", "if", "(", "len", "(", "idx_use_data", ")", "<", "e", ".", "shape", "[", "1", "]", ")", ":", "if", "use_sparse", ":", "data", "=", "(", "e", "[", ":", ",", "idx_use_data", "]", "*", "data", ")", "else", ":", "(", "col", ",", "row", ")", "=", "np", ".", "meshgrid", "(", "np", ".", "arange", "(", "data", ".", "shape", "[", "1", "]", ")", ",", "idx_use_data", ")", "d_sparse", "=", "sparse", ".", "csr_matrix", "(", "(", "data", ".", "ravel", "(", ")", ",", "(", "row", ".", "ravel", "(", ")", ",", "col", ".", "ravel", "(", ")", ")", ")", ",", "shape", "=", "(", "e", ".", "shape", "[", "1", "]", ",", "data", ".", "shape", "[", "1", "]", ")", ")", "data", "=", "(", "e", "*", "d_sparse", ")", "data", "=", "np", ".", "asarray", "(", "data", ".", "todense", "(", ")", ")", "else", ":", "data", "=", "(", "e", "*", "data", ")", "if", "(", "idx_use_out", "is", "not", "None", ")", ":", "data", "=", "data", "[", "idx_use_out", "]", "return", "data" ]
helper for morphing .
train
false
5,853
def services_for_instance(instance_id):
    ec2 = boto.ec2.connect_to_region(REGION)
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    for reservation in reservations:
        for instance in reservation.instances:
            if (instance.id == instance_id):
                try:
                    services = instance.tags['services'].split(',')
                except KeyError as ke:
                    msg = "Tag named 'services' not found on this instance({})".format(instance_id)
                    raise Exception(msg)
                for service in services:
                    (yield service)
[ "def", "services_for_instance", "(", "instance_id", ")", ":", "ec2", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "REGION", ")", "reservations", "=", "ec2", ".", "get_all_instances", "(", "instance_ids", "=", "[", "instance_id", "]", ")", "for", "reservation", "in", "reservations", ":", "for", "instance", "in", "reservation", ".", "instances", ":", "if", "(", "instance", ".", "id", "==", "instance_id", ")", ":", "try", ":", "services", "=", "instance", ".", "tags", "[", "'services'", "]", ".", "split", "(", "','", ")", "except", "KeyError", "as", "ke", ":", "msg", "=", "\"Tag named 'services' not found on this instance({})\"", ".", "format", "(", "instance_id", ")", "raise", "Exception", "(", "msg", ")", "for", "service", "in", "services", ":", "(", "yield", "service", ")" ]
get the list of all services named by the services tag in this instance's tags .
train
false
5,854
def sanitize_and_trim_path(path):
    path = path.strip()
    new_path = ''
    if sabnzbd.WIN32:
        if path.startswith(u'\\\\?\\UNC\\'):
            new_path = u'\\\\?\\UNC\\'
            path = path[8:]
        elif path.startswith(u'\\\\?\\'):
            new_path = u'\\\\?\\'
            path = path[4:]
    path = path.replace('\\', '/')
    parts = path.split('/')
    if (sabnzbd.WIN32 and (len(parts[0]) == 2) and (':' in parts[0])):
        new_path += (parts[0] + '/')
        parts.pop(0)
    elif path.startswith('//'):
        new_path = '//'
    elif path.startswith('/'):
        new_path = '/'
    for part in parts:
        new_path = os.path.join(new_path, sanitize_foldername(part))
    return os.path.abspath(os.path.normpath(new_path))
[ "def", "sanitize_and_trim_path", "(", "path", ")", ":", "path", "=", "path", ".", "strip", "(", ")", "new_path", "=", "''", "if", "sabnzbd", ".", "WIN32", ":", "if", "path", ".", "startswith", "(", "u'\\\\\\\\?\\\\UNC\\\\'", ")", ":", "new_path", "=", "u'\\\\\\\\?\\\\UNC\\\\'", "path", "=", "path", "[", "8", ":", "]", "elif", "path", ".", "startswith", "(", "u'\\\\\\\\?\\\\'", ")", ":", "new_path", "=", "u'\\\\\\\\?\\\\'", "path", "=", "path", "[", "4", ":", "]", "path", "=", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "parts", "=", "path", ".", "split", "(", "'/'", ")", "if", "(", "sabnzbd", ".", "WIN32", "and", "(", "len", "(", "parts", "[", "0", "]", ")", "==", "2", ")", "and", "(", "':'", "in", "parts", "[", "0", "]", ")", ")", ":", "new_path", "+=", "(", "parts", "[", "0", "]", "+", "'/'", ")", "parts", ".", "pop", "(", "0", ")", "elif", "path", ".", "startswith", "(", "'//'", ")", ":", "new_path", "=", "'//'", "elif", "path", ".", "startswith", "(", "'/'", ")", ":", "new_path", "=", "'/'", "for", "part", "in", "parts", ":", "new_path", "=", "os", ".", "path", ".", "join", "(", "new_path", ",", "sanitize_foldername", "(", "part", ")", ")", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "normpath", "(", "new_path", ")", ")" ]
remove illegal characters and trim element size .
train
false
5,855
def early_stopping_monitor(i, est, locals):
    if (i == 9):
        return True
    else:
        return False
[ "def", "early_stopping_monitor", "(", "i", ",", "est", ",", "locals", ")", ":", "if", "(", "i", "==", "9", ")", ":", "return", "True", "else", ":", "return", "False" ]
returns true on the 10th iteration .
train
false
5,856
def sort_versions(version_list):
    versions = []
    for version_obj in version_list:
        version_slug = version_obj.verbose_name
        comparable_version = parse_version_failsafe(version_slug)
        if comparable_version:
            versions.append((version_obj, comparable_version))
    return list(sorted(versions, key=(lambda version_info: version_info[1]), reverse=True))
[ "def", "sort_versions", "(", "version_list", ")", ":", "versions", "=", "[", "]", "for", "version_obj", "in", "version_list", ":", "version_slug", "=", "version_obj", ".", "verbose_name", "comparable_version", "=", "parse_version_failsafe", "(", "version_slug", ")", "if", "comparable_version", ":", "versions", ".", "append", "(", "(", "version_obj", ",", "comparable_version", ")", ")", "return", "list", "(", "sorted", "(", "versions", ",", "key", "=", "(", "lambda", "version_info", ":", "version_info", "[", "1", "]", ")", ",", "reverse", "=", "True", ")", ")" ]
takes a list of version models and returns a sorted list .
train
false
5,857
def _update_exif_orientation(exif, orientation):
    exif_dict = piexif.load(exif)
    if orientation:
        exif_dict['0th'][piexif.ImageIFD.Orientation] = orientation
    return piexif.dump(exif_dict)
[ "def", "_update_exif_orientation", "(", "exif", ",", "orientation", ")", ":", "exif_dict", "=", "piexif", ".", "load", "(", "exif", ")", "if", "orientation", ":", "exif_dict", "[", "'0th'", "]", "[", "piexif", ".", "ImageIFD", ".", "Orientation", "]", "=", "orientation", "return", "piexif", ".", "dump", "(", "exif_dict", ")" ]
given an exif value and an integer value 1-8 , set the exif orientation tag and return the updated exif .
train
false
5,858
def test_too_many_cols1():
    text = '\nA B C\n1 2 3\n4 5 6\n7 8 9 10\n11 12 13\n'
    with pytest.raises(CParserError) as e:
        table = FastBasic().read(text)
    assert ('CParserError: an error occurred while parsing table data: too many columns found in line 3 of data' in str(e))
[ "def", "test_too_many_cols1", "(", ")", ":", "text", "=", "'\\nA B C\\n1 2 3\\n4 5 6\\n7 8 9 10\\n11 12 13\\n'", "with", "pytest", ".", "raises", "(", "CParserError", ")", "as", "e", ":", "table", "=", "FastBasic", "(", ")", ".", "read", "(", "text", ")", "assert", "(", "'CParserError: an error occurred while parsing table data: too many columns found in line 3 of data'", "in", "str", "(", "e", ")", ")" ]
if a row contains too many columns , the fast reader should raise an error .
train
false
5,860
def generate_subs(speed, source_speed, source_subs):
    if (speed == source_speed):
        return source_subs
    coefficient = ((1.0 * speed) / source_speed)
    subs = {'start': [int(round((timestamp * coefficient))) for timestamp in source_subs['start']],
            'end': [int(round((timestamp * coefficient))) for timestamp in source_subs['end']],
            'text': source_subs['text']}
    return subs
[ "def", "generate_subs", "(", "speed", ",", "source_speed", ",", "source_subs", ")", ":", "if", "(", "speed", "==", "source_speed", ")", ":", "return", "source_subs", "coefficient", "=", "(", "(", "1.0", "*", "speed", ")", "/", "source_speed", ")", "subs", "=", "{", "'start'", ":", "[", "int", "(", "round", "(", "(", "timestamp", "*", "coefficient", ")", ")", ")", "for", "timestamp", "in", "source_subs", "[", "'start'", "]", "]", ",", "'end'", ":", "[", "int", "(", "round", "(", "(", "timestamp", "*", "coefficient", ")", ")", ")", "for", "timestamp", "in", "source_subs", "[", "'end'", "]", "]", ",", "'text'", ":", "source_subs", "[", "'text'", "]", "}", "return", "subs" ]
generate transcripts from one speed to another speed .
train
false
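A worked example with the function above in scope, retiming 1.0x subtitles for 1.5x playback (timestamps in milliseconds):

source = {'start': [0, 1000, 2000], 'end': [900, 1900, 2900], 'text': ['a', 'b', 'c']}
print(generate_subs(1.5, 1.0, source))
# {'start': [0, 1500, 3000], 'end': [1350, 2850, 4350], 'text': ['a', 'b', 'c']}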
5,861
def property_clean(prop, value):
    if (value is not None):
        try:
            prop.validate(prop.make_value_from_form(value))
        except (db.BadValueError, ValueError) as e:
            raise forms.ValidationError(unicode(e))
[ "def", "property_clean", "(", "prop", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", ":", "try", ":", "prop", ".", "validate", "(", "prop", ".", "make_value_from_form", "(", "value", ")", ")", "except", "(", "db", ".", "BadValueError", ",", "ValueError", ")", "as", "e", ":", "raise", "forms", ".", "ValidationError", "(", "unicode", "(", "e", ")", ")" ]
apply property level validation to value .
train
false
5,862
def verifyReturnTo(realm_str, return_to, _vrfy=getAllowedReturnURLs):
    realm = TrustRoot.parse(realm_str)
    if (realm is None):
        return False
    try:
        allowable_urls = _vrfy(realm.buildDiscoveryURL())
    except RealmVerificationRedirected as err:
        oidutil.log(str(err))
        return False
    if returnToMatches(allowable_urls, return_to):
        return True
    else:
        oidutil.log(('Failed to validate return_to %r for realm %r, was not in %s'
                     % (return_to, realm_str, allowable_urls)))
        return False
[ "def", "verifyReturnTo", "(", "realm_str", ",", "return_to", ",", "_vrfy", "=", "getAllowedReturnURLs", ")", ":", "realm", "=", "TrustRoot", ".", "parse", "(", "realm_str", ")", "if", "(", "realm", "is", "None", ")", ":", "return", "False", "try", ":", "allowable_urls", "=", "_vrfy", "(", "realm", ".", "buildDiscoveryURL", "(", ")", ")", "except", "RealmVerificationRedirected", "as", "err", ":", "oidutil", ".", "log", "(", "str", "(", "err", ")", ")", "return", "False", "if", "returnToMatches", "(", "allowable_urls", ",", "return_to", ")", ":", "return", "True", "else", ":", "oidutil", ".", "log", "(", "(", "'Failed to validate return_to %r for realm %r, was not in %s'", "%", "(", "return_to", ",", "realm_str", ",", "allowable_urls", ")", ")", ")", "return", "False" ]
verify that a return_to url is valid for the given realm .
train
false
5,864
def get_most_recent_messages(exp_id):
    thread_models = feedback_models.FeedbackThreadModel.get_threads(
        exp_id, limit=feconf.OPEN_FEEDBACK_COUNT_DASHBOARD)
    message_models = []
    for thread_model in thread_models:
        message_models.append(feedback_models.FeedbackMessageModel.get_most_recent_message(
            exp_id, thread_model.thread_id))
    return [_get_message_from_model(message_model) for message_model in message_models]
[ "def", "get_most_recent_messages", "(", "exp_id", ")", ":", "thread_models", "=", "feedback_models", ".", "FeedbackThreadModel", ".", "get_threads", "(", "exp_id", ",", "limit", "=", "feconf", ".", "OPEN_FEEDBACK_COUNT_DASHBOARD", ")", "message_models", "=", "[", "]", "for", "thread_model", "in", "thread_models", ":", "message_models", ".", "append", "(", "feedback_models", ".", "FeedbackMessageModel", ".", "get_most_recent_message", "(", "exp_id", ",", "thread_model", ".", "thread_id", ")", ")", "return", "[", "_get_message_from_model", "(", "message_model", ")", "for", "message_model", "in", "message_models", "]" ]
fetch the most recently updated feedback threads for a given exploration .
train
false
5,868
def show_to_programming_language(promo, programming_language):
    if promo.programming_language:
        return (programming_language == promo.programming_language)
    return True
[ "def", "show_to_programming_language", "(", "promo", ",", "programming_language", ")", ":", "if", "promo", ".", "programming_language", ":", "return", "(", "programming_language", "==", "promo", ".", "programming_language", ")", "return", "True" ]
filter a promo by a specific programming language ; return true if we haven't set a specific language .
train
false
5,870
def ip_addrs(interface=None, include_loopback=False, interface_data=None):
    return _ip_addrs(interface, include_loopback, interface_data, 'inet')
[ "def", "ip_addrs", "(", "interface", "=", "None", ",", "include_loopback", "=", "False", ",", "interface_data", "=", "None", ")", ":", "return", "_ip_addrs", "(", "interface", ",", "include_loopback", ",", "interface_data", ",", "'inet'", ")" ]
returns a list of ipv4 addresses assigned to the host .
train
false
5,871
def get_startup_disk():
    ret = salt.utils.mac_utils.execute_return_result('systemsetup -getstartupdisk')
    return salt.utils.mac_utils.parse_return(ret)
[ "def", "get_startup_disk", "(", ")", ":", "ret", "=", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_result", "(", "'systemsetup -getstartupdisk'", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "parse_return", "(", "ret", ")" ]
displays the current startup disk ; returns the current startup disk as a str .
train
false
5,873
def new(rsa_key):
    return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
create a new rc2 cipher .
train
false
5,874
def no_template_view(request):
    return HttpResponse('No template used. Sample content: twice once twice. Content ends.')
[ "def", "no_template_view", "(", "request", ")", ":", "return", "HttpResponse", "(", "'No template used. Sample content: twice once twice. Content ends.'", ")" ]
a simple view that expects a get request .
train
false
5,875
@library.global_function
def profile_url(user, edit=False):
    if edit:
        return reverse('users.edit_profile', args=[user.username])
    return reverse('users.profile', args=[user.username])
[ "@", "library", ".", "global_function", "def", "profile_url", "(", "user", ",", "edit", "=", "False", ")", ":", "if", "edit", ":", "return", "reverse", "(", "'users.edit_profile'", ",", "args", "=", "[", "user", ".", "username", "]", ")", "return", "reverse", "(", "'users.profile'", ",", "args", "=", "[", "user", ".", "username", "]", ")" ]
return a url to the users profile .
train
false
5,876
def seed_milestone_relationship_types():
    if (not settings.FEATURES.get('MILESTONES_APP')):
        return None
    MilestoneRelationshipType.objects.create(name='requires')
    MilestoneRelationshipType.objects.create(name='fulfills')
[ "def", "seed_milestone_relationship_types", "(", ")", ":", "if", "(", "not", "settings", ".", "FEATURES", ".", "get", "(", "'MILESTONES_APP'", ")", ")", ":", "return", "None", "MilestoneRelationshipType", ".", "objects", ".", "create", "(", "name", "=", "'requires'", ")", "MilestoneRelationshipType", ".", "objects", ".", "create", "(", "name", "=", "'fulfills'", ")" ]
helper method to pre-populate mrts so the tests can run .
train
false
5,878
@removals.remove(message='keystoneclient auth plugins are deprecated. Use keystoneauth.',
                 version='2.1.0', removal_version='3.0.0')
def get_common_conf_options():
    return [_AUTH_PLUGIN_OPT, _AUTH_SECTION_OPT]
[ "@", "removals", ".", "remove", "(", "message", "=", "'keystoneclient auth plugins are deprecated. Use keystoneauth.'", ",", "version", "=", "'2.1.0'", ",", "removal_version", "=", "'3.0.0'", ")", "def", "get_common_conf_options", "(", ")", ":", "return", "[", "_AUTH_PLUGIN_OPT", ",", "_AUTH_SECTION_OPT", "]" ]
get the oslo_config options common for all auth plugins .
train
false
5,879
@_FFI.callback(u'void(ExternContext*, Handle*, uint64_t)')
def extern_drop_handles(context_handle, handles_ptr, handles_len):
    c = _FFI.from_handle(context_handle)
    handles = _FFI.unpack(handles_ptr, handles_len)
    c.drop_handles(handles)
[ "@", "_FFI", ".", "callback", "(", "u'void(ExternContext*, Handle*, uint64_t)'", ")", "def", "extern_drop_handles", "(", "context_handle", ",", "handles_ptr", ",", "handles_len", ")", ":", "c", "=", "_FFI", ".", "from_handle", "(", "context_handle", ")", "handles", "=", "_FFI", ".", "unpack", "(", "handles_ptr", ",", "handles_len", ")", "c", ".", "drop_handles", "(", "handles", ")" ]
drop the given handles .
train
false
5,881
@celery_app.task(base=ArchiverTask, ignore_result=False)
@logged('archive_success')
def archive_success(dst_pk, job_pk):
    create_app_context()
    dst = Node.load(dst_pk)
    for schema in dst.registered_schema.all():
        if schema.has_files:
            utils.migrate_file_metadata(dst, schema)
    job = ArchiveJob.load(job_pk)
    if (not job.sent):
        job.sent = True
        job.save()
    dst.sanction.ask(dst.get_active_contributors_recursive(unique_users=True))
[ "@", "celery_app", ".", "task", "(", "base", "=", "ArchiverTask", ",", "ignore_result", "=", "False", ")", "@", "logged", "(", "'archive_success'", ")", "def", "archive_success", "(", "dst_pk", ",", "job_pk", ")", ":", "create_app_context", "(", ")", "dst", "=", "Node", ".", "load", "(", "dst_pk", ")", "for", "schema", "in", "dst", ".", "registered_schema", ".", "all", "(", ")", ":", "if", "schema", ".", "has_files", ":", "utils", ".", "migrate_file_metadata", "(", "dst", ",", "schema", ")", "job", "=", "ArchiveJob", ".", "load", "(", "job_pk", ")", "if", "(", "not", "job", ".", "sent", ")", ":", "job", ".", "sent", "=", "True", "job", ".", "save", "(", ")", "dst", ".", "sanction", ".", "ask", "(", "dst", ".", "get_active_contributors_recursive", "(", "unique_users", "=", "True", ")", ")" ]
archiver's final callback .
train
false
5,882
def get_hot_factor(qdata, now, ageweight):
    ageweight = float((ageweight or 0.0))
    (link_name, hot, timestamp) = qdata
    return max((hot + (((now - timestamp) * ageweight) / 45000.0)), 1.0)
[ "def", "get_hot_factor", "(", "qdata", ",", "now", ",", "ageweight", ")", ":", "ageweight", "=", "float", "(", "(", "ageweight", "or", "0.0", ")", ")", "(", "link_name", ",", "hot", ",", "timestamp", ")", "=", "qdata", "return", "max", "(", "(", "hot", "+", "(", "(", "(", "now", "-", "timestamp", ")", "*", "ageweight", ")", "/", "45000.0", ")", ")", ",", "1.0", ")" ]
return a "hot factor" score for a links hot tuple .
train
false
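Worked arithmetic with the function above in scope: every 45,000 time units of age adds ageweight points, with a floor of 1.0:

qdata = ('t3_example', 10.0, 100000)        # (link_name, hot, timestamp)
print(get_hot_factor(qdata, 145000, 2.0))   # 10 + (45000 * 2.0) / 45000 = 12.0
print(get_hot_factor(qdata, 100000, 2.0))   # zero age, no bonus -> 10.0
print(get_hot_factor(('x', 0.5, 0), 0, 0))  # below the floor -> 1.0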
5,883
def superposition_basis(nqubits):
    amp = (1 / sqrt((2 ** nqubits)))
    return sum([(amp * IntQubit(n, nqubits)) for n in range((2 ** nqubits))])
[ "def", "superposition_basis", "(", "nqubits", ")", ":", "amp", "=", "(", "1", "/", "sqrt", "(", "(", "2", "**", "nqubits", ")", ")", ")", "return", "sum", "(", "[", "(", "amp", "*", "IntQubit", "(", "n", ",", "nqubits", ")", ")", "for", "n", "in", "range", "(", "(", "2", "**", "nqubits", ")", ")", "]", ")" ]
creates an equal superposition of the computational basis .
train
false
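With the function above in scope (sqrt and IntQubit come from sympy and its sympy.physics.quantum.qubit module), a two-qubit call returns the uniform superposition:

state = superposition_basis(2)
print(state)  # four basis kets |0>..|3>, each with amplitude 1/sqrt(4) = 1/2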
5,884
def itervalues(d):
    return getattr(d, _itervalues)()
[ "def", "itervalues", "(", "d", ")", ":", "return", "getattr", "(", "d", ",", "_itervalues", ")", "(", ")" ]
return an iterator over the values of a dictionary .
train
false
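The helper dispatches through a module-level name; a minimal sketch of the six-style setup it presumably relies on (the version check is an assumption):

import sys

# Pick the dict method name once, based on the interpreter major version.
_itervalues = 'values' if sys.version_info[0] >= 3 else 'itervalues'

def itervalues(d):
    return getattr(d, _itervalues)()

print(list(itervalues({'a': 1, 'b': 2})))  # [1, 2]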
5,885
def generate_table_alias(src_table_alias, joined_tables=[]):
    alias = src_table_alias
    if (not joined_tables):
        return (('%s' % alias), ('%s' % _quote(alias)))
    for link in joined_tables:
        alias += ('__' + link[1])
    if (len(alias) >= 64):
        alias_hash = hex(crc32(alias))[2:]
        ALIAS_PREFIX_LENGTH = ((63 - len(alias_hash)) - 1)
        alias = ('%s_%s' % (alias[:ALIAS_PREFIX_LENGTH], alias_hash))
    return (('%s' % alias), ('%s as %s' % (_quote(joined_tables[(-1)][0]), _quote(alias))))
[ "def", "generate_table_alias", "(", "src_table_alias", ",", "joined_tables", "=", "[", "]", ")", ":", "alias", "=", "src_table_alias", "if", "(", "not", "joined_tables", ")", ":", "return", "(", "(", "'%s'", "%", "alias", ")", ",", "(", "'%s'", "%", "_quote", "(", "alias", ")", ")", ")", "for", "link", "in", "joined_tables", ":", "alias", "+=", "(", "'__'", "+", "link", "[", "1", "]", ")", "if", "(", "len", "(", "alias", ")", ">=", "64", ")", ":", "alias_hash", "=", "hex", "(", "crc32", "(", "alias", ")", ")", "[", "2", ":", "]", "ALIAS_PREFIX_LENGTH", "=", "(", "(", "63", "-", "len", "(", "alias_hash", ")", ")", "-", "1", ")", "alias", "=", "(", "'%s_%s'", "%", "(", "alias", "[", ":", "ALIAS_PREFIX_LENGTH", "]", ",", "alias_hash", ")", ")", "return", "(", "(", "'%s'", "%", "alias", ")", ",", "(", "'%s as %s'", "%", "(", "_quote", "(", "joined_tables", "[", "(", "-", "1", ")", "]", "[", "0", "]", ")", ",", "_quote", "(", "alias", ")", ")", ")", ")" ]
generate a standard table alias name .
train
false
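A toy run with a stand-in _quote (the real helper quotes SQL identifiers) and generate_table_alias in scope; the 64-character hashing branch is not triggered here:

def _quote(s):
    return '"%s"' % s  # naive identifier quoting, for illustration only

print(generate_table_alias('res_partner'))
# ('res_partner', '"res_partner"')
print(generate_table_alias('res_partner', [('res_company', 'company_id')]))
# ('res_partner__company_id', '"res_company" as "res_partner__company_id"')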
5,886
def BuildAdGroupOperations(batch_job_helper, campaign_operations, number_of_adgroups=1):
    adgroup_operations = [{'xsi_type': 'AdGroupOperation',
                           'operand': {'campaignId': campaign_operation['operand']['id'],
                                       'id': batch_job_helper.GetId(),
                                       'name': ('Batch Ad Group #%s' % uuid.uuid4()),
                                       'biddingStrategyConfiguration': {
                                           'bids': [{'xsi_type': 'CpcBid',
                                                     'bid': {'microAmount': 10000000}}]}},
                           'operator': 'ADD'}
                          for campaign_operation in campaign_operations
                          for _ in range(number_of_adgroups)]
    return adgroup_operations
[ "def", "BuildAdGroupOperations", "(", "batch_job_helper", ",", "campaign_operations", ",", "number_of_adgroups", "=", "1", ")", ":", "adgroup_operations", "=", "[", "{", "'xsi_type'", ":", "'AdGroupOperation'", ",", "'operand'", ":", "{", "'campaignId'", ":", "campaign_operation", "[", "'operand'", "]", "[", "'id'", "]", ",", "'id'", ":", "batch_job_helper", ".", "GetId", "(", ")", ",", "'name'", ":", "(", "'Batch Ad Group #%s'", "%", "uuid", ".", "uuid4", "(", ")", ")", ",", "'biddingStrategyConfiguration'", ":", "{", "'bids'", ":", "[", "{", "'xsi_type'", ":", "'CpcBid'", ",", "'bid'", ":", "{", "'microAmount'", ":", "10000000", "}", "}", "]", "}", "}", ",", "'operator'", ":", "'ADD'", "}", "for", "campaign_operation", "in", "campaign_operations", "for", "_", "in", "range", "(", "number_of_adgroups", ")", "]", "return", "adgroup_operations" ]
builds the operations adding the desired number of adgroups to the given campaigns .
train
true
5,887
def invalidate_country_rule_cache(sender, instance, **kwargs): if isinstance(instance, RestrictedCourse): RestrictedCourse.invalidate_cache_for_course(instance.course_key) CountryAccessRule.invalidate_cache_for_course(instance.course_key) if isinstance(instance, CountryAccessRule): try: restricted_course = instance.restricted_course except RestrictedCourse.DoesNotExist: pass else: CountryAccessRule.invalidate_cache_for_course(restricted_course.course_key)
[ "def", "invalidate_country_rule_cache", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "if", "isinstance", "(", "instance", ",", "RestrictedCourse", ")", ":", "RestrictedCourse", ".", "invalidate_cache_for_course", "(", "instance", ".", "course_key", ")", "CountryAccessRule", ".", "invalidate_cache_for_course", "(", "instance", ".", "course_key", ")", "if", "isinstance", "(", "instance", ",", "CountryAccessRule", ")", ":", "try", ":", "restricted_course", "=", "instance", ".", "restricted_course", "except", "RestrictedCourse", ".", "DoesNotExist", ":", "pass", "else", ":", "CountryAccessRule", ".", "invalidate_cache_for_course", "(", "restricted_course", ".", "course_key", ")" ]
invalidate cached rule information on changes to the rule models .
train
false
5,888
def looks_like_xml(text): key = hash(text) try: return _looks_like_xml_cache[key] except KeyError: m = doctype_lookup_re.match(text) if (m is not None): return True rv = (tag_re.search(text[:1000]) is not None) _looks_like_xml_cache[key] = rv return rv
[ "def", "looks_like_xml", "(", "text", ")", ":", "key", "=", "hash", "(", "text", ")", "try", ":", "return", "_looks_like_xml_cache", "[", "key", "]", "except", "KeyError", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "(", "m", "is", "not", "None", ")", ":", "return", "True", "rv", "=", "(", "tag_re", ".", "search", "(", "text", "[", ":", "1000", "]", ")", "is", "not", "None", ")", "_looks_like_xml_cache", "[", "key", "]", "=", "rv", "return", "rv" ]
check if a doctype exists or if we have some tags .
train
true
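The regexes and cache are module-level names from the original lexer-guessing code; the definitions below are plausible stand-ins rather than the originals, so treat them as assumptions:

```python
import re

doctype_lookup_re = re.compile(r'\s*<!DOCTYPE\s+[^>]*>', re.IGNORECASE)
tag_re = re.compile(r'<\s*[\w:.-]+(?:\s[^>]*)?>')
_looks_like_xml_cache = {}

def looks_like_xml(text):
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        # A doctype is decisive; otherwise probe the first 1000 chars for a tag.
        if doctype_lookup_re.match(text) is not None:
            return True
        rv = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = rv
        return rv

print(looks_like_xml('<!DOCTYPE html><html></html>'))  # True
print(looks_like_xml('just plain text'))               # False
```

Note that, faithfully to the snippet, a positive doctype match returns True without being cached; only the tag-probe result is memoized.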
5,889
@db_api.retry_if_session_inactive() @db_api.context_manager.writer def set_resources_quota_usage_dirty(context, resources, tenant_id, dirty=True): query = db_utils.model_query(context, quota_models.QuotaUsage) query = query.filter_by(tenant_id=tenant_id) if resources: query = query.filter(quota_models.QuotaUsage.resource.in_(resources)) return query.update({'dirty': dirty}, synchronize_session=False)
[ "@", "db_api", ".", "retry_if_session_inactive", "(", ")", "@", "db_api", ".", "context_manager", ".", "writer", "def", "set_resources_quota_usage_dirty", "(", "context", ",", "resources", ",", "tenant_id", ",", "dirty", "=", "True", ")", ":", "query", "=", "db_utils", ".", "model_query", "(", "context", ",", "quota_models", ".", "QuotaUsage", ")", "query", "=", "query", ".", "filter_by", "(", "tenant_id", "=", "tenant_id", ")", "if", "resources", ":", "query", "=", "query", ".", "filter", "(", "quota_models", ".", "QuotaUsage", ".", "resource", ".", "in_", "(", "resources", ")", ")", "return", "query", ".", "update", "(", "{", "'dirty'", ":", "dirty", "}", ",", "synchronize_session", "=", "False", ")" ]
set quota usage dirty bit for a given tenant and multiple resources .
train
false
5,891
def test_host_role_merge_deduping_off(): @roles('r1', 'r2') @hosts('a') def command(): pass with settings(dedupe_hosts=False): true_eq_hosts(command, ['a', 'a', 'b', 'b', 'c'], env={'roledefs': fake_roles})
[ "def", "test_host_role_merge_deduping_off", "(", ")", ":", "@", "roles", "(", "'r1'", ",", "'r2'", ")", "@", "hosts", "(", "'a'", ")", "def", "command", "(", ")", ":", "pass", "with", "settings", "(", "dedupe_hosts", "=", "False", ")", ":", "true_eq_hosts", "(", "command", ",", "[", "'a'", ",", "'a'", ",", "'b'", ",", "'b'", ",", "'c'", "]", ",", "env", "=", "{", "'roledefs'", ":", "fake_roles", "}", ")" ]
allow turning deduping off .
train
false
5,894
def sub_tempita(s, context, file=None, name=None): if (not s): return None if file: context['__name'] = ('%s:%s' % (file, name)) elif name: context['__name'] = name from ..Tempita import sub return sub(s, **context)
[ "def", "sub_tempita", "(", "s", ",", "context", ",", "file", "=", "None", ",", "name", "=", "None", ")", ":", "if", "(", "not", "s", ")", ":", "return", "None", "if", "file", ":", "context", "[", "'__name'", "]", "=", "(", "'%s:%s'", "%", "(", "file", ",", "name", ")", ")", "elif", "name", ":", "context", "[", "'__name'", "]", "=", "name", "from", ".", ".", "Tempita", "import", "sub", "return", "sub", "(", "s", ",", "**", "context", ")" ]
run tempita on string s with the given context .
train
false
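This wrapper comes from Cython's vendored Tempita; the standalone tempita package on PyPI exposes the same sub(content, **kw) entry point, so a hedged usage sketch looks like:

```python
from tempita import sub  # pip install tempita

def sub_tempita(s, context, file=None, name=None):
    if not s:
        return None
    # __name only labels the template in error messages.
    if file:
        context['__name'] = '%s:%s' % (file, name)
    elif name:
        context['__name'] = name
    return sub(s, **context)

print(sub_tempita('Hello {{who}}!', {'who': 'world'}, name='greeting'))
# Hello world!
```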
5,895
def expanduser(path): return path
[ "def", "expanduser", "(", "path", ")", ":", "return", "path" ]
expand ~ and ~user constructs ; this stub performs no expansion and returns the path unchanged .
train
false
5,896
def _key_from_query(query, qualifier=None): stmt = query.with_labels().statement compiled = stmt.compile() params = compiled.params return ' '.join(([str(compiled)] + [str(params[k]) for k in sorted(params)]))
[ "def", "_key_from_query", "(", "query", ",", "qualifier", "=", "None", ")", ":", "stmt", "=", "query", ".", "with_labels", "(", ")", ".", "statement", "compiled", "=", "stmt", ".", "compile", "(", ")", "params", "=", "compiled", ".", "params", "return", "' '", ".", "join", "(", "(", "[", "str", "(", "compiled", ")", "]", "+", "[", "str", "(", "params", "[", "k", "]", ")", "for", "k", "in", "sorted", "(", "params", ")", "]", ")", ")" ]
given a query, build a deterministic cache key from its compiled statement and sorted bind parameters .
train
false
5,897
def is_user_meta(server_type, key): if (len(key) <= (8 + len(server_type))): return False return key.lower().startswith(get_user_meta_prefix(server_type))
[ "def", "is_user_meta", "(", "server_type", ",", "key", ")", ":", "if", "(", "len", "(", "key", ")", "<=", "(", "8", "+", "len", "(", "server_type", ")", ")", ")", ":", "return", "False", "return", "key", ".", "lower", "(", ")", ".", "startswith", "(", "get_user_meta_prefix", "(", "server_type", ")", ")" ]
tests whether a header key both starts with and is longer than the user metadata prefix for the given server type .
train
false
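The 8 in the length guard is len('x--meta-'), the prefix minus the server type. Assuming the companion helper follows swift's 'x-<type>-meta-' convention, a runnable sketch:

```python
def get_user_meta_prefix(server_type):
    # Swift-style prefix, e.g. 'x-object-meta-' (assumed convention).
    return 'x-%s-meta-' % server_type.lower()

def is_user_meta(server_type, key):
    # The key must be strictly longer than the prefix, i.e. carry a name.
    if len(key) <= 8 + len(server_type):
        return False
    return key.lower().startswith(get_user_meta_prefix(server_type))

print(is_user_meta('object', 'X-Object-Meta-Color'))  # True
print(is_user_meta('object', 'X-Object-Meta-'))       # False: no name part
print(is_user_meta('object', 'Content-Type'))         # False
```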
5,899
def decamel(s, separator='_'): s = re.sub('([a-z0-9])([A-Z])', ('\\1%s\\2' % separator), s) s = re.sub('([A-Z])([A-Z][a-z])', ('\\1%s\\2' % separator), s) s = s.lower() return s
[ "def", "decamel", "(", "s", ",", "separator", "=", "'_'", ")", ":", "s", "=", "re", ".", "sub", "(", "'([a-z0-9])([A-Z])'", ",", "(", "'\\\\1%s\\\\2'", "%", "separator", ")", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "'([A-Z])([A-Z][a-z])'", ",", "(", "'\\\\1%s\\\\2'", "%", "separator", ")", ",", "s", ")", "s", "=", "s", ".", "lower", "(", ")", "return", "s" ]
returns the string with camelcase converted to underscores .
train
false
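The two substitutions split lower-to-upper boundaries first and acronym-to-word boundaries second; a few worked calls make the ordering visible:

```python
import re

def decamel(s, separator='_'):
    # fooBar -> foo_Bar (lowercase/digit followed by uppercase)
    s = re.sub(r'([a-z0-9])([A-Z])', r'\1%s\2' % separator, s)
    # HTTPServer -> HTTP_Server (acronym followed by a capitalized word)
    s = re.sub(r'([A-Z])([A-Z][a-z])', r'\1%s\2' % separator, s)
    return s.lower()

print(decamel('CamelCase'))      # camel_case
print(decamel('HTTPResponse'))   # http_response
print(decamel('parseXMLInput'))  # parse_xml_input
```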
5,900
def register_extension(id, extension): EXTENSION[extension.lower()] = id.upper()
[ "def", "register_extension", "(", "id", ",", "extension", ")", ":", "EXTENSION", "[", "extension", ".", "lower", "(", ")", "]", "=", "id", ".", "upper", "(", ")" ]
registers an image extension .
train
false
5,901
def guard_fsys_type(): sabnzbd.encoding.change_fsys(cfg.fsys_type())
[ "def", "guard_fsys_type", "(", ")", ":", "sabnzbd", ".", "encoding", ".", "change_fsys", "(", "cfg", ".", "fsys_type", "(", ")", ")" ]
callback for change of file system naming type .
train
false
5,902
def _get_baseline_from_tag(config, tag): last_snapshot = None for snapshot in __salt__['snapper.list_snapshots'](config): if (tag == snapshot['userdata'].get('baseline_tag')): if ((not last_snapshot) or (last_snapshot['timestamp'] < snapshot['timestamp'])): last_snapshot = snapshot return last_snapshot
[ "def", "_get_baseline_from_tag", "(", "config", ",", "tag", ")", ":", "last_snapshot", "=", "None", "for", "snapshot", "in", "__salt__", "[", "'snapper.list_snapshots'", "]", "(", "config", ")", ":", "if", "(", "tag", "==", "snapshot", "[", "'userdata'", "]", ".", "get", "(", "'baseline_tag'", ")", ")", ":", "if", "(", "(", "not", "last_snapshot", ")", "or", "(", "last_snapshot", "[", "'timestamp'", "]", "<", "snapshot", "[", "'timestamp'", "]", ")", ")", ":", "last_snapshot", "=", "snapshot", "return", "last_snapshot" ]
returns the last created baseline snapshot marked with tag .
train
true
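__salt__ is salt's injected registry of execution modules. Faking it with a plain dict makes the newest-tagged-snapshot selection easy to exercise; the snapshot records below are fabricated for illustration:

```python
_fake_snapshots = [
    {'id': 1, 'timestamp': 100, 'userdata': {'baseline_tag': 'golden'}},
    {'id': 2, 'timestamp': 250, 'userdata': {'baseline_tag': 'golden'}},
    {'id': 3, 'timestamp': 300, 'userdata': {}},
]
__salt__ = {'snapper.list_snapshots': lambda config: _fake_snapshots}

def _get_baseline_from_tag(config, tag):
    # Keep the newest snapshot whose userdata carries the requested tag.
    last_snapshot = None
    for snapshot in __salt__['snapper.list_snapshots'](config):
        if tag == snapshot['userdata'].get('baseline_tag'):
            if not last_snapshot or last_snapshot['timestamp'] < snapshot['timestamp']:
                last_snapshot = snapshot
    return last_snapshot

print(_get_baseline_from_tag('root', 'golden')['id'])  # 2 (newest tagged)
```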
5,903
def get_random_mac(api_handle, virt_type='xenpv'): if virt_type.startswith('vmware'): mac = [0, 80, 86, random.randint(0, 63), random.randint(0, 255), random.randint(0, 255)] elif (virt_type.startswith('xen') or virt_type.startswith('qemu') or virt_type.startswith('kvm')): mac = [0, 22, 62, random.randint(0, 127), random.randint(0, 255), random.randint(0, 255)] else: raise CX('virt mac assignment not yet supported') mac = ':'.join(map((lambda x: ('%02x' % x)), mac)) systems = api_handle.systems() while systems.find(mac_address=mac): mac = get_random_mac(api_handle) return mac
[ "def", "get_random_mac", "(", "api_handle", ",", "virt_type", "=", "'xenpv'", ")", ":", "if", "virt_type", ".", "startswith", "(", "'vmware'", ")", ":", "mac", "=", "[", "0", ",", "80", ",", "86", ",", "random", ".", "randint", "(", "0", ",", "63", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", "]", "elif", "(", "virt_type", ".", "startswith", "(", "'xen'", ")", "or", "virt_type", ".", "startswith", "(", "'qemu'", ")", "or", "virt_type", ".", "startswith", "(", "'kvm'", ")", ")", ":", "mac", "=", "[", "0", ",", "22", ",", "62", ",", "random", ".", "randint", "(", "0", ",", "127", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", "]", "else", ":", "raise", "CX", "(", "'virt mac assignment not yet supported'", ")", "mac", "=", "':'", ".", "join", "(", "map", "(", "(", "lambda", "x", ":", "(", "'%02x'", "%", "x", ")", ")", ",", "mac", ")", ")", "systems", "=", "api_handle", ".", "systems", "(", ")", "while", "systems", ".", "find", "(", "mac_address", "=", "mac", ")", ":", "mac", "=", "get_random_mac", "(", "api_handle", ")", "return", "mac" ]
generate a random mac address; the prefix is 00:50:56 for vmware and 00:16:3e for xen/qemu/kvm .
train
false
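Setting aside the cobbler api_handle collision check, the address construction itself is easy to isolate; this sketch generates only the xen/qemu/kvm flavor:

```python
import random

def random_xen_mac():
    # 00:16:3e is the Xensource OUI; the 0..127 range on the fourth octet
    # mirrors the snippet's randint(0, 127).
    octets = [0x00, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join('%02x' % o for o in octets)

print(random_xen_mac())  # e.g. 00:16:3e:5a:10:c3
```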
5,904
def _setup_browser_offsets(params, n_channels): ylim = [((n_channels * 2) + 1), 0] offset = (ylim[0] / n_channels) params['offsets'] = ((np.arange(n_channels) * offset) + (offset / 2.0)) params['n_channels'] = n_channels params['ax'].set_yticks(params['offsets']) params['ax'].set_ylim(ylim) params['vsel_patch'].set_height(n_channels) line = params['ax_vertline'] line.set_data(line._x, np.array(params['ax'].get_ylim()))
[ "def", "_setup_browser_offsets", "(", "params", ",", "n_channels", ")", ":", "ylim", "=", "[", "(", "(", "n_channels", "*", "2", ")", "+", "1", ")", ",", "0", "]", "offset", "=", "(", "ylim", "[", "0", "]", "/", "n_channels", ")", "params", "[", "'offsets'", "]", "=", "(", "(", "np", ".", "arange", "(", "n_channels", ")", "*", "offset", ")", "+", "(", "offset", "/", "2.0", ")", ")", "params", "[", "'n_channels'", "]", "=", "n_channels", "params", "[", "'ax'", "]", ".", "set_yticks", "(", "params", "[", "'offsets'", "]", ")", "params", "[", "'ax'", "]", ".", "set_ylim", "(", "ylim", ")", "params", "[", "'vsel_patch'", "]", ".", "set_height", "(", "n_channels", ")", "line", "=", "params", "[", "'ax_vertline'", "]", "line", ".", "set_data", "(", "line", ".", "_x", ",", "np", ".", "array", "(", "params", "[", "'ax'", "]", ".", "get_ylim", "(", ")", ")", ")" ]
aux function for computing viewport height and adjusting offsets .
train
false
5,905
def validate_volume_type(volume_type): if (volume_type not in VALID_VOLUME_TYPES): raise ValueError(('Elasticsearch Domain VolumeType must be one of: %s' % ', '.join(VALID_VOLUME_TYPES))) return volume_type
[ "def", "validate_volume_type", "(", "volume_type", ")", ":", "if", "(", "volume_type", "not", "in", "VALID_VOLUME_TYPES", ")", ":", "raise", "ValueError", "(", "(", "'Elasticsearch Domain VolumeType must be one of: %s'", "%", "', '", ".", "join", "(", "VALID_VOLUME_TYPES", ")", ")", ")", "return", "volume_type" ]
validate volumetype for elasticsearchdomain .
train
false
5,906
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None): generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(((X[:, 0] + (2 * X[:, 1])) - (2 * X[:, 2])) - (1.5 * X[:, 3])), scale=np.ones(n_samples)) return (X, y)
[ "def", "make_sparse_uncorrelated", "(", "n_samples", "=", "100", ",", "n_features", "=", "10", ",", "random_state", "=", "None", ")", ":", "generator", "=", "check_random_state", "(", "random_state", ")", "X", "=", "generator", ".", "normal", "(", "loc", "=", "0", ",", "scale", "=", "1", ",", "size", "=", "(", "n_samples", ",", "n_features", ")", ")", "y", "=", "generator", ".", "normal", "(", "loc", "=", "(", "(", "(", "X", "[", ":", ",", "0", "]", "+", "(", "2", "*", "X", "[", ":", ",", "1", "]", ")", ")", "-", "(", "2", "*", "X", "[", ":", ",", "2", "]", ")", ")", "-", "(", "1.5", "*", "X", "[", ":", ",", "3", "]", ")", ")", ",", "scale", "=", "np", ".", "ones", "(", "n_samples", ")", ")", "return", "(", "X", ",", "y", ")" ]
generate a random regression problem with a sparse uncorrelated design; the dataset is described in celeux et al [1] .
train
false
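This generator ships in scikit-learn, so it can be exercised directly; only the first four features carry signal, the rest are pure noise:

```python
from sklearn.datasets import make_sparse_uncorrelated

X, y = make_sparse_uncorrelated(n_samples=200, n_features=10, random_state=0)
print(X.shape, y.shape)  # (200, 10) (200,)
# y ~ N(X[:,0] + 2*X[:,1] - 2*X[:,2] - 1.5*X[:,3], 1)
```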
5,908
def getCarving(fileName): pluginModule = fabmetheus_interpret.getInterpretPlugin(fileName) if (pluginModule == None): return None return pluginModule.getCarving(fileName)
[ "def", "getCarving", "(", "fileName", ")", ":", "pluginModule", "=", "fabmetheus_interpret", ".", "getInterpretPlugin", "(", "fileName", ")", "if", "(", "pluginModule", "==", "None", ")", ":", "return", "None", "return", "pluginModule", ".", "getCarving", "(", "fileName", ")" ]
get the carving for the given file via the matching interpret plugin .
train
false
5,909
def get_urlconf(default=None): thread = currentThread() if (thread in _urlconfs): return _urlconfs[thread] return default
[ "def", "get_urlconf", "(", "default", "=", "None", ")", ":", "thread", "=", "currentThread", "(", ")", "if", "(", "thread", "in", "_urlconfs", ")", ":", "return", "_urlconfs", "[", "thread", "]", "return", "default" ]
return the root urlconf to use for the current thread if it has been changed from the default one .
train
false
5,911
def evaluate_pauli_product(arg): start = arg end = arg if (not isinstance(arg, Mul)): return arg while ((not (start == end)) | ((start == arg) & (end == arg))): start = end tmp = start.as_coeff_mul() sigma_product = 1 com_product = 1 keeper = 1 for el in tmp[1]: if isinstance(el, Pauli): sigma_product *= el elif (not el.is_commutative): keeper = ((keeper * sigma_product) * el) sigma_product = 1 else: com_product *= el end = (((tmp[0] * keeper) * sigma_product) * com_product) if (end == arg): break return end
[ "def", "evaluate_pauli_product", "(", "arg", ")", ":", "start", "=", "arg", "end", "=", "arg", "if", "(", "not", "isinstance", "(", "arg", ",", "Mul", ")", ")", ":", "return", "arg", "while", "(", "(", "not", "(", "start", "==", "end", ")", ")", "|", "(", "(", "start", "==", "arg", ")", "&", "(", "end", "==", "arg", ")", ")", ")", ":", "start", "=", "end", "tmp", "=", "start", ".", "as_coeff_mul", "(", ")", "sigma_product", "=", "1", "com_product", "=", "1", "keeper", "=", "1", "for", "el", "in", "tmp", "[", "1", "]", ":", "if", "isinstance", "(", "el", ",", "Pauli", ")", ":", "sigma_product", "*=", "el", "elif", "(", "not", "el", ".", "is_commutative", ")", ":", "keeper", "=", "(", "(", "keeper", "*", "sigma_product", ")", "*", "el", ")", "sigma_product", "=", "1", "else", ":", "com_product", "*=", "el", "end", "=", "(", "(", "(", "tmp", "[", "0", "]", "*", "keeper", ")", "*", "sigma_product", ")", "*", "com_product", ")", "if", "(", "end", "==", "arg", ")", ":", "break", "return", "end" ]
helper function to evaluate a product of pauli matrices with symbolic objects; arg is a symbolic expression that contains pauli matrices .
train
false
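The function is public sympy API in sympy.physics.paulialgebra; a short demonstration of how commutative factors are pulled out while the Pauli tail is reduced via sigma_2 * sigma_1 = -I * sigma_3:

```python
from sympy import Symbol
from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product

x = Symbol('x')
# Commutative factors (x**2) pass through; the Pauli product collapses.
print(evaluate_pauli_product(x**2 * Pauli(2) * Pauli(1)))  # -I*x**2*sigma3
```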
5,914
def _update_object(table, table_obj): _db_content[table][table_obj.obj] = table_obj
[ "def", "_update_object", "(", "table", ",", "table_obj", ")", ":", "_db_content", "[", "table", "]", "[", "table_obj", ".", "obj", "]", "=", "table_obj" ]
update the object entry for the given table in the db content .
train
false
5,916
def _set_to_get(set_cmd, module): set_cmd = truncate_before(set_cmd, ' option:') get_cmd = set_cmd.split(' ') (key, value) = get_cmd[(-1)].split('=') module.log(('get commands %s ' % key)) return (((['--', 'get'] + get_cmd[:(-1)]) + [key]), value)
[ "def", "_set_to_get", "(", "set_cmd", ",", "module", ")", ":", "set_cmd", "=", "truncate_before", "(", "set_cmd", ",", "' option:'", ")", "get_cmd", "=", "set_cmd", ".", "split", "(", "' '", ")", "(", "key", ",", "value", ")", "=", "get_cmd", "[", "(", "-", "1", ")", "]", ".", "split", "(", "'='", ")", "module", ".", "log", "(", "(", "'get commands %s '", "%", "key", ")", ")", "return", "(", "(", "(", "[", "'--'", ",", "'get'", "]", "+", "get_cmd", "[", ":", "(", "-", "1", ")", "]", ")", "+", "[", "key", "]", ")", ",", "value", ")" ]
convert a set command to the corresponding get command and extract the value being set .
train
false
5,918
def _convert_comp_data(res4): if (res4['ncomp'] == 0): return res4['comp'] = sorted(res4['comp'], key=_comp_sort_keys) _check_comp(res4['comp']) first = 0 kind = (-1) comps = list() for k in range(len(res4['comp'])): if (res4['comp'][k]['coeff_type'] != kind): if (k > 0): comps.append(_conv_comp(res4['comp'], first, (k - 1), res4['chs'])) kind = res4['comp'][k]['coeff_type'] first = k comps.append(_conv_comp(res4['comp'], first, k, res4['chs'])) return comps
[ "def", "_convert_comp_data", "(", "res4", ")", ":", "if", "(", "res4", "[", "'ncomp'", "]", "==", "0", ")", ":", "return", "res4", "[", "'comp'", "]", "=", "sorted", "(", "res4", "[", "'comp'", "]", ",", "key", "=", "_comp_sort_keys", ")", "_check_comp", "(", "res4", "[", "'comp'", "]", ")", "first", "=", "0", "kind", "=", "(", "-", "1", ")", "comps", "=", "list", "(", ")", "for", "k", "in", "range", "(", "len", "(", "res4", "[", "'comp'", "]", ")", ")", ":", "if", "(", "res4", "[", "'comp'", "]", "[", "k", "]", "[", "'coeff_type'", "]", "!=", "kind", ")", ":", "if", "(", "k", ">", "0", ")", ":", "comps", ".", "append", "(", "_conv_comp", "(", "res4", "[", "'comp'", "]", ",", "first", ",", "(", "k", "-", "1", ")", ",", "res4", "[", "'chs'", "]", ")", ")", "kind", "=", "res4", "[", "'comp'", "]", "[", "k", "]", "[", "'coeff_type'", "]", "first", "=", "k", "comps", ".", "append", "(", "_conv_comp", "(", "res4", "[", "'comp'", "]", ",", "first", ",", "k", ",", "res4", "[", "'chs'", "]", ")", ")", "return", "comps" ]
convert the compensation data into named matrices .
train
false
5,919
def _get_cibfile_cksum(cibname): cibfile_cksum = '{0}.cksum'.format(_get_cibfile(cibname)) log.trace('cibfile_cksum: {0}'.format(cibfile_cksum)) return cibfile_cksum
[ "def", "_get_cibfile_cksum", "(", "cibname", ")", ":", "cibfile_cksum", "=", "'{0}.cksum'", ".", "format", "(", "_get_cibfile", "(", "cibname", ")", ")", "log", ".", "trace", "(", "'cibfile_cksum: {0}'", ".", "format", "(", "cibfile_cksum", ")", ")", "return", "cibfile_cksum" ]
get the full path of the checksum file for the cib with the given name .
train
true
5,921
def vhost_exists(name, runas=None): if ((runas is None) and (not salt.utils.is_windows())): runas = salt.utils.get_user() return (name in list_vhosts(runas=runas))
[ "def", "vhost_exists", "(", "name", ",", "runas", "=", "None", ")", ":", "if", "(", "(", "runas", "is", "None", ")", "and", "(", "not", "salt", ".", "utils", ".", "is_windows", "(", ")", ")", ")", ":", "runas", "=", "salt", ".", "utils", ".", "get_user", "(", ")", "return", "(", "name", "in", "list_vhosts", "(", "runas", "=", "runas", ")", ")" ]
return whether the vhost exists based on rabbitmqctl list_vhosts .
train
true
5,922
def day_crumb(date): year = date.strftime('%Y') month = date.strftime('%m') day = date.strftime('%d') return Crumb(day, reverse('zinnia:entry_archive_day', args=[year, month, day]))
[ "def", "day_crumb", "(", "date", ")", ":", "year", "=", "date", ".", "strftime", "(", "'%Y'", ")", "month", "=", "date", ".", "strftime", "(", "'%m'", ")", "day", "=", "date", ".", "strftime", "(", "'%d'", ")", "return", "Crumb", "(", "day", ",", "reverse", "(", "'zinnia:entry_archive_day'", ",", "args", "=", "[", "year", ",", "month", ",", "day", "]", ")", ")" ]
crumb for a day .
train
true
5,924
def find_management_module(app_name): parts = app_name.split('.') parts.append('management') parts.reverse() part = parts.pop() path = None try: (f, path, descr) = imp.find_module(part, path) except ImportError as e: if (os.path.basename(os.getcwd()) != part): raise e while parts: part = parts.pop() (f, path, descr) = imp.find_module(part, ((path and [path]) or None)) return path
[ "def", "find_management_module", "(", "app_name", ")", ":", "parts", "=", "app_name", ".", "split", "(", "'.'", ")", "parts", ".", "append", "(", "'management'", ")", "parts", ".", "reverse", "(", ")", "part", "=", "parts", ".", "pop", "(", ")", "path", "=", "None", "try", ":", "(", "f", ",", "path", ",", "descr", ")", "=", "imp", ".", "find_module", "(", "part", ",", "path", ")", "except", "ImportError", "as", "e", ":", "if", "(", "os", ".", "path", ".", "basename", "(", "os", ".", "getcwd", "(", ")", ")", "!=", "part", ")", ":", "raise", "e", "while", "parts", ":", "part", "=", "parts", ".", "pop", "(", ")", "(", "f", ",", "path", ",", "descr", ")", "=", "imp", ".", "find_module", "(", "part", ",", "(", "(", "path", "and", "[", "path", "]", ")", "or", "None", ")", ")", "return", "path" ]
determines the path to the management module for the given app_name .
train
false
5,925
def warnAboutFunction(offender, warningString): offenderModule = sys.modules[offender.__module__] filename = inspect.getabsfile(offenderModule) lineStarts = list(findlinestarts(offender.func_code)) lastLineNo = lineStarts[(-1)][1] globals = offender.func_globals kwargs = dict(category=DeprecationWarning, filename=filename, lineno=lastLineNo, module=offenderModule.__name__, registry=globals.setdefault('__warningregistry__', {}), module_globals=None) if (sys.version_info[:2] < (2, 5)): kwargs.pop('module_globals') warn_explicit(warningString, **kwargs)
[ "def", "warnAboutFunction", "(", "offender", ",", "warningString", ")", ":", "offenderModule", "=", "sys", ".", "modules", "[", "offender", ".", "__module__", "]", "filename", "=", "inspect", ".", "getabsfile", "(", "offenderModule", ")", "lineStarts", "=", "list", "(", "findlinestarts", "(", "offender", ".", "func_code", ")", ")", "lastLineNo", "=", "lineStarts", "[", "(", "-", "1", ")", "]", "[", "1", "]", "globals", "=", "offender", ".", "func_globals", "kwargs", "=", "dict", "(", "category", "=", "DeprecationWarning", ",", "filename", "=", "filename", ",", "lineno", "=", "lastLineNo", ",", "module", "=", "offenderModule", ".", "__name__", ",", "registry", "=", "globals", ".", "setdefault", "(", "'__warningregistry__'", ",", "{", "}", ")", ",", "module_globals", "=", "None", ")", "if", "(", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "2", ",", "5", ")", ")", ":", "kwargs", ".", "pop", "(", "'module_globals'", ")", "warn_explicit", "(", "warningString", ",", "**", "kwargs", ")" ]
issue a warning string, identifying the offending function as the responsible code .
train
false
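The snippet uses Python-2-era attributes (func_code, func_globals); the maintained entry point is twisted.python.deprecate.warnAboutFunction, which a caller uses like this:

```python
import warnings
from twisted.python.deprecate import warnAboutFunction

def legacy():
    # Stand-in for third-party code we want to flag.
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    warnAboutFunction(legacy, 'legacy() is deprecated; use shiny() instead')

# The warning is attributed to legacy()'s module and last line.
print(caught[0].category.__name__, caught[0].message)
```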
5,926
def processArchivable(archivableClass, elementNode): if (elementNode == None): return elementNode.xmlObject = archivableClass() elementNode.xmlObject.setToElementNode(elementNode) elementNode.getXMLProcessor().processChildNodes(elementNode)
[ "def", "processArchivable", "(", "archivableClass", ",", "elementNode", ")", ":", "if", "(", "elementNode", "==", "None", ")", ":", "return", "elementNode", ".", "xmlObject", "=", "archivableClass", "(", ")", "elementNode", ".", "xmlObject", ".", "setToElementNode", "(", "elementNode", ")", "elementNode", ".", "getXMLProcessor", "(", ")", ".", "processChildNodes", "(", "elementNode", ")" ]
get any new elements and process the archivable .
train
false
5,927
@profiler.trace def port_create(request, network_id, **kwargs): LOG.debug(('port_create(): netid=%s, kwargs=%s' % (network_id, kwargs))) if ('policy_profile_id' in kwargs): kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id') kwargs = unescape_port_kwargs(**kwargs) body = {'port': {'network_id': network_id}} if ('tenant_id' not in kwargs): kwargs['tenant_id'] = request.user.project_id body['port'].update(kwargs) port = neutronclient(request).create_port(body=body).get('port') return Port(port)
[ "@", "profiler", ".", "trace", "def", "port_create", "(", "request", ",", "network_id", ",", "**", "kwargs", ")", ":", "LOG", ".", "debug", "(", "(", "'port_create(): netid=%s, kwargs=%s'", "%", "(", "network_id", ",", "kwargs", ")", ")", ")", "if", "(", "'policy_profile_id'", "in", "kwargs", ")", ":", "kwargs", "[", "'n1kv:profile'", "]", "=", "kwargs", ".", "pop", "(", "'policy_profile_id'", ")", "kwargs", "=", "unescape_port_kwargs", "(", "**", "kwargs", ")", "body", "=", "{", "'port'", ":", "{", "'network_id'", ":", "network_id", "}", "}", "if", "(", "'tenant_id'", "not", "in", "kwargs", ")", ":", "kwargs", "[", "'tenant_id'", "]", "=", "request", ".", "user", ".", "project_id", "body", "[", "'port'", "]", ".", "update", "(", "kwargs", ")", "port", "=", "neutronclient", "(", "request", ")", ".", "create_port", "(", "body", "=", "body", ")", ".", "get", "(", "'port'", ")", "return", "Port", "(", "port", ")" ]
create a port on a specified network .
train
true
5,930
def _get_direct_filter_query(args): query = Q() for arg in args: if (hasattr(Document, arg) and args[arg]): append = Q(**{str((arg + '__id')): long(args[arg])}) query = (query & append) return query
[ "def", "_get_direct_filter_query", "(", "args", ")", ":", "query", "=", "Q", "(", ")", "for", "arg", "in", "args", ":", "if", "(", "hasattr", "(", "Document", ",", "arg", ")", "and", "args", "[", "arg", "]", ")", ":", "append", "=", "Q", "(", "**", "{", "str", "(", "(", "arg", "+", "'__id'", ")", ")", ":", "long", "(", "args", "[", "arg", "]", ")", "}", ")", "query", "=", "(", "query", "&", "append", ")", "return", "query" ]
creates a query to filter documents .
train
false
5,932
def _convert_fits2record(format): (repeat, dtype, option) = _parse_tformat(format) if (dtype in FITS2NUMPY): if (dtype == 'A'): output_format = (FITS2NUMPY[dtype] + str(repeat)) if ((format.lstrip()[0] == 'A') and (option != '')): output_format = (FITS2NUMPY[dtype] + str(int(option))) else: repeat_str = '' if (repeat != 1): repeat_str = str(repeat) output_format = (repeat_str + FITS2NUMPY[dtype]) elif (dtype == 'X'): output_format = _FormatX(repeat) elif (dtype == 'P'): output_format = _FormatP.from_tform(format) elif (dtype == 'Q'): output_format = _FormatQ.from_tform(format) elif (dtype == 'F'): output_format = 'f8' else: raise ValueError('Illegal format {}.'.format(format)) return output_format
[ "def", "_convert_fits2record", "(", "format", ")", ":", "(", "repeat", ",", "dtype", ",", "option", ")", "=", "_parse_tformat", "(", "format", ")", "if", "(", "dtype", "in", "FITS2NUMPY", ")", ":", "if", "(", "dtype", "==", "'A'", ")", ":", "output_format", "=", "(", "FITS2NUMPY", "[", "dtype", "]", "+", "str", "(", "repeat", ")", ")", "if", "(", "(", "format", ".", "lstrip", "(", ")", "[", "0", "]", "==", "'A'", ")", "and", "(", "option", "!=", "''", ")", ")", ":", "output_format", "=", "(", "FITS2NUMPY", "[", "dtype", "]", "+", "str", "(", "int", "(", "option", ")", ")", ")", "else", ":", "repeat_str", "=", "''", "if", "(", "repeat", "!=", "1", ")", ":", "repeat_str", "=", "str", "(", "repeat", ")", "output_format", "=", "(", "repeat_str", "+", "FITS2NUMPY", "[", "dtype", "]", ")", "elif", "(", "dtype", "==", "'X'", ")", ":", "output_format", "=", "_FormatX", "(", "repeat", ")", "elif", "(", "dtype", "==", "'P'", ")", ":", "output_format", "=", "_FormatP", ".", "from_tform", "(", "format", ")", "elif", "(", "dtype", "==", "'Q'", ")", ":", "output_format", "=", "_FormatQ", ".", "from_tform", "(", "format", ")", "elif", "(", "dtype", "==", "'F'", ")", ":", "output_format", "=", "'f8'", "else", ":", "raise", "ValueError", "(", "'Illegal format {}.'", ".", "format", "(", "format", ")", ")", "return", "output_format" ]
convert fits format spec to record format spec .
train
false
5,933
def is_template_file(path): ext = os.path.splitext(path)[1].lower() return (ext in [u'.html', u'.htm', u'.xml'])
[ "def", "is_template_file", "(", "path", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", ".", "lower", "(", ")", "return", "(", "ext", "in", "[", "u'.html'", ",", "u'.htm'", ",", "u'.xml'", "]", ")" ]
return true if the given file path is an html or xml template file .
train
false
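A self-contained copy with quick checks; note the lowercasing makes the extension test case-insensitive:

```python
import os

def is_template_file(path):
    ext = os.path.splitext(path)[1].lower()
    return ext in ('.html', '.htm', '.xml')

print(is_template_file('theme/base.HTML'))  # True
print(is_template_file('docs/index.md'))    # False
```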
5,934
def register_reader(data_format, data_class, function, force=False): if ((not ((data_format, data_class) in _readers)) or force): _readers[(data_format, data_class)] = function else: raise IORegistryError(u"Reader for format '{0}' and class '{1}' is already defined".format(data_format, data_class.__name__)) if (data_class not in _delayed_docs_classes): _update__doc__(data_class, u'read')
[ "def", "register_reader", "(", "data_format", ",", "data_class", ",", "function", ",", "force", "=", "False", ")", ":", "if", "(", "(", "not", "(", "(", "data_format", ",", "data_class", ")", "in", "_readers", ")", ")", "or", "force", ")", ":", "_readers", "[", "(", "data_format", ",", "data_class", ")", "]", "=", "function", "else", ":", "raise", "IORegistryError", "(", "u\"Reader for format '{0}' and class '{1}' is already defined\"", ".", "format", "(", "data_format", ",", "data_class", ".", "__name__", ")", ")", "if", "(", "data_class", "not", "in", "_delayed_docs_classes", ")", ":", "_update__doc__", "(", "data_class", ",", "u'read'", ")" ]
register a reader function .
train
false
5,936
def _akima_interpolate(xi, yi, x, der=0, axis=0): from scipy import interpolate try: P = interpolate.Akima1DInterpolator(xi, yi, axis=axis) except TypeError: P = interpolate.Akima1DInterpolator(xi, yi) if (der == 0): return P(x) elif interpolate._isscalar(der): return P(x, der=der) else: return [P(x, nu) for nu in der]
[ "def", "_akima_interpolate", "(", "xi", ",", "yi", ",", "x", ",", "der", "=", "0", ",", "axis", "=", "0", ")", ":", "from", "scipy", "import", "interpolate", "try", ":", "P", "=", "interpolate", ".", "Akima1DInterpolator", "(", "xi", ",", "yi", ",", "axis", "=", "axis", ")", "except", "TypeError", ":", "P", "=", "interpolate", ".", "Akima1DInterpolator", "(", "xi", ",", "yi", ")", "if", "(", "der", "==", "0", ")", ":", "return", "P", "(", "x", ")", "elif", "interpolate", ".", "_isscalar", "(", "der", ")", ":", "return", "P", "(", "x", ",", "der", "=", "der", ")", "else", ":", "return", "[", "P", "(", "x", ",", "nu", ")", "for", "nu", "in", "der", "]" ]
convenience function for akima interpolation .
train
true
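Calling scipy's Akima1DInterpolator directly shows what the wrapper delegates to; the axis keyword the try/except guards against is absent only in very old scipy releases:

```python
import numpy as np
from scipy.interpolate import Akima1DInterpolator

xi = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yi = np.sin(xi)
akima = Akima1DInterpolator(xi, yi)

x = np.linspace(0.0, 4.0, 9)
print(akima(x))        # interpolated values, like der=0
print(akima(x, nu=1))  # first derivative, like der=1
```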
5,937
def encode_utf8(string): if isinstance(string, unicode): try: return string.encode('utf-8') except: return string return str(string)
[ "def", "encode_utf8", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "unicode", ")", ":", "try", ":", "return", "string", ".", "encode", "(", "'utf-8'", ")", "except", ":", "return", "string", "return", "str", "(", "string", ")" ]
encode a unicode string to a utf-8 byte sequence .
train
false
5,940
def test_named_import(): s = 'import time as dt' assert (len(Script(s, 1, 15, '/').goto_definitions()) == 1) assert (len(Script(s, 1, 10, '/').goto_definitions()) == 1)
[ "def", "test_named_import", "(", ")", ":", "s", "=", "'import time as dt'", "assert", "(", "len", "(", "Script", "(", "s", ",", "1", ",", "15", ",", "'/'", ")", ".", "goto_definitions", "(", ")", ")", "==", "1", ")", "assert", "(", "len", "(", "Script", "(", "s", ",", "1", ",", "10", ",", "'/'", ")", ".", "goto_definitions", "(", ")", ")", "==", "1", ")" ]
named import - jedi-vim issue #8 .
train
false