Columns:
  id_within_dataset      int64    values 1 – 55.5k
  snippet                string   lengths 19 – 14.2k
  tokens                 list     lengths 6 – 1.63k
  nl                     string   lengths 6 – 352
  split_within_dataset   string   1 class
  is_duplicated          bool     2 classes
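Each record in the dump below pairs one Python function (snippet) with its token list (tokens), a short natural-language summary (nl), a split label, and a duplication flag. As a minimal sketch of how such records could be iterated, assuming a JSON Lines file with these columns (the file name and layout are illustrative assumptions, not part of this dump):

import json

# Hypothetical layout: one JSON object per line, with the columns listed above.
with open('snippets.jsonl', encoding='utf-8') as fh:
    for line in fh:
        record = json.loads(line)
        # Fields per the column summary: id_within_dataset (int), snippet (str),
        # tokens (list of str), nl (str), split_within_dataset (str), is_duplicated (bool)
        if not record['is_duplicated']:
            print(record['id_within_dataset'], record['nl'])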
16,020
@depends('cx_Oracle', fallback_function=_cx_oracle_req)
def run_query(db, query):
    log.debug('run query on {0}: {1}'.format(db, query))
    conn = _connect(show_dbs(db)[db]['uri'])
    return conn.cursor().execute(query).fetchall()
[ "@", "depends", "(", "'cx_Oracle'", ",", "fallback_function", "=", "_cx_oracle_req", ")", "def", "run_query", "(", "db", ",", "query", ")", ":", "log", ".", "debug", "(", "'run query on {0}: {1}'", ".", "format", "(", "db", ",", "query", ")", ")", "conn", "=", "_connect", "(", "show_dbs", "(", "db", ")", "[", "db", "]", "[", "'uri'", "]", ")", "return", "conn", ".", "cursor", "(", ")", ".", "execute", "(", "query", ")", ".", "fetchall", "(", ")" ]
run sql query and return result cli example: .
train
false
16,021
def _rods_strerror(errno):
    if (not hasattr(irods, '__rods_strerror_map')):
        irods.__rods_strerror_map = {}
        for name in dir(irods):
            v = getattr(irods, name)
            if ((type(v) == int) and (v < 0)):
                irods.__rods_strerror_map[v] = name
    return irods.__rods_strerror_map.get(errno, 'GALAXY_NO_ERRNO_MAPPING_FOUND')
[ "def", "_rods_strerror", "(", "errno", ")", ":", "if", "(", "not", "hasattr", "(", "irods", ",", "'__rods_strerror_map'", ")", ")", ":", "irods", ".", "__rods_strerror_map", "=", "{", "}", "for", "name", "in", "dir", "(", "irods", ")", ":", "v", "=", "getattr", "(", "irods", ",", "name", ")", "if", "(", "(", "type", "(", "v", ")", "==", "int", ")", "and", "(", "v", "<", "0", ")", ")", ":", "irods", ".", "__rods_strerror_map", "[", "v", "]", "=", "name", "return", "irods", ".", "__rods_strerror_map", ".", "get", "(", "errno", ",", "'GALAXY_NO_ERRNO_MAPPING_FOUND'", ")" ]
the missing strerror for irods error codes .
train
false
16,023
@pytest.fixture
def quickmark_manager_stub(stubs):
    stub = stubs.QuickmarkManagerStub()
    objreg.register('quickmark-manager', stub)
    (yield stub)
    objreg.delete('quickmark-manager')
[ "@", "pytest", ".", "fixture", "def", "quickmark_manager_stub", "(", "stubs", ")", ":", "stub", "=", "stubs", ".", "QuickmarkManagerStub", "(", ")", "objreg", ".", "register", "(", "'quickmark-manager'", ",", "stub", ")", "(", "yield", "stub", ")", "objreg", ".", "delete", "(", "'quickmark-manager'", ")" ]
fixture which provides a fake quickmark manager object .
train
false
16,025
def ensure_list(param):
    if (not param):
        param = []
    elif (not is_iterable(param)):
        param = [param]
    return param
[ "def", "ensure_list", "(", "param", ")", ":", "if", "(", "not", "param", ")", ":", "param", "=", "[", "]", "elif", "(", "not", "is_iterable", "(", "param", ")", ")", ":", "param", "=", "[", "param", "]", "return", "param" ]
wrap thing in a list if its a single str .
train
false
16,026
def enforce_required_arguments(module):
    missing_args = []
    for arg in ('min_size', 'max_size', 'launch_config_name'):
        if (module.params[arg] is None):
            missing_args.append(arg)
    if missing_args:
        module.fail_json(msg=('Missing required arguments for autoscaling group create/update: %s' % ','.join(missing_args)))
[ "def", "enforce_required_arguments", "(", "module", ")", ":", "missing_args", "=", "[", "]", "for", "arg", "in", "(", "'min_size'", ",", "'max_size'", ",", "'launch_config_name'", ")", ":", "if", "(", "module", ".", "params", "[", "arg", "]", "is", "None", ")", ":", "missing_args", ".", "append", "(", "arg", ")", "if", "missing_args", ":", "module", ".", "fail_json", "(", "msg", "=", "(", "'Missing required arguments for autoscaling group create/update: %s'", "%", "','", ".", "join", "(", "missing_args", ")", ")", ")" ]
as many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module .
train
false
16,027
def can_eliminate_repository_dependency(metadata_dict, tool_shed_url, name, owner):
    rd_dict = metadata_dict.get('repository_dependencies', {})
    rd_tups = rd_dict.get('repository_dependencies', [])
    for rd_tup in rd_tups:
        (tsu, n, o, none1, none2, none3) = common_util.parse_repository_dependency_tuple(rd_tup)
        if ((tsu == tool_shed_url) and (n == name) and (o == owner)):
            return False
    return True
[ "def", "can_eliminate_repository_dependency", "(", "metadata_dict", ",", "tool_shed_url", ",", "name", ",", "owner", ")", ":", "rd_dict", "=", "metadata_dict", ".", "get", "(", "'repository_dependencies'", ",", "{", "}", ")", "rd_tups", "=", "rd_dict", ".", "get", "(", "'repository_dependencies'", ",", "[", "]", ")", "for", "rd_tup", "in", "rd_tups", ":", "(", "tsu", ",", "n", ",", "o", ",", "none1", ",", "none2", ",", "none3", ")", "=", "common_util", ".", "parse_repository_dependency_tuple", "(", "rd_tup", ")", "if", "(", "(", "tsu", "==", "tool_shed_url", ")", "and", "(", "n", "==", "name", ")", "and", "(", "o", "==", "owner", ")", ")", ":", "return", "False", "return", "True" ]
determine if the relationship between a repository_dependency record associated with a tool_shed_repository record on the galaxy side can be eliminated .
train
false
16,028
def makeXMLTags(tagStr):
    return _makeTags(tagStr, True)
[ "def", "makeXMLTags", "(", "tagStr", ")", ":", "return", "_makeTags", "(", "tagStr", ",", "True", ")" ]
helper to construct opening and closing tag expressions for xml .
train
false
16,029
def send_refund_notification(course_enrollment, refund_ids):
    tags = [u'auto_refund']
    if theming_helpers.is_request_in_themed_site():
        raise NotImplementedError(u'Unable to send refund processing emails to support teams.')
    student = course_enrollment.user
    subject = _(u'[Refund] User-Requested Refund')
    body = generate_refund_notification_body(student, refund_ids)
    requester_name = (student.profile.name or student.username)
    create_zendesk_ticket(requester_name, student.email, subject, body, tags)
[ "def", "send_refund_notification", "(", "course_enrollment", ",", "refund_ids", ")", ":", "tags", "=", "[", "u'auto_refund'", "]", "if", "theming_helpers", ".", "is_request_in_themed_site", "(", ")", ":", "raise", "NotImplementedError", "(", "u'Unable to send refund processing emails to support teams.'", ")", "student", "=", "course_enrollment", ".", "user", "subject", "=", "_", "(", "u'[Refund] User-Requested Refund'", ")", "body", "=", "generate_refund_notification_body", "(", "student", ",", "refund_ids", ")", "requester_name", "=", "(", "student", ".", "profile", ".", "name", "or", "student", ".", "username", ")", "create_zendesk_ticket", "(", "requester_name", ",", "student", ".", "email", ",", "subject", ",", "body", ",", "tags", ")" ]
notify the support team of the refund request .
train
false
16,030
@not_implemented_for('directed')
def global_efficiency(G):
    n = len(G)
    denom = (n * (n - 1))
    return (sum((efficiency(G, u, v) for (u, v) in permutations(G, 2))) / denom)
[ "@", "not_implemented_for", "(", "'directed'", ")", "def", "global_efficiency", "(", "G", ")", ":", "n", "=", "len", "(", "G", ")", "denom", "=", "(", "n", "*", "(", "n", "-", "1", ")", ")", "return", "(", "sum", "(", "(", "efficiency", "(", "G", ",", "u", ",", "v", ")", "for", "(", "u", ",", "v", ")", "in", "permutations", "(", "G", ",", "2", ")", ")", ")", "/", "denom", ")" ]
returns the average global efficiency of the graph .
train
false
16,031
@memoize
def _construct_uri(profile):
    return 'http://{host}:{port}'.format(**profile)
[ "@", "memoize", "def", "_construct_uri", "(", "profile", ")", ":", "return", "'http://{host}:{port}'", ".", "format", "(", "**", "profile", ")" ]
examine configuration and return a uri for the couchdb server in the following format: .
train
false
16,033
def describe_enum(enum_definition):
    enum_descriptor = EnumDescriptor()
    enum_descriptor.name = enum_definition.definition_name().split('.')[(-1)]
    values = []
    for number in enum_definition.numbers():
        value = enum_definition.lookup_by_number(number)
        values.append(describe_enum_value(value))
    if values:
        enum_descriptor.values = values
    return enum_descriptor
[ "def", "describe_enum", "(", "enum_definition", ")", ":", "enum_descriptor", "=", "EnumDescriptor", "(", ")", "enum_descriptor", ".", "name", "=", "enum_definition", ".", "definition_name", "(", ")", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", "values", "=", "[", "]", "for", "number", "in", "enum_definition", ".", "numbers", "(", ")", ":", "value", "=", "enum_definition", ".", "lookup_by_number", "(", "number", ")", "values", ".", "append", "(", "describe_enum_value", "(", "value", ")", ")", "if", "values", ":", "enum_descriptor", ".", "values", "=", "values", "return", "enum_descriptor" ]
build descriptor for enum class .
train
true
16,036
def test_discard_invalid_filenames():
    runner = Runner(join(abspath(dirname(__file__)), 'invalid_module_name'), verbosity=0)
    runner.run()
[ "def", "test_discard_invalid_filenames", "(", ")", ":", "runner", "=", "Runner", "(", "join", "(", "abspath", "(", "dirname", "(", "__file__", ")", ")", ",", "'invalid_module_name'", ")", ",", "verbosity", "=", "0", ")", "runner", ".", "run", "(", ")" ]
if a module has a invalid file name .
train
false
16,037
def _git_config(cwd, user, password):
    contextkey = ('git.config.' + cwd)
    if (contextkey not in __context__):
        git_dir = rev_parse(cwd, opts=['--git-dir'], user=user, password=password, ignore_retcode=True)
        if (not os.path.isabs(git_dir)):
            paths = (cwd, git_dir, 'config')
        else:
            paths = (git_dir, 'config')
        __context__[contextkey] = os.path.join(*paths)
    return __context__[contextkey]
[ "def", "_git_config", "(", "cwd", ",", "user", ",", "password", ")", ":", "contextkey", "=", "(", "'git.config.'", "+", "cwd", ")", "if", "(", "contextkey", "not", "in", "__context__", ")", ":", "git_dir", "=", "rev_parse", "(", "cwd", ",", "opts", "=", "[", "'--git-dir'", "]", ",", "user", "=", "user", ",", "password", "=", "password", ",", "ignore_retcode", "=", "True", ")", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "git_dir", ")", ")", ":", "paths", "=", "(", "cwd", ",", "git_dir", ",", "'config'", ")", "else", ":", "paths", "=", "(", "git_dir", ",", "'config'", ")", "__context__", "[", "contextkey", "]", "=", "os", ".", "path", ".", "join", "(", "*", "paths", ")", "return", "__context__", "[", "contextkey", "]" ]
helper to retrieve git config options .
train
true
16,038
def dump_crl(type, crl):
    bio = _new_mem_buf()
    if (type == FILETYPE_PEM):
        ret = _lib.PEM_write_bio_X509_CRL(bio, crl._crl)
    elif (type == FILETYPE_ASN1):
        ret = _lib.i2d_X509_CRL_bio(bio, crl._crl)
    elif (type == FILETYPE_TEXT):
        ret = _lib.X509_CRL_print(bio, crl._crl)
    else:
        raise ValueError('type argument must be FILETYPE_PEM, FILETYPE_ASN1, or FILETYPE_TEXT')
    assert (ret == 1)
    return _bio_to_string(bio)
[ "def", "dump_crl", "(", "type", ",", "crl", ")", ":", "bio", "=", "_new_mem_buf", "(", ")", "if", "(", "type", "==", "FILETYPE_PEM", ")", ":", "ret", "=", "_lib", ".", "PEM_write_bio_X509_CRL", "(", "bio", ",", "crl", ".", "_crl", ")", "elif", "(", "type", "==", "FILETYPE_ASN1", ")", ":", "ret", "=", "_lib", ".", "i2d_X509_CRL_bio", "(", "bio", ",", "crl", ".", "_crl", ")", "elif", "(", "type", "==", "FILETYPE_TEXT", ")", ":", "ret", "=", "_lib", ".", "X509_CRL_print", "(", "bio", ",", "crl", ".", "_crl", ")", "else", ":", "raise", "ValueError", "(", "'type argument must be FILETYPE_PEM, FILETYPE_ASN1, or FILETYPE_TEXT'", ")", "assert", "(", "ret", "==", "1", ")", "return", "_bio_to_string", "(", "bio", ")" ]
dump a certificate revocation list to a buffer .
train
true
16,039
def test_datasource_untouched():
    original_data = copy.deepcopy(MEMORY_DATA)
    table = UnorderedTable(MEMORY_DATA)
    table.order_by = u'i'
    list(table.rows)
    assert (MEMORY_DATA == original_data)
    table = UnorderedTable(MEMORY_DATA)
    table.order_by = u'beta'
    list(table.rows)
    assert (MEMORY_DATA == original_data)
[ "def", "test_datasource_untouched", "(", ")", ":", "original_data", "=", "copy", ".", "deepcopy", "(", "MEMORY_DATA", ")", "table", "=", "UnorderedTable", "(", "MEMORY_DATA", ")", "table", ".", "order_by", "=", "u'i'", "list", "(", "table", ".", "rows", ")", "assert", "(", "MEMORY_DATA", "==", "original_data", ")", "table", "=", "UnorderedTable", "(", "MEMORY_DATA", ")", "table", ".", "order_by", "=", "u'beta'", "list", "(", "table", ".", "rows", ")", "assert", "(", "MEMORY_DATA", "==", "original_data", ")" ]
ensure that data that is provided to the table is not modified by table operations .
train
false
16,040
def test_performance():
    clock = task.Clock()
    call = task.LoopingCall((lambda : None))
    call.clock = clock
    call.start(0.1)
    clock.advance(1000000)
[ "def", "test_performance", "(", ")", ":", "clock", "=", "task", ".", "Clock", "(", ")", "call", "=", "task", ".", "LoopingCall", "(", "(", "lambda", ":", "None", ")", ")", "call", ".", "clock", "=", "clock", "call", ".", "start", "(", "0.1", ")", "clock", ".", "advance", "(", "1000000", ")" ]
l{loopingcall} should not take long to skip a lot of iterations .
train
false
16,041
def border_control_point():
    def prep(r):
        s3db.gis_location_filter(r)
        if r.interactive:
            if r.component:
                if (r.component.name == 'human_resource'):
                    s3db.org_site_staff_config(r)
                elif (r.component.name == 'inv_item'):
                    s3db.configure('inv_inv_item', create=False, deletable=False, editable=False, listadd=False)
        return True
    s3.prep = prep
    return s3_rest_controller(rheader=s3db.transport_rheader)
[ "def", "border_control_point", "(", ")", ":", "def", "prep", "(", "r", ")", ":", "s3db", ".", "gis_location_filter", "(", "r", ")", "if", "r", ".", "interactive", ":", "if", "r", ".", "component", ":", "if", "(", "r", ".", "component", ".", "name", "==", "'human_resource'", ")", ":", "s3db", ".", "org_site_staff_config", "(", "r", ")", "elif", "(", "r", ".", "component", ".", "name", "==", "'inv_item'", ")", ":", "s3db", ".", "configure", "(", "'inv_inv_item'", ",", "create", "=", "False", ",", "deletable", "=", "False", ",", "editable", "=", "False", ",", "listadd", "=", "False", ")", "return", "True", "s3", ".", "prep", "=", "prep", "return", "s3_rest_controller", "(", "rheader", "=", "s3db", ".", "transport_rheader", ")" ]
border control points - restful crud controller .
train
false
16,042
def on_plugin_shutdown(config):
    pass
[ "def", "on_plugin_shutdown", "(", "config", ")", ":", "pass" ]
called before plugin is stopped .
train
false
16,043
def verify_compliance(filename):
    hdulist = fits.open(filename)
    try:
        hdulist.verify('exception')
    except fits.VerifyError as exc:
        log.warning('NONCOMPLIANT {!r} .. {}'.format(filename), str(exc).replace('\n', ' '))
        return 1
    return 0
[ "def", "verify_compliance", "(", "filename", ")", ":", "hdulist", "=", "fits", ".", "open", "(", "filename", ")", "try", ":", "hdulist", ".", "verify", "(", "'exception'", ")", "except", "fits", ".", "VerifyError", "as", "exc", ":", "log", ".", "warning", "(", "'NONCOMPLIANT {!r} .. {}'", ".", "format", "(", "filename", ")", ",", "str", "(", "exc", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ")", "return", "1", "return", "0" ]
check for fits standard compliance .
train
false
16,044
def get_recently_published_exp_summary_dicts(limit):
    recently_published_exploration_summaries = [exp_summary for exp_summary in exp_services.get_recently_published_exp_summaries(limit).values()]
    summaries = sorted(recently_published_exploration_summaries, key=(lambda exp_summary: exp_summary.first_published_msec), reverse=True)
    return get_displayable_exp_summary_dicts(summaries)
[ "def", "get_recently_published_exp_summary_dicts", "(", "limit", ")", ":", "recently_published_exploration_summaries", "=", "[", "exp_summary", "for", "exp_summary", "in", "exp_services", ".", "get_recently_published_exp_summaries", "(", "limit", ")", ".", "values", "(", ")", "]", "summaries", "=", "sorted", "(", "recently_published_exploration_summaries", ",", "key", "=", "(", "lambda", "exp_summary", ":", "exp_summary", ".", "first_published_msec", ")", ",", "reverse", "=", "True", ")", "return", "get_displayable_exp_summary_dicts", "(", "summaries", ")" ]
returns a list of recently published explorations with the given language code .
train
false
16,045
def get_cluster_manager(config_file=None, cache=False):
    cfg = get_config(config_file, cache)
    return cfg.get_cluster_manager()
[ "def", "get_cluster_manager", "(", "config_file", "=", "None", ",", "cache", "=", "False", ")", ":", "cfg", "=", "get_config", "(", "config_file", ",", "cache", ")", "return", "cfg", ".", "get_cluster_manager", "(", ")" ]
factory for clustermanager class that attempts to load aws credentials from the starcluster config file .
train
false
16,046
def get_real_method(obj, name):
    if inspect.isclass(obj):
        return None
    try:
        canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
    except Exception:
        return None
    if (canary is not None):
        return None
    try:
        m = getattr(obj, name, None)
    except Exception:
        return None
    if callable(m):
        return m
    return None
[ "def", "get_real_method", "(", "obj", ",", "name", ")", ":", "if", "inspect", ".", "isclass", "(", "obj", ")", ":", "return", "None", "try", ":", "canary", "=", "getattr", "(", "obj", ",", "'_ipython_canary_method_should_not_exist_'", ",", "None", ")", "except", "Exception", ":", "return", "None", "if", "(", "canary", "is", "not", "None", ")", ":", "return", "None", "try", ":", "m", "=", "getattr", "(", "obj", ",", "name", ",", "None", ")", "except", "Exception", ":", "return", "None", "if", "callable", "(", "m", ")", ":", "return", "m", "return", "None" ]
like getattr .
train
false
16,047
def matroska_bps_to_bitrate(bps):
    m = re.search(u'([\\d.]+)\\s*(\\D.*)', bps)
    if m:
        (bps, suffix) = m.groups()
        if (u'kbit' in suffix):
            return (float(bps) * 1024)
        elif (u'kbyte' in suffix):
            return ((float(bps) * 1024) * 8)
        elif (u'byte' in suffix):
            return (float(bps) * 8)
        elif ((u'bps' in suffix) or (u'bit' in suffix)):
            return float(bps)
    if bps.replace(u'.', u'').isdigit():
        if (float(bps) < 30000):
            return (float(bps) * 1024)
        return float(bps)
[ "def", "matroska_bps_to_bitrate", "(", "bps", ")", ":", "m", "=", "re", ".", "search", "(", "u'([\\\\d.]+)\\\\s*(\\\\D.*)'", ",", "bps", ")", "if", "m", ":", "(", "bps", ",", "suffix", ")", "=", "m", ".", "groups", "(", ")", "if", "(", "u'kbit'", "in", "suffix", ")", ":", "return", "(", "float", "(", "bps", ")", "*", "1024", ")", "elif", "(", "u'kbyte'", "in", "suffix", ")", ":", "return", "(", "(", "float", "(", "bps", ")", "*", "1024", ")", "*", "8", ")", "elif", "(", "u'byte'", "in", "suffix", ")", ":", "return", "(", "float", "(", "bps", ")", "*", "8", ")", "elif", "(", "(", "u'bps'", "in", "suffix", ")", "or", "(", "u'bit'", "in", "suffix", ")", ")", ":", "return", "float", "(", "bps", ")", "if", "bps", ".", "replace", "(", "u'.'", ",", "u''", ")", ".", "isdigit", "(", ")", ":", "if", "(", "float", "(", "bps", ")", "<", "30000", ")", ":", "return", "(", "float", "(", "bps", ")", "*", "1024", ")", "return", "float", "(", "bps", ")" ]
tries to convert a free-form bps string into a bitrate .
train
false
16,049
def Plot(obj, ys=None, style='', **options):
    options = _UnderrideColor(options)
    label = getattr(obj, 'label', '_nolegend_')
    options = _Underride(options, linewidth=3, alpha=0.8, label=label)
    xs = obj
    if (ys is None):
        if hasattr(obj, 'Render'):
            (xs, ys) = obj.Render()
        if isinstance(obj, pandas.Series):
            ys = obj.values
            xs = obj.index
    if (ys is None):
        pyplot.plot(xs, style, **options)
    else:
        pyplot.plot(xs, ys, style, **options)
[ "def", "Plot", "(", "obj", ",", "ys", "=", "None", ",", "style", "=", "''", ",", "**", "options", ")", ":", "options", "=", "_UnderrideColor", "(", "options", ")", "label", "=", "getattr", "(", "obj", ",", "'label'", ",", "'_nolegend_'", ")", "options", "=", "_Underride", "(", "options", ",", "linewidth", "=", "3", ",", "alpha", "=", "0.8", ",", "label", "=", "label", ")", "xs", "=", "obj", "if", "(", "ys", "is", "None", ")", ":", "if", "hasattr", "(", "obj", ",", "'Render'", ")", ":", "(", "xs", ",", "ys", ")", "=", "obj", ".", "Render", "(", ")", "if", "isinstance", "(", "obj", ",", "pandas", ".", "Series", ")", ":", "ys", "=", "obj", ".", "values", "xs", "=", "obj", ".", "index", "if", "(", "ys", "is", "None", ")", ":", "pyplot", ".", "plot", "(", "xs", ",", "style", ",", "**", "options", ")", "else", ":", "pyplot", ".", "plot", "(", "xs", ",", "ys", ",", "style", ",", "**", "options", ")" ]
plots a line .
train
false
16,051
def _ensure_pynumpy():
    import warnings
    from . import numpy_support
    pyver = sys.version_info[:2]
    if ((pyver < (2, 7)) or ((3,) <= pyver < (3, 4))):
        raise ImportError('Numba needs Python 2.7 or greater, or 3.4 or greater')
    np_version = numpy_support.version[:2]
    if (np_version < (1, 7)):
        raise ImportError('Numba needs Numpy 1.7 or greater')
[ "def", "_ensure_pynumpy", "(", ")", ":", "import", "warnings", "from", ".", "import", "numpy_support", "pyver", "=", "sys", ".", "version_info", "[", ":", "2", "]", "if", "(", "(", "pyver", "<", "(", "2", ",", "7", ")", ")", "or", "(", "(", "3", ",", ")", "<=", "pyver", "<", "(", "3", ",", "4", ")", ")", ")", ":", "raise", "ImportError", "(", "'Numba needs Python 2.7 or greater, or 3.4 or greater'", ")", "np_version", "=", "numpy_support", ".", "version", "[", ":", "2", "]", "if", "(", "np_version", "<", "(", "1", ",", "7", ")", ")", ":", "raise", "ImportError", "(", "'Numba needs Numpy 1.7 or greater'", ")" ]
make sure python and numpy have supported versions .
train
false
16,052
def _parse_options(opts, delim):
    options = {}
    for opt in opts.split(delim):
        (key, val) = opt.split('=')
        if (key.lower() == 'readpreferencetags'):
            options.setdefault('readpreferencetags', []).append(val)
        else:
            if (str(key) in options):
                warnings.warn(('Duplicate URI option %s' % (str(key),)))
            options[str(key)] = unquote_plus(val)
    if ('wtimeout' in options):
        if ('wtimeoutMS' in options):
            options.pop('wtimeout')
        warnings.warn("Option wtimeout is deprecated, use 'wtimeoutMS' instead")
    return options
[ "def", "_parse_options", "(", "opts", ",", "delim", ")", ":", "options", "=", "{", "}", "for", "opt", "in", "opts", ".", "split", "(", "delim", ")", ":", "(", "key", ",", "val", ")", "=", "opt", ".", "split", "(", "'='", ")", "if", "(", "key", ".", "lower", "(", ")", "==", "'readpreferencetags'", ")", ":", "options", ".", "setdefault", "(", "'readpreferencetags'", ",", "[", "]", ")", ".", "append", "(", "val", ")", "else", ":", "if", "(", "str", "(", "key", ")", "in", "options", ")", ":", "warnings", ".", "warn", "(", "(", "'Duplicate URI option %s'", "%", "(", "str", "(", "key", ")", ",", ")", ")", ")", "options", "[", "str", "(", "key", ")", "]", "=", "unquote_plus", "(", "val", ")", "if", "(", "'wtimeout'", "in", "options", ")", ":", "if", "(", "'wtimeoutMS'", "in", "options", ")", ":", "options", ".", "pop", "(", "'wtimeout'", ")", "warnings", ".", "warn", "(", "\"Option wtimeout is deprecated, use 'wtimeoutMS' instead\"", ")", "return", "options" ]
helper method for split_options which creates the options dict .
train
true
16,053
def test_annotate_text_utf32_directly_index_into_unicode():
    test_string = u'a \xe3 \u0201 \U0001f636 b'
    result = analyze.analyze_syntax(test_string, encoding='UTF32')
    tokens = result['tokens']
    assert (tokens[0]['text']['content'] == 'a')
    offset = tokens[0]['text'].get('beginOffset', 0)
    assert (test_string[offset] == tokens[0]['text']['content'])
    assert (tokens[1]['text']['content'] == u'\xe3')
    offset = tokens[1]['text'].get('beginOffset', 0)
    assert (test_string[offset] == tokens[1]['text']['content'])
    assert (tokens[2]['text']['content'] == u'\u0201')
    offset = tokens[2]['text'].get('beginOffset', 0)
    assert (test_string[offset] == tokens[2]['text']['content'])
    assert (tokens[3]['text']['content'] == u'\U0001f636')
    offset = tokens[3]['text'].get('beginOffset', 0)
    assert (test_string[offset] == tokens[3]['text']['content'])
    assert (tokens[4]['text']['content'] == u'b')
    offset = tokens[4]['text'].get('beginOffset', 0)
    assert (test_string[offset] == tokens[4]['text']['content'])
[ "def", "test_annotate_text_utf32_directly_index_into_unicode", "(", ")", ":", "test_string", "=", "u'a \\xe3 \\u0201 \\U0001f636 b'", "result", "=", "analyze", ".", "analyze_syntax", "(", "test_string", ",", "encoding", "=", "'UTF32'", ")", "tokens", "=", "result", "[", "'tokens'", "]", "assert", "(", "tokens", "[", "0", "]", "[", "'text'", "]", "[", "'content'", "]", "==", "'a'", ")", "offset", "=", "tokens", "[", "0", "]", "[", "'text'", "]", ".", "get", "(", "'beginOffset'", ",", "0", ")", "assert", "(", "test_string", "[", "offset", "]", "==", "tokens", "[", "0", "]", "[", "'text'", "]", "[", "'content'", "]", ")", "assert", "(", "tokens", "[", "1", "]", "[", "'text'", "]", "[", "'content'", "]", "==", "u'\\xe3'", ")", "offset", "=", "tokens", "[", "1", "]", "[", "'text'", "]", ".", "get", "(", "'beginOffset'", ",", "0", ")", "assert", "(", "test_string", "[", "offset", "]", "==", "tokens", "[", "1", "]", "[", "'text'", "]", "[", "'content'", "]", ")", "assert", "(", "tokens", "[", "2", "]", "[", "'text'", "]", "[", "'content'", "]", "==", "u'\\u0201'", ")", "offset", "=", "tokens", "[", "2", "]", "[", "'text'", "]", ".", "get", "(", "'beginOffset'", ",", "0", ")", "assert", "(", "test_string", "[", "offset", "]", "==", "tokens", "[", "2", "]", "[", "'text'", "]", "[", "'content'", "]", ")", "assert", "(", "tokens", "[", "3", "]", "[", "'text'", "]", "[", "'content'", "]", "==", "u'\\U0001f636'", ")", "offset", "=", "tokens", "[", "3", "]", "[", "'text'", "]", ".", "get", "(", "'beginOffset'", ",", "0", ")", "assert", "(", "test_string", "[", "offset", "]", "==", "tokens", "[", "3", "]", "[", "'text'", "]", "[", "'content'", "]", ")", "assert", "(", "tokens", "[", "4", "]", "[", "'text'", "]", "[", "'content'", "]", "==", "u'b'", ")", "offset", "=", "tokens", "[", "4", "]", "[", "'text'", "]", ".", "get", "(", "'beginOffset'", ",", "0", ")", "assert", "(", "test_string", "[", "offset", "]", "==", "tokens", "[", "4", "]", "[", "'text'", "]", "[", "'content'", "]", ")" ]
demonstrate using offsets directly .
train
false
16,054
def ker_zeros(nt):
    if ((not isscalar(nt)) or (floor(nt) != nt) or (nt <= 0)):
        raise ValueError('nt must be positive integer scalar.')
    return specfun.klvnzo(nt, 3)
[ "def", "ker_zeros", "(", "nt", ")", ":", "if", "(", "(", "not", "isscalar", "(", "nt", ")", ")", "or", "(", "floor", "(", "nt", ")", "!=", "nt", ")", "or", "(", "nt", "<=", "0", ")", ")", ":", "raise", "ValueError", "(", "'nt must be positive integer scalar.'", ")", "return", "specfun", ".", "klvnzo", "(", "nt", ",", "3", ")" ]
compute nt zeros of the kelvin function ker(x) .
train
false
16,057
def _import_pydot():
    import pydot
    if (parse_version(pydot.__version__) < parse_version(PYDOT_VERSION_MIN)):
        raise ImportError(('pydot %s < %s' % (pydot.__version__, PYDOT_VERSION_MIN)))
    return pydot
[ "def", "_import_pydot", "(", ")", ":", "import", "pydot", "if", "(", "parse_version", "(", "pydot", ".", "__version__", ")", "<", "parse_version", "(", "PYDOT_VERSION_MIN", ")", ")", ":", "raise", "ImportError", "(", "(", "'pydot %s < %s'", "%", "(", "pydot", ".", "__version__", ",", "PYDOT_VERSION_MIN", ")", ")", ")", "return", "pydot" ]
import and return the pydot module if the currently installed version of this module satisfies networkx requirements _or_ raise an exception .
train
false
16,059
def getWithoutIntersections(loop):
    lastLoopLength = len(loop)
    while (lastLoopLength > 3):
        removeIntersection(loop)
        if (len(loop) == lastLoopLength):
            return loop
        lastLoopLength = len(loop)
    return loop
[ "def", "getWithoutIntersections", "(", "loop", ")", ":", "lastLoopLength", "=", "len", "(", "loop", ")", "while", "(", "lastLoopLength", ">", "3", ")", ":", "removeIntersection", "(", "loop", ")", "if", "(", "len", "(", "loop", ")", "==", "lastLoopLength", ")", ":", "return", "loop", "lastLoopLength", "=", "len", "(", "loop", ")", "return", "loop" ]
get loop without intersections .
train
false
16,060
def _StaticFilePathRe(url_map):
    handler_type = url_map.GetHandlerType()
    if (handler_type == 'static_files'):
        return (url_map.upload + '$')
    elif (handler_type == 'static_dir'):
        path = url_map.static_dir.rstrip(os.path.sep)
        return ((path + re.escape(os.path.sep)) + '(.*)')
    assert False, 'This property only applies to static handlers.'
[ "def", "_StaticFilePathRe", "(", "url_map", ")", ":", "handler_type", "=", "url_map", ".", "GetHandlerType", "(", ")", "if", "(", "handler_type", "==", "'static_files'", ")", ":", "return", "(", "url_map", ".", "upload", "+", "'$'", ")", "elif", "(", "handler_type", "==", "'static_dir'", ")", ":", "path", "=", "url_map", ".", "static_dir", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")", "return", "(", "(", "path", "+", "re", ".", "escape", "(", "os", ".", "path", ".", "sep", ")", ")", "+", "'(.*)'", ")", "assert", "False", ",", "'This property only applies to static handlers.'" ]
returns a regular expression string that matches static file paths .
train
false
16,061
def _get_oldest_tweet(locale, n=0):
    try:
        return Tweet.objects.filter(locale=locale).order_by('-created')[n]
    except IndexError:
        return None
[ "def", "_get_oldest_tweet", "(", "locale", ",", "n", "=", "0", ")", ":", "try", ":", "return", "Tweet", ".", "objects", ".", "filter", "(", "locale", "=", "locale", ")", ".", "order_by", "(", "'-created'", ")", "[", "n", "]", "except", "IndexError", ":", "return", "None" ]
returns the nth oldest tweet per locale .
train
false
16,062
def user_has_passed_entrance_exam(request, course):
    if (not course_has_entrance_exam(course)):
        return True
    if (not request.user.is_authenticated()):
        return False
    entrance_exam_score = get_entrance_exam_score(request, course)
    if (entrance_exam_score >= course.entrance_exam_minimum_score_pct):
        return True
    return False
[ "def", "user_has_passed_entrance_exam", "(", "request", ",", "course", ")", ":", "if", "(", "not", "course_has_entrance_exam", "(", "course", ")", ")", ":", "return", "True", "if", "(", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "False", "entrance_exam_score", "=", "get_entrance_exam_score", "(", "request", ",", "course", ")", "if", "(", "entrance_exam_score", ">=", "course", ".", "entrance_exam_minimum_score_pct", ")", ":", "return", "True", "return", "False" ]
checks to see if the user has attained a sufficient score to pass the exam begin by short-circuiting if the course does not have an entrance exam .
train
false
16,063
def _uuid_find(context, host, name_label):
    for i in objects.InstanceList.get_by_host(context, host):
        if (i.name == name_label):
            return i.uuid
    return None
[ "def", "_uuid_find", "(", "context", ",", "host", ",", "name_label", ")", ":", "for", "i", "in", "objects", ".", "InstanceList", ".", "get_by_host", "(", "context", ",", "host", ")", ":", "if", "(", "i", ".", "name", "==", "name_label", ")", ":", "return", "i", ".", "uuid", "return", "None" ]
return instance uuid by name_label .
train
false
16,066
def exception_context(e):
    if hasattr(e, '_context'):
        return e._context
[ "def", "exception_context", "(", "e", ")", ":", "if", "hasattr", "(", "e", ",", "'_context'", ")", ":", "return", "e", ".", "_context" ]
return the context of a given exception .
train
false
16,068
def is_matching(G, matching):
    if isinstance(matching, dict):
        matching = matching_dict_to_set(matching)
    return all(((len((set(e1) & set(e2))) == 0) for (e1, e2) in combinations(matching, 2)))
[ "def", "is_matching", "(", "G", ",", "matching", ")", ":", "if", "isinstance", "(", "matching", ",", "dict", ")", ":", "matching", "=", "matching_dict_to_set", "(", "matching", ")", "return", "all", "(", "(", "(", "len", "(", "(", "set", "(", "e1", ")", "&", "set", "(", "e2", ")", ")", ")", "==", "0", ")", "for", "(", "e1", ",", "e2", ")", "in", "combinations", "(", "matching", ",", "2", ")", ")", ")" ]
decides whether the given set or dictionary represents a valid matching in g .
train
false
16,069
def check_ownership(obj, raise_if_false=True):
    if (not obj):
        return False
    security_exception = utils.SupersetSecurityException(u"You don't have the rights to alter [{}]".format(obj))
    if g.user.is_anonymous():
        if raise_if_false:
            raise security_exception
        return False
    roles = (r.name for r in get_user_roles())
    if (u'Admin' in roles):
        return True
    session = db.create_scoped_session()
    orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()
    owner_names = (user.username for user in orig_obj.owners)
    if (hasattr(orig_obj, u'created_by') and orig_obj.created_by and (orig_obj.created_by.username == g.user.username)):
        return True
    if (hasattr(orig_obj, u'owners') and g.user and hasattr(g.user, u'username') and (g.user.username in owner_names)):
        return True
    if raise_if_false:
        raise security_exception
    else:
        return False
[ "def", "check_ownership", "(", "obj", ",", "raise_if_false", "=", "True", ")", ":", "if", "(", "not", "obj", ")", ":", "return", "False", "security_exception", "=", "utils", ".", "SupersetSecurityException", "(", "u\"You don't have the rights to alter [{}]\"", ".", "format", "(", "obj", ")", ")", "if", "g", ".", "user", ".", "is_anonymous", "(", ")", ":", "if", "raise_if_false", ":", "raise", "security_exception", "return", "False", "roles", "=", "(", "r", ".", "name", "for", "r", "in", "get_user_roles", "(", ")", ")", "if", "(", "u'Admin'", "in", "roles", ")", ":", "return", "True", "session", "=", "db", ".", "create_scoped_session", "(", ")", "orig_obj", "=", "session", ".", "query", "(", "obj", ".", "__class__", ")", ".", "filter_by", "(", "id", "=", "obj", ".", "id", ")", ".", "first", "(", ")", "owner_names", "=", "(", "user", ".", "username", "for", "user", "in", "orig_obj", ".", "owners", ")", "if", "(", "hasattr", "(", "orig_obj", ",", "u'created_by'", ")", "and", "orig_obj", ".", "created_by", "and", "(", "orig_obj", ".", "created_by", ".", "username", "==", "g", ".", "user", ".", "username", ")", ")", ":", "return", "True", "if", "(", "hasattr", "(", "orig_obj", ",", "u'owners'", ")", "and", "g", ".", "user", "and", "hasattr", "(", "g", ".", "user", ",", "u'username'", ")", "and", "(", "g", ".", "user", ".", "username", "in", "owner_names", ")", ")", ":", "return", "True", "if", "raise_if_false", ":", "raise", "security_exception", "else", ":", "return", "False" ]
a convenience function .
train
false
16,070
def member_definitions(old_resources, new_definition, num_resources, num_new, get_new_id, customise=_identity):
    old_resources = old_resources[(- num_resources):]
    num_create = (num_resources - len(old_resources))
    num_replace = (num_new - num_create)
    for i in range(num_resources):
        if (i < len(old_resources)):
            (old_name, old_definition) = old_resources[i]
            custom_definition = customise(old_name, new_definition)
            if ((old_definition != custom_definition) and (num_replace > 0)):
                num_replace -= 1
                (yield (old_name, custom_definition))
            else:
                (yield (old_name, old_definition))
        else:
            new_name = get_new_id()
            (yield (new_name, customise(new_name, new_definition)))
[ "def", "member_definitions", "(", "old_resources", ",", "new_definition", ",", "num_resources", ",", "num_new", ",", "get_new_id", ",", "customise", "=", "_identity", ")", ":", "old_resources", "=", "old_resources", "[", "(", "-", "num_resources", ")", ":", "]", "num_create", "=", "(", "num_resources", "-", "len", "(", "old_resources", ")", ")", "num_replace", "=", "(", "num_new", "-", "num_create", ")", "for", "i", "in", "range", "(", "num_resources", ")", ":", "if", "(", "i", "<", "len", "(", "old_resources", ")", ")", ":", "(", "old_name", ",", "old_definition", ")", "=", "old_resources", "[", "i", "]", "custom_definition", "=", "customise", "(", "old_name", ",", "new_definition", ")", "if", "(", "(", "old_definition", "!=", "custom_definition", ")", "and", "(", "num_replace", ">", "0", ")", ")", ":", "num_replace", "-=", "1", "(", "yield", "(", "old_name", ",", "custom_definition", ")", ")", "else", ":", "(", "yield", "(", "old_name", ",", "old_definition", ")", ")", "else", ":", "new_name", "=", "get_new_id", "(", ")", "(", "yield", "(", "new_name", ",", "customise", "(", "new_name", ",", "new_definition", ")", ")", ")" ]
iterate over resource definitions for a scaling group generates the definitions for the next change to the scaling group .
train
false
16,074
def _format_ipv6(a):
    left = []
    right = []
    current = 'left'
    for i in range(0, 16, 2):
        group = ((a[i] << 8) + a[(i + 1)])
        if (current == 'left'):
            if ((group == 0) and (i < 14)):
                if (((a[(i + 2)] << 8) + a[(i + 3)]) == 0):
                    current = 'right'
                else:
                    left.append('0')
            else:
                left.append(('%x' % group))
        elif ((group == 0) and (len(right) == 0)):
            pass
        else:
            right.append(('%x' % group))
    if (len(left) < 8):
        return ((':'.join(left) + '::') + ':'.join(right))
    else:
        return ':'.join(left)
[ "def", "_format_ipv6", "(", "a", ")", ":", "left", "=", "[", "]", "right", "=", "[", "]", "current", "=", "'left'", "for", "i", "in", "range", "(", "0", ",", "16", ",", "2", ")", ":", "group", "=", "(", "(", "a", "[", "i", "]", "<<", "8", ")", "+", "a", "[", "(", "i", "+", "1", ")", "]", ")", "if", "(", "current", "==", "'left'", ")", ":", "if", "(", "(", "group", "==", "0", ")", "and", "(", "i", "<", "14", ")", ")", ":", "if", "(", "(", "(", "a", "[", "(", "i", "+", "2", ")", "]", "<<", "8", ")", "+", "a", "[", "(", "i", "+", "3", ")", "]", ")", "==", "0", ")", ":", "current", "=", "'right'", "else", ":", "left", ".", "append", "(", "'0'", ")", "else", ":", "left", ".", "append", "(", "(", "'%x'", "%", "group", ")", ")", "elif", "(", "(", "group", "==", "0", ")", "and", "(", "len", "(", "right", ")", "==", "0", ")", ")", ":", "pass", "else", ":", "right", ".", "append", "(", "(", "'%x'", "%", "group", ")", ")", "if", "(", "len", "(", "left", ")", "<", "8", ")", ":", "return", "(", "(", "':'", ".", "join", "(", "left", ")", "+", "'::'", ")", "+", "':'", ".", "join", "(", "right", ")", ")", "else", ":", "return", "':'", ".", "join", "(", "left", ")" ]
format ipv6 address compressing sequence of zero bytes to :: .
train
false
16,075
def shortest_augmenting_path(G, s, t, capacity='capacity', residual=None, value_only=False, two_phase=False, cutoff=None):
    R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff)
    R.graph['algorithm'] = 'shortest_augmenting_path'
    return R
[ "def", "shortest_augmenting_path", "(", "G", ",", "s", ",", "t", ",", "capacity", "=", "'capacity'", ",", "residual", "=", "None", ",", "value_only", "=", "False", ",", "two_phase", "=", "False", ",", "cutoff", "=", "None", ")", ":", "R", "=", "shortest_augmenting_path_impl", "(", "G", ",", "s", ",", "t", ",", "capacity", ",", "residual", ",", "two_phase", ",", "cutoff", ")", "R", ".", "graph", "[", "'algorithm'", "]", "=", "'shortest_augmenting_path'", "return", "R" ]
find a maximum single-commodity flow using the shortest augmenting path algorithm .
train
false
16,076
def module_name_for_filename(filename):
    all_segments = filename.split(os.sep)
    path_elements = all_segments[:(-1)]
    module_elements = [all_segments[(-1)].rsplit('.', 1)[0]]
    while True:
        init_path = os.path.join(*(path_elements + ['__init__.py']))
        if (path_elements[0] is ''):
            init_path = ('/' + init_path)
        if os.path.exists(init_path):
            module_elements.insert(0, path_elements.pop())
        else:
            break
    modulename = '.'.join(module_elements)
    basename = '/'.join(path_elements)
    return (basename, modulename)
[ "def", "module_name_for_filename", "(", "filename", ")", ":", "all_segments", "=", "filename", ".", "split", "(", "os", ".", "sep", ")", "path_elements", "=", "all_segments", "[", ":", "(", "-", "1", ")", "]", "module_elements", "=", "[", "all_segments", "[", "(", "-", "1", ")", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "]", "while", "True", ":", "init_path", "=", "os", ".", "path", ".", "join", "(", "*", "(", "path_elements", "+", "[", "'__init__.py'", "]", ")", ")", "if", "(", "path_elements", "[", "0", "]", "is", "''", ")", ":", "init_path", "=", "(", "'/'", "+", "init_path", ")", "if", "os", ".", "path", ".", "exists", "(", "init_path", ")", ":", "module_elements", ".", "insert", "(", "0", ",", "path_elements", ".", "pop", "(", ")", ")", "else", ":", "break", "modulename", "=", "'.'", ".", "join", "(", "module_elements", ")", "basename", "=", "'/'", ".", "join", "(", "path_elements", ")", "return", "(", "basename", ",", "modulename", ")" ]
given the name of a python file .
train
false
16,077
def shellglob(args):
    expanded = []
    unescape = (unescape_glob if (sys.platform != 'win32') else (lambda x: x))
    for a in args:
        expanded.extend((glob.glob(a) or [unescape(a)]))
    return expanded
[ "def", "shellglob", "(", "args", ")", ":", "expanded", "=", "[", "]", "unescape", "=", "(", "unescape_glob", "if", "(", "sys", ".", "platform", "!=", "'win32'", ")", "else", "(", "lambda", "x", ":", "x", ")", ")", "for", "a", "in", "args", ":", "expanded", ".", "extend", "(", "(", "glob", ".", "glob", "(", "a", ")", "or", "[", "unescape", "(", "a", ")", "]", ")", ")", "return", "expanded" ]
do glob expansion for each element in args and return a flattened list .
train
false
16,078
def exit_tf(sess=None):
    text = '[tl] Close tensorboard and nvidia-process if available'
    sess.close()
    if ((_platform == 'linux') or (_platform == 'linux2')):
        print ('linux: %s' % text)
        os.system('nvidia-smi')
        os.system('fuser 6006/tcp -k')
        os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill")
    elif (_platform == 'darwin'):
        print ('OS X: %s' % text)
        os.system("lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill")
    elif (_platform == 'win32'):
        print ('Windows: %s' % text)
    else:
        print _platform
    exit()
[ "def", "exit_tf", "(", "sess", "=", "None", ")", ":", "text", "=", "'[tl] Close tensorboard and nvidia-process if available'", "sess", ".", "close", "(", ")", "if", "(", "(", "_platform", "==", "'linux'", ")", "or", "(", "_platform", "==", "'linux2'", ")", ")", ":", "print", "(", "'linux: %s'", "%", "text", ")", "os", ".", "system", "(", "'nvidia-smi'", ")", "os", ".", "system", "(", "'fuser 6006/tcp -k'", ")", "os", ".", "system", "(", "\"nvidia-smi | grep python |awk '{print $3}'|xargs kill\"", ")", "elif", "(", "_platform", "==", "'darwin'", ")", ":", "print", "(", "'OS X: %s'", "%", "text", ")", "os", ".", "system", "(", "\"lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill\"", ")", "elif", "(", "_platform", "==", "'win32'", ")", ":", "print", "(", "'Windows: %s'", "%", "text", ")", "else", ":", "print", "_platform", "exit", "(", ")" ]
close tensorboard and nvidia-process if available parameters sess : a session instance of tensorflow tensorflow session .
train
false
16,079
def do_UserChangePassword(po):
    UserChangePassword(_get_option(po, 'account_name_dn'), _get_option(po, 'password'))
    return 'Password changed OK'
[ "def", "do_UserChangePassword", "(", "po", ")", ":", "UserChangePassword", "(", "_get_option", "(", "po", ",", "'account_name_dn'", ")", ",", "_get_option", "(", "po", ",", "'password'", ")", ")", "return", "'Password changed OK'" ]
change the password for a specified user .
train
false
16,080
def get_fused_cname(fused_cname, orig_cname):
    assert (fused_cname and orig_cname)
    return StringEncoding.EncodedString(('%s%s%s' % (Naming.fused_func_prefix, fused_cname, orig_cname)))
[ "def", "get_fused_cname", "(", "fused_cname", ",", "orig_cname", ")", ":", "assert", "(", "fused_cname", "and", "orig_cname", ")", "return", "StringEncoding", ".", "EncodedString", "(", "(", "'%s%s%s'", "%", "(", "Naming", ".", "fused_func_prefix", ",", "fused_cname", ",", "orig_cname", ")", ")", ")" ]
given the fused cname id and an original cname .
train
false
16,081
def make_step_guided(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective_fn=objective_guide, **objective_params):
    src = net.blobs['data']
    dst = net.blobs[end]
    (ox, oy) = np.random.randint((- jitter), (jitter + 1), 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, (-1)), oy, (-2))
    net.forward(end=end)
    objective_fn(dst, **objective_params)
    net.backward(start=end)
    g = src.diff[0]
    src.data[:] += ((step_size / np.abs(g).mean()) * g)
    src.data[0] = np.roll(np.roll(src.data[0], (- ox), (-1)), (- oy), (-2))
    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, (- bias), (255 - bias))
[ "def", "make_step_guided", "(", "net", ",", "step_size", "=", "1.5", ",", "end", "=", "'inception_4c/output'", ",", "jitter", "=", "32", ",", "clip", "=", "True", ",", "objective_fn", "=", "objective_guide", ",", "**", "objective_params", ")", ":", "src", "=", "net", ".", "blobs", "[", "'data'", "]", "dst", "=", "net", ".", "blobs", "[", "end", "]", "(", "ox", ",", "oy", ")", "=", "np", ".", "random", ".", "randint", "(", "(", "-", "jitter", ")", ",", "(", "jitter", "+", "1", ")", ",", "2", ")", "src", ".", "data", "[", "0", "]", "=", "np", ".", "roll", "(", "np", ".", "roll", "(", "src", ".", "data", "[", "0", "]", ",", "ox", ",", "(", "-", "1", ")", ")", ",", "oy", ",", "(", "-", "2", ")", ")", "net", ".", "forward", "(", "end", "=", "end", ")", "objective_fn", "(", "dst", ",", "**", "objective_params", ")", "net", ".", "backward", "(", "start", "=", "end", ")", "g", "=", "src", ".", "diff", "[", "0", "]", "src", ".", "data", "[", ":", "]", "+=", "(", "(", "step_size", "/", "np", ".", "abs", "(", "g", ")", ".", "mean", "(", ")", ")", "*", "g", ")", "src", ".", "data", "[", "0", "]", "=", "np", ".", "roll", "(", "np", ".", "roll", "(", "src", ".", "data", "[", "0", "]", ",", "(", "-", "ox", ")", ",", "(", "-", "1", ")", ")", ",", "(", "-", "oy", ")", ",", "(", "-", "2", ")", ")", "if", "clip", ":", "bias", "=", "net", ".", "transformer", ".", "mean", "[", "'data'", "]", "src", ".", "data", "[", ":", "]", "=", "np", ".", "clip", "(", "src", ".", "data", ",", "(", "-", "bias", ")", ",", "(", "255", "-", "bias", ")", ")" ]
basic gradient ascent step .
train
true
16,082
def select_nth_last_of_type(cache, function, elem):
    (a, b) = function.parsed_arguments
    try:
        num = (cache.sibling_count(elem, before=False, same_type=True) + 1)
    except ValueError:
        return False
    if (a == 0):
        return (num == b)
    n = ((num - b) / a)
    return (n.is_integer() and (n > (-1)))
[ "def", "select_nth_last_of_type", "(", "cache", ",", "function", ",", "elem", ")", ":", "(", "a", ",", "b", ")", "=", "function", ".", "parsed_arguments", "try", ":", "num", "=", "(", "cache", ".", "sibling_count", "(", "elem", ",", "before", "=", "False", ",", "same_type", "=", "True", ")", "+", "1", ")", "except", "ValueError", ":", "return", "False", "if", "(", "a", "==", "0", ")", ":", "return", "(", "num", "==", "b", ")", "n", "=", "(", "(", "num", "-", "b", ")", "/", "a", ")", "return", "(", "n", ".", "is_integer", "(", ")", "and", "(", "n", ">", "(", "-", "1", ")", ")", ")" ]
implement :nth-last-of-type() .
train
false
16,083
def p_command_if_bad(p):
    p[0] = 'BAD RELATIONAL EXPRESSION'
[ "def", "p_command_if_bad", "(", "p", ")", ":", "p", "[", "0", "]", "=", "'BAD RELATIONAL EXPRESSION'" ]
command : if error then integer .
train
false
16,084
def force_decode(string, encoding):
    if isinstance(string, bytes):
        try:
            if encoding:
                string = string.decode(encoding)
            else:
                string = string.decode('utf-8')
        except UnicodeError:
            string = string.decode('latin1')
    return string
[ "def", "force_decode", "(", "string", ",", "encoding", ")", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "try", ":", "if", "encoding", ":", "string", "=", "string", ".", "decode", "(", "encoding", ")", "else", ":", "string", "=", "string", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeError", ":", "string", "=", "string", ".", "decode", "(", "'latin1'", ")", "return", "string" ]
forcibly get a unicode string out of a bytestring .
train
false
16,085
def create_collection_index(collection, keys, ignore_created=True, ignore_created_opts=True, **kwargs):
    INDEX_ALREADY_EXISTS = 68
    INDEX_OPTIONS_CONFLICT = 85
    try:
        collection.create_index(keys, **kwargs)
    except pymongo.errors.OperationFailure as exc:
        errors_to_ignore = []
        if ignore_created:
            errors_to_ignore.append(INDEX_ALREADY_EXISTS)
        if ignore_created_opts:
            errors_to_ignore.append(INDEX_OPTIONS_CONFLICT)
        if (exc.code in errors_to_ignore):
            logger.warning("Existing index in collection '{}' remained unchanged!: {}".format(collection.full_name, exc.details['errmsg']))
        else:
            raise exc
[ "def", "create_collection_index", "(", "collection", ",", "keys", ",", "ignore_created", "=", "True", ",", "ignore_created_opts", "=", "True", ",", "**", "kwargs", ")", ":", "INDEX_ALREADY_EXISTS", "=", "68", "INDEX_OPTIONS_CONFLICT", "=", "85", "try", ":", "collection", ".", "create_index", "(", "keys", ",", "**", "kwargs", ")", "except", "pymongo", ".", "errors", ".", "OperationFailure", "as", "exc", ":", "errors_to_ignore", "=", "[", "]", "if", "ignore_created", ":", "errors_to_ignore", ".", "append", "(", "INDEX_ALREADY_EXISTS", ")", "if", "ignore_created_opts", ":", "errors_to_ignore", ".", "append", "(", "INDEX_OPTIONS_CONFLICT", ")", "if", "(", "exc", ".", "code", "in", "errors_to_ignore", ")", ":", "logger", ".", "warning", "(", "\"Existing index in collection '{}' remained unchanged!: {}\"", ".", "format", "(", "collection", ".", "full_name", ",", "exc", ".", "details", "[", "'errmsg'", "]", ")", ")", "else", ":", "raise", "exc" ]
create a mongodb index in a collection .
train
false
16,086
def rsa_crt_dmq1(private_exponent, q):
    return (private_exponent % (q - 1))
[ "def", "rsa_crt_dmq1", "(", "private_exponent", ",", "q", ")", ":", "return", "(", "private_exponent", "%", "(", "q", "-", "1", ")", ")" ]
compute the crt private_exponent % value from the rsa private_exponent and q .
train
false
16,087
def valid_privkey(privkey):
    try:
        return OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey).check()
    except (TypeError, OpenSSL.crypto.Error):
        return False
[ "def", "valid_privkey", "(", "privkey", ")", ":", "try", ":", "return", "OpenSSL", ".", "crypto", ".", "load_privatekey", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "privkey", ")", ".", "check", "(", ")", "except", "(", "TypeError", ",", "OpenSSL", ".", "crypto", ".", "Error", ")", ":", "return", "False" ]
is valid rsa private key? .
train
false
16,088
def load_multi_database(gb_filename_or_handle, gb_filename_or_handle2):
    TESTDB = create_database()
    db_name = 'biosql-test'
    db_name2 = 'biosql-test2'
    server = BioSeqDatabase.open_database(driver=DBDRIVER, user=DBUSER, passwd=DBPASSWD, host=DBHOST, db=TESTDB)
    db = server.new_database(db_name)
    iterator = SeqIO.parse(gb_filename_or_handle, 'gb')
    count = db.load(iterator)
    db = server.new_database(db_name2)
    iterator = SeqIO.parse(gb_filename_or_handle2, 'gb')
    count2 = db.load(iterator)
    server.commit()
    server.close()
    return (count + count2)
[ "def", "load_multi_database", "(", "gb_filename_or_handle", ",", "gb_filename_or_handle2", ")", ":", "TESTDB", "=", "create_database", "(", ")", "db_name", "=", "'biosql-test'", "db_name2", "=", "'biosql-test2'", "server", "=", "BioSeqDatabase", ".", "open_database", "(", "driver", "=", "DBDRIVER", ",", "user", "=", "DBUSER", ",", "passwd", "=", "DBPASSWD", ",", "host", "=", "DBHOST", ",", "db", "=", "TESTDB", ")", "db", "=", "server", ".", "new_database", "(", "db_name", ")", "iterator", "=", "SeqIO", ".", "parse", "(", "gb_filename_or_handle", ",", "'gb'", ")", "count", "=", "db", ".", "load", "(", "iterator", ")", "db", "=", "server", ".", "new_database", "(", "db_name2", ")", "iterator", "=", "SeqIO", ".", "parse", "(", "gb_filename_or_handle2", ",", "'gb'", ")", "count2", "=", "db", ".", "load", "(", "iterator", ")", "server", ".", "commit", "(", ")", "server", ".", "close", "(", ")", "return", "(", "count", "+", "count2", ")" ]
load two genbank files into a new biosql database as different subdatabases .
train
false
16,091
@register.simple_tag
def bootstrap_field(*args, **kwargs):
    return render_field(*args, **kwargs)
[ "@", "register", ".", "simple_tag", "def", "bootstrap_field", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "render_field", "(", "*", "args", ",", "**", "kwargs", ")" ]
render a field **tag name**:: bootstrap_field **parameters**: field the form field to be rendered layout if set to horizontal then the field and label will be rendered side-by-side .
train
false
16,092
def raises(*exceptions):
    valid = ' or '.join([e.__name__ for e in exceptions])
    def decorate(func):
        name = func.__name__
        def newfunc(*arg, **kw):
            try:
                func(*arg, **kw)
            except exceptions:
                pass
            else:
                message = '{}() did not raise {}'.format(name, valid)
                raise AssertionError(message)
        newfunc = make_decorator(func)(newfunc)
        return newfunc
    return decorate
[ "def", "raises", "(", "*", "exceptions", ")", ":", "valid", "=", "' or '", ".", "join", "(", "[", "e", ".", "__name__", "for", "e", "in", "exceptions", "]", ")", "def", "decorate", "(", "func", ")", ":", "name", "=", "func", ".", "__name__", "def", "newfunc", "(", "*", "arg", ",", "**", "kw", ")", ":", "try", ":", "func", "(", "*", "arg", ",", "**", "kw", ")", "except", "exceptions", ":", "pass", "else", ":", "message", "=", "'{}() did not raise {}'", ".", "format", "(", "name", ",", "valid", ")", "raise", "AssertionError", "(", "message", ")", "newfunc", "=", "make_decorator", "(", "func", ")", "(", "newfunc", ")", "return", "newfunc", "return", "decorate" ]
raise :exc:assertionerror if func does not raise *exc* .
train
true
16,093
def reduce_mtx(distmat, indices):
    return distmat.take(indices, 0).take(indices, 1)
[ "def", "reduce_mtx", "(", "distmat", ",", "indices", ")", ":", "return", "distmat", ".", "take", "(", "indices", ",", "0", ")", ".", "take", "(", "indices", ",", "1", ")" ]
returns rows .
train
false
16,095
def caa_art(album):
    if album.mb_albumid:
        (yield CAA_URL.format(mbid=album.mb_albumid))
    if album.mb_releasegroupid:
        (yield CAA_GROUP_URL.format(mbid=album.mb_releasegroupid))
[ "def", "caa_art", "(", "album", ")", ":", "if", "album", ".", "mb_albumid", ":", "(", "yield", "CAA_URL", ".", "format", "(", "mbid", "=", "album", ".", "mb_albumid", ")", ")", "if", "album", ".", "mb_releasegroupid", ":", "(", "yield", "CAA_GROUP_URL", ".", "format", "(", "mbid", "=", "album", ".", "mb_releasegroupid", ")", ")" ]
return the cover art archive and cover art archive release group urls using album musicbrainz release id and release group id .
train
false
16,099
def test_get_platform_returns_platform_system():
    p = helpers.platform.system
    helpers.platform.system = (lambda : 'Sega Saturn')
    assert (helpers.get_platform() == 'Sega Saturn')
    helpers.platform.system = p
[ "def", "test_get_platform_returns_platform_system", "(", ")", ":", "p", "=", "helpers", ".", "platform", ".", "system", "helpers", ".", "platform", ".", "system", "=", "(", "lambda", ":", "'Sega Saturn'", ")", "assert", "(", "helpers", ".", "get_platform", "(", ")", "==", "'Sega Saturn'", ")", "helpers", ".", "platform", ".", "system", "=", "p" ]
get_platform() returns platform .
train
false
16,100
def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False):
    for i in ((limit and range(limit)) or count()):
        try:
            (yield conn.drain_events(timeout=timeout))
        except socket.timeout:
            if (timeout and (not ignore_timeouts)):
                raise
[ "def", "eventloop", "(", "conn", ",", "limit", "=", "None", ",", "timeout", "=", "None", ",", "ignore_timeouts", "=", "False", ")", ":", "for", "i", "in", "(", "(", "limit", "and", "range", "(", "limit", ")", ")", "or", "count", "(", ")", ")", ":", "try", ":", "(", "yield", "conn", ".", "drain_events", "(", "timeout", "=", "timeout", ")", ")", "except", "socket", ".", "timeout", ":", "if", "(", "timeout", "and", "(", "not", "ignore_timeouts", ")", ")", ":", "raise" ]
best practice generator wrapper around connection .
train
false
16,101
def test_variable_names_are_pushed_to_module_scope(expected_attrs):
    from gooey.gui import image_repository
    assert all(((attr in image_repository.__dict__) for attr in expected_attrs))
[ "def", "test_variable_names_are_pushed_to_module_scope", "(", "expected_attrs", ")", ":", "from", "gooey", ".", "gui", "import", "image_repository", "assert", "all", "(", "(", "(", "attr", "in", "image_repository", ".", "__dict__", ")", "for", "attr", "in", "expected_attrs", ")", ")" ]
the dynamically initialized globals() should contain the expected images at runtime .
train
false
16,102
def tp_read(fd, n):
    return get_hub().threadpool.apply(_read, (fd, n))
[ "def", "tp_read", "(", "fd", ",", "n", ")", ":", "return", "get_hub", "(", ")", ".", "threadpool", ".", "apply", "(", "_read", ",", "(", "fd", ",", "n", ")", ")" ]
read up to n bytes from file descriptor fd .
train
false
16,103
def before_nearest_workday(dt):
    return previous_workday(nearest_workday(dt))
[ "def", "before_nearest_workday", "(", "dt", ")", ":", "return", "previous_workday", "(", "nearest_workday", "(", "dt", ")", ")" ]
returns previous workday after nearest workday .
train
false
16,105
def select_combinedselector(cache, combined):
    combinator = cache.combinator_mapping[combined.combinator]
    right = (None if (isinstance(combined.subselector, Element) and ((combined.subselector.element or u'*') == u'*')) else cache.iterparsedselector(combined.subselector))
    for item in cache.dispatch_map[combinator](cache, cache.iterparsedselector(combined.selector), right):
        (yield item)
[ "def", "select_combinedselector", "(", "cache", ",", "combined", ")", ":", "combinator", "=", "cache", ".", "combinator_mapping", "[", "combined", ".", "combinator", "]", "right", "=", "(", "None", "if", "(", "isinstance", "(", "combined", ".", "subselector", ",", "Element", ")", "and", "(", "(", "combined", ".", "subselector", ".", "element", "or", "u'*'", ")", "==", "u'*'", ")", ")", "else", "cache", ".", "iterparsedselector", "(", "combined", ".", "subselector", ")", ")", "for", "item", "in", "cache", ".", "dispatch_map", "[", "combinator", "]", "(", "cache", ",", "cache", ".", "iterparsedselector", "(", "combined", ".", "selector", ")", ",", "right", ")", ":", "(", "yield", "item", ")" ]
translate a combined selector .
train
false
16,107
def contract_schema(): _sync_repo(repo_name=CONTRACT_REPO)
[ "def", "contract_schema", "(", ")", ":", "_sync_repo", "(", "repo_name", "=", "CONTRACT_REPO", ")" ]
contract the database schema by syncing the contract repo .
train
false
16,108
def shard_chooser(mapper, instance, clause=None): if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location)
[ "def", "shard_chooser", "(", "mapper", ",", "instance", ",", "clause", "=", "None", ")", ":", "if", "isinstance", "(", "instance", ",", "WeatherLocation", ")", ":", "return", "shard_lookup", "[", "instance", ".", "continent", "]", "else", ":", "return", "shard_chooser", "(", "mapper", ",", "instance", ".", "location", ")" ]
shard chooser : return a shard id for the given instance based on its continent , recursing into its location for related objects .
train
false
16,109
def get_connections_current_ratio(name): try: result = ((float(get_value((NAME_PREFIX + 'connections_current'))) / float(get_value((NAME_PREFIX + 'connections_available')))) * 100) except ZeroDivisionError: result = 0 return result
[ "def", "get_connections_current_ratio", "(", "name", ")", ":", "try", ":", "result", "=", "(", "(", "float", "(", "get_value", "(", "(", "NAME_PREFIX", "+", "'connections_current'", ")", ")", ")", "/", "float", "(", "get_value", "(", "(", "NAME_PREFIX", "+", "'connections_available'", ")", ")", ")", ")", "*", "100", ")", "except", "ZeroDivisionError", ":", "result", "=", "0", "return", "result" ]
return the percentage of connections used .
train
false
16,110
def get_all_ips(): ips = set() interfaces = netifaces.interfaces() for interface in interfaces: addresses = netifaces.ifaddresses(interface) for address_family in (netifaces.AF_INET, netifaces.AF_INET6): family_addresses = addresses.get(address_family) if (not family_addresses): continue for address in family_addresses: ips.add(ipaddress_from_string(address['addr'])) return ips
[ "def", "get_all_ips", "(", ")", ":", "ips", "=", "set", "(", ")", "interfaces", "=", "netifaces", ".", "interfaces", "(", ")", "for", "interface", "in", "interfaces", ":", "addresses", "=", "netifaces", ".", "ifaddresses", "(", "interface", ")", "for", "address_family", "in", "(", "netifaces", ".", "AF_INET", ",", "netifaces", ".", "AF_INET6", ")", ":", "family_addresses", "=", "addresses", ".", "get", "(", "address_family", ")", "if", "(", "not", "family_addresses", ")", ":", "continue", "for", "address", "in", "family_addresses", ":", "ips", ".", "add", "(", "ipaddress_from_string", "(", "address", "[", "'addr'", "]", ")", ")", "return", "ips" ]
get all ipv4 and ipv6 addresses of the local network interfaces .
train
false
16,113
def client_end_all(): for (request, socket, context) in CLIENTS.values()[:]: client_end(request, socket, context)
[ "def", "client_end_all", "(", ")", ":", "for", "(", "request", ",", "socket", ",", "context", ")", "in", "CLIENTS", ".", "values", "(", ")", "[", ":", "]", ":", "client_end", "(", "request", ",", "socket", ",", "context", ")" ]
performs cleanup on all clients - called by runserver_socketio when the server is shut down or reloaded .
train
true
16,114
def filterSimilarKeywords(keyword, kwdsIterator): seenDict = {} kwdSndx = soundex(keyword.encode('ascii', 'ignore')) matches = [] matchesappend = matches.append checkContained = False if (len(keyword) > 4): checkContained = True for (movieID, key) in kwdsIterator: if (key in seenDict): continue seenDict[key] = None if (checkContained and (keyword in key)): matchesappend(key) continue if (kwdSndx == soundex(key.encode('ascii', 'ignore'))): matchesappend(key) return _sortKeywords(keyword, matches)
[ "def", "filterSimilarKeywords", "(", "keyword", ",", "kwdsIterator", ")", ":", "seenDict", "=", "{", "}", "kwdSndx", "=", "soundex", "(", "keyword", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ")", "matches", "=", "[", "]", "matchesappend", "=", "matches", ".", "append", "checkContained", "=", "False", "if", "(", "len", "(", "keyword", ")", ">", "4", ")", ":", "checkContained", "=", "True", "for", "(", "movieID", ",", "key", ")", "in", "kwdsIterator", ":", "if", "(", "key", "in", "seenDict", ")", ":", "continue", "seenDict", "[", "key", "]", "=", "None", "if", "(", "checkContained", "and", "(", "keyword", "in", "key", ")", ")", ":", "matchesappend", "(", "key", ")", "continue", "if", "(", "kwdSndx", "==", "soundex", "(", "key", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ")", ")", ":", "matchesappend", "(", "key", ")", "return", "_sortKeywords", "(", "keyword", ",", "matches", ")" ]
return a sorted list of keywords similar to the one given .
train
false
16,115
def test_random_sample_random_state(): a = db.from_sequence(range(50), npartitions=5) b = a.random_sample(0.5, 1234) c = a.random_sample(0.5, 1234) assert (list(b) == list(c))
[ "def", "test_random_sample_random_state", "(", ")", ":", "a", "=", "db", ".", "from_sequence", "(", "range", "(", "50", ")", ",", "npartitions", "=", "5", ")", "b", "=", "a", ".", "random_sample", "(", "0.5", ",", "1234", ")", "c", "=", "a", ".", "random_sample", "(", "0.5", ",", "1234", ")", "assert", "(", "list", "(", "b", ")", "==", "list", "(", "c", ")", ")" ]
sampling with fixed random seed generates identical results .
train
false
16,116
def _get_default_context(request): folders = Object.filter_by_request(request, Folder.objects, mode='r') massform = MassActionForm(request.user.profile) context = {'folders': folders, 'massform': massform} return context
[ "def", "_get_default_context", "(", "request", ")", ":", "folders", "=", "Object", ".", "filter_by_request", "(", "request", ",", "Folder", ".", "objects", ",", "mode", "=", "'r'", ")", "massform", "=", "MassActionForm", "(", "request", ".", "user", ".", "profile", ")", "context", "=", "{", "'folders'", ":", "folders", ",", "'massform'", ":", "massform", "}", "return", "context" ]
returns default context as a dict() .
train
false
16,117
def sort_string_key(): return sort_string
[ "def", "sort_string_key", "(", ")", ":", "return", "sort_string" ]
returns a key function that produces a key by sorting a string .
train
false
16,118
def __get_storage_module(collection_type): return module_loader.get_module_from_file('serializers', collection_type, 'serializer_file')
[ "def", "__get_storage_module", "(", "collection_type", ")", ":", "return", "module_loader", ".", "get_module_from_file", "(", "'serializers'", ",", "collection_type", ",", "'serializer_file'", ")" ]
look up serializer in /etc/cobbler/modules .
train
false
16,119
def _instance_callable(obj): if (not isinstance(obj, ClassTypes)): return (getattr(obj, '__call__', None) is not None) klass = obj if (klass.__dict__.get('__call__') is not None): return True for base in klass.__bases__: if _instance_callable(base): return True return False
[ "def", "_instance_callable", "(", "obj", ")", ":", "if", "(", "not", "isinstance", "(", "obj", ",", "ClassTypes", ")", ")", ":", "return", "(", "getattr", "(", "obj", ",", "'__call__'", ",", "None", ")", "is", "not", "None", ")", "klass", "=", "obj", "if", "(", "klass", ".", "__dict__", ".", "get", "(", "'__call__'", ")", "is", "not", "None", ")", ":", "return", "True", "for", "base", "in", "klass", ".", "__bases__", ":", "if", "_instance_callable", "(", "base", ")", ":", "return", "True", "return", "False" ]
given an object , return true if the object is callable ; for classes , return true if instances would be callable .
train
true
16,120
def _create_character(session, new_player, typeclass, home, permissions): try: new_character = create.create_object(typeclass, key=new_player.key, home=home, permissions=permissions) new_player.db._playable_characters.append(new_character) new_character.locks.add(('puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)' % (new_character.id, new_player.id))) if (not new_character.db.desc): new_character.db.desc = 'This is a Player.' new_player.db._last_puppet = new_character except Exception as e: session.msg(('There was an error creating the Character:\n%s\n If this problem persists, contact an admin.' % e)) logger.log_trace() return False
[ "def", "_create_character", "(", "session", ",", "new_player", ",", "typeclass", ",", "home", ",", "permissions", ")", ":", "try", ":", "new_character", "=", "create", ".", "create_object", "(", "typeclass", ",", "key", "=", "new_player", ".", "key", ",", "home", "=", "home", ",", "permissions", "=", "permissions", ")", "new_player", ".", "db", ".", "_playable_characters", ".", "append", "(", "new_character", ")", "new_character", ".", "locks", ".", "add", "(", "(", "'puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)'", "%", "(", "new_character", ".", "id", ",", "new_player", ".", "id", ")", ")", ")", "if", "(", "not", "new_character", ".", "db", ".", "desc", ")", ":", "new_character", ".", "db", ".", "desc", "=", "'This is a Player.'", "new_player", ".", "db", ".", "_last_puppet", "=", "new_character", "except", "Exception", "as", "e", ":", "session", ".", "msg", "(", "(", "'There was an error creating the Character:\\n%s\\n If this problem persists, contact an admin.'", "%", "e", ")", ")", "logger", ".", "log_trace", "(", ")", "return", "False" ]
helper function that creates a new character object for the player and registers it as a playable character .
train
false
16,121
def pr_define_role(pe_id, role=None, role_type=None, entity_type=None, sub_type=None): if (not pe_id): return s3db = current.s3db if (role_type not in s3db.pr_role_types): role_type = 9 data = {'pe_id': pe_id, 'role': role, 'role_type': role_type, 'entity_type': entity_type, 'sub_type': sub_type} rtable = s3db.pr_role if role: query = ((rtable.pe_id == pe_id) & (rtable.role == role)) duplicate = current.db(query).select(rtable.id, rtable.role_type, limitby=(0, 1)).first() else: duplicate = None if duplicate: if (duplicate.role_type != role_type): if (str(role_type) != str(OU)): data['path'] = None s3db.pr_role_rebuild_path(duplicate.id, clear=True) duplicate.update_record(**data) record_id = duplicate.id else: record_id = rtable.insert(**data) return record_id
[ "def", "pr_define_role", "(", "pe_id", ",", "role", "=", "None", ",", "role_type", "=", "None", ",", "entity_type", "=", "None", ",", "sub_type", "=", "None", ")", ":", "if", "(", "not", "pe_id", ")", ":", "return", "s3db", "=", "current", ".", "s3db", "if", "(", "role_type", "not", "in", "s3db", ".", "pr_role_types", ")", ":", "role_type", "=", "9", "data", "=", "{", "'pe_id'", ":", "pe_id", ",", "'role'", ":", "role", ",", "'role_type'", ":", "role_type", ",", "'entity_type'", ":", "entity_type", ",", "'sub_type'", ":", "sub_type", "}", "rtable", "=", "s3db", ".", "pr_role", "if", "role", ":", "query", "=", "(", "(", "rtable", ".", "pe_id", "==", "pe_id", ")", "&", "(", "rtable", ".", "role", "==", "role", ")", ")", "duplicate", "=", "current", ".", "db", "(", "query", ")", ".", "select", "(", "rtable", ".", "id", ",", "rtable", ".", "role_type", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "else", ":", "duplicate", "=", "None", "if", "duplicate", ":", "if", "(", "duplicate", ".", "role_type", "!=", "role_type", ")", ":", "if", "(", "str", "(", "role_type", ")", "!=", "str", "(", "OU", ")", ")", ":", "data", "[", "'path'", "]", "=", "None", "s3db", ".", "pr_role_rebuild_path", "(", "duplicate", ".", "id", ",", "clear", "=", "True", ")", "duplicate", ".", "update_record", "(", "**", "data", ")", "record_id", "=", "duplicate", ".", "id", "else", ":", "record_id", "=", "rtable", ".", "insert", "(", "**", "data", ")", "return", "record_id" ]
back-end method to define a new affiliates-role for a person entity .
train
false
16,122
def is_wrong_i18n_format(n): if isinstance(n.parent, compiler.ast.Mod): n = n.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Name): if (n.parent.node.name == '_'): return True return False
[ "def", "is_wrong_i18n_format", "(", "n", ")", ":", "if", "isinstance", "(", "n", ".", "parent", ",", "compiler", ".", "ast", ".", "Mod", ")", ":", "n", "=", "n", ".", "parent", "if", "isinstance", "(", "n", ".", "parent", ",", "compiler", ".", "ast", ".", "CallFunc", ")", ":", "if", "isinstance", "(", "n", ".", "parent", ".", "node", ",", "compiler", ".", "ast", ".", "Name", ")", ":", "if", "(", "n", ".", "parent", ".", "node", ".", "name", "==", "'_'", ")", ":", "return", "True", "return", "False" ]
check whether a string node is used inside an i18n _() call , e.g. the _('...' % ...) pattern .
train
false
16,123
def set_config_value(filepath, key, value): replacement_line = ('%s = %s\n' % (key, value)) match = re.compile(('^%s\\s+=' % key)).match with open(filepath, 'r+') as f: lines = f.readlines() f.seek(0, 0) f.truncate() for line in lines: f.write((line if (not match(line)) else replacement_line))
[ "def", "set_config_value", "(", "filepath", ",", "key", ",", "value", ")", ":", "replacement_line", "=", "(", "'%s = %s\\n'", "%", "(", "key", ",", "value", ")", ")", "match", "=", "re", ".", "compile", "(", "(", "'^%s\\\\s+='", "%", "key", ")", ")", ".", "match", "with", "open", "(", "filepath", ",", "'r+'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "seek", "(", "0", ",", "0", ")", "f", ".", "truncate", "(", ")", "for", "line", "in", "lines", ":", "f", ".", "write", "(", "(", "line", "if", "(", "not", "match", "(", "line", ")", ")", "else", "replacement_line", ")", ")" ]
set key = value in config file .
train
false
16,124
def decode_string_escape(str_): raise Exception('Should be overriden')
[ "def", "decode_string_escape", "(", "str_", ")", ":", "raise", "Exception", "(", "'Should be overriden'", ")" ]
generic python string escape .
train
false
16,126
def DeleteGRRTempFile(path): if (not os.path.isabs(path)): raise ErrorBadPath('Path must be absolute') prefix = config_lib.CONFIG['Client.tempfile_prefix'] directories = [GetTempDirForRoot(root) for root in config_lib.CONFIG['Client.tempdir_roots']] if (not _CheckIfPathIsValidForDeletion(path, prefix=prefix, directories=directories)): msg = "Can't delete temp file %s. Filename must start with %s or lie within any of %s." raise ErrorNotTempFile((msg % (path, prefix, directories))) if os.path.exists(path): files.FILE_HANDLE_CACHE.Flush() os.remove(path) else: raise ErrorNotAFile(('%s does not exist.' % path))
[ "def", "DeleteGRRTempFile", "(", "path", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ")", ":", "raise", "ErrorBadPath", "(", "'Path must be absolute'", ")", "prefix", "=", "config_lib", ".", "CONFIG", "[", "'Client.tempfile_prefix'", "]", "directories", "=", "[", "GetTempDirForRoot", "(", "root", ")", "for", "root", "in", "config_lib", ".", "CONFIG", "[", "'Client.tempdir_roots'", "]", "]", "if", "(", "not", "_CheckIfPathIsValidForDeletion", "(", "path", ",", "prefix", "=", "prefix", ",", "directories", "=", "directories", ")", ")", ":", "msg", "=", "\"Can't delete temp file %s. Filename must start with %s or lie within any of %s.\"", "raise", "ErrorNotTempFile", "(", "(", "msg", "%", "(", "path", ",", "prefix", ",", "directories", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "files", ".", "FILE_HANDLE_CACHE", ".", "Flush", "(", ")", "os", ".", "remove", "(", "path", ")", "else", ":", "raise", "ErrorNotAFile", "(", "(", "'%s does not exist.'", "%", "path", ")", ")" ]
delete a grr temp file .
train
false
16,127
def setugid(user): try: from pwd import getpwuid passwd = getpwuid(int(user)) except ValueError: from pwd import getpwnam passwd = getpwnam(user) if hasattr(os, 'initgroups'): os.initgroups(passwd.pw_name, passwd.pw_gid) else: import ctypes if (ctypes.CDLL(None).initgroups(passwd.pw_name, passwd.pw_gid) < 0): err = ctypes.c_int.in_dll(ctypes.pythonapi, 'errno').value raise OSError(err, os.strerror(err), 'initgroups') os.setgid(passwd.pw_gid) os.setuid(passwd.pw_uid) os.environ['HOME'] = passwd.pw_dir
[ "def", "setugid", "(", "user", ")", ":", "try", ":", "from", "pwd", "import", "getpwuid", "passwd", "=", "getpwuid", "(", "int", "(", "user", ")", ")", "except", "ValueError", ":", "from", "pwd", "import", "getpwnam", "passwd", "=", "getpwnam", "(", "user", ")", "if", "hasattr", "(", "os", ",", "'initgroups'", ")", ":", "os", ".", "initgroups", "(", "passwd", ".", "pw_name", ",", "passwd", ".", "pw_gid", ")", "else", ":", "import", "ctypes", "if", "(", "ctypes", ".", "CDLL", "(", "None", ")", ".", "initgroups", "(", "passwd", ".", "pw_name", ",", "passwd", ".", "pw_gid", ")", "<", "0", ")", ":", "err", "=", "ctypes", ".", "c_int", ".", "in_dll", "(", "ctypes", ".", "pythonapi", ",", "'errno'", ")", ".", "value", "raise", "OSError", "(", "err", ",", "os", ".", "strerror", "(", "err", ")", ",", "'initgroups'", ")", "os", ".", "setgid", "(", "passwd", ".", "pw_gid", ")", "os", ".", "setuid", "(", "passwd", ".", "pw_uid", ")", "os", ".", "environ", "[", "'HOME'", "]", "=", "passwd", ".", "pw_dir" ]
change process user and group id ; the argument is a numeric user id or a user name .
train
false
16,130
def UnpackTag(tag): return ((tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK))
[ "def", "UnpackTag", "(", "tag", ")", ":", "return", "(", "(", "tag", ">>", "TAG_TYPE_BITS", ")", ",", "(", "tag", "&", "TAG_TYPE_MASK", ")", ")" ]
the inverse of packtag() .
train
false
16,131
def _flag_rereview_adult(app, ratings_body, rating): old_rating = app.content_ratings.filter(ratings_body=ratings_body.id) if (not old_rating.exists()): return if (rating.adult and (not old_rating[0].get_rating().adult)): RereviewQueue.flag(app, mkt.LOG.CONTENT_RATING_TO_ADULT, message=_('Content rating changed to Adult.'))
[ "def", "_flag_rereview_adult", "(", "app", ",", "ratings_body", ",", "rating", ")", ":", "old_rating", "=", "app", ".", "content_ratings", ".", "filter", "(", "ratings_body", "=", "ratings_body", ".", "id", ")", "if", "(", "not", "old_rating", ".", "exists", "(", ")", ")", ":", "return", "if", "(", "rating", ".", "adult", "and", "(", "not", "old_rating", "[", "0", "]", ".", "get_rating", "(", ")", ".", "adult", ")", ")", ":", "RereviewQueue", ".", "flag", "(", "app", ",", "mkt", ".", "LOG", ".", "CONTENT_RATING_TO_ADULT", ",", "message", "=", "_", "(", "'Content rating changed to Adult.'", ")", ")" ]
flag app for rereview if it receives an adult content rating .
train
false
16,134
def ParseCodeToTree(code): try: parser_driver = driver.Driver(_GRAMMAR_FOR_PY3, convert=pytree.convert) tree = parser_driver.parse_string(code, debug=False) except parse.ParseError: try: parser_driver = driver.Driver(_GRAMMAR_FOR_PY2, convert=pytree.convert) tree = parser_driver.parse_string(code, debug=False) except parse.ParseError: try: ast.parse(code) except SyntaxError as e: raise e else: raise return _WrapEndMarker(tree)
[ "def", "ParseCodeToTree", "(", "code", ")", ":", "try", ":", "parser_driver", "=", "driver", ".", "Driver", "(", "_GRAMMAR_FOR_PY3", ",", "convert", "=", "pytree", ".", "convert", ")", "tree", "=", "parser_driver", ".", "parse_string", "(", "code", ",", "debug", "=", "False", ")", "except", "parse", ".", "ParseError", ":", "try", ":", "parser_driver", "=", "driver", ".", "Driver", "(", "_GRAMMAR_FOR_PY2", ",", "convert", "=", "pytree", ".", "convert", ")", "tree", "=", "parser_driver", ".", "parse_string", "(", "code", ",", "debug", "=", "False", ")", "except", "parse", ".", "ParseError", ":", "try", ":", "ast", ".", "parse", "(", "code", ")", "except", "SyntaxError", "as", "e", ":", "raise", "e", "else", ":", "raise", "return", "_WrapEndMarker", "(", "tree", ")" ]
parse the given code to a lib2to3 pytree .
train
false
16,135
def _follow_symlinks(filepath): filepath = os.path.abspath(filepath) while os.path.islink(filepath): filepath = os.path.normpath(os.path.join(os.path.dirname(filepath), os.readlink(filepath))) return filepath
[ "def", "_follow_symlinks", "(", "filepath", ")", ":", "filepath", "=", "os", ".", "path", ".", "abspath", "(", "filepath", ")", "while", "os", ".", "path", ".", "islink", "(", "filepath", ")", ":", "filepath", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filepath", ")", ",", "os", ".", "readlink", "(", "filepath", ")", ")", ")", "return", "filepath" ]
in case filepath is a symlink , follow it until a regular file is reached and return the resulting absolute path .
train
false
16,137
def get_open_port(): open_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) open_socket.bind(('', 0)) port = open_socket.getsockname()[1] open_socket.close() return port
[ "def", "get_open_port", "(", ")", ":", "open_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ",", "0", ")", "open_socket", ".", "bind", "(", "(", "''", ",", "0", ")", ")", "port", "=", "open_socket", ".", "getsockname", "(", ")", "[", "1", "]", "open_socket", ".", "close", "(", ")", "return", "port" ]
gets an open port number from the os .
train
false
16,138
def _make_file(elements, fp): with open(fp, 'w') as f: f.write('\n'.join(elements)) return fp
[ "def", "_make_file", "(", "elements", ",", "fp", ")", ":", "with", "open", "(", "fp", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "elements", ")", ")", "return", "fp" ]
create a file with one element per line ; this function is just a helper for create_commands_slf .
train
false
16,139
def distribution_item(): return s3_rest_controller()
[ "def", "distribution_item", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
16,140
def create_message_set(messages, codec=CODEC_NONE, key=None, compresslevel=None): if (codec == CODEC_NONE): return [create_message(m, k) for (m, k) in messages] elif (codec == CODEC_GZIP): return [create_gzip_message(messages, key, compresslevel)] elif (codec == CODEC_SNAPPY): return [create_snappy_message(messages, key)] else: raise UnsupportedCodecError(('Codec 0x%02x unsupported' % codec))
[ "def", "create_message_set", "(", "messages", ",", "codec", "=", "CODEC_NONE", ",", "key", "=", "None", ",", "compresslevel", "=", "None", ")", ":", "if", "(", "codec", "==", "CODEC_NONE", ")", ":", "return", "[", "create_message", "(", "m", ",", "k", ")", "for", "(", "m", ",", "k", ")", "in", "messages", "]", "elif", "(", "codec", "==", "CODEC_GZIP", ")", ":", "return", "[", "create_gzip_message", "(", "messages", ",", "key", ",", "compresslevel", ")", "]", "elif", "(", "codec", "==", "CODEC_SNAPPY", ")", ":", "return", "[", "create_snappy_message", "(", "messages", ",", "key", ")", "]", "else", ":", "raise", "UnsupportedCodecError", "(", "(", "'Codec 0x%02x unsupported'", "%", "codec", ")", ")" ]
create a message set using the given codec .
train
true
16,141
def get_parse_error(code): code_buffer = StringIO(code) try: tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline)) except tokenize.TokenError as err: return ('Could not parse code: %s' % err) except IndentationError as err: return ('Indentation error: %s' % err) except tabnanny.NannyNag as err: return ("Ambiguous tab at line %d; line is '%s'." % (err.get_lineno(), err.get_line())) return None
[ "def", "get_parse_error", "(", "code", ")", ":", "code_buffer", "=", "StringIO", "(", "code", ")", "try", ":", "tabnanny", ".", "process_tokens", "(", "tokenize", ".", "generate_tokens", "(", "code_buffer", ".", "readline", ")", ")", "except", "tokenize", ".", "TokenError", "as", "err", ":", "return", "(", "'Could not parse code: %s'", "%", "err", ")", "except", "IndentationError", "as", "err", ":", "return", "(", "'Indentation error: %s'", "%", "err", ")", "except", "tabnanny", ".", "NannyNag", "as", "err", ":", "return", "(", "\"Ambiguous tab at line %d; line is '%s'.\"", "%", "(", "err", ".", "get_lineno", "(", ")", ",", "err", ".", "get_line", "(", ")", ")", ")", "return", "None" ]
checks code for ambiguous tabs or other basic parsing issues .
train
false
16,142
def clear_dag_runs(): session = settings.Session() drs = session.query(DagRun).filter(DagRun.dag_id.in_(DAG_IDS)).all() for dr in drs: logging.info('Deleting DagRun :: {}'.format(dr)) session.delete(dr)
[ "def", "clear_dag_runs", "(", ")", ":", "session", "=", "settings", ".", "Session", "(", ")", "drs", "=", "session", ".", "query", "(", "DagRun", ")", ".", "filter", "(", "DagRun", ".", "dag_id", ".", "in_", "(", "DAG_IDS", ")", ")", ".", "all", "(", ")", "for", "dr", "in", "drs", ":", "logging", ".", "info", "(", "'Deleting DagRun :: {}'", ".", "format", "(", "dr", ")", ")", "session", ".", "delete", "(", "dr", ")" ]
remove any existing dag runs for the perf test dags .
train
true
16,145
def constructor(f): __oplist_constructor_list.append(f) return f
[ "def", "constructor", "(", "f", ")", ":", "__oplist_constructor_list", ".", "append", "(", "f", ")", "return", "f" ]
add f to :doc:`oplist` .
train
false
16,147
def getSymmetricYLoop(path, vertexes, y): loop = [] for point in path: vector3Index = Vector3Index(len(vertexes), point.real, y, point.imag) loop.append(vector3Index) vertexes.append(vector3Index) return loop
[ "def", "getSymmetricYLoop", "(", "path", ",", "vertexes", ",", "y", ")", ":", "loop", "=", "[", "]", "for", "point", "in", "path", ":", "vector3Index", "=", "Vector3Index", "(", "len", "(", "vertexes", ")", ",", "point", ".", "real", ",", "y", ",", "point", ".", "imag", ")", "loop", ".", "append", "(", "vector3Index", ")", "vertexes", ".", "append", "(", "vector3Index", ")", "return", "loop" ]
get symmetric y loop .
train
false
16,149
def setIsOutside(yCloseToCenterPath, yIntersectionPaths): beforeClose = (yCloseToCenterPath.yMinusCenter < 0.0) for yIntersectionPath in yIntersectionPaths: if (yIntersectionPath != yCloseToCenterPath): beforePath = (yIntersectionPath.yMinusCenter < 0.0) if (beforeClose == beforePath): yCloseToCenterPath.isOutside = False return yCloseToCenterPath.isOutside = True
[ "def", "setIsOutside", "(", "yCloseToCenterPath", ",", "yIntersectionPaths", ")", ":", "beforeClose", "=", "(", "yCloseToCenterPath", ".", "yMinusCenter", "<", "0.0", ")", "for", "yIntersectionPath", "in", "yIntersectionPaths", ":", "if", "(", "yIntersectionPath", "!=", "yCloseToCenterPath", ")", ":", "beforePath", "=", "(", "yIntersectionPath", ".", "yMinusCenter", "<", "0.0", ")", "if", "(", "beforeClose", "==", "beforePath", ")", ":", "yCloseToCenterPath", ".", "isOutside", "=", "False", "return", "yCloseToCenterPath", ".", "isOutside", "=", "True" ]
determine if the yclosetocenterpath is outside .
train
false
16,150
@utils.positional(1) def transaction(callback, **ctx_options): fut = transaction_async(callback, **ctx_options) return fut.get_result()
[ "@", "utils", ".", "positional", "(", "1", ")", "def", "transaction", "(", "callback", ",", "**", "ctx_options", ")", ":", "fut", "=", "transaction_async", "(", "callback", ",", "**", "ctx_options", ")", "return", "fut", ".", "get_result", "(", ")" ]
run a callback in a transaction and return its result .
train
false
16,152
def _statsd_tag(course_title): return u'course_email:{0}'.format(course_title)
[ "def", "_statsd_tag", "(", "course_title", ")", ":", "return", "u'course_email:{0}'", ".", "format", "(", "course_title", ")" ]
prefix the tag we will use for datadog .
train
false
16,153
def fix_vobject(vcard): if ('fn' not in vcard.contents): logging.debug('vcard has no formatted name, reconstructing...') fname = vcard.contents['n'][0].valueRepr() fname = fname.strip() vcard.add('fn') vcard.fn.value = fname return vcard
[ "def", "fix_vobject", "(", "vcard", ")", ":", "if", "(", "'fn'", "not", "in", "vcard", ".", "contents", ")", ":", "logging", ".", "debug", "(", "'vcard has no formatted name, reconstructing...'", ")", "fname", "=", "vcard", ".", "contents", "[", "'n'", "]", "[", "0", "]", ".", "valueRepr", "(", ")", "fname", "=", "fname", ".", "strip", "(", ")", "vcard", ".", "add", "(", "'fn'", ")", "vcard", ".", "fn", ".", "value", "=", "fname", "return", "vcard" ]
trying to fix some more or less common errors in vcards ; for now only missing fn properties are handled . :type vcard: vobject .
train
false
16,155
def normalize_domain(domain): assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components" if (not domain): return TRUE_DOMAIN result = [] expected = 1 op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2} for token in domain: if (expected == 0): result[0:0] = [AND_OPERATOR] expected = 1 result.append(token) if isinstance(token, (list, tuple)): expected -= 1 else: expected += (op_arity.get(token, 0) - 1) assert (expected == 0), ('This domain is syntactically not correct: %s' % domain) return result
[ "def", "normalize_domain", "(", "domain", ")", ":", "assert", "isinstance", "(", "domain", ",", "(", "list", ",", "tuple", ")", ")", ",", "\"Domains to normalize must have a 'domain' form: a list or tuple of domain components\"", "if", "(", "not", "domain", ")", ":", "return", "TRUE_DOMAIN", "result", "=", "[", "]", "expected", "=", "1", "op_arity", "=", "{", "NOT_OPERATOR", ":", "1", ",", "AND_OPERATOR", ":", "2", ",", "OR_OPERATOR", ":", "2", "}", "for", "token", "in", "domain", ":", "if", "(", "expected", "==", "0", ")", ":", "result", "[", "0", ":", "0", "]", "=", "[", "AND_OPERATOR", "]", "expected", "=", "1", "result", ".", "append", "(", "token", ")", "if", "isinstance", "(", "token", ",", "(", "list", ",", "tuple", ")", ")", ":", "expected", "-=", "1", "else", ":", "expected", "+=", "(", "op_arity", ".", "get", "(", "token", ",", "0", ")", "-", "1", ")", "assert", "(", "expected", "==", "0", ")", ",", "(", "'This domain is syntactically not correct: %s'", "%", "domain", ")", "return", "result" ]
returns a normalized version of domain_expr .
train
false
16,156
def sort_return_tuples(response, **options): if ((not response) or (not options['groups'])): return response n = options['groups'] return list(izip(*[response[i::n] for i in range(n)]))
[ "def", "sort_return_tuples", "(", "response", ",", "**", "options", ")", ":", "if", "(", "(", "not", "response", ")", "or", "(", "not", "options", "[", "'groups'", "]", ")", ")", ":", "return", "response", "n", "=", "options", "[", "'groups'", "]", "return", "list", "(", "izip", "(", "*", "[", "response", "[", "i", ":", ":", "n", "]", "for", "i", "in", "range", "(", "n", ")", "]", ")", ")" ]
if groups is specified , return the response as a list of n-element tuples .
train
true
16,158
def dictdoc(method): dict_method = getattr(dict, method.__name__) if hasattr(dict_method, '__doc__'): method.__doc__ = dict_method.__doc__ return method
[ "def", "dictdoc", "(", "method", ")", ":", "dict_method", "=", "getattr", "(", "dict", ",", "method", ".", "__name__", ")", "if", "hasattr", "(", "dict_method", ",", "'__doc__'", ")", ":", "method", ".", "__doc__", "=", "dict_method", ".", "__doc__", "return", "method" ]
a decorator making reuse of the ordinary dict's docstrings more concise .
train
false