Columns (name, dtype, value or length range):

id_within_dataset      int64     values 1 to 55.5k
snippet                string    lengths 19 to 14.2k
tokens                 list      lengths 6 to 1.63k
nl                     string    lengths 6 to 352
split_within_dataset   string    1 class
is_duplicated          bool      2 classes
9,028
def prepare_bearer_uri(token, uri): return add_params_to_uri(uri, [(u'access_token', token)])
[ "def", "prepare_bearer_uri", "(", "token", ",", "uri", ")", ":", "return", "add_params_to_uri", "(", "uri", ",", "[", "(", "u'access_token'", ",", "token", ")", "]", ")" ]
add a bearer token to the request uri .
train
false
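A minimal usage sketch for the prepare_bearer_uri snippet above, assuming add_params_to_uri comes from oauthlib.common (which appends query parameters to a URI); the example URL is hypothetical.

    from oauthlib.common import add_params_to_uri

    def prepare_bearer_uri(token, uri):
        # Append the access token to the URI as a query parameter.
        return add_params_to_uri(uri, [(u'access_token', token)])

    print(prepare_bearer_uri('abc123', 'https://api.example.com/v1/me'))
    # -> https://api.example.com/v1/me?access_token=abc123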
9,029
def get_comments_file(worksheet_path, archive, valid_files): sheet_codename = os.path.split(worksheet_path)[(-1)] rels_file = (((PACKAGE_WORKSHEET_RELS + '/') + sheet_codename) + '.rels') if (rels_file not in valid_files): return None rels_source = archive.read(rels_file) root = fromstring(rels_source) for i in root: if (i.attrib['Type'] == COMMENTS_NS): comments_file = os.path.split(i.attrib['Target'])[(-1)] comments_file = ((PACKAGE_XL + '/') + comments_file) if (comments_file in valid_files): return comments_file return None
[ "def", "get_comments_file", "(", "worksheet_path", ",", "archive", ",", "valid_files", ")", ":", "sheet_codename", "=", "os", ".", "path", ".", "split", "(", "worksheet_path", ")", "[", "(", "-", "1", ")", "]", "rels_file", "=", "(", "(", "(", "PACKAGE_WORKSHEET_RELS", "+", "'/'", ")", "+", "sheet_codename", ")", "+", "'.rels'", ")", "if", "(", "rels_file", "not", "in", "valid_files", ")", ":", "return", "None", "rels_source", "=", "archive", ".", "read", "(", "rels_file", ")", "root", "=", "fromstring", "(", "rels_source", ")", "for", "i", "in", "root", ":", "if", "(", "i", ".", "attrib", "[", "'Type'", "]", "==", "COMMENTS_NS", ")", ":", "comments_file", "=", "os", ".", "path", ".", "split", "(", "i", ".", "attrib", "[", "'Target'", "]", ")", "[", "(", "-", "1", ")", "]", "comments_file", "=", "(", "(", "PACKAGE_XL", "+", "'/'", ")", "+", "comments_file", ")", "if", "(", "comments_file", "in", "valid_files", ")", ":", "return", "comments_file", "return", "None" ]
returns the xml filename in the archive which contains the comments for the spreadsheet with codename sheet_codename .
train
false
9,030
def check_args(args): if (args.presharedkey and ((len(args.presharedkey) < 8) or (len(args.presharedkey) > 64))): sys.exit((((('[' + R) + '-') + W) + '] Pre-shared key must be between 8 and 63 printable characters.')) if (((args.jamminginterface and (not args.apinterface)) or ((not args.jamminginterface) and args.apinterface)) and (not (args.nojamming and args.apinterface))): sys.exit((((('[' + R) + '-') + W) + '] --apinterface (-aI) and --jamminginterface (-jI) (or --nojamming (-nJ)) are used in conjuction.')) if (args.nojamming and args.jamminginterface): sys.exit((((('[' + R) + '-') + W) + '] --nojamming (-nJ) and --jamminginterface (-jI) cannot work together.'))
[ "def", "check_args", "(", "args", ")", ":", "if", "(", "args", ".", "presharedkey", "and", "(", "(", "len", "(", "args", ".", "presharedkey", ")", "<", "8", ")", "or", "(", "len", "(", "args", ".", "presharedkey", ")", ">", "64", ")", ")", ")", ":", "sys", ".", "exit", "(", "(", "(", "(", "(", "'['", "+", "R", ")", "+", "'-'", ")", "+", "W", ")", "+", "'] Pre-shared key must be between 8 and 63 printable characters.'", ")", ")", "if", "(", "(", "(", "args", ".", "jamminginterface", "and", "(", "not", "args", ".", "apinterface", ")", ")", "or", "(", "(", "not", "args", ".", "jamminginterface", ")", "and", "args", ".", "apinterface", ")", ")", "and", "(", "not", "(", "args", ".", "nojamming", "and", "args", ".", "apinterface", ")", ")", ")", ":", "sys", ".", "exit", "(", "(", "(", "(", "(", "'['", "+", "R", ")", "+", "'-'", ")", "+", "W", ")", "+", "'] --apinterface (-aI) and --jamminginterface (-jI) (or --nojamming (-nJ)) are used in conjuction.'", ")", ")", "if", "(", "args", ".", "nojamming", "and", "args", ".", "jamminginterface", ")", ":", "sys", ".", "exit", "(", "(", "(", "(", "(", "'['", "+", "R", ")", "+", "'-'", ")", "+", "W", ")", "+", "'] --nojamming (-nJ) and --jamminginterface (-jI) cannot work together.'", ")", ")" ]
validate the command-line arguments and exit with an error message on invalid combinations .
train
false
9,031
def utfstr(stuff): if isinstance(stuff, basestring): return stuff else: return str(stuff)
[ "def", "utfstr", "(", "stuff", ")", ":", "if", "isinstance", "(", "stuff", ",", "basestring", ")", ":", "return", "stuff", "else", ":", "return", "str", "(", "stuff", ")" ]
converts stuff to a string . does so without failing if stuff is a utf8 string .
train
false
9,032
def unregister_fsa_session_signals(): if (not has_flask_sqlalchemy): return version = parse_version(flask_sqlalchemy.__version__) if (version >= (2, 0)): return events = flask_sqlalchemy._SessionSignalEvents signal_names = ('before_commit', 'after_commit', 'after_rollback') for signal_name in signal_names: signal = getattr(events, 'session_signal_{0}'.format(signal_name)) event.remove(SessionBase, signal_name, signal)
[ "def", "unregister_fsa_session_signals", "(", ")", ":", "if", "(", "not", "has_flask_sqlalchemy", ")", ":", "return", "version", "=", "parse_version", "(", "flask_sqlalchemy", ".", "__version__", ")", "if", "(", "version", ">=", "(", "2", ",", "0", ")", ")", ":", "return", "events", "=", "flask_sqlalchemy", ".", "_SessionSignalEvents", "signal_names", "=", "(", "'before_commit'", ",", "'after_commit'", ",", "'after_rollback'", ")", "for", "signal_name", "in", "signal_names", ":", "signal", "=", "getattr", "(", "events", ",", "'session_signal_{0}'", ".", "format", "(", "signal_name", ")", ")", "event", ".", "remove", "(", "SessionBase", ",", "signal_name", ",", "signal", ")" ]
unregisters flask-sqlalchemy session commit and rollback signal handlers .
train
false
9,033
def with_ioloop(method, expect_success=True): def test_method(self): r = method(self) loop = self.io_loop if expect_success: self.pullstream.on_recv(self.on_message_succeed) else: self.pullstream.on_recv(self.on_message_fail) loop.call_later(1, self.attempt_connection) loop.call_later(1.2, self.send_msg) if expect_success: loop.call_later(2, self.on_test_timeout_fail) else: loop.call_later(2, self.on_test_timeout_succeed) loop.start() if self.fail_msg: self.fail(self.fail_msg) return r return test_method
[ "def", "with_ioloop", "(", "method", ",", "expect_success", "=", "True", ")", ":", "def", "test_method", "(", "self", ")", ":", "r", "=", "method", "(", "self", ")", "loop", "=", "self", ".", "io_loop", "if", "expect_success", ":", "self", ".", "pullstream", ".", "on_recv", "(", "self", ".", "on_message_succeed", ")", "else", ":", "self", ".", "pullstream", ".", "on_recv", "(", "self", ".", "on_message_fail", ")", "loop", ".", "call_later", "(", "1", ",", "self", ".", "attempt_connection", ")", "loop", ".", "call_later", "(", "1.2", ",", "self", ".", "send_msg", ")", "if", "expect_success", ":", "loop", ".", "call_later", "(", "2", ",", "self", ".", "on_test_timeout_fail", ")", "else", ":", "loop", ".", "call_later", "(", "2", ",", "self", ".", "on_test_timeout_succeed", ")", "loop", ".", "start", "(", ")", "if", "self", ".", "fail_msg", ":", "self", ".", "fail", "(", "self", ".", "fail_msg", ")", "return", "r", "return", "test_method" ]
decorator for running tests with an ioloop .
train
false
9,036
def demo_legacy_grammar(): from nltk.grammar import FeatureGrammar g = FeatureGrammar.fromstring(u"\n % start S\n S[sem=<hello>] -> 'hello'\n ") print((u'Reading grammar: %s' % g)) print((u'*' * 20)) for reading in interpret_sents([u'hello'], g, semkey=u'sem'): (syn, sem) = reading[0] print() print(u'output: ', sem)
[ "def", "demo_legacy_grammar", "(", ")", ":", "from", "nltk", ".", "grammar", "import", "FeatureGrammar", "g", "=", "FeatureGrammar", ".", "fromstring", "(", "u\"\\n % start S\\n S[sem=<hello>] -> 'hello'\\n \"", ")", "print", "(", "(", "u'Reading grammar: %s'", "%", "g", ")", ")", "print", "(", "(", "u'*'", "*", "20", ")", ")", "for", "reading", "in", "interpret_sents", "(", "[", "u'hello'", "]", ",", "g", ",", "semkey", "=", "u'sem'", ")", ":", "(", "syn", ",", "sem", ")", "=", "reading", "[", "0", "]", "print", "(", ")", "print", "(", "u'output: '", ",", "sem", ")" ]
check that interpret_sents() is compatible with legacy grammars that use a lowercase sem feature .
train
false
9,037
def to_utf8(obj): if isinstance(obj, str): try: return obj.decode('utf-8') except AttributeError: return obj try: if isinstance(obj, unicode): return obj else: return obj.__str__().decode('utf-8') except NameError: if isinstance(obj, bytes): return obj.decode('utf-8') else: return obj.__str__()
[ "def", "to_utf8", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "try", ":", "return", "obj", ".", "decode", "(", "'utf-8'", ")", "except", "AttributeError", ":", "return", "obj", "try", ":", "if", "isinstance", "(", "obj", ",", "unicode", ")", ":", "return", "obj", "else", ":", "return", "obj", ".", "__str__", "(", ")", ".", "decode", "(", "'utf-8'", ")", "except", "NameError", ":", "if", "isinstance", "(", "obj", ",", "bytes", ")", ":", "return", "obj", ".", "decode", "(", "'utf-8'", ")", "else", ":", "return", "obj", ".", "__str__", "(", ")" ]
return a unicode string representing a python object .
train
false
9,039
def getprog(): codetbl = db.code acid = request.vars.acid sid = request.vars.sid if sid: query = (((codetbl.sid == sid) & (codetbl.acid == acid)) & (codetbl.timestamp != None)) elif auth.user: query = (((codetbl.sid == auth.user.username) & (codetbl.acid == acid)) & (codetbl.timestamp != None)) else: query = None res = {} if query: result = db(query) res['acid'] = acid if (not result.isempty()): r = result.select(orderby=codetbl.id).last().code res['source'] = r if sid: res['sid'] = sid else: logging.debug(('Did not find anything to load for %s' % sid)) response.headers['content-type'] = 'application/json' return json.dumps([res])
[ "def", "getprog", "(", ")", ":", "codetbl", "=", "db", ".", "code", "acid", "=", "request", ".", "vars", ".", "acid", "sid", "=", "request", ".", "vars", ".", "sid", "if", "sid", ":", "query", "=", "(", "(", "(", "codetbl", ".", "sid", "==", "sid", ")", "&", "(", "codetbl", ".", "acid", "==", "acid", ")", ")", "&", "(", "codetbl", ".", "timestamp", "!=", "None", ")", ")", "elif", "auth", ".", "user", ":", "query", "=", "(", "(", "(", "codetbl", ".", "sid", "==", "auth", ".", "user", ".", "username", ")", "&", "(", "codetbl", ".", "acid", "==", "acid", ")", ")", "&", "(", "codetbl", ".", "timestamp", "!=", "None", ")", ")", "else", ":", "query", "=", "None", "res", "=", "{", "}", "if", "query", ":", "result", "=", "db", "(", "query", ")", "res", "[", "'acid'", "]", "=", "acid", "if", "(", "not", "result", ".", "isempty", "(", ")", ")", ":", "r", "=", "result", ".", "select", "(", "orderby", "=", "codetbl", ".", "id", ")", ".", "last", "(", ")", ".", "code", "res", "[", "'source'", "]", "=", "r", "if", "sid", ":", "res", "[", "'sid'", "]", "=", "sid", "else", ":", "logging", ".", "debug", "(", "(", "'Did not find anything to load for %s'", "%", "sid", ")", ")", "response", ".", "headers", "[", "'content-type'", "]", "=", "'application/json'", "return", "json", ".", "dumps", "(", "[", "res", "]", ")" ]
return the program code for a particular acid .
train
false
9,041
def GetBase64EncodedHTML5ZipFromUrl(url): response = urllib2.urlopen(url) return base64.b64encode(response.read())
[ "def", "GetBase64EncodedHTML5ZipFromUrl", "(", "url", ")", ":", "response", "=", "urllib2", ".", "urlopen", "(", "url", ")", "return", "base64", ".", "b64encode", "(", "response", ".", "read", "(", ")", ")" ]
retrieve the zip file from the given url and return it base64-encoded .
train
false
9,044
def load_test_config(): conf.load_test_config()
[ "def", "load_test_config", "(", ")", ":", "conf", ".", "load_test_config", "(", ")" ]
load the unit test configuration .
train
false
9,045
def subsequence_match(ref, typed, csc): if csc: return _subsequence_match_iter(ref, typed) else: return _subsequence_match_iter(ref.lower(), typed.lower())
[ "def", "subsequence_match", "(", "ref", ",", "typed", ",", "csc", ")", ":", "if", "csc", ":", "return", "_subsequence_match_iter", "(", "ref", ",", "typed", ")", "else", ":", "return", "_subsequence_match_iter", "(", "ref", ".", "lower", "(", ")", ",", "typed", ".", "lower", "(", ")", ")" ]
detects whether typed is a subsequence of ref .
train
false
9,046
def resource_name_package(name): if (not (PRN_SEPARATOR in name)): return None return name[:name.find(PRN_SEPARATOR)]
[ "def", "resource_name_package", "(", "name", ")", ":", "if", "(", "not", "(", "PRN_SEPARATOR", "in", "name", ")", ")", ":", "return", "None", "return", "name", "[", ":", "name", ".", "find", "(", "PRN_SEPARATOR", ")", "]" ]
pkg/typename -> pkg .
train
false
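A quick check of resource_name_package; PRN_SEPARATOR is assumed to be '/', as in ROS resource names.

    PRN_SEPARATOR = '/'  # assumed value

    def resource_name_package(name):
        # 'pkg/typename' -> 'pkg'; None when no separator is present.
        if PRN_SEPARATOR not in name:
            return None
        return name[:name.find(PRN_SEPARATOR)]

    print(resource_name_package('std_msgs/String'))  # -> std_msgs
    print(resource_name_package('String'))           # -> None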
9,048
def query_package(module, name): (rc, out, err) = module.run_command(('%s -p -v' % PKGIN_PATH)) if (rc == 0): pflag = '-p' splitchar = ';' else: pflag = '' splitchar = ' ' (rc, out, err) = module.run_command(('%s %s search "^%s$"' % (PKGIN_PATH, pflag, name))) if (rc == 0): packages = out.split('\n') for package in packages: (pkgname_with_version, raw_state) = package.split(splitchar)[0:2] pkg_search_obj = re.search('^(.*?)\\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) if (not pkg_search_obj): continue pkgname_without_version = pkg_search_obj.group(1) if (name != pkgname_without_version): continue if (raw_state == '<'): return 'outdated' elif ((raw_state == '=') or (raw_state == '>')): return 'present' else: return False return False
[ "def", "query_package", "(", "module", ",", "name", ")", ":", "(", "rc", ",", "out", ",", "err", ")", "=", "module", ".", "run_command", "(", "(", "'%s -p -v'", "%", "PKGIN_PATH", ")", ")", "if", "(", "rc", "==", "0", ")", ":", "pflag", "=", "'-p'", "splitchar", "=", "';'", "else", ":", "pflag", "=", "''", "splitchar", "=", "' '", "(", "rc", ",", "out", ",", "err", ")", "=", "module", ".", "run_command", "(", "(", "'%s %s search \"^%s$\"'", "%", "(", "PKGIN_PATH", ",", "pflag", ",", "name", ")", ")", ")", "if", "(", "rc", "==", "0", ")", ":", "packages", "=", "out", ".", "split", "(", "'\\n'", ")", "for", "package", "in", "packages", ":", "(", "pkgname_with_version", ",", "raw_state", ")", "=", "package", ".", "split", "(", "splitchar", ")", "[", "0", ":", "2", "]", "pkg_search_obj", "=", "re", ".", "search", "(", "'^(.*?)\\\\-[0-9][0-9.]*(nb[0-9]+)*'", ",", "pkgname_with_version", ",", "re", ".", "M", ")", "if", "(", "not", "pkg_search_obj", ")", ":", "continue", "pkgname_without_version", "=", "pkg_search_obj", ".", "group", "(", "1", ")", "if", "(", "name", "!=", "pkgname_without_version", ")", ":", "continue", "if", "(", "raw_state", "==", "'<'", ")", ":", "return", "'outdated'", "elif", "(", "(", "raw_state", "==", "'='", ")", "or", "(", "raw_state", "==", "'>'", ")", ")", ":", "return", "'present'", "else", ":", "return", "False", "return", "False" ]
returns the package state : outdated , present , or false if the package is not installed .
train
false
9,050
def load_key_file(path): try: key_file = path.open() except IOError as e: (code, failure) = e raise PathError('Private key file could not be opened.', e.filename, code, failure) keypair = ComparableKeyPair(keypair=KeyPair.load(key_file.read(), format=crypto.FILETYPE_PEM)) return keypair
[ "def", "load_key_file", "(", "path", ")", ":", "try", ":", "key_file", "=", "path", ".", "open", "(", ")", "except", "IOError", "as", "e", ":", "(", "code", ",", "failure", ")", "=", "e", "raise", "PathError", "(", "'Private key file could not be opened.'", ",", "e", ".", "filename", ",", "code", ",", "failure", ")", "keypair", "=", "ComparableKeyPair", "(", "keypair", "=", "KeyPair", ".", "load", "(", "key_file", ".", "read", "(", ")", ",", "format", "=", "crypto", ".", "FILETYPE_PEM", ")", ")", "return", "keypair" ]
load a private key from a specified path .
train
false
9,051
def merge_events(events, ids, new_id, replace_events=True): events = np.asarray(events) events_out = events.copy() idx_touched = [] for col in [1, 2]: for i in ids: mask = (events[:, col] == i) events_out[(mask, col)] = new_id idx_touched.append(np.where(mask)[0]) if (not replace_events): idx_touched = np.unique(np.concatenate(idx_touched)) events_out = np.concatenate((events_out, events[idx_touched]), axis=0) events_out = events_out[np.lexsort(events_out.T[::(-1)])] return events_out
[ "def", "merge_events", "(", "events", ",", "ids", ",", "new_id", ",", "replace_events", "=", "True", ")", ":", "events", "=", "np", ".", "asarray", "(", "events", ")", "events_out", "=", "events", ".", "copy", "(", ")", "idx_touched", "=", "[", "]", "for", "col", "in", "[", "1", ",", "2", "]", ":", "for", "i", "in", "ids", ":", "mask", "=", "(", "events", "[", ":", ",", "col", "]", "==", "i", ")", "events_out", "[", "(", "mask", ",", "col", ")", "]", "=", "new_id", "idx_touched", ".", "append", "(", "np", ".", "where", "(", "mask", ")", "[", "0", "]", ")", "if", "(", "not", "replace_events", ")", ":", "idx_touched", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "idx_touched", ")", ")", "events_out", "=", "np", ".", "concatenate", "(", "(", "events_out", ",", "events", "[", "idx_touched", "]", ")", ",", "axis", "=", "0", ")", "events_out", "=", "events_out", "[", "np", ".", "lexsort", "(", "events_out", ".", "T", "[", ":", ":", "(", "-", "1", ")", "]", ")", "]", "return", "events_out" ]
merge a set of events .
train
false
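A small worked example of merge_events on an MNE-style events array (columns: sample, previous id, event id); ids 1 and 2 are merged into a hypothetical new id 12.

    import numpy as np

    events = np.array([[10, 0, 1],
                       [20, 0, 2],
                       [30, 0, 3]])
    print(merge_events(events, ids=[1, 2], new_id=12))
    # -> [[10  0 12]
    #     [20  0 12]
    #     [30  0  3]]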
9,053
def send_mail_to_admins(sender, subject, body, make_sync_call=apiproxy_stub_map.MakeSyncCall, **kw): kw['sender'] = sender kw['subject'] = subject kw['body'] = body message = AdminEmailMessage(**kw) message.send(make_sync_call)
[ "def", "send_mail_to_admins", "(", "sender", ",", "subject", ",", "body", ",", "make_sync_call", "=", "apiproxy_stub_map", ".", "MakeSyncCall", ",", "**", "kw", ")", ":", "kw", "[", "'sender'", "]", "=", "sender", "kw", "[", "'subject'", "]", "=", "subject", "kw", "[", "'body'", "]", "=", "body", "message", "=", "AdminEmailMessage", "(", "**", "kw", ")", "message", ".", "send", "(", "make_sync_call", ")" ]
sends mail to admins on behalf of application .
train
false
9,055
def clear_all(tgt=None, tgt_type='glob', expr_form=None): if (expr_form is not None): salt.utils.warn_until('Fluorine', "the target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.") tgt_type = expr_form return _clear_cache(tgt, tgt_type, clear_pillar_flag=True, clear_grains_flag=True, clear_mine_flag=True)
[ "def", "clear_all", "(", "tgt", "=", "None", ",", "tgt_type", "=", "'glob'", ",", "expr_form", "=", "None", ")", ":", "if", "(", "expr_form", "is", "not", "None", ")", ":", "salt", ".", "utils", ".", "warn_until", "(", "'Fluorine'", ",", "\"the target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.\"", ")", "tgt_type", "=", "expr_form", "return", "_clear_cache", "(", "tgt", ",", "tgt_type", ",", "clear_pillar_flag", "=", "True", ",", "clear_grains_flag", "=", "True", ",", "clear_mine_flag", "=", "True", ")" ]
clears the cached pillar , grains , and mine data of the targeted minions .
train
false
9,057
def _get_default_unit_format(config): return u'cds'
[ "def", "_get_default_unit_format", "(", "config", ")", ":", "return", "u'cds'" ]
get the default unit format as specified in the votable spec .
train
false
9,058
def args_for_opt_dest_subset(option_parser, args, dests=None): for (dest, value) in _args_for_opt_dest_subset(option_parser, args, dests): (yield value)
[ "def", "args_for_opt_dest_subset", "(", "option_parser", ",", "args", ",", "dests", "=", "None", ")", ":", "for", "(", "dest", ",", "value", ")", "in", "_args_for_opt_dest_subset", "(", "option_parser", ",", "args", ",", "dests", ")", ":", "(", "yield", "value", ")" ]
for the given :py:class:optionparser and list of command line arguments *args* , yield the argument values that correspond to the given option destinations .
train
false
9,060
def invert_mapping(mapping): invert_map = {} for key in mapping.keys(): invert_map[key] = key for id in mapping[key]: invert_map[id] = key return invert_map
[ "def", "invert_mapping", "(", "mapping", ")", ":", "invert_map", "=", "{", "}", "for", "key", "in", "mapping", ".", "keys", "(", ")", ":", "invert_map", "[", "key", "]", "=", "key", "for", "id", "in", "mapping", "[", "key", "]", ":", "invert_map", "[", "id", "]", "=", "key", "return", "invert_map" ]
inverts a dictionary mapping .
train
false
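invert_mapping turns {key: [members]} into {member: key}, with each key also mapping to itself; for example:

    mapping = {'cluster1': ['seq_a', 'seq_b']}
    print(invert_mapping(mapping))
    # -> {'cluster1': 'cluster1', 'seq_a': 'cluster1', 'seq_b': 'cluster1'}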
9,061
def STOCHRSI(ds, count, timeperiod=(- (2 ** 31)), fastk_period=(- (2 ** 31)), fastd_period=(- (2 ** 31)), fastd_matype=0): ret = call_talib_with_ds(ds, count, talib.STOCHRSI, timeperiod, fastk_period, fastd_period, fastd_matype) if (ret is None): ret = (None, None) return ret
[ "def", "STOCHRSI", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ",", "fastk_period", "=", "(", "-", "(", "2", "**", "31", ")", ")", ",", "fastd_period", "=", "(", "-", "(", "2", "**", "31", ")", ")", ",", "fastd_matype", "=", "0", ")", ":", "ret", "=", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "STOCHRSI", ",", "timeperiod", ",", "fastk_period", ",", "fastd_period", ",", "fastd_matype", ")", "if", "(", "ret", "is", "None", ")", ":", "ret", "=", "(", "None", ",", "None", ")", "return", "ret" ]
stochastic relative strength index .
train
false
9,063
def table_from_samples(samples, start=0, stop=(-1), rate=44100): if (type(samples) == np.ndarray): samples = samples.tolist() if (type(samples) != list): raise TypeError('samples should be a list or np.array') if ((start, stop) != (0, (-1))): if (stop > start): samples = samples[(start * rate):(stop * rate)] elif start: samples = samples[(start * rate):] table = pyo.DataTable(size=len(samples), init=samples) return table
[ "def", "table_from_samples", "(", "samples", ",", "start", "=", "0", ",", "stop", "=", "(", "-", "1", ")", ",", "rate", "=", "44100", ")", ":", "if", "(", "type", "(", "samples", ")", "==", "np", ".", "ndarray", ")", ":", "samples", "=", "samples", ".", "tolist", "(", ")", "if", "(", "type", "(", "samples", ")", "!=", "list", ")", ":", "raise", "TypeError", "(", "'samples should be a list or np.array'", ")", "if", "(", "(", "start", ",", "stop", ")", "!=", "(", "0", ",", "(", "-", "1", ")", ")", ")", ":", "if", "(", "stop", ">", "start", ")", ":", "samples", "=", "samples", "[", "(", "start", "*", "rate", ")", ":", "(", "stop", "*", "rate", ")", "]", "elif", "start", ":", "samples", "=", "samples", "[", "(", "start", "*", "rate", ")", ":", "]", "table", "=", "pyo", ".", "DataTable", "(", "size", "=", "len", "(", "samples", ")", ",", "init", "=", "samples", ")", "return", "table" ]
return a pyo datatable constructed from samples .
train
false
9,064
def record_has(inrec, fieldvals): retval = False for field in fieldvals: if isinstance(inrec[field], str): set1 = {inrec[field]} else: set1 = set(inrec[field]) if (set1 & fieldvals[field]): retval = True break return retval
[ "def", "record_has", "(", "inrec", ",", "fieldvals", ")", ":", "retval", "=", "False", "for", "field", "in", "fieldvals", ":", "if", "isinstance", "(", "inrec", "[", "field", "]", ",", "str", ")", ":", "set1", "=", "{", "inrec", "[", "field", "]", "}", "else", ":", "set1", "=", "set", "(", "inrec", "[", "field", "]", ")", "if", "(", "set1", "&", "fieldvals", "[", "field", "]", ")", ":", "retval", "=", "True", "break", "return", "retval" ]
accepts a record and returns true if any of the given fields contains one of the allowed values .
train
false
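record_has returns True as soon as one field of the record intersects its set of allowed values; the field names below are hypothetical.

    rec = {'color': 'red', 'tags': ['new', 'sale']}
    wanted = {'color': {'blue', 'red'}}
    # {'red'} & {'blue', 'red'} is non-empty, so this prints True.
    print(record_has(rec, wanted))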
9,065
def lookup_scopes(service_name): if (service_name in CLIENT_LOGIN_SCOPES): return CLIENT_LOGIN_SCOPES[service_name] return None
[ "def", "lookup_scopes", "(", "service_name", ")", ":", "if", "(", "service_name", "in", "CLIENT_LOGIN_SCOPES", ")", ":", "return", "CLIENT_LOGIN_SCOPES", "[", "service_name", "]", "return", "None" ]
finds the scope urls for the desired service .
train
false
9,066
def expose_api_anonymous(func, to_json=True): return expose_api(func, to_json=to_json, user_required=False)
[ "def", "expose_api_anonymous", "(", "func", ",", "to_json", "=", "True", ")", ":", "return", "expose_api", "(", "func", ",", "to_json", "=", "to_json", ",", "user_required", "=", "False", ")" ]
expose this function via the api but don't require a set user .
train
false
9,068
def _extract_doc_comment_simple(content, line, column, markers): align_column = (column - len(markers[0])) pos = content[line].find(markers[2], column) if (pos != (-1)): return (line, (pos + len(markers[2])), content[line][column:pos]) doc_comment = content[line][column:] line += 1 while (line < len(content)): pos = content[line].find(markers[2]) if (pos == (-1)): doc_comment += ('\n' if (content[line][align_column:] == '') else content[line][align_column:]) else: doc_comment += content[line][align_column:pos] return (line, (pos + len(markers[2])), doc_comment) line += 1 return None
[ "def", "_extract_doc_comment_simple", "(", "content", ",", "line", ",", "column", ",", "markers", ")", ":", "align_column", "=", "(", "column", "-", "len", "(", "markers", "[", "0", "]", ")", ")", "pos", "=", "content", "[", "line", "]", ".", "find", "(", "markers", "[", "2", "]", ",", "column", ")", "if", "(", "pos", "!=", "(", "-", "1", ")", ")", ":", "return", "(", "line", ",", "(", "pos", "+", "len", "(", "markers", "[", "2", "]", ")", ")", ",", "content", "[", "line", "]", "[", "column", ":", "pos", "]", ")", "doc_comment", "=", "content", "[", "line", "]", "[", "column", ":", "]", "line", "+=", "1", "while", "(", "line", "<", "len", "(", "content", ")", ")", ":", "pos", "=", "content", "[", "line", "]", ".", "find", "(", "markers", "[", "2", "]", ")", "if", "(", "pos", "==", "(", "-", "1", ")", ")", ":", "doc_comment", "+=", "(", "'\\n'", "if", "(", "content", "[", "line", "]", "[", "align_column", ":", "]", "==", "''", ")", "else", "content", "[", "line", "]", "[", "align_column", ":", "]", ")", "else", ":", "doc_comment", "+=", "content", "[", "line", "]", "[", "align_column", ":", "pos", "]", "return", "(", "line", ",", "(", "pos", "+", "len", "(", "markers", "[", "2", "]", ")", ")", ",", "doc_comment", ")", "line", "+=", "1", "return", "None" ]
extract a documentation comment that starts at the given position and uses a simple layout .
train
false
9,069
def test_round(method, prec, exprange, restricted_range, itr, stat): for op in all_unary(prec, 9999, itr): n = random.randrange(10) roundop = (op[0], n) t = TestSet(method, roundop) try: if (not convert(t)): continue callfuncs(t) verify(t, stat) except VerifyError as err: log(err)
[ "def", "test_round", "(", "method", ",", "prec", ",", "exprange", ",", "restricted_range", ",", "itr", ",", "stat", ")", ":", "for", "op", "in", "all_unary", "(", "prec", ",", "9999", ",", "itr", ")", ":", "n", "=", "random", ".", "randrange", "(", "10", ")", "roundop", "=", "(", "op", "[", "0", "]", ",", "n", ")", "t", "=", "TestSet", "(", "method", ",", "roundop", ")", "try", ":", "if", "(", "not", "convert", "(", "t", ")", ")", ":", "continue", "callfuncs", "(", "t", ")", "verify", "(", "t", ",", "stat", ")", "except", "VerifyError", "as", "err", ":", "log", "(", "err", ")" ]
iterate the __round__ method through many test cases .
train
false
9,070
def get_ancestor_paths_from_ent_key(ent_key): ancestor_list = [] tokens = str(ent_key).split(dbconstants.KIND_SEPARATOR) tokens = tokens[:(-2)] for num_elements in range(0, len(tokens)): ancestor = '' for token in tokens[0:(num_elements + 1)]: ancestor += (token + dbconstants.KIND_SEPARATOR) ancestor_list.append(ancestor) return ancestor_list
[ "def", "get_ancestor_paths_from_ent_key", "(", "ent_key", ")", ":", "ancestor_list", "=", "[", "]", "tokens", "=", "str", "(", "ent_key", ")", ".", "split", "(", "dbconstants", ".", "KIND_SEPARATOR", ")", "tokens", "=", "tokens", "[", ":", "(", "-", "2", ")", "]", "for", "num_elements", "in", "range", "(", "0", ",", "len", "(", "tokens", ")", ")", ":", "ancestor", "=", "''", "for", "token", "in", "tokens", "[", "0", ":", "(", "num_elements", "+", "1", ")", "]", ":", "ancestor", "+=", "(", "token", "+", "dbconstants", ".", "KIND_SEPARATOR", ")", "ancestor_list", ".", "append", "(", "ancestor", ")", "return", "ancestor_list" ]
get a list of key strings for the ancestor portion of a composite key .
train
false
9,072
def get_paramfile(path): data = None if isinstance(path, six.string_types): for (prefix, function_spec) in PREFIX_MAP.items(): if path.startswith(prefix): (function, kwargs) = function_spec data = function(prefix, path, **kwargs) return data
[ "def", "get_paramfile", "(", "path", ")", ":", "data", "=", "None", "if", "isinstance", "(", "path", ",", "six", ".", "string_types", ")", ":", "for", "(", "prefix", ",", "function_spec", ")", "in", "PREFIX_MAP", ".", "items", "(", ")", ":", "if", "path", ".", "startswith", "(", "prefix", ")", ":", "(", "function", ",", "kwargs", ")", "=", "function_spec", "data", "=", "function", "(", "prefix", ",", "path", ",", "**", "kwargs", ")", "return", "data" ]
load parameter based on a resource uri .
train
false
9,073
def deepcopy_return_value_class_decorator(cls): class NewClass(cls, ): def __getattribute__(self, attr_name): obj = super(NewClass, self).__getattribute__(attr_name) if (hasattr(obj, '__call__') and (not attr_name.startswith('_')) and (not isinstance(obj, mock.Mock))): return deepcopy_return_value_method_decorator(obj) return obj return NewClass
[ "def", "deepcopy_return_value_class_decorator", "(", "cls", ")", ":", "class", "NewClass", "(", "cls", ",", ")", ":", "def", "__getattribute__", "(", "self", ",", "attr_name", ")", ":", "obj", "=", "super", "(", "NewClass", ",", "self", ")", ".", "__getattribute__", "(", "attr_name", ")", "if", "(", "hasattr", "(", "obj", ",", "'__call__'", ")", "and", "(", "not", "attr_name", ".", "startswith", "(", "'_'", ")", ")", "and", "(", "not", "isinstance", "(", "obj", ",", "mock", ".", "Mock", ")", ")", ")", ":", "return", "deepcopy_return_value_method_decorator", "(", "obj", ")", "return", "obj", "return", "NewClass" ]
wraps non-protected methods of a class with a decorator that deep-copies their return values .
train
false
9,074
def clear_db_env(*args, **kwargs): pass
[ "def", "clear_db_env", "(", "*", "args", ",", "**", "kwargs", ")", ":", "pass" ]
unset global configuration variables for database .
train
false
9,075
def valid_csrf_token(req, session_id, csrf_token): try: when = int(csrf_token.split('-')[0], 16) return ((when > (time.time() - CSRF_VALIDITY)) and (csrf_token == make_csrf_token(req, session_id, ts=when))) except (ValueError, IndexError): return False
[ "def", "valid_csrf_token", "(", "req", ",", "session_id", ",", "csrf_token", ")", ":", "try", ":", "when", "=", "int", "(", "csrf_token", ".", "split", "(", "'-'", ")", "[", "0", "]", ",", "16", ")", "return", "(", "(", "when", ">", "(", "time", ".", "time", "(", ")", "-", "CSRF_VALIDITY", ")", ")", "and", "(", "csrf_token", "==", "make_csrf_token", "(", "req", ",", "session_id", ",", "ts", "=", "when", ")", ")", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "return", "False" ]
check the validity of a csrf token .
train
false
9,076
def linebreaks(value): value = re.sub('\\r\\n|\\r|\\n', '\n', value) paras = re.split('\n{2,}', value) paras = [('<p>%s</p>' % p.strip().replace('\n', '<br />')) for p in paras] return '\n\n'.join(paras)
[ "def", "linebreaks", "(", "value", ")", ":", "value", "=", "re", ".", "sub", "(", "'\\\\r\\\\n|\\\\r|\\\\n'", ",", "'\\n'", ",", "value", ")", "paras", "=", "re", ".", "split", "(", "'\\n{2,}'", ",", "value", ")", "paras", "=", "[", "(", "'<p>%s</p>'", "%", "p", ".", "strip", "(", ")", ".", "replace", "(", "'\\n'", ",", "'<br />'", ")", ")", "for", "p", "in", "paras", "]", "return", "'\\n\\n'", ".", "join", "(", "paras", ")" ]
converts newlines into <p> and <br />s .
train
false
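Behavior of the linebreaks snippet on mixed newline styles: blank lines split paragraphs, single newlines become <br />.

    text = 'first line\nsecond line\r\n\r\nnew paragraph'
    print(linebreaks(text))
    # -> <p>first line<br />second line</p>
    #
    #    <p>new paragraph</p>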
9,077
@commands(u'chairs') @example(u'.chairs Tyrope Jason elad') def chairs(bot, trigger): if (not ismeetingrunning(trigger.sender)): bot.say(u"Can't do that, start meeting first") return if (not trigger.group(2)): bot.say(u'Who are the chairs?') return if (trigger.nick.lower() == meetings_dict[trigger.sender][u'head']): meetings_dict[trigger.sender][u'chairs'] = trigger.group(2).lower().split(u' ') chairs_readable = trigger.group(2).lower().replace(u' ', u', ') logplain((u'Meeting chairs are: ' + chairs_readable), trigger.sender) logHTML_listitem((u'<span style="font-weight: bold">Meeting chairs are: </span>' + chairs_readable), trigger.sender) bot.say((u'\x02Meeting chairs are:\x0f ' + chairs_readable)) else: bot.say(u'Only meeting head can set chairs')
[ "@", "commands", "(", "u'chairs'", ")", "@", "example", "(", "u'.chairs Tyrope Jason elad'", ")", "def", "chairs", "(", "bot", ",", "trigger", ")", ":", "if", "(", "not", "ismeetingrunning", "(", "trigger", ".", "sender", ")", ")", ":", "bot", ".", "say", "(", "u\"Can't do that, start meeting first\"", ")", "return", "if", "(", "not", "trigger", ".", "group", "(", "2", ")", ")", ":", "bot", ".", "say", "(", "u'Who are the chairs?'", ")", "return", "if", "(", "trigger", ".", "nick", ".", "lower", "(", ")", "==", "meetings_dict", "[", "trigger", ".", "sender", "]", "[", "u'head'", "]", ")", ":", "meetings_dict", "[", "trigger", ".", "sender", "]", "[", "u'chairs'", "]", "=", "trigger", ".", "group", "(", "2", ")", ".", "lower", "(", ")", ".", "split", "(", "u' '", ")", "chairs_readable", "=", "trigger", ".", "group", "(", "2", ")", ".", "lower", "(", ")", ".", "replace", "(", "u' '", ",", "u', '", ")", "logplain", "(", "(", "u'Meeting chairs are: '", "+", "chairs_readable", ")", ",", "trigger", ".", "sender", ")", "logHTML_listitem", "(", "(", "u'<span style=\"font-weight: bold\">Meeting chairs are: </span>'", "+", "chairs_readable", ")", ",", "trigger", ".", "sender", ")", "bot", ".", "say", "(", "(", "u'\\x02Meeting chairs are:\\x0f '", "+", "chairs_readable", ")", ")", "else", ":", "bot", ".", "say", "(", "u'Only meeting head can set chairs'", ")" ]
set the meeting chairs .
train
false
9,081
def filter_matches(match_text, candidates, case_sensitive, sort_key=(lambda x: x)): if case_sensitive: case_transform = _identity else: case_transform = _lower if match_text: match_text = case_transform(match_text) matches = [r for r in candidates if (match_text in case_transform(r))] else: matches = list(candidates) matches.sort(key=(lambda x: sort_key(case_transform(x)))) return matches
[ "def", "filter_matches", "(", "match_text", ",", "candidates", ",", "case_sensitive", ",", "sort_key", "=", "(", "lambda", "x", ":", "x", ")", ")", ":", "if", "case_sensitive", ":", "case_transform", "=", "_identity", "else", ":", "case_transform", "=", "_lower", "if", "match_text", ":", "match_text", "=", "case_transform", "(", "match_text", ")", "matches", "=", "[", "r", "for", "r", "in", "candidates", "if", "(", "match_text", "in", "case_transform", "(", "r", ")", ")", "]", "else", ":", "matches", "=", "list", "(", "candidates", ")", "matches", ".", "sort", "(", "key", "=", "(", "lambda", "x", ":", "sort_key", "(", "case_transform", "(", "x", ")", ")", ")", ")", "return", "matches" ]
filter candidates and return the matches .
train
false
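filter_matches relies on two one-line helpers that are not shown; a self-contained sketch with assumed definitions:

    def _identity(s):
        return s

    def _lower(s):
        return s.lower()

    # Case-insensitive substring filter, sorted case-insensitively.
    print(filter_matches('re', ['Grep', 'cat', 'REname'], case_sensitive=False))
    # -> ['Grep', 'REname']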
9,084
def eulerian_circuit(G, source=None): if (not is_eulerian(G)): raise nx.NetworkXError('G is not Eulerian.') g = G.__class__(G) if (source is None): v = arbitrary_element(g) else: v = source if g.is_directed(): degree = g.in_degree edges = g.in_edges get_vertex = itemgetter(0) else: degree = g.degree edges = g.edges get_vertex = itemgetter(1) vertex_stack = [v] last_vertex = None while vertex_stack: current_vertex = vertex_stack[(-1)] if (degree(current_vertex) == 0): if (last_vertex is not None): (yield (last_vertex, current_vertex)) last_vertex = current_vertex vertex_stack.pop() else: arbitrary_edge = next(edges(current_vertex)) vertex_stack.append(get_vertex(arbitrary_edge)) g.remove_edge(*arbitrary_edge)
[ "def", "eulerian_circuit", "(", "G", ",", "source", "=", "None", ")", ":", "if", "(", "not", "is_eulerian", "(", "G", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'G is not Eulerian.'", ")", "g", "=", "G", ".", "__class__", "(", "G", ")", "if", "(", "source", "is", "None", ")", ":", "v", "=", "arbitrary_element", "(", "g", ")", "else", ":", "v", "=", "source", "if", "g", ".", "is_directed", "(", ")", ":", "degree", "=", "g", ".", "in_degree", "edges", "=", "g", ".", "in_edges", "get_vertex", "=", "itemgetter", "(", "0", ")", "else", ":", "degree", "=", "g", ".", "degree", "edges", "=", "g", ".", "edges", "get_vertex", "=", "itemgetter", "(", "1", ")", "vertex_stack", "=", "[", "v", "]", "last_vertex", "=", "None", "while", "vertex_stack", ":", "current_vertex", "=", "vertex_stack", "[", "(", "-", "1", ")", "]", "if", "(", "degree", "(", "current_vertex", ")", "==", "0", ")", ":", "if", "(", "last_vertex", "is", "not", "None", ")", ":", "(", "yield", "(", "last_vertex", ",", "current_vertex", ")", ")", "last_vertex", "=", "current_vertex", "vertex_stack", ".", "pop", "(", ")", "else", ":", "arbitrary_edge", "=", "next", "(", "edges", "(", "current_vertex", ")", ")", "vertex_stack", ".", "append", "(", "get_vertex", "(", "arbitrary_edge", ")", ")", "g", ".", "remove_edge", "(", "*", "arbitrary_edge", ")" ]
returns an iterator over the edges of an eulerian circuit in g .
train
false
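The snippet mirrors networkx's own eulerian_circuit; a usage sketch through the library:

    import networkx as nx

    G = nx.cycle_graph(3)  # a triangle; every vertex has even degree, so G is Eulerian
    print(list(nx.eulerian_circuit(G, source=0)))
    # e.g. [(0, 2), (2, 1), (1, 0)]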
9,086
@login_required def show_receipt(request, ordernum): try: order = Order.objects.get(id=ordernum) except Order.DoesNotExist: raise Http404('Order not found!') if ((order.user != request.user) or (order.status not in ['purchased', 'refunded'])): raise Http404('Order not found!') if ('application/json' in request.META.get('HTTP_ACCEPT', '')): return _show_receipt_json(order) else: return _show_receipt_html(request, order)
[ "@", "login_required", "def", "show_receipt", "(", "request", ",", "ordernum", ")", ":", "try", ":", "order", "=", "Order", ".", "objects", ".", "get", "(", "id", "=", "ordernum", ")", "except", "Order", ".", "DoesNotExist", ":", "raise", "Http404", "(", "'Order not found!'", ")", "if", "(", "(", "order", ".", "user", "!=", "request", ".", "user", ")", "or", "(", "order", ".", "status", "not", "in", "[", "'purchased'", ",", "'refunded'", "]", ")", ")", ":", "raise", "Http404", "(", "'Order not found!'", ")", "if", "(", "'application/json'", "in", "request", ".", "META", ".", "get", "(", "'HTTP_ACCEPT'", ",", "''", ")", ")", ":", "return", "_show_receipt_json", "(", "order", ")", "else", ":", "return", "_show_receipt_html", "(", "request", ",", "order", ")" ]
displays a receipt for a particular order .
train
false
9,089
def impute_ros(observations, censorship, df=None, min_uncensored=2, max_fraction_censored=0.8, substitution_fraction=0.5, transform_in=numpy.log, transform_out=numpy.exp, as_array=True): if (df is None): df = pandas.DataFrame({'obs': observations, 'cen': censorship}) observations = 'obs' censorship = 'cen' N_observations = df.shape[0] N_censored = df[censorship].astype(int).sum() N_uncensored = (N_observations - N_censored) fraction_censored = (N_censored / N_observations) if (N_censored == 0): output = df[[observations, censorship]].copy() output.loc[:, 'final'] = df[observations] elif ((N_uncensored < min_uncensored) or (fraction_censored > max_fraction_censored)): output = df[[observations, censorship]].copy() output.loc[:, 'final'] = df[observations] output.loc[(df[censorship], 'final')] *= substitution_fraction else: output = _do_ros(df, observations, censorship, transform_in, transform_out) if as_array: output = output['final'].values return output
[ "def", "impute_ros", "(", "observations", ",", "censorship", ",", "df", "=", "None", ",", "min_uncensored", "=", "2", ",", "max_fraction_censored", "=", "0.8", ",", "substitution_fraction", "=", "0.5", ",", "transform_in", "=", "numpy", ".", "log", ",", "transform_out", "=", "numpy", ".", "exp", ",", "as_array", "=", "True", ")", ":", "if", "(", "df", "is", "None", ")", ":", "df", "=", "pandas", ".", "DataFrame", "(", "{", "'obs'", ":", "observations", ",", "'cen'", ":", "censorship", "}", ")", "observations", "=", "'obs'", "censorship", "=", "'cen'", "N_observations", "=", "df", ".", "shape", "[", "0", "]", "N_censored", "=", "df", "[", "censorship", "]", ".", "astype", "(", "int", ")", ".", "sum", "(", ")", "N_uncensored", "=", "(", "N_observations", "-", "N_censored", ")", "fraction_censored", "=", "(", "N_censored", "/", "N_observations", ")", "if", "(", "N_censored", "==", "0", ")", ":", "output", "=", "df", "[", "[", "observations", ",", "censorship", "]", "]", ".", "copy", "(", ")", "output", ".", "loc", "[", ":", ",", "'final'", "]", "=", "df", "[", "observations", "]", "elif", "(", "(", "N_uncensored", "<", "min_uncensored", ")", "or", "(", "fraction_censored", ">", "max_fraction_censored", ")", ")", ":", "output", "=", "df", "[", "[", "observations", ",", "censorship", "]", "]", ".", "copy", "(", ")", "output", ".", "loc", "[", ":", ",", "'final'", "]", "=", "df", "[", "observations", "]", "output", ".", "loc", "[", "(", "df", "[", "censorship", "]", ",", "'final'", ")", "]", "*=", "substitution_fraction", "else", ":", "output", "=", "_do_ros", "(", "df", ",", "observations", ",", "censorship", ",", "transform_in", ",", "transform_out", ")", "if", "as_array", ":", "output", "=", "output", "[", "'final'", "]", ".", "values", "return", "output" ]
impute censored dataset using regression on order statistics .
train
false
9,091
def art_for_asin(album): if album.asin: for index in AMAZON_INDICES: (yield (AMAZON_URL % (album.asin, index)))
[ "def", "art_for_asin", "(", "album", ")", ":", "if", "album", ".", "asin", ":", "for", "index", "in", "AMAZON_INDICES", ":", "(", "yield", "(", "AMAZON_URL", "%", "(", "album", ".", "asin", ",", "index", ")", ")", ")" ]
generate urls using amazon id string .
train
false
9,092
def setUnicodeValue(glyph, glyphList): if (glyph.name in glyphList): glyph.unicode = int(glyphList[glyph.name], 16) else: uvNameMatch = re.match('uni([\\dA-F]{4})$', glyph.name) if uvNameMatch: glyph.unicode = int(uvNameMatch.group(1), 16)
[ "def", "setUnicodeValue", "(", "glyph", ",", "glyphList", ")", ":", "if", "(", "glyph", ".", "name", "in", "glyphList", ")", ":", "glyph", ".", "unicode", "=", "int", "(", "glyphList", "[", "glyph", ".", "name", "]", ",", "16", ")", "else", ":", "uvNameMatch", "=", "re", ".", "match", "(", "'uni([\\\\dA-F]{4})$'", ",", "glyph", ".", "name", ")", "if", "uvNameMatch", ":", "glyph", ".", "unicode", "=", "int", "(", "uvNameMatch", ".", "group", "(", "1", ")", ",", "16", ")" ]
try to ensure glyph has a unicode value -- used by fdk to make otfs .
train
false
9,093
def entry_breadcrumbs(entry): date = entry.publication_date if is_aware(date): date = localtime(date) return [year_crumb(date), month_crumb(date), day_crumb(date), Crumb(entry.title)]
[ "def", "entry_breadcrumbs", "(", "entry", ")", ":", "date", "=", "entry", ".", "publication_date", "if", "is_aware", "(", "date", ")", ":", "date", "=", "localtime", "(", "date", ")", "return", "[", "year_crumb", "(", "date", ")", ",", "month_crumb", "(", "date", ")", ",", "day_crumb", "(", "date", ")", ",", "Crumb", "(", "entry", ".", "title", ")", "]" ]
breadcrumbs for an entry .
train
true
9,094
def test_dae_yaml(): limited_epoch_train(os.path.join(pylearn2.__path__[0], 'scripts/autoencoder_example/dae.yaml'))
[ "def", "test_dae_yaml", "(", ")", ":", "limited_epoch_train", "(", "os", ".", "path", ".", "join", "(", "pylearn2", ".", "__path__", "[", "0", "]", ",", "'scripts/autoencoder_example/dae.yaml'", ")", ")" ]
train a denoising autoencoder for a single epoch .
train
false
9,095
@must_be_logged_in def user_choose_mailing_lists(auth, **kwargs): user = auth.user json_data = escape_html(request.get_json()) if json_data: for (list_name, subscribe) in json_data.items(): if (list_name == settings.OSF_HELP_LIST): update_osf_help_mails_subscription(user=user, subscribe=subscribe) else: update_mailchimp_subscription(user, list_name, subscribe) else: raise HTTPError(http.BAD_REQUEST, data=dict(message_long="Must provide a dictionary of the format {'mailing list name': Boolean}")) user.save() all_mailing_lists = {} all_mailing_lists.update(user.mailchimp_mailing_lists) all_mailing_lists.update(user.osf_mailing_lists) return ({'message': 'Successfully updated mailing lists', 'result': all_mailing_lists}, 200)
[ "@", "must_be_logged_in", "def", "user_choose_mailing_lists", "(", "auth", ",", "**", "kwargs", ")", ":", "user", "=", "auth", ".", "user", "json_data", "=", "escape_html", "(", "request", ".", "get_json", "(", ")", ")", "if", "json_data", ":", "for", "(", "list_name", ",", "subscribe", ")", "in", "json_data", ".", "items", "(", ")", ":", "if", "(", "list_name", "==", "settings", ".", "OSF_HELP_LIST", ")", ":", "update_osf_help_mails_subscription", "(", "user", "=", "user", ",", "subscribe", "=", "subscribe", ")", "else", ":", "update_mailchimp_subscription", "(", "user", ",", "list_name", ",", "subscribe", ")", "else", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "dict", "(", "message_long", "=", "\"Must provide a dictionary of the format {'mailing list name': Boolean}\"", ")", ")", "user", ".", "save", "(", ")", "all_mailing_lists", "=", "{", "}", "all_mailing_lists", ".", "update", "(", "user", ".", "mailchimp_mailing_lists", ")", "all_mailing_lists", ".", "update", "(", "user", ".", "osf_mailing_lists", ")", "return", "(", "{", "'message'", ":", "'Successfully updated mailing lists'", ",", "'result'", ":", "all_mailing_lists", "}", ",", "200", ")" ]
update mailing list subscriptions on the user model and in mailchimp . example input: { "open science framework general" : true } .
train
false
9,096
@events.route('/<int:event_id>/role-invite/decline/<hash>', methods=['GET', 'POST']) def user_role_invite_decline(event_id, hash): event = DataGetter.get_event(event_id) user = current_user role_invite = DataGetter.get_event_role_invite(event.id, hash, email=user.email) if role_invite: if role_invite.has_expired(): delete_from_db(role_invite, 'Deleted RoleInvite') flash('Sorry, the invitation link has expired.', 'error') return redirect(url_for('.details_view', event_id=event.id)) DataManager.decline_role_invite(role_invite) flash('You have declined the role invite.') return redirect(url_for('.details_view', event_id=event.id)) else: abort(404)
[ "@", "events", ".", "route", "(", "'/<int:event_id>/role-invite/decline/<hash>'", ",", "methods", "=", "[", "'GET'", ",", "'POST'", "]", ")", "def", "user_role_invite_decline", "(", "event_id", ",", "hash", ")", ":", "event", "=", "DataGetter", ".", "get_event", "(", "event_id", ")", "user", "=", "current_user", "role_invite", "=", "DataGetter", ".", "get_event_role_invite", "(", "event", ".", "id", ",", "hash", ",", "email", "=", "user", ".", "email", ")", "if", "role_invite", ":", "if", "role_invite", ".", "has_expired", "(", ")", ":", "delete_from_db", "(", "role_invite", ",", "'Deleted RoleInvite'", ")", "flash", "(", "'Sorry, the invitation link has expired.'", ",", "'error'", ")", "return", "redirect", "(", "url_for", "(", "'.details_view'", ",", "event_id", "=", "event", ".", "id", ")", ")", "DataManager", ".", "decline_role_invite", "(", "role_invite", ")", "flash", "(", "'You have declined the role invite.'", ")", "return", "redirect", "(", "url_for", "(", "'.details_view'", ",", "event_id", "=", "event", ".", "id", ")", ")", "else", ":", "abort", "(", "404", ")" ]
decline user-role invite for the event .
train
false
9,097
def _cmpBottom(a, b): return _cmpTop(a, b, what='bottom 10 rank')
[ "def", "_cmpBottom", "(", "a", ",", "b", ")", ":", "return", "_cmpTop", "(", "a", ",", "b", ",", "what", "=", "'bottom 10 rank'", ")" ]
compare function used to sort top 250/bottom 10 rank .
train
false
9,100
def getSimInfo(): try: mContext = autoclass('android.content.Context') pythonActivity = autoclass('org.renpy.android.PythonService') telephonyManager = cast('android.telephony.TelephonyManager', pythonActivity.mService.getSystemService(mContext.TELEPHONY_SERVICE)) phoneCount = telephonyManager.getPhoneCount() return phoneCount except Exception as e: return None
[ "def", "getSimInfo", "(", ")", ":", "try", ":", "mContext", "=", "autoclass", "(", "'android.content.Context'", ")", "pythonActivity", "=", "autoclass", "(", "'org.renpy.android.PythonService'", ")", "telephonyManager", "=", "cast", "(", "'android.telephony.TelephonyManager'", ",", "pythonActivity", ".", "mService", ".", "getSystemService", "(", "mContext", ".", "TELEPHONY_SERVICE", ")", ")", "phoneCount", "=", "telephonyManager", ".", "getPhoneCount", "(", ")", "return", "phoneCount", "except", "Exception", "as", "e", ":", "return", "None" ]
returns the device's phone (sim) count , or none on failure .
train
false
9,101
def _ContainsComments(node): if isinstance(node, pytree.Leaf): return (node.type == grammar_token.COMMENT) for child in node.children: if _ContainsComments(child): return True return False
[ "def", "_ContainsComments", "(", "node", ")", ":", "if", "isinstance", "(", "node", ",", "pytree", ".", "Leaf", ")", ":", "return", "(", "node", ".", "type", "==", "grammar_token", ".", "COMMENT", ")", "for", "child", "in", "node", ".", "children", ":", "if", "_ContainsComments", "(", "child", ")", ":", "return", "True", "return", "False" ]
return true if the node or any of its children contains a comment .
train
false
9,102
def signed_varint(i): if (i >= 0): return varint((i << 1)) return varint(((i << 1) ^ (~ 0)))
[ "def", "signed_varint", "(", "i", ")", ":", "if", "(", "i", ">=", "0", ")", ":", "return", "varint", "(", "(", "i", "<<", "1", ")", ")", "return", "varint", "(", "(", "(", "i", "<<", "1", ")", "^", "(", "~", "0", ")", ")", ")" ]
zig-zag encodes a signed integer into a varint .
train
false
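The zig-zag step maps signed integers onto non-negative codes before varint encoding; varint itself is defined elsewhere, so this sketch isolates just the mapping:

    def zigzag(i):
        # 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
        return (i << 1) if i >= 0 else ((i << 1) ^ ~0)

    print([zigzag(i) for i in (0, -1, 1, -2, 2, -3)])  # -> [0, 1, 2, 3, 4, 5]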
9,103
def reload_from_cwd(module, reloader=None): if (reloader is None): reloader = reload with cwd_in_path(): return reloader(module)
[ "def", "reload_from_cwd", "(", "module", ",", "reloader", "=", "None", ")", ":", "if", "(", "reloader", "is", "None", ")", ":", "reloader", "=", "reload", "with", "cwd_in_path", "(", ")", ":", "return", "reloader", "(", "module", ")" ]
reload module , ensuring the current working directory is on the path .
train
false
9,104
def inodeusage(args=None): flags = _clean_flags(args, 'disk.inodeusage') cmd = 'df -iP' if flags: cmd += ' -{0}'.format(flags) ret = {} out = __salt__['cmd.run'](cmd, python_shell=False).splitlines() for line in out: if line.startswith('Filesystem'): continue comps = line.split() if (not comps): continue try: if (__grains__['kernel'] == 'OpenBSD'): ret[comps[8]] = {'inodes': (int(comps[5]) + int(comps[6])), 'used': comps[5], 'free': comps[6], 'use': comps[7], 'filesystem': comps[0]} else: ret[comps[5]] = {'inodes': comps[1], 'used': comps[2], 'free': comps[3], 'use': comps[4], 'filesystem': comps[0]} except (IndexError, ValueError): log.error('Problem parsing inode usage information') ret = {} return ret
[ "def", "inodeusage", "(", "args", "=", "None", ")", ":", "flags", "=", "_clean_flags", "(", "args", ",", "'disk.inodeusage'", ")", "cmd", "=", "'df -iP'", "if", "flags", ":", "cmd", "+=", "' -{0}'", ".", "format", "(", "flags", ")", "ret", "=", "{", "}", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", ".", "splitlines", "(", ")", "for", "line", "in", "out", ":", "if", "line", ".", "startswith", "(", "'Filesystem'", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "if", "(", "not", "comps", ")", ":", "continue", "try", ":", "if", "(", "__grains__", "[", "'kernel'", "]", "==", "'OpenBSD'", ")", ":", "ret", "[", "comps", "[", "8", "]", "]", "=", "{", "'inodes'", ":", "(", "int", "(", "comps", "[", "5", "]", ")", "+", "int", "(", "comps", "[", "6", "]", ")", ")", ",", "'used'", ":", "comps", "[", "5", "]", ",", "'free'", ":", "comps", "[", "6", "]", ",", "'use'", ":", "comps", "[", "7", "]", ",", "'filesystem'", ":", "comps", "[", "0", "]", "}", "else", ":", "ret", "[", "comps", "[", "5", "]", "]", "=", "{", "'inodes'", ":", "comps", "[", "1", "]", ",", "'used'", ":", "comps", "[", "2", "]", ",", "'free'", ":", "comps", "[", "3", "]", ",", "'use'", ":", "comps", "[", "4", "]", ",", "'filesystem'", ":", "comps", "[", "0", "]", "}", "except", "(", "IndexError", ",", "ValueError", ")", ":", "log", ".", "error", "(", "'Problem parsing inode usage information'", ")", "ret", "=", "{", "}", "return", "ret" ]
return inode usage information for volumes mounted on this minion .
train
true
9,105
def getPegCenterXs(numberOfSteps, pegCenterX, stepX): pegCenterXs = [] for stepIndex in xrange(numberOfSteps): pegCenterXs.append(pegCenterX) pegCenterX += stepX return pegCenterXs
[ "def", "getPegCenterXs", "(", "numberOfSteps", ",", "pegCenterX", ",", "stepX", ")", ":", "pegCenterXs", "=", "[", "]", "for", "stepIndex", "in", "xrange", "(", "numberOfSteps", ")", ":", "pegCenterXs", ".", "append", "(", "pegCenterX", ")", "pegCenterX", "+=", "stepX", "return", "pegCenterXs" ]
get the peg center x list .
train
false
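getPegCenterXs is a plain arithmetic progression (note the snippet targets Python 2, where xrange is built in):

    print(getPegCenterXs(4, 1.0, 2.5))
    # -> [1.0, 3.5, 6.0, 8.5]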
9,108
def dumb_css_parser(data): importIndex = data.find('@import') while (importIndex != (-1)): data = (data[0:importIndex] + data[(data.find(';', importIndex) + 1):]) importIndex = data.find('@import') elements = [x.split('{') for x in data.split('}') if ('{' in x.strip())] try: elements = dict([(a.strip(), dumb_property_dict(b)) for (a, b) in elements]) except ValueError: elements = {} return elements
[ "def", "dumb_css_parser", "(", "data", ")", ":", "importIndex", "=", "data", ".", "find", "(", "'@import'", ")", "while", "(", "importIndex", "!=", "(", "-", "1", ")", ")", ":", "data", "=", "(", "data", "[", "0", ":", "importIndex", "]", "+", "data", "[", "(", "data", ".", "find", "(", "';'", ",", "importIndex", ")", "+", "1", ")", ":", "]", ")", "importIndex", "=", "data", ".", "find", "(", "'@import'", ")", "elements", "=", "[", "x", ".", "split", "(", "'{'", ")", "for", "x", "in", "data", ".", "split", "(", "'}'", ")", "if", "(", "'{'", "in", "x", ".", "strip", "(", ")", ")", "]", "try", ":", "elements", "=", "dict", "(", "[", "(", "a", ".", "strip", "(", ")", ",", "dumb_property_dict", "(", "b", ")", ")", "for", "(", "a", ",", "b", ")", "in", "elements", "]", ")", "except", "ValueError", ":", "elements", "=", "{", "}", "return", "elements" ]
returns a hash of css selectors , each of which contains a hash of css attributes .
train
true
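dumb_css_parser depends on dumb_property_dict, which is not shown; with an assumed definition that splits 'prop: value' pairs, the parser strips @import rules and returns nested dicts:

    def dumb_property_dict(style):
        # assumed helper: 'color: red; margin: 0' -> {'color': 'red', 'margin': '0'}
        return dict((p.split(':')[0].strip().lower(), p.split(':', 1)[1].strip())
                    for p in style.split(';') if ':' in p)

    print(dumb_css_parser('@import url(a.css); p { color: red }'))
    # -> {'p': {'color': 'red'}}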
9,109
def is_repo_link(val): return val.startswith(u'weblate://')
[ "def", "is_repo_link", "(", "val", ")", ":", "return", "val", ".", "startswith", "(", "u'weblate://'", ")" ]
checks whether the repository is just a link to another one .
train
false
9,110
@requires_application() def test_basics_desktop(): _test_basics('gl2') with Canvas(): _test_setting_parameters() _test_enabling_disabling() _test_setting_stuff() _test_object_creation_and_deletion() _test_fbo() try: gl.gl2._get_gl_func('foo', None, ()) except RuntimeError as exp: exp = str(exp) assert ('version' in exp) assert ('unknown' not in exp) gl.glFinish()
[ "@", "requires_application", "(", ")", "def", "test_basics_desktop", "(", ")", ":", "_test_basics", "(", "'gl2'", ")", "with", "Canvas", "(", ")", ":", "_test_setting_parameters", "(", ")", "_test_enabling_disabling", "(", ")", "_test_setting_stuff", "(", ")", "_test_object_creation_and_deletion", "(", ")", "_test_fbo", "(", ")", "try", ":", "gl", ".", "gl2", ".", "_get_gl_func", "(", "'foo'", ",", "None", ",", "(", ")", ")", "except", "RuntimeError", "as", "exp", ":", "exp", "=", "str", "(", "exp", ")", "assert", "(", "'version'", "in", "exp", ")", "assert", "(", "'unknown'", "not", "in", "exp", ")", "gl", ".", "glFinish", "(", ")" ]
test desktop gl backend for basic functionality .
train
false
9,111
def logical_and(image1, image2): image1.load() image2.load() return image1._new(image1.im.chop_and(image2.im))
[ "def", "logical_and", "(", "image1", ",", "image2", ")", ":", "image1", ".", "load", "(", ")", "image2", ".", "load", "(", ")", "return", "image1", ".", "_new", "(", "image1", ".", "im", ".", "chop_and", "(", "image2", ".", "im", ")", ")" ]
logical and between two images .
train
false
9,113
def getMatrixKey(row, column, prefix=''): return (((prefix + 'm') + str((row + 1))) + str((column + 1)))
[ "def", "getMatrixKey", "(", "row", ",", "column", ",", "prefix", "=", "''", ")", ":", "return", "(", "(", "(", "prefix", "+", "'m'", ")", "+", "str", "(", "(", "row", "+", "1", ")", ")", ")", "+", "str", "(", "(", "column", "+", "1", ")", ")", ")" ]
get the key string from row & column .
train
false
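getMatrixKey builds one-based matrix element names:

    print(getMatrixKey(0, 0))        # -> m11
    print(getMatrixKey(2, 3, 'a'))   # -> am34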
9,116
@webob.dec.wsgify @util.check_accept('application/json') def list_for_resource_provider(req): context = req.environ['placement.context'] uuid = util.wsgi_path_item(req.environ, 'uuid') try: resource_provider = objects.ResourceProvider.get_by_uuid(context, uuid) except exception.NotFound as exc: raise webob.exc.HTTPNotFound((_("Resource provider '%(rp_uuid)s' not found: %(error)s") % {'rp_uuid': uuid, 'error': exc}), json_formatter=util.json_error_formatter) allocations = objects.AllocationList.get_all_by_resource_provider_uuid(context, uuid) allocations_json = jsonutils.dumps(_serialize_allocations_for_resource_provider(allocations, resource_provider)) req.response.status = 200 req.response.body = encodeutils.to_utf8(allocations_json) req.response.content_type = 'application/json' return req.response
[ "@", "webob", ".", "dec", ".", "wsgify", "@", "util", ".", "check_accept", "(", "'application/json'", ")", "def", "list_for_resource_provider", "(", "req", ")", ":", "context", "=", "req", ".", "environ", "[", "'placement.context'", "]", "uuid", "=", "util", ".", "wsgi_path_item", "(", "req", ".", "environ", ",", "'uuid'", ")", "try", ":", "resource_provider", "=", "objects", ".", "ResourceProvider", ".", "get_by_uuid", "(", "context", ",", "uuid", ")", "except", "exception", ".", "NotFound", "as", "exc", ":", "raise", "webob", ".", "exc", ".", "HTTPNotFound", "(", "(", "_", "(", "\"Resource provider '%(rp_uuid)s' not found: %(error)s\"", ")", "%", "{", "'rp_uuid'", ":", "uuid", ",", "'error'", ":", "exc", "}", ")", ",", "json_formatter", "=", "util", ".", "json_error_formatter", ")", "allocations", "=", "objects", ".", "AllocationList", ".", "get_all_by_resource_provider_uuid", "(", "context", ",", "uuid", ")", "allocations_json", "=", "jsonutils", ".", "dumps", "(", "_serialize_allocations_for_resource_provider", "(", "allocations", ",", "resource_provider", ")", ")", "req", ".", "response", ".", "status", "=", "200", "req", ".", "response", ".", "body", "=", "encodeutils", ".", "to_utf8", "(", "allocations_json", ")", "req", ".", "response", ".", "content_type", "=", "'application/json'", "return", "req", ".", "response" ]
list allocations associated with a resource provider .
train
false
9,117
def xblock_primary_child_category(xblock): category = xblock.category if (category == 'course'): return 'chapter' elif (category == 'chapter'): return 'sequential' elif (category == 'sequential'): return 'vertical' return None
[ "def", "xblock_primary_child_category", "(", "xblock", ")", ":", "category", "=", "xblock", ".", "category", "if", "(", "category", "==", "'course'", ")", ":", "return", "'chapter'", "elif", "(", "category", "==", "'chapter'", ")", ":", "return", "'sequential'", "elif", "(", "category", "==", "'sequential'", ")", ":", "return", "'vertical'", "return", "None" ]
returns the primary child category for the specified xblock .
train
false
9,118
@app.teardown_appcontext def close_db(error): if hasattr(g, 'sqlite_db'): g.sqlite_db.close()
[ "@", "app", ".", "teardown_appcontext", "def", "close_db", "(", "error", ")", ":", "if", "hasattr", "(", "g", ",", "'sqlite_db'", ")", ":", "g", ".", "sqlite_db", ".", "close", "(", ")" ]
closes the database again at the end of the request .
train
false
9,120
def _get_notifier(): global _notifier if (_notifier is None): host = (CONF.default_publisher_id or socket.gethostname()) try: transport = oslo_messaging.get_notification_transport(CONF) _notifier = oslo_messaging.Notifier(transport, ('identity.%s' % host)) except Exception: LOG.exception(_LE('Failed to construct notifier')) _notifier = False return _notifier
[ "def", "_get_notifier", "(", ")", ":", "global", "_notifier", "if", "(", "_notifier", "is", "None", ")", ":", "host", "=", "(", "CONF", ".", "default_publisher_id", "or", "socket", ".", "gethostname", "(", ")", ")", "try", ":", "transport", "=", "oslo_messaging", ".", "get_notification_transport", "(", "CONF", ")", "_notifier", "=", "oslo_messaging", ".", "Notifier", "(", "transport", ",", "(", "'identity.%s'", "%", "host", ")", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "_LE", "(", "'Failed to construct notifier'", ")", ")", "_notifier", "=", "False", "return", "_notifier" ]
check the context for the notifier and construct it if not present .
train
false
9,121
def to_records(df): from ...array.core import Array if (not isinstance(df, (DataFrame, Series))): raise TypeError('df must be either DataFrame or Series') name = ('to-records-' + tokenize(df)) dsk = {(name, i): (M.to_records, key) for (i, key) in enumerate(df._keys())} x = df._meta.to_records() chunks = (((np.nan,) * df.npartitions),) return Array(merge(df.dask, dsk), name, chunks, x.dtype)
[ "def", "to_records", "(", "df", ")", ":", "from", "...", "array", ".", "core", "import", "Array", "if", "(", "not", "isinstance", "(", "df", ",", "(", "DataFrame", ",", "Series", ")", ")", ")", ":", "raise", "TypeError", "(", "'df must be either DataFrame or Series'", ")", "name", "=", "(", "'to-records-'", "+", "tokenize", "(", "df", ")", ")", "dsk", "=", "{", "(", "name", ",", "i", ")", ":", "(", "M", ".", "to_records", ",", "key", ")", "for", "(", "i", ",", "key", ")", "in", "enumerate", "(", "df", ".", "_keys", "(", ")", ")", "}", "x", "=", "df", ".", "_meta", ".", "to_records", "(", ")", "chunks", "=", "(", "(", "(", "np", ".", "nan", ",", ")", "*", "df", ".", "npartitions", ")", ",", ")", "return", "Array", "(", "merge", "(", "df", ".", "dask", ",", "dsk", ")", ",", "name", ",", "chunks", ",", "x", ".", "dtype", ")" ]
create a dask array from a dask dataframe . warning: this creates a dask array without precise shape information .
train
false
9,123
def milestones_achieved_by_user(user, namespace): if (not settings.FEATURES.get('MILESTONES_APP')): return None return milestones_api.get_user_milestones({'id': user.id}, namespace)
[ "def", "milestones_achieved_by_user", "(", "user", ",", "namespace", ")", ":", "if", "(", "not", "settings", ".", "FEATURES", ".", "get", "(", "'MILESTONES_APP'", ")", ")", ":", "return", "None", "return", "milestones_api", ".", "get_user_milestones", "(", "{", "'id'", ":", "user", ".", "id", "}", ",", "namespace", ")" ]
fetches the list of milestones completed by the user .
train
false
9,124
def p_command_for_bad_initial(p): p[0] = 'BAD INITIAL VALUE IN FOR STATEMENT'
[ "def", "p_command_for_bad_initial", "(", "p", ")", ":", "p", "[", "0", "]", "=", "'BAD INITIAL VALUE IN FOR STATEMENT'" ]
command : for id equals error to expr optstep .
train
false
9,125
def navigation_index(position): if (position is None): return None try: navigation_position = int(position.split('_', 1)[0]) except (ValueError, TypeError): LOGGER.exception(u'Bad position %r passed to navigation_index, will assume first position', position) navigation_position = 1 return navigation_position
[ "def", "navigation_index", "(", "position", ")", ":", "if", "(", "position", "is", "None", ")", ":", "return", "None", "try", ":", "navigation_position", "=", "int", "(", "position", ".", "split", "(", "'_'", ",", "1", ")", "[", "0", "]", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "LOGGER", ".", "exception", "(", "u'Bad position %r passed to navigation_index, will assume first position'", ",", "position", ")", "navigation_position", "=", "1", "return", "navigation_position" ]
get the navigation index from the position argument . argument: position - the result returned from a call to path_to_location .
train
false
9,127
def url_path_join(*pieces): initial = pieces[0].startswith('/') final = pieces[(-1)].endswith('/') stripped = [s.strip('/') for s in pieces] result = '/'.join((s for s in stripped if s)) if initial: result = ('/' + result) if final: result = (result + '/') if (result == '//'): result = '/' return result
[ "def", "url_path_join", "(", "*", "pieces", ")", ":", "initial", "=", "pieces", "[", "0", "]", ".", "startswith", "(", "'/'", ")", "final", "=", "pieces", "[", "(", "-", "1", ")", "]", ".", "endswith", "(", "'/'", ")", "stripped", "=", "[", "s", ".", "strip", "(", "'/'", ")", "for", "s", "in", "pieces", "]", "result", "=", "'/'", ".", "join", "(", "(", "s", "for", "s", "in", "stripped", "if", "s", ")", ")", "if", "initial", ":", "result", "=", "(", "'/'", "+", "result", ")", "if", "final", ":", "result", "=", "(", "result", "+", "'/'", ")", "if", "(", "result", "==", "'//'", ")", ":", "result", "=", "'/'", "return", "result" ]
join components of a url into a relative url . use to prevent double slashes when joining subpaths .
train
false
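a few illustrative calls, using the function as defined above; leading and trailing slashes are preserved while inner duplicates collapse:

print(url_path_join('/base/', '/api/', 'v1'))  # '/base/api/v1'
print(url_path_join('a', '', 'b/'))            # 'a/b/'
print(url_path_join('/'))                      # '/'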
9,128
def _normalize_utf8_keys(kwargs): if any(((type(key) is six.binary_type) for key in kwargs.keys())): dict_type = type(kwargs) return dict_type([(six.text_type(k), v) for (k, v) in kwargs.items()]) return kwargs
[ "def", "_normalize_utf8_keys", "(", "kwargs", ")", ":", "if", "any", "(", "(", "(", "type", "(", "key", ")", "is", "six", ".", "binary_type", ")", "for", "key", "in", "kwargs", ".", "keys", "(", ")", ")", ")", ":", "dict_type", "=", "type", "(", "kwargs", ")", "return", "dict_type", "(", "[", "(", "six", ".", "text_type", "(", "k", ")", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "kwargs", ".", "items", "(", ")", "]", ")", "return", "kwargs" ]
when kwargs are passed literally in a source file, their keys may be byte strings; normalize them to unicode .
train
true
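a sketch of the intended behavior; the code is python 2 era (via six), and the keys below are illustrative:

# under Python 2, byte-string keys force the mapping to be rebuilt
# with unicode keys, preserving the original dict type
kwargs = {b'name': 'x', b'count': 3}
normalized = _normalize_utf8_keys(kwargs)
# on Python 2 this yields {u'name': 'x', u'count': 3}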
9,129
def memorized_datetime(seconds): try: return _datetime_cache[seconds] except KeyError: dt = (_epoch + timedelta(seconds=seconds)) _datetime_cache[seconds] = dt return dt
[ "def", "memorized_datetime", "(", "seconds", ")", ":", "try", ":", "return", "_datetime_cache", "[", "seconds", "]", "except", "KeyError", ":", "dt", "=", "(", "_epoch", "+", "timedelta", "(", "seconds", "=", "seconds", ")", ")", "_datetime_cache", "[", "seconds", "]", "=", "dt", "return", "dt" ]
create only one instance of each distinct datetime .
train
true
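a self-contained sketch; _epoch and _datetime_cache are module-level names in the original source, reproduced here as assumptions:

from datetime import datetime, timedelta

_epoch = datetime(1970, 1, 1)
_datetime_cache = {}

def memorized_datetime(seconds):
    try:
        return _datetime_cache[seconds]
    except KeyError:
        dt = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = dt
        return dt

a = memorized_datetime(86400)
b = memorized_datetime(86400)
print(a is b)  # True -- the same instance is reused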
9,130
def displayable_links_js(request): links = [] if (u'mezzanine.pages' in settings.INSTALLED_APPS): from mezzanine.pages.models import Page is_page = (lambda obj: isinstance(obj, Page)) else: is_page = (lambda obj: False) for (url, obj) in Displayable.objects.url_map(for_user=request.user).items(): title = getattr(obj, u'titles', obj.title) real = hasattr(obj, u'id') page = is_page(obj) if real: verbose_name = (_(u'Page') if page else obj._meta.verbose_name) title = (u'%s: %s' % (verbose_name, title)) links.append((((not page) and real), {u'title': str(title), u'value': url})) sorted_links = sorted(links, key=(lambda link: (link[0], link[1][u'value']))) return HttpResponse(dumps([link[1] for link in sorted_links]))
[ "def", "displayable_links_js", "(", "request", ")", ":", "links", "=", "[", "]", "if", "(", "u'mezzanine.pages'", "in", "settings", ".", "INSTALLED_APPS", ")", ":", "from", "mezzanine", ".", "pages", ".", "models", "import", "Page", "is_page", "=", "(", "lambda", "obj", ":", "isinstance", "(", "obj", ",", "Page", ")", ")", "else", ":", "is_page", "=", "(", "lambda", "obj", ":", "False", ")", "for", "(", "url", ",", "obj", ")", "in", "Displayable", ".", "objects", ".", "url_map", "(", "for_user", "=", "request", ".", "user", ")", ".", "items", "(", ")", ":", "title", "=", "getattr", "(", "obj", ",", "u'titles'", ",", "obj", ".", "title", ")", "real", "=", "hasattr", "(", "obj", ",", "u'id'", ")", "page", "=", "is_page", "(", "obj", ")", "if", "real", ":", "verbose_name", "=", "(", "_", "(", "u'Page'", ")", "if", "page", "else", "obj", ".", "_meta", ".", "verbose_name", ")", "title", "=", "(", "u'%s: %s'", "%", "(", "verbose_name", ",", "title", ")", ")", "links", ".", "append", "(", "(", "(", "(", "not", "page", ")", "and", "real", ")", ",", "{", "u'title'", ":", "str", "(", "title", ")", ",", "u'value'", ":", "url", "}", ")", ")", "sorted_links", "=", "sorted", "(", "links", ",", "key", "=", "(", "lambda", "link", ":", "(", "link", "[", "0", "]", ",", "link", "[", "1", "]", "[", "u'value'", "]", ")", ")", ")", "return", "HttpResponse", "(", "dumps", "(", "[", "link", "[", "1", "]", "for", "link", "in", "sorted_links", "]", ")", ")" ]
renders a list of url/title pairs for all displayable subclass instances into json that's used to populate a list of links in tinymce .
train
false
9,133
def render_include(content): content = cstr(content) for i in xrange(5): if (u'{% include' in content): paths = re.findall(u'{% include\\s[\'"](.*)[\'"]\\s%}', content) if (not paths): frappe.throw(u'Invalid include path', InvalidIncludePath) for path in paths: (app, app_path) = path.split(u'/', 1) with open(frappe.get_app_path(app, app_path), u'r') as f: include = unicode(f.read(), u'utf-8') if path.endswith(u'.html'): include = html_to_js_template(path, include) content = re.sub(u'{{% include\\s[\'"]{0}[\'"]\\s%}}'.format(path), include, content) else: break return content
[ "def", "render_include", "(", "content", ")", ":", "content", "=", "cstr", "(", "content", ")", "for", "i", "in", "xrange", "(", "5", ")", ":", "if", "(", "u'{% include'", "in", "content", ")", ":", "paths", "=", "re", ".", "findall", "(", "u'{% include\\\\s[\\'\"](.*)[\\'\"]\\\\s%}'", ",", "content", ")", "if", "(", "not", "paths", ")", ":", "frappe", ".", "throw", "(", "u'Invalid include path'", ",", "InvalidIncludePath", ")", "for", "path", "in", "paths", ":", "(", "app", ",", "app_path", ")", "=", "path", ".", "split", "(", "u'/'", ",", "1", ")", "with", "open", "(", "frappe", ".", "get_app_path", "(", "app", ",", "app_path", ")", ",", "u'r'", ")", "as", "f", ":", "include", "=", "unicode", "(", "f", ".", "read", "(", ")", ",", "u'utf-8'", ")", "if", "path", ".", "endswith", "(", "u'.html'", ")", ":", "include", "=", "html_to_js_template", "(", "path", ",", "include", ")", "content", "=", "re", ".", "sub", "(", "u'{{% include\\\\s[\\'\"]{0}[\\'\"]\\\\s%}}'", ".", "format", "(", "path", ")", ",", "include", ",", "content", ")", "else", ":", "break", "return", "content" ]
render {% raw %}{% include "app/path/filename" %}{% endraw %} in a js file .
train
false
9,134
def minify_image(data, minify_to=(1200, 1600), preserve_aspect_ratio=True): img = _data_to_image(data) (owidth, oheight) = img.size (nwidth, nheight) = minify_to if ((owidth <= nwidth) and (oheight <= nheight)): return img if preserve_aspect_ratio: (scaled, nwidth, nheight) = fit_image(owidth, oheight, nwidth, nheight) img.size = (nwidth, nheight) return img
[ "def", "minify_image", "(", "data", ",", "minify_to", "=", "(", "1200", ",", "1600", ")", ",", "preserve_aspect_ratio", "=", "True", ")", ":", "img", "=", "_data_to_image", "(", "data", ")", "(", "owidth", ",", "oheight", ")", "=", "img", ".", "size", "(", "nwidth", ",", "nheight", ")", "=", "minify_to", "if", "(", "(", "owidth", "<=", "nwidth", ")", "and", "(", "oheight", "<=", "nheight", ")", ")", ":", "return", "img", "if", "preserve_aspect_ratio", ":", "(", "scaled", ",", "nwidth", ",", "nheight", ")", "=", "fit_image", "(", "owidth", ",", "oheight", ",", "nwidth", ",", "nheight", ")", "img", ".", "size", "=", "(", "nwidth", ",", "nheight", ")", "return", "img" ]
minify the image to the specified size if it is bigger than that size, and return the minified image .
train
false
9,135
def parse_identity(identity): try: identity = _identities[identity] except KeyError: raise ValueError(('Invalid identity value %r' % (identity,))) return identity
[ "def", "parse_identity", "(", "identity", ")", ":", "try", ":", "identity", "=", "_identities", "[", "identity", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "(", "'Invalid identity value %r'", "%", "(", "identity", ",", ")", ")", ")", "return", "identity" ]
parse an identity value and return the corresponding low-level value for numpy .
train
false
9,137
def sumofsq(x, axis=0): return np.sum((x ** 2), axis=0)
[ "def", "sumofsq", "(", "x", ",", "axis", "=", "0", ")", ":", "return", "np", ".", "sum", "(", "(", "x", "**", "2", ")", ",", "axis", "=", "0", ")" ]
helper function to calculate sum of squares along first axis .
train
false
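for instance, with a 2-D array the summation always runs along axis 0 (the axis argument in the snippet is effectively ignored):

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.sum(x ** 2, axis=0))  # [10. 20.] -- column-wise sum of squares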
9,138
def oo_filter_list(data, filter_attr=None): if (not isinstance(data, list)): raise errors.AnsibleFilterError('|failed expects to filter on a list') if (not isinstance(filter_attr, string_types)): raise errors.AnsibleFilterError('|failed expects filter_attr is a str or unicode') return [x for x in data if ((filter_attr in x) and x[filter_attr])]
[ "def", "oo_filter_list", "(", "data", ",", "filter_attr", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "data", ",", "list", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects to filter on a list'", ")", "if", "(", "not", "isinstance", "(", "filter_attr", ",", "string_types", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects filter_attr is a str or unicode'", ")", "return", "[", "x", "for", "x", "in", "data", "if", "(", "(", "filter_attr", "in", "x", ")", "and", "x", "[", "filter_attr", "]", ")", "]" ]
this returns a list of the entries for which filter_attr is present and truthy .
train
false
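a hypothetical input/output pair (the key 'enabled' is illustrative):

data = [{'enabled': True, 'name': 'a'},
        {'enabled': False, 'name': 'b'},
        {'name': 'c'}]
print(oo_filter_list(data, filter_attr='enabled'))
# [{'enabled': True, 'name': 'a'}] -- missing or falsy entries are dropped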
9,139
@transaction.non_atomic_requests @require_POST @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') def get_students_who_may_enroll(request, course_id): course_key = CourseKey.from_string(course_id) query_features = ['email'] try: lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv(request, course_key, query_features) success_status = _('The enrollment report is being created. This report contains information about learners who can enroll in the course. To view the status of the report, see Pending Tasks below.') return JsonResponse({'status': success_status}) except AlreadyRunningError: already_running_status = _('This enrollment report is currently being created. To view the status of the report, see Pending Tasks below. You will be able to download the report when it is complete.') return JsonResponse({'status': already_running_status})
[ "@", "transaction", ".", "non_atomic_requests", "@", "require_POST", "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "def", "get_students_who_may_enroll", "(", "request", ",", "course_id", ")", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "query_features", "=", "[", "'email'", "]", "try", ":", "lms", ".", "djangoapps", ".", "instructor_task", ".", "api", ".", "submit_calculate_may_enroll_csv", "(", "request", ",", "course_key", ",", "query_features", ")", "success_status", "=", "_", "(", "'The enrollment report is being created. This report contains information about learners who can enroll in the course. To view the status of the report, see Pending Tasks below.'", ")", "return", "JsonResponse", "(", "{", "'status'", ":", "success_status", "}", ")", "except", "AlreadyRunningError", ":", "already_running_status", "=", "_", "(", "'This enrollment report is currently being created. To view the status of the report, see Pending Tasks below. You will be able to download the report when it is complete.'", ")", "return", "JsonResponse", "(", "{", "'status'", ":", "already_running_status", "}", ")" ]
initiate generation of a csv file containing information about students who may enroll in a course .
train
false
9,140
def wrap_errors(errors, function, args, kwargs): try: return (True, function(*args, **kwargs)) except errors as ex: return (False, ex)
[ "def", "wrap_errors", "(", "errors", ",", "function", ",", "args", ",", "kwargs", ")", ":", "try", ":", "return", "(", "True", ",", "function", "(", "*", "args", ",", "**", "kwargs", ")", ")", "except", "errors", "as", "ex", ":", "return", "(", "False", ",", "ex", ")" ]
call the function, returning (True, result) on success or (False, exception) when one of the given errors is raised .
train
false
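a minimal sketch of both outcomes, using divmod as the wrapped function:

ok, value = wrap_errors((ZeroDivisionError,), divmod, (7, 2), {})
print(ok, value)      # True (3, 1)

ok, err = wrap_errors((ZeroDivisionError,), divmod, (7, 0), {})
print(ok, type(err))  # False <class 'ZeroDivisionError'>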
9,141
@check_job_permission def task_attempt_counters(request, job, taskid, attemptid): job_link = JobLinkage(request.jt, job.jobId) task = job_link.get_task(taskid) attempt = task.get_attempt(attemptid) counters = {} if attempt: counters = attempt.counters return render('counters.html', request, {'counters': counters})
[ "@", "check_job_permission", "def", "task_attempt_counters", "(", "request", ",", "job", ",", "taskid", ",", "attemptid", ")", ":", "job_link", "=", "JobLinkage", "(", "request", ".", "jt", ",", "job", ".", "jobId", ")", "task", "=", "job_link", ".", "get_task", "(", "taskid", ")", "attempt", "=", "task", ".", "get_attempt", "(", "attemptid", ")", "counters", "=", "{", "}", "if", "attempt", ":", "counters", "=", "attempt", ".", "counters", "return", "render", "(", "'counters.html'", ",", "request", ",", "{", "'counters'", ":", "counters", "}", ")" ]
we get here from /jobs/jobid/tasks/taskid/attempts/attemptid/counters .
train
false
9,142
def filter(names, pat): result = [] pat = os.path.normcase(pat) match = _compile_pattern(pat) if (os.path is posixpath): for name in names: if match(name): result.append(name) else: for name in names: if match(os.path.normcase(name)): result.append(name) return result
[ "def", "filter", "(", "names", ",", "pat", ")", ":", "result", "=", "[", "]", "pat", "=", "os", ".", "path", ".", "normcase", "(", "pat", ")", "match", "=", "_compile_pattern", "(", "pat", ")", "if", "(", "os", ".", "path", "is", "posixpath", ")", ":", "for", "name", "in", "names", ":", "if", "match", "(", "name", ")", ":", "result", ".", "append", "(", "name", ")", "else", ":", "for", "name", "in", "names", ":", "if", "match", "(", "os", ".", "path", ".", "normcase", "(", "name", ")", ")", ":", "result", ".", "append", "(", "name", ")", "return", "result" ]
return the subset of the list of names that match the given pattern .
train
false
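this mirrors the stdlib fnmatch.filter; a quick check with the library version:

import fnmatch

names = ['setup.py', 'README.md', 'util.py']
print(fnmatch.filter(names, '*.py'))  # ['setup.py', 'util.py']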
9,143
def from_time(year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None): def str_or_stars(i, length): if (i is None): return ('*' * length) else: return str(i).rjust(length, '0') wmi_time = '' wmi_time += str_or_stars(year, 4) wmi_time += str_or_stars(month, 2) wmi_time += str_or_stars(day, 2) wmi_time += str_or_stars(hours, 2) wmi_time += str_or_stars(minutes, 2) wmi_time += str_or_stars(seconds, 2) wmi_time += '.' wmi_time += str_or_stars(microseconds, 6) if (timezone is None): wmi_time += '+' else: try: int(timezone) except ValueError: wmi_time += '+' else: if (timezone >= 0): wmi_time += '+' else: wmi_time += '-' timezone = abs(timezone) wmi_time += str_or_stars(timezone, 3) return wmi_time
[ "def", "from_time", "(", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hours", "=", "None", ",", "minutes", "=", "None", ",", "seconds", "=", "None", ",", "microseconds", "=", "None", ",", "timezone", "=", "None", ")", ":", "def", "str_or_stars", "(", "i", ",", "length", ")", ":", "if", "(", "i", "is", "None", ")", ":", "return", "(", "'*'", "*", "length", ")", "else", ":", "return", "str", "(", "i", ")", ".", "rjust", "(", "length", ",", "'0'", ")", "wmi_time", "=", "''", "wmi_time", "+=", "str_or_stars", "(", "year", ",", "4", ")", "wmi_time", "+=", "str_or_stars", "(", "month", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "day", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "hours", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "minutes", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "seconds", ",", "2", ")", "wmi_time", "+=", "'.'", "wmi_time", "+=", "str_or_stars", "(", "microseconds", ",", "6", ")", "if", "(", "timezone", "is", "None", ")", ":", "wmi_time", "+=", "'+'", "else", ":", "try", ":", "int", "(", "timezone", ")", "except", "ValueError", ":", "wmi_time", "+=", "'+'", "else", ":", "if", "(", "timezone", ">=", "0", ")", ":", "wmi_time", "+=", "'+'", "else", ":", "wmi_time", "+=", "'-'", "timezone", "=", "abs", "(", "timezone", ")", "wmi_time", "+=", "str_or_stars", "(", "timezone", ",", "3", ")", "return", "wmi_time" ]
convenience wrapper to take a series of date/time elements and return a wmi time of the form yyyymmddhhmmss.mmmmmm+uuu .
train
true
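a sketch of the resulting strings, assuming the trailing timezone field is appended in every case as in the original module; unspecified fields become '*' wildcards:

print(from_time(year=2020, month=1, day=2, hours=3,
                minutes=4, seconds=5, microseconds=0, timezone=60))
# '20200102030405.000000+060'

print(from_time(year=2020))
# '2020**********.******+***'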
9,144
@utils.decorator def non_transactional(func, args, kwds, allow_existing=True): from . import tasklets ctx = tasklets.get_context() if (not ctx.in_transaction()): return func(*args, **kwds) if (not allow_existing): raise datastore_errors.BadRequestError(('%s cannot be called within a transaction.' % func.__name__)) save_ctx = ctx while ctx.in_transaction(): ctx = ctx._parent_context if (ctx is None): raise datastore_errors.BadRequestError('Context without non-transactional ancestor') save_ds_conn = datastore._GetConnection() try: if hasattr(save_ctx, '_old_ds_conn'): datastore._SetConnection(save_ctx._old_ds_conn) tasklets.set_context(ctx) return func(*args, **kwds) finally: tasklets.set_context(save_ctx) datastore._SetConnection(save_ds_conn)
[ "@", "utils", ".", "decorator", "def", "non_transactional", "(", "func", ",", "args", ",", "kwds", ",", "allow_existing", "=", "True", ")", ":", "from", ".", "import", "tasklets", "ctx", "=", "tasklets", ".", "get_context", "(", ")", "if", "(", "not", "ctx", ".", "in_transaction", "(", ")", ")", ":", "return", "func", "(", "*", "args", ",", "**", "kwds", ")", "if", "(", "not", "allow_existing", ")", ":", "raise", "datastore_errors", ".", "BadRequestError", "(", "(", "'%s cannot be called within a transaction.'", "%", "func", ".", "__name__", ")", ")", "save_ctx", "=", "ctx", "while", "ctx", ".", "in_transaction", "(", ")", ":", "ctx", "=", "ctx", ".", "_parent_context", "if", "(", "ctx", "is", "None", ")", ":", "raise", "datastore_errors", ".", "BadRequestError", "(", "'Context without non-transactional ancestor'", ")", "save_ds_conn", "=", "datastore", ".", "_GetConnection", "(", ")", "try", ":", "if", "hasattr", "(", "save_ctx", ",", "'_old_ds_conn'", ")", ":", "datastore", ".", "_SetConnection", "(", "save_ctx", ".", "_old_ds_conn", ")", "tasklets", ".", "set_context", "(", "ctx", ")", "return", "func", "(", "*", "args", ",", "**", "kwds", ")", "finally", ":", "tasklets", ".", "set_context", "(", "save_ctx", ")", "datastore", ".", "_SetConnection", "(", "save_ds_conn", ")" ]
a decorator that ensures a function is run outside a transaction .
train
true
9,145
def tokens(doc): return (tok.lower() for tok in re.findall('\\w+', doc))
[ "def", "tokens", "(", "doc", ")", ":", "return", "(", "tok", ".", "lower", "(", ")", "for", "tok", "in", "re", ".", "findall", "(", "'\\\\w+'", ",", "doc", ")", ")" ]
extract tokens from doc .
train
false
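restated with its import so the example runs standalone:

import re

def tokens(doc):
    return (tok.lower() for tok in re.findall(r'\w+', doc))

print(list(tokens('Hello, World! Hello again.')))
# ['hello', 'world', 'hello', 'again']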
9,146
def CreateXsrfToken(action): user_str = _MakeUserStr() token = base64.b64encode(''.join((chr(int((random.random() * 255))) for _ in range(0, 64)))) memcache.set(token, (user_str, action), time=XSRF_VALIDITY_TIME, namespace=MEMCACHE_NAMESPACE) return token
[ "def", "CreateXsrfToken", "(", "action", ")", ":", "user_str", "=", "_MakeUserStr", "(", ")", "token", "=", "base64", ".", "b64encode", "(", "''", ".", "join", "(", "(", "chr", "(", "int", "(", "(", "random", ".", "random", "(", ")", "*", "255", ")", ")", ")", "for", "_", "in", "range", "(", "0", ",", "64", ")", ")", ")", ")", "memcache", ".", "set", "(", "token", ",", "(", "user_str", ",", "action", ")", ",", "time", "=", "XSRF_VALIDITY_TIME", ",", "namespace", "=", "MEMCACHE_NAMESPACE", ")", "return", "token" ]
generate a token to be passed with a form for xsrf protection .
train
false
9,147
def parse_redaction_policy_from_file(filename): with open(filename) as f: s = f.read().strip() if (not s): return RedactionPolicy([]) scheme = json.loads(s) try: version = str(scheme.pop('version')) except KeyError: raise ValueError('Redaction policy is missing `version` field') if (version != '1'): raise ValueError(('unknown version `%s`' % version)) try: rules = scheme.pop('rules') except KeyError: raise ValueError('Redaction policy is missing `rules` field') rules = [parse_one_rule_from_dict(rule) for rule in rules] if scheme: raise ValueError(('Redaction policy contains unknown field(s): %s' % scheme.keys())) return RedactionPolicy(rules)
[ "def", "parse_redaction_policy_from_file", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "s", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "(", "not", "s", ")", ":", "return", "RedactionPolicy", "(", "[", "]", ")", "scheme", "=", "json", ".", "loads", "(", "s", ")", "try", ":", "version", "=", "str", "(", "scheme", ".", "pop", "(", "'version'", ")", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "'Redaction policy is missing `version` field'", ")", "if", "(", "version", "!=", "'1'", ")", ":", "raise", "ValueError", "(", "(", "'unknown version `%s`'", "%", "version", ")", ")", "try", ":", "rules", "=", "scheme", ".", "pop", "(", "'rules'", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "'Redaction policy is missing `rules` field'", ")", "rules", "=", "[", "parse_one_rule_from_dict", "(", "rule", ")", "for", "rule", "in", "rules", "]", "if", "scheme", ":", "raise", "ValueError", "(", "(", "'Redaction policy contains unknown field(s): %s'", "%", "scheme", ".", "keys", "(", ")", ")", ")", "return", "RedactionPolicy", "(", "rules", ")" ]
parse a file into a redactionpolicy .
train
false
9,148
def strip(path, count): path = '/'.join(path.split(os.sep)) return path.split('/', count)[(-1)]
[ "def", "strip", "(", "path", ",", "count", ")", ":", "path", "=", "'/'", ".", "join", "(", "path", ".", "split", "(", "os", ".", "sep", ")", ")", "return", "path", ".", "split", "(", "'/'", ",", "count", ")", "[", "(", "-", "1", ")", "]" ]
strip the first count leading components from the path and return the remainder .
train
false
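an example of the path-component stripping, restated so it runs standalone:

import os

def strip(path, count):
    path = '/'.join(path.split(os.sep))
    return path.split('/', count)[-1]

print(strip('a/b/c/d.txt', 2))  # 'c/d.txt'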
9,149
@conf.commands.register def is_promisc(ip, fake_bcast='ff:ff:00:00:00:00', **kargs): responses = srp1((Ether(dst=fake_bcast) / ARP(op='who-has', pdst=ip)), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs) return (responses is not None)
[ "@", "conf", ".", "commands", ".", "register", "def", "is_promisc", "(", "ip", ",", "fake_bcast", "=", "'ff:ff:00:00:00:00'", ",", "**", "kargs", ")", ":", "responses", "=", "srp1", "(", "(", "Ether", "(", "dst", "=", "fake_bcast", ")", "/", "ARP", "(", "op", "=", "'who-has'", ",", "pdst", "=", "ip", ")", ")", ",", "type", "=", "ETH_P_ARP", ",", "iface_hint", "=", "ip", ",", "timeout", "=", "1", ",", "verbose", "=", "0", ",", "**", "kargs", ")", "return", "(", "responses", "is", "not", "None", ")" ]
try to guess if target is in promisc mode .
train
true
9,150
def auto_model(model): return getattr(model._meta, 'auto_created', False)
[ "def", "auto_model", "(", "model", ")", ":", "return", "getattr", "(", "model", ".", "_meta", ",", "'auto_created'", ",", "False", ")" ]
returns whether the given model was automatically generated .
train
false
9,151
def test_replace_update_column_via_setitem_warnings_refcount(): t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = [10, 20, 30] assert (len(w) == 1) assert ("replaced column 'a' and the number of references" in str(w[0].message))
[ "def", "test_replace_update_column_via_setitem_warnings_refcount", "(", ")", ":", "t", "=", "table", ".", "Table", "(", "[", "[", "1", ",", "2", ",", "3", "]", ",", "[", "4", ",", "5", ",", "6", "]", "]", ",", "names", "=", "[", "'a'", ",", "'b'", "]", ")", "ta", "=", "t", "[", "'a'", "]", "with", "catch_warnings", "(", ")", "as", "w", ":", "with", "table", ".", "conf", ".", "set_temp", "(", "'replace_warnings'", ",", "[", "'refcount'", ",", "'attributes'", ",", "'slice'", "]", ")", ":", "t", "[", "'a'", "]", "=", "[", "10", ",", "20", ",", "30", "]", "assert", "(", "len", "(", "w", ")", "==", "1", ")", "assert", "(", "\"replaced column 'a' and the number of references\"", "in", "str", "(", "w", "[", "0", "]", ".", "message", ")", ")" ]
test warnings related to table replace change in #5556: reference count changes .
train
false
9,152
def validate_manifest(filepath): manifest_data = return_json(filepath) dependencies = manifest_data['dependencies'] for (_, dependency) in dependencies.items(): for (_, dependency_contents) in dependency.items(): if ('downloadFormat' not in dependency_contents): raise Exception(('downloadFormat not specified in %s' % dependency_contents)) download_format = dependency_contents['downloadFormat'] test_manifest_syntax(download_format, dependency_contents)
[ "def", "validate_manifest", "(", "filepath", ")", ":", "manifest_data", "=", "return_json", "(", "filepath", ")", "dependencies", "=", "manifest_data", "[", "'dependencies'", "]", "for", "(", "_", ",", "dependency", ")", "in", "dependencies", ".", "items", "(", ")", ":", "for", "(", "_", ",", "dependency_contents", ")", "in", "dependency", ".", "items", "(", ")", ":", "if", "(", "'downloadFormat'", "not", "in", "dependency_contents", ")", ":", "raise", "Exception", "(", "(", "'downloadFormat not specified in %s'", "%", "dependency_contents", ")", ")", "download_format", "=", "dependency_contents", "[", "'downloadFormat'", "]", "test_manifest_syntax", "(", "download_format", ",", "dependency_contents", ")" ]
this validates the syntax of the manifest .
train
false
9,153
def get_build_maps(): build_maps = {} for app_path in app_paths: path = os.path.join(app_path, u'public', u'build.json') if os.path.exists(path): with open(path) as f: try: for (target, sources) in json.loads(f.read()).iteritems(): source_paths = [] for source in sources: if isinstance(source, list): s = frappe.get_pymodule_path(source[0], *source[1].split(u'/')) else: s = os.path.join(app_path, source) source_paths.append(s) build_maps[target] = source_paths except ValueError as e: print path print u'JSON syntax error {0}'.format(str(e)) return build_maps
[ "def", "get_build_maps", "(", ")", ":", "build_maps", "=", "{", "}", "for", "app_path", "in", "app_paths", ":", "path", "=", "os", ".", "path", ".", "join", "(", "app_path", ",", "u'public'", ",", "u'build.json'", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "try", ":", "for", "(", "target", ",", "sources", ")", "in", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", ".", "iteritems", "(", ")", ":", "source_paths", "=", "[", "]", "for", "source", "in", "sources", ":", "if", "isinstance", "(", "source", ",", "list", ")", ":", "s", "=", "frappe", ".", "get_pymodule_path", "(", "source", "[", "0", "]", ",", "*", "source", "[", "1", "]", ".", "split", "(", "u'/'", ")", ")", "else", ":", "s", "=", "os", ".", "path", ".", "join", "(", "app_path", ",", "source", ")", "source_paths", ".", "append", "(", "s", ")", "build_maps", "[", "target", "]", "=", "source_paths", "except", "ValueError", "as", "e", ":", "print", "path", "print", "u'JSON syntax error {0}'", ".", "format", "(", "str", "(", "e", ")", ")", "return", "build_maps" ]
get all build.json maps from the apps' public directories .
train
false
9,156
def safe_float(x): if ('?' in x): return np.nan else: return float(x)
[ "def", "safe_float", "(", "x", ")", ":", "if", "(", "'?'", "in", "x", ")", ":", "return", "np", ".", "nan", "else", ":", "return", "float", "(", "x", ")" ]
given a string x, return nan if it contains a '?' and its float value otherwise .
train
false
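restated for a standalone check:

import numpy as np

def safe_float(x):
    return np.nan if '?' in x else float(x)

print(safe_float('3.14'))  # 3.14
print(safe_float('?'))     # nan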
9,157
def vsstrrepr(expr, **settings): p = VectorStrReprPrinter(settings) return p.doprint(expr)
[ "def", "vsstrrepr", "(", "expr", ",", "**", "settings", ")", ":", "p", "=", "VectorStrReprPrinter", "(", "settings", ")", "return", "p", ".", "doprint", "(", "expr", ")" ]
function for displaying expression representations with vector printing enabled .
train
false
9,158
@register.tag def recursetree(parser, token): bits = token.contents.split() if (len(bits) != 2): raise template.TemplateSyntaxError((_(u'%s tag requires a queryset') % bits[0])) queryset_var = template.Variable(bits[1]) template_nodes = parser.parse((u'endrecursetree',)) parser.delete_first_token() return RecurseTreeNode(template_nodes, queryset_var)
[ "@", "register", ".", "tag", "def", "recursetree", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "(", "len", "(", "bits", ")", "!=", "2", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "_", "(", "u'%s tag requires a queryset'", ")", "%", "bits", "[", "0", "]", ")", ")", "queryset_var", "=", "template", ".", "Variable", "(", "bits", "[", "1", "]", ")", "template_nodes", "=", "parser", ".", "parse", "(", "(", "u'endrecursetree'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "RecurseTreeNode", "(", "template_nodes", ",", "queryset_var", ")" ]
iterates over the nodes in the tree, rendering the contained block for each node .
train
false
9,159
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0): if (keyword in diffs): vals = diffs[keyword] for (idx, val) in enumerate(vals): if (val is None): continue if (idx == 0): dup = '' else: dup = '[{}]'.format((idx + 1)) fileobj.write(indent(u(' Keyword {:8}{} has different {}:\n').format(keyword, dup, attr), ind)) report_diff_values(fileobj, val[0], val[1], ind=(ind + 1))
[ "def", "report_diff_keyword_attr", "(", "fileobj", ",", "attr", ",", "diffs", ",", "keyword", ",", "ind", "=", "0", ")", ":", "if", "(", "keyword", "in", "diffs", ")", ":", "vals", "=", "diffs", "[", "keyword", "]", "for", "(", "idx", ",", "val", ")", "in", "enumerate", "(", "vals", ")", ":", "if", "(", "val", "is", "None", ")", ":", "continue", "if", "(", "idx", "==", "0", ")", ":", "dup", "=", "''", "else", ":", "dup", "=", "'[{}]'", ".", "format", "(", "(", "idx", "+", "1", ")", ")", "fileobj", ".", "write", "(", "indent", "(", "u", "(", "' Keyword {:8}{} has different {}:\\n'", ")", ".", "format", "(", "keyword", ",", "dup", ",", "attr", ")", ",", "ind", ")", ")", "report_diff_values", "(", "fileobj", ",", "val", "[", "0", "]", ",", "val", "[", "1", "]", ",", "ind", "=", "(", "ind", "+", "1", ")", ")" ]
write a diff between two header keyword values or comments to the specified file-like object .
train
false
9,160
@contextlib.contextmanager def _ignore_deprecated_imports(ignore=True): if ignore: with warnings.catch_warnings(): warnings.filterwarnings('ignore', '.+ (module|package)', DeprecationWarning) (yield) else: (yield)
[ "@", "contextlib", ".", "contextmanager", "def", "_ignore_deprecated_imports", "(", "ignore", "=", "True", ")", ":", "if", "ignore", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "'.+ (module|package)'", ",", "DeprecationWarning", ")", "(", "yield", ")", "else", ":", "(", "yield", ")" ]
context manager to suppress package and module deprecation warnings when importing them .
train
false
9,162
def avg_pool_3d(incoming, kernel_size, strides=None, padding='same', name='AvgPool3D'): input_shape = utils.get_incoming_shape(incoming) assert (len(input_shape) == 5), 'Incoming Tensor shape must be 5-D' kernel = utils.autoformat_kernel_3d(kernel_size) strides = utils.autoformat_stride_3d(strides) padding = utils.autoformat_padding(padding) with tf.name_scope(name) as scope: inference = tf.nn.avg_pool3d(incoming, kernel, strides, padding) tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference) inference.scope = scope tf.add_to_collection(((tf.GraphKeys.LAYER_TENSOR + '/') + name), inference) return inference
[ "def", "avg_pool_3d", "(", "incoming", ",", "kernel_size", ",", "strides", "=", "None", ",", "padding", "=", "'same'", ",", "name", "=", "'AvgPool3D'", ")", ":", "input_shape", "=", "utils", ".", "get_incoming_shape", "(", "incoming", ")", "assert", "(", "len", "(", "input_shape", ")", "==", "5", ")", ",", "'Incoming Tensor shape must be 5-D'", "kernel", "=", "utils", ".", "autoformat_kernel_3d", "(", "kernel_size", ")", "strides", "=", "utils", ".", "autoformat_stride_3d", "(", "strides", ")", "padding", "=", "utils", ".", "autoformat_padding", "(", "padding", ")", "with", "tf", ".", "name_scope", "(", "name", ")", "as", "scope", ":", "inference", "=", "tf", ".", "nn", ".", "avg_pool3d", "(", "incoming", ",", "kernel", ",", "strides", ",", "padding", ")", "tf", ".", "add_to_collection", "(", "tf", ".", "GraphKeys", ".", "ACTIVATIONS", ",", "inference", ")", "inference", ".", "scope", "=", "scope", "tf", ".", "add_to_collection", "(", "(", "(", "tf", ".", "GraphKeys", ".", "LAYER_TENSOR", "+", "'/'", ")", "+", "name", ")", ",", "inference", ")", "return", "inference" ]
average pooling 3d .
train
false
9,163
def bfs_predecessors(G, source): for (s, t) in bfs_edges(G, source): (yield (t, s))
[ "def", "bfs_predecessors", "(", "G", ",", "source", ")", ":", "for", "(", "s", ",", "t", ")", "in", "bfs_edges", "(", "G", ",", "source", ")", ":", "(", "yield", "(", "t", ",", "s", ")", ")" ]
returns an iterator of predecessors in breadth-first-search from source .
train
false
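this mirrors networkx's bfs_predecessors; a quick check with the library version:

import networkx as nx

G = nx.path_graph(4)  # 0-1-2-3
print(dict(nx.bfs_predecessors(G, 0)))  # {1: 0, 2: 1, 3: 2}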
9,164
def test_empty(empty_history): items = [] (stream, _data, user_data) = tabhistory.serialize(items) qtutils.deserialize_stream(stream, empty_history) assert (empty_history.count() == 0) assert (empty_history.currentItemIndex() == 0) assert (not user_data)
[ "def", "test_empty", "(", "empty_history", ")", ":", "items", "=", "[", "]", "(", "stream", ",", "_data", ",", "user_data", ")", "=", "tabhistory", ".", "serialize", "(", "items", ")", "qtutils", ".", "deserialize_stream", "(", "stream", ",", "empty_history", ")", "assert", "(", "empty_history", ".", "count", "(", ")", "==", "0", ")", "assert", "(", "empty_history", ".", "currentItemIndex", "(", ")", "==", "0", ")", "assert", "(", "not", "user_data", ")" ]
check tabhistory.serialize with no items .
train
false
9,165
def ruby_metrics(registry, xml_parent, data): metrics = XML.SubElement(xml_parent, 'hudson.plugins.rubyMetrics.rcov.RcovPublisher') report_dir = data.get('report-dir', '') XML.SubElement(metrics, 'reportDir').text = report_dir targets = XML.SubElement(metrics, 'targets') if ('target' in data): for t in data['target']: if (not (('code-coverage' in t) or ('total-coverage' in t))): raise JenkinsJobsException('Unrecognized target name') el = XML.SubElement(targets, 'hudson.plugins.rubyMetrics.rcov.model.MetricTarget') if ('total-coverage' in t): XML.SubElement(el, 'metric').text = 'TOTAL_COVERAGE' else: XML.SubElement(el, 'metric').text = 'CODE_COVERAGE' for (threshold_name, threshold_value) in next(iter(t.values())).items(): elname = threshold_name.lower() XML.SubElement(el, elname).text = str(threshold_value) else: raise JenkinsJobsException('Coverage metric targets must be set')
[ "def", "ruby_metrics", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "metrics", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.rubyMetrics.rcov.RcovPublisher'", ")", "report_dir", "=", "data", ".", "get", "(", "'report-dir'", ",", "''", ")", "XML", ".", "SubElement", "(", "metrics", ",", "'reportDir'", ")", ".", "text", "=", "report_dir", "targets", "=", "XML", ".", "SubElement", "(", "metrics", ",", "'targets'", ")", "if", "(", "'target'", "in", "data", ")", ":", "for", "t", "in", "data", "[", "'target'", "]", ":", "if", "(", "not", "(", "(", "'code-coverage'", "in", "t", ")", "or", "(", "'total-coverage'", "in", "t", ")", ")", ")", ":", "raise", "JenkinsJobsException", "(", "'Unrecognized target name'", ")", "el", "=", "XML", ".", "SubElement", "(", "targets", ",", "'hudson.plugins.rubyMetrics.rcov.model.MetricTarget'", ")", "if", "(", "'total-coverage'", "in", "t", ")", ":", "XML", ".", "SubElement", "(", "el", ",", "'metric'", ")", ".", "text", "=", "'TOTAL_COVERAGE'", "else", ":", "XML", ".", "SubElement", "(", "el", ",", "'metric'", ")", ".", "text", "=", "'CODE_COVERAGE'", "for", "(", "threshold_name", ",", "threshold_value", ")", "in", "next", "(", "iter", "(", "t", ".", "values", "(", ")", ")", ")", ".", "items", "(", ")", ":", "elname", "=", "threshold_name", ".", "lower", "(", ")", "XML", ".", "SubElement", "(", "el", ",", "elname", ")", ".", "text", "=", "str", "(", "threshold_value", ")", "else", ":", "raise", "JenkinsJobsException", "(", "'Coverage metric targets must be set'", ")" ]
yaml: ruby-metrics rcov plugin parses rcov html report files and shows them in jenkins with a trend graph .
train
false
9,167
def test_stratified_validation_k_fold(): skip_if_no_sklearn() from pylearn2.cross_validation.subset_iterators import StratifiedValidationKFold n = 30 y = np.concatenate((np.zeros((n / 2), dtype=int), np.ones((n / 2), dtype=int))) cv = StratifiedValidationKFold(y) for (train, valid, test) in cv: assert (np.unique(np.concatenate((train, valid, test))).size == n) assert (valid.size == (n / cv.n_folds)) assert (test.size == (n / cv.n_folds)) assert (np.count_nonzero(y[valid]) == ((n / 2) * (1.0 / cv.n_folds))) assert (np.count_nonzero(y[test]) == ((n / 2) * (1.0 / cv.n_folds)))
[ "def", "test_stratified_validation_k_fold", "(", ")", ":", "skip_if_no_sklearn", "(", ")", "from", "pylearn2", ".", "cross_validation", ".", "subset_iterators", "import", "StratifiedValidationKFold", "n", "=", "30", "y", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "n", "/", "2", ")", ",", "dtype", "=", "int", ")", ",", "np", ".", "ones", "(", "(", "n", "/", "2", ")", ",", "dtype", "=", "int", ")", ")", ")", "cv", "=", "StratifiedValidationKFold", "(", "y", ")", "for", "(", "train", ",", "valid", ",", "test", ")", "in", "cv", ":", "assert", "(", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "(", "train", ",", "valid", ",", "test", ")", ")", ")", ".", "size", "==", "n", ")", "assert", "(", "valid", ".", "size", "==", "(", "n", "/", "cv", ".", "n_folds", ")", ")", "assert", "(", "test", ".", "size", "==", "(", "n", "/", "cv", ".", "n_folds", ")", ")", "assert", "(", "np", ".", "count_nonzero", "(", "y", "[", "valid", "]", ")", "==", "(", "(", "n", "/", "2", ")", "*", "(", "1.0", "/", "cv", ".", "n_folds", ")", ")", ")", "assert", "(", "np", ".", "count_nonzero", "(", "y", "[", "test", "]", ")", "==", "(", "(", "n", "/", "2", ")", "*", "(", "1.0", "/", "cv", ".", "n_folds", ")", ")", ")" ]
test stratifiedvalidationkfold .
train
false
9,168
def is_auth(nodes): cmd = ['pcs', 'cluster', 'auth'] cmd += nodes return __salt__['cmd.run_all'](cmd, stdin='\n\n', output_loglevel='trace', python_shell=False)
[ "def", "is_auth", "(", "nodes", ")", ":", "cmd", "=", "[", "'pcs'", ",", "'cluster'", ",", "'auth'", "]", "cmd", "+=", "nodes", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "stdin", "=", "'\\n\\n'", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")" ]
check if nodes are already authorized . nodes: a list of nodes to be checked for authorization to the cluster . cli example: .
train
true