id_within_dataset (int64): values 1 to 55.5k
snippet (string): lengths 19 to 14.2k
tokens (list): lengths 6 to 1.63k
nl (string): lengths 6 to 352
split_within_dataset (string): 1 value
is_duplicated (bool): 2 classes
42,392
def countZipFileEntries(filename):
    warnings.warn('countZipFileEntries is deprecated.', DeprecationWarning, 2)
    zf = zipfile.ZipFile(filename)
    return len(zf.namelist())
[ "def", "countZipFileEntries", "(", "filename", ")", ":", "warnings", ".", "warn", "(", "'countZipFileEntries is deprecated.'", ",", "DeprecationWarning", ",", "2", ")", "zf", "=", "zipfile", ".", "ZipFile", "(", "filename", ")", "return", "len", "(", "zf", ".", "namelist", "(", ")", ")" ]
count the number of entries in a zip archive .
train
false
42,393
def get_atomic_groups(**filter_data):
    return rpc_utils.prepare_for_serialization(models.AtomicGroup.list_objects(filter_data))
[ "def", "get_atomic_groups", "(", "**", "filter_data", ")", ":", "return", "rpc_utils", ".", "prepare_for_serialization", "(", "models", ".", "AtomicGroup", ".", "list_objects", "(", "filter_data", ")", ")" ]
get atomic groups .
train
false
42,394
def _format_row_with_out_of_dateness(readout_locale, eng_slug, eng_title, slug, title, visits, significance, needs_review):
    if slug:
        locale = readout_locale
        if needs_review:
            (status, view_name, status_class) = REVIEW_STATUSES[needs_review]
        else:
            (status, view_name, status_class) = SIGNIFICANCE_STATUSES.get(significance, REVIEW_STATUSES[needs_review])
        status_url = (reverse(view_name, args=[slug], locale=locale) if view_name else '')
    else:
        slug = eng_slug
        title = eng_title
        locale = settings.WIKI_DEFAULT_LANGUAGE
        status = _(u'Translation Needed')
        status_url = reverse('wiki.translate', args=[slug], locale=readout_locale)
        status_class = 'untranslated'
    return dict(title=title, url=reverse('wiki.document', args=[slug], locale=locale), visits=visits, status=status, status_class=status_class, status_url=status_url)
[ "def", "_format_row_with_out_of_dateness", "(", "readout_locale", ",", "eng_slug", ",", "eng_title", ",", "slug", ",", "title", ",", "visits", ",", "significance", ",", "needs_review", ")", ":", "if", "slug", ":", "locale", "=", "readout_locale", "if", "needs_review", ":", "(", "status", ",", "view_name", ",", "status_class", ")", "=", "REVIEW_STATUSES", "[", "needs_review", "]", "else", ":", "(", "status", ",", "view_name", ",", "status_class", ")", "=", "SIGNIFICANCE_STATUSES", ".", "get", "(", "significance", ",", "REVIEW_STATUSES", "[", "needs_review", "]", ")", "status_url", "=", "(", "reverse", "(", "view_name", ",", "args", "=", "[", "slug", "]", ",", "locale", "=", "locale", ")", "if", "view_name", "else", "''", ")", "else", ":", "slug", "=", "eng_slug", "title", "=", "eng_title", "locale", "=", "settings", ".", "WIKI_DEFAULT_LANGUAGE", "status", "=", "_", "(", "u'Translation Needed'", ")", "status_url", "=", "reverse", "(", "'wiki.translate'", ",", "args", "=", "[", "slug", "]", ",", "locale", "=", "readout_locale", ")", "status_class", "=", "'untranslated'", "return", "dict", "(", "title", "=", "title", ",", "url", "=", "reverse", "(", "'wiki.document'", ",", "args", "=", "[", "slug", "]", ",", "locale", "=", "locale", ")", ",", "visits", "=", "visits", ",", "status", "=", "status", ",", "status_class", "=", "status_class", ",", "status_url", "=", "status_url", ")" ]
format a row for a readout that has the traffic-light-style categorization of how seriously out of date a translation is .
train
false
42,398
def MakePmfFromItems(t, label=None):
    return Pmf(dict(t), label=label)
[ "def", "MakePmfFromItems", "(", "t", ",", "label", "=", "None", ")", ":", "return", "Pmf", "(", "dict", "(", "t", ")", ",", "label", "=", "label", ")" ]
makes a pmf from a sequence of value-probability pairs args: t: sequence of value-probability pairs label: string label for this pmf returns: pmf object .
train
false
42,399
def restore_saved_module(module):
    if (not (module in monkey.saved)):
        return
    _module = __import__(module)
    for attr in monkey.saved[module]:
        if hasattr(_module, attr):
            setattr(_module, attr, monkey.saved[module][attr])
[ "def", "restore_saved_module", "(", "module", ")", ":", "if", "(", "not", "(", "module", "in", "monkey", ".", "saved", ")", ")", ":", "return", "_module", "=", "__import__", "(", "module", ")", "for", "attr", "in", "monkey", ".", "saved", "[", "module", "]", ":", "if", "hasattr", "(", "_module", ",", "attr", ")", ":", "setattr", "(", "_module", ",", "attr", ",", "monkey", ".", "saved", "[", "module", "]", "[", "attr", "]", ")" ]
gevent monkey patch keeps a list of all patched modules .
train
false
42,401
def get_machine_zone():
    return get_metadata_path('instance/zone').split('/')[(-1)]
[ "def", "get_machine_zone", "(", ")", ":", "return", "get_metadata_path", "(", "'instance/zone'", ")", ".", "split", "(", "'/'", ")", "[", "(", "-", "1", ")", "]" ]
returns the zone that the gce instance running this code is running within .
train
false
42,402
def scriptpath(scriptname='interleave-reads.py'):
    path = os.path.join(os.path.dirname(__file__), '../scripts')
    if os.path.exists(os.path.join(path, scriptname)):
        return path
    path = os.path.join(os.path.dirname(__file__), '../../EGG-INFO/scripts')
    if os.path.exists(os.path.join(path, scriptname)):
        return path
    for path in os.environ['PATH'].split(':'):
        if os.path.exists(os.path.join(path, scriptname)):
            return path
[ "def", "scriptpath", "(", "scriptname", "=", "'interleave-reads.py'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'../scripts'", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "scriptname", ")", ")", ":", "return", "path", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'../../EGG-INFO/scripts'", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "scriptname", ")", ")", ":", "return", "path", "for", "path", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "':'", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "scriptname", ")", ")", ":", "return", "path" ]
return the path to the scripts .
train
false
42,404
def task_state(args):
    dag = get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    print(ti.current_state())
[ "def", "task_state", "(", "args", ")", ":", "dag", "=", "get_dag", "(", "args", ")", "task", "=", "dag", ".", "get_task", "(", "task_id", "=", "args", ".", "task_id", ")", "ti", "=", "TaskInstance", "(", "task", ",", "args", ".", "execution_date", ")", "print", "(", "ti", ".", "current_state", "(", ")", ")" ]
returns the state of a taskinstance at the command line .
train
true
42,405
def format_as(use_list, use_tuple, outputs):
    assert (not (use_list and use_tuple)), 'Both flags cannot be simultaneously True'
    if ((use_list or use_tuple) and (not isinstance(outputs, (list, tuple)))):
        if use_list:
            return [outputs]
        else:
            return (outputs,)
    elif ((not (use_list or use_tuple)) and isinstance(outputs, (list, tuple))):
        assert (len(outputs) == 1), 'Wrong arguments. Expected a one element list'
        return outputs[0]
    elif (use_list or use_tuple):
        if use_list:
            return list(outputs)
        else:
            return tuple(outputs)
    else:
        return outputs
[ "def", "format_as", "(", "use_list", ",", "use_tuple", ",", "outputs", ")", ":", "assert", "(", "not", "(", "use_list", "and", "use_tuple", ")", ")", ",", "'Both flags cannot be simultaneously True'", "if", "(", "(", "use_list", "or", "use_tuple", ")", "and", "(", "not", "isinstance", "(", "outputs", ",", "(", "list", ",", "tuple", ")", ")", ")", ")", ":", "if", "use_list", ":", "return", "[", "outputs", "]", "else", ":", "return", "(", "outputs", ",", ")", "elif", "(", "(", "not", "(", "use_list", "or", "use_tuple", ")", ")", "and", "isinstance", "(", "outputs", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "assert", "(", "len", "(", "outputs", ")", "==", "1", ")", ",", "'Wrong arguments. Expected a one element list'", "return", "outputs", "[", "0", "]", "elif", "(", "use_list", "or", "use_tuple", ")", ":", "if", "use_list", ":", "return", "list", "(", "outputs", ")", "else", ":", "return", "tuple", "(", "outputs", ")", "else", ":", "return", "outputs" ]
formats the outputs according to the flags use_list and use_tuple .
train
false
42,406
def make_increasing_ohlc(open, high, low, close, dates, **kwargs):
    (flat_increase_x, flat_increase_y, text_increase) = _OHLC(open, high, low, close, dates).get_increase()
    if ('name' in kwargs):
        showlegend = True
    else:
        kwargs.setdefault('name', 'Increasing')
        showlegend = False
    kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR, width=1))
    kwargs.setdefault('text', text_increase)
    ohlc_incr = dict(type='scatter', x=flat_increase_x, y=flat_increase_y, mode='lines', showlegend=showlegend, **kwargs)
    return ohlc_incr
[ "def", "make_increasing_ohlc", "(", "open", ",", "high", ",", "low", ",", "close", ",", "dates", ",", "**", "kwargs", ")", ":", "(", "flat_increase_x", ",", "flat_increase_y", ",", "text_increase", ")", "=", "_OHLC", "(", "open", ",", "high", ",", "low", ",", "close", ",", "dates", ")", ".", "get_increase", "(", ")", "if", "(", "'name'", "in", "kwargs", ")", ":", "showlegend", "=", "True", "else", ":", "kwargs", ".", "setdefault", "(", "'name'", ",", "'Increasing'", ")", "showlegend", "=", "False", "kwargs", ".", "setdefault", "(", "'line'", ",", "dict", "(", "color", "=", "_DEFAULT_INCREASING_COLOR", ",", "width", "=", "1", ")", ")", "kwargs", ".", "setdefault", "(", "'text'", ",", "text_increase", ")", "ohlc_incr", "=", "dict", "(", "type", "=", "'scatter'", ",", "x", "=", "flat_increase_x", ",", "y", "=", "flat_increase_y", ",", "mode", "=", "'lines'", ",", "showlegend", "=", "showlegend", ",", "**", "kwargs", ")", "return", "ohlc_incr" ]
makes increasing ohlc sticks _make_increasing_ohlc() and _make_decreasing_ohlc separate the increasing trace from the decreasing trace so kwargs can be passed separately to increasing or decreasing traces when direction is set to increasing or decreasing in figurefactory .
train
false
42,407
def elevation_along_path(client, path, samples):
    if (type(path) is str):
        path = ('enc:%s' % path)
    else:
        path = convert.shortest_path(path)
    params = {'path': path, 'samples': samples}
    return client._get('/maps/api/elevation/json', params)['results']
[ "def", "elevation_along_path", "(", "client", ",", "path", ",", "samples", ")", ":", "if", "(", "type", "(", "path", ")", "is", "str", ")", ":", "path", "=", "(", "'enc:%s'", "%", "path", ")", "else", ":", "path", "=", "convert", ".", "shortest_path", "(", "path", ")", "params", "=", "{", "'path'", ":", "path", ",", "'samples'", ":", "samples", "}", "return", "client", ".", "_get", "(", "'/maps/api/elevation/json'", ",", "params", ")", "[", "'results'", "]" ]
provides elevation data sampled along a path on the surface of the earth .
train
false
42,408
def do_not_report_as_logging_caller(func):
    _caller_code_to_skip_in_logging_stack.add(func.func_code)
    return func
[ "def", "do_not_report_as_logging_caller", "(", "func", ")", ":", "_caller_code_to_skip_in_logging_stack", ".", "add", "(", "func", ".", "func_code", ")", "return", "func" ]
decorator to annotate functions we will tell logging not to log .
train
false
42,409
def toggle_menuclass(cssclass='pressed', menuid='headermenu'):
    positions = dict(index='', what='-108px -115px', download='-211px -115px', who='-315px -115px', support='-418px -115px', documentation='-520px -115px')
    if (request.function in positions.keys()):
        jscript = ("\n <script>\n $(document).ready(function(){\n $('.%(menuid)s a').removeClass('%(cssclass)s');\n $('.%(function)s').toggleClass('%(cssclass)s').css('background-position','%(cssposition)s')\n\n });\n </script>\n " % dict(cssclass=cssclass, menuid=menuid, function=request.function, cssposition=positions[request.function]))
        return XML(jscript)
    else:
        return ''
[ "def", "toggle_menuclass", "(", "cssclass", "=", "'pressed'", ",", "menuid", "=", "'headermenu'", ")", ":", "positions", "=", "dict", "(", "index", "=", "''", ",", "what", "=", "'-108px -115px'", ",", "download", "=", "'-211px -115px'", ",", "who", "=", "'-315px -115px'", ",", "support", "=", "'-418px -115px'", ",", "documentation", "=", "'-520px -115px'", ")", "if", "(", "request", ".", "function", "in", "positions", ".", "keys", "(", ")", ")", ":", "jscript", "=", "(", "\"\\n <script>\\n $(document).ready(function(){\\n $('.%(menuid)s a').removeClass('%(cssclass)s');\\n $('.%(function)s').toggleClass('%(cssclass)s').css('background-position','%(cssposition)s')\\n\\n });\\n </script>\\n \"", "%", "dict", "(", "cssclass", "=", "cssclass", ",", "menuid", "=", "menuid", ",", "function", "=", "request", ".", "function", ",", "cssposition", "=", "positions", "[", "request", ".", "function", "]", ")", ")", "return", "XML", "(", "jscript", ")", "else", ":", "return", "''" ]
this function changes the menu class to put pressed appearance .
train
false
42,411
def _format_lineno(session, line):
    if (session == 0):
        return str(line)
    return ('%s#%s' % (session, line))
[ "def", "_format_lineno", "(", "session", ",", "line", ")", ":", "if", "(", "session", "==", "0", ")", ":", "return", "str", "(", "line", ")", "return", "(", "'%s#%s'", "%", "(", "session", ",", "line", ")", ")" ]
helper function to format line numbers properly .
train
false
42,412
def format_rfc3339(datetime_instance=None):
    return (datetime_instance.isoformat('T') + 'Z')
[ "def", "format_rfc3339", "(", "datetime_instance", "=", "None", ")", ":", "return", "(", "datetime_instance", ".", "isoformat", "(", "'T'", ")", "+", "'Z'", ")" ]
formats a datetime per rfc 3339 .
train
false
42,413
def lazystr(text):
    from django.utils.encoding import force_text
    return lazy(force_text, str)(text)
[ "def", "lazystr", "(", "text", ")", ":", "from", "django", ".", "utils", ".", "encoding", "import", "force_text", "return", "lazy", "(", "force_text", ",", "str", ")", "(", "text", ")" ]
shortcut for the common case of a lazy callable that returns str .
train
false
42,414
def _raise_mod_power(x, s, p, f):
    from sympy.polys.domains import ZZ
    f_f = gf_diff(f, p, ZZ)
    alpha = gf_value(f_f, x)
    beta = ((- gf_value(f, x)) // (p ** s))
    return linear_congruence(alpha, beta, p)
[ "def", "_raise_mod_power", "(", "x", ",", "s", ",", "p", ",", "f", ")", ":", "from", "sympy", ".", "polys", ".", "domains", "import", "ZZ", "f_f", "=", "gf_diff", "(", "f", ",", "p", ",", "ZZ", ")", "alpha", "=", "gf_value", "(", "f_f", ",", "x", ")", "beta", "=", "(", "(", "-", "gf_value", "(", "f", ",", "x", ")", ")", "//", "(", "p", "**", "s", ")", ")", "return", "linear_congruence", "(", "alpha", ",", "beta", ",", "p", ")" ]
used in gf_csolve to generate solutions of f(x) cong 0 mod(p**(s + 1)) from the solutions of f(x) cong 0 mod(p**s) .
train
false
42,415
def convert_sv_line(line):
    line = line.strip()
    line = line.replace('\xef\x80\xa0', '')
    line = ''.join((convert_sv_char(c) for c in line))
    line = line.replace('B3', ' ').replace('oA', '').replace('o', '').replace('\x00', '')
    return line
[ "def", "convert_sv_line", "(", "line", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "line", "=", "line", ".", "replace", "(", "'\\xef\\x80\\xa0'", ",", "''", ")", "line", "=", "''", ".", "join", "(", "(", "convert_sv_char", "(", "c", ")", "for", "c", "in", "line", ")", ")", "line", "=", "line", ".", "replace", "(", "'B3'", ",", "' '", ")", ".", "replace", "(", "'oA'", ",", "''", ")", ".", "replace", "(", "'o'", ",", "''", ")", ".", "replace", "(", "'\\x00'", ",", "''", ")", "return", "line" ]
convert a single line of the garbled vote text .
train
false
42,417
@register.simple_tag
def simple_two_params(one, two):
    return ('simple_two_params - Expected result: %s, %s' % (one, two))
[ "@", "register", ".", "simple_tag", "def", "simple_two_params", "(", "one", ",", "two", ")", ":", "return", "(", "'simple_two_params - Expected result: %s, %s'", "%", "(", "one", ",", "two", ")", ")" ]
expected simple_two_params __doc__ .
train
false
42,418
def _addHandlerRef(handler):
    _acquireLock()
    try:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))
    finally:
        _releaseLock()
[ "def", "_addHandlerRef", "(", "handler", ")", ":", "_acquireLock", "(", ")", "try", ":", "_handlerList", ".", "append", "(", "weakref", ".", "ref", "(", "handler", ",", "_removeHandlerRef", ")", ")", "finally", ":", "_releaseLock", "(", ")" ]
add a handler to the internal cleanup list using a weak reference .
train
false
42,419
def escapeString(string):
    toEscapeChars = ['\\', '(', ')']
    escapedValue = ''
    for i in range(len(string)):
        if ((string[i] in toEscapeChars) and ((i == 0) or (string[(i - 1)] != '\\'))):
            if (string[i] == '\\'):
                if ((len(string) > (i + 1)) and re.match('[0-7]', string[(i + 1)])):
                    escapedValue += string[i]
                else:
                    escapedValue += ('\\' + string[i])
            else:
                escapedValue += ('\\' + string[i])
        elif (string[i] == '\r'):
            escapedValue += '\\r'
        elif (string[i] == '\n'):
            escapedValue += '\\n'
        elif (string[i] == '\t'):
            escapedValue += '\\t'
        elif (string[i] == '\x08'):
            escapedValue += '\\b'
        elif (string[i] == '\x0c'):
            escapedValue += '\\f'
        else:
            escapedValue += string[i]
    return escapedValue
[ "def", "escapeString", "(", "string", ")", ":", "toEscapeChars", "=", "[", "'\\\\'", ",", "'('", ",", "')'", "]", "escapedValue", "=", "''", "for", "i", "in", "range", "(", "len", "(", "string", ")", ")", ":", "if", "(", "(", "string", "[", "i", "]", "in", "toEscapeChars", ")", "and", "(", "(", "i", "==", "0", ")", "or", "(", "string", "[", "(", "i", "-", "1", ")", "]", "!=", "'\\\\'", ")", ")", ")", ":", "if", "(", "string", "[", "i", "]", "==", "'\\\\'", ")", ":", "if", "(", "(", "len", "(", "string", ")", ">", "(", "i", "+", "1", ")", ")", "and", "re", ".", "match", "(", "'[0-7]'", ",", "string", "[", "(", "i", "+", "1", ")", "]", ")", ")", ":", "escapedValue", "+=", "string", "[", "i", "]", "else", ":", "escapedValue", "+=", "(", "'\\\\'", "+", "string", "[", "i", "]", ")", "else", ":", "escapedValue", "+=", "(", "'\\\\'", "+", "string", "[", "i", "]", ")", "elif", "(", "string", "[", "i", "]", "==", "'\\r'", ")", ":", "escapedValue", "+=", "'\\\\r'", "elif", "(", "string", "[", "i", "]", "==", "'\\n'", ")", ":", "escapedValue", "+=", "'\\\\n'", "elif", "(", "string", "[", "i", "]", "==", "' DCTB '", ")", ":", "escapedValue", "+=", "'\\\\t'", "elif", "(", "string", "[", "i", "]", "==", "'\\x08'", ")", ":", "escapedValue", "+=", "'\\\\b'", "elif", "(", "string", "[", "i", "]", "==", "'\\x0c'", ")", ":", "escapedValue", "+=", "'\\\\f'", "else", ":", "escapedValue", "+=", "string", "[", "i", "]", "return", "escapedValue" ]
escape the given string .
train
false
42,420
def detokenize(token_rules, words):
    string = ' '.join(words)
    for subtoks in token_rules:
        string = string.replace(subtoks.replace('<SEP>', ' '), subtoks)
    positions = []
    i = 0
    for chunk in string.split():
        subtoks = chunk.split('<SEP>')
        positions.append(tuple(range(i, (i + len(subtoks)))))
        i += len(subtoks)
    return positions
[ "def", "detokenize", "(", "token_rules", ",", "words", ")", ":", "string", "=", "' '", ".", "join", "(", "words", ")", "for", "subtoks", "in", "token_rules", ":", "string", "=", "string", ".", "replace", "(", "subtoks", ".", "replace", "(", "'<SEP>'", ",", "' '", ")", ",", "subtoks", ")", "positions", "=", "[", "]", "i", "=", "0", "for", "chunk", "in", "string", ".", "split", "(", ")", ":", "subtoks", "=", "chunk", ".", "split", "(", "'<SEP>'", ")", "positions", ".", "append", "(", "tuple", "(", "range", "(", "i", ",", "(", "i", "+", "len", "(", "subtoks", ")", ")", ")", ")", ")", "i", "+=", "len", "(", "subtoks", ")", "return", "positions" ]
to align with treebanks .
train
false
42,421
def test_empty_db(client):
    rv = client.get('/')
    assert ('No entries here so far' in rv.data)
[ "def", "test_empty_db", "(", "client", ")", ":", "rv", "=", "client", ".", "get", "(", "'/'", ")", "assert", "(", "'No entries here so far'", "in", "rv", ".", "data", ")" ]
start with a blank database .
train
false
42,422
def test_no_shared_var_graph():
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    f = theano.function([a, b], [(a + b)], mode=mode_with_gpu)
    l = f.maker.fgraph.toposort()
    assert (len(l) == 4)
    assert numpy.any((isinstance(x.op, cuda.GpuElemwise) for x in l))
    assert numpy.any((isinstance(x.op, cuda.GpuFromHost) for x in l))
    assert numpy.any((isinstance(x.op, cuda.HostFromGpu) for x in l))
[ "def", "test_no_shared_var_graph", "(", ")", ":", "a", "=", "tensor", ".", "fmatrix", "(", ")", "b", "=", "tensor", ".", "fmatrix", "(", ")", "f", "=", "theano", ".", "function", "(", "[", "a", ",", "b", "]", ",", "[", "(", "a", "+", "b", ")", "]", ",", "mode", "=", "mode_with_gpu", ")", "l", "=", "f", ".", "maker", ".", "fgraph", ".", "toposort", "(", ")", "assert", "(", "len", "(", "l", ")", "==", "4", ")", "assert", "numpy", ".", "any", "(", "(", "isinstance", "(", "x", ".", "op", ",", "cuda", ".", "GpuElemwise", ")", "for", "x", "in", "l", ")", ")", "assert", "numpy", ".", "any", "(", "(", "isinstance", "(", "x", ".", "op", ",", "cuda", ".", "GpuFromHost", ")", "for", "x", "in", "l", ")", ")", "assert", "numpy", ".", "any", "(", "(", "isinstance", "(", "x", ".", "op", ",", "cuda", ".", "HostFromGpu", ")", "for", "x", "in", "l", ")", ")" ]
test that the inputtogpuoptimizer optimizer make graph that dont have shared variable compiled too .
train
false
42,424
def calculate_nfmap(train_toks, encoding):
    nfset = set()
    for (tok, _) in train_toks:
        for label in encoding.labels():
            nfset.add(sum((val for (id, val) in encoding.encode(tok, label))))
    return dict(((nf, i) for (i, nf) in enumerate(nfset)))
[ "def", "calculate_nfmap", "(", "train_toks", ",", "encoding", ")", ":", "nfset", "=", "set", "(", ")", "for", "(", "tok", ",", "_", ")", "in", "train_toks", ":", "for", "label", "in", "encoding", ".", "labels", "(", ")", ":", "nfset", ".", "add", "(", "sum", "(", "(", "val", "for", "(", "id", ",", "val", ")", "in", "encoding", ".", "encode", "(", "tok", ",", "label", ")", ")", ")", ")", "return", "dict", "(", "(", "(", "nf", ",", "i", ")", "for", "(", "i", ",", "nf", ")", "in", "enumerate", "(", "nfset", ")", ")", ")" ]
construct a map that can be used to compress nf .
train
false
42,426
def addFacesByLoop(faces, indexedLoop):
    if (len(indexedLoop) < 3):
        return
    lastNormal = None
    for (pointIndex, point) in enumerate(indexedLoop):
        center = indexedLoop[((pointIndex + 1) % len(indexedLoop))]
        end = indexedLoop[((pointIndex + 2) % len(indexedLoop))]
        normal = euclidean.getNormalWeighted(point, center, end)
        if (abs(normal) > 0.0):
            if (lastNormal != None):
                if (lastNormal.dot(normal) < 0.0):
                    addFacesByConcaveLoop(faces, indexedLoop)
                    return
            lastNormal = normal
    addFacesByConvex(faces, indexedLoop)
[ "def", "addFacesByLoop", "(", "faces", ",", "indexedLoop", ")", ":", "if", "(", "len", "(", "indexedLoop", ")", "<", "3", ")", ":", "return", "lastNormal", "=", "None", "for", "(", "pointIndex", ",", "point", ")", "in", "enumerate", "(", "indexedLoop", ")", ":", "center", "=", "indexedLoop", "[", "(", "(", "pointIndex", "+", "1", ")", "%", "len", "(", "indexedLoop", ")", ")", "]", "end", "=", "indexedLoop", "[", "(", "(", "pointIndex", "+", "2", ")", "%", "len", "(", "indexedLoop", ")", ")", "]", "normal", "=", "euclidean", ".", "getNormalWeighted", "(", "point", ",", "center", ",", "end", ")", "if", "(", "abs", "(", "normal", ")", ">", "0.0", ")", ":", "if", "(", "lastNormal", "!=", "None", ")", ":", "if", "(", "lastNormal", ".", "dot", "(", "normal", ")", "<", "0.0", ")", ":", "addFacesByConcaveLoop", "(", "faces", ",", "indexedLoop", ")", "return", "lastNormal", "=", "normal", "addFacesByConvex", "(", "faces", ",", "indexedLoop", ")" ]
add faces from a polygon which may be concave .
train
false
42,428
def proj_plane_pixel_area(wcs):
    psm = wcs.celestial.pixel_scale_matrix
    if (psm.shape != (2, 2)):
        raise ValueError(u'Pixel area is defined only for 2D pixels.')
    return np.abs(np.linalg.det(psm))
[ "def", "proj_plane_pixel_area", "(", "wcs", ")", ":", "psm", "=", "wcs", ".", "celestial", ".", "pixel_scale_matrix", "if", "(", "psm", ".", "shape", "!=", "(", "2", ",", "2", ")", ")", ":", "raise", "ValueError", "(", "u'Pixel area is defined only for 2D pixels.'", ")", "return", "np", ".", "abs", "(", "np", ".", "linalg", ".", "det", "(", "psm", ")", ")" ]
for a **celestial** wcs returns pixel area of the image pixel at the crpix location once it is projected onto the "plane of intermediate world coordinates" as defined in greisen & calabretta 2002 .
train
false
42,429
def list_policy_versions(policyName, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        vers = []
        for ret in salt.utils.boto3.paged_call(conn.list_policy_versions, marker_flag='nextMarker', marker_arg='marker', policyName=policyName):
            vers.extend(ret['policyVersions'])
        if (not bool(vers)):
            log.warning('No versions found')
        return {'policyVersions': vers}
    except ClientError as e:
        return {'error': salt.utils.boto3.get_error(e)}
[ "def", "list_policy_versions", "(", "policyName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "vers", "=", "[", "]", "for", "ret", "in", "salt", ".", "utils", ".", "boto3", ".", "paged_call", "(", "conn", ".", "list_policy_versions", ",", "marker_flag", "=", "'nextMarker'", ",", "marker_arg", "=", "'marker'", ",", "policyName", "=", "policyName", ")", ":", "vers", ".", "extend", "(", "ret", "[", "'policyVersions'", "]", ")", "if", "(", "not", "bool", "(", "vers", ")", ")", ":", "log", ".", "warning", "(", "'No versions found'", ")", "return", "{", "'policyVersions'", ":", "vers", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
list the versions available for the given policy .
train
false
42,433
def getEvaluatedDictionaryByCopyKeys(copyKeys, elementNode):
    evaluatedDictionary = {}
    for key in elementNode.attributes.keys():
        if (key in copyKeys):
            evaluatedDictionary[key] = elementNode.attributes[key]
        else:
            addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key)
    return evaluatedDictionary
[ "def", "getEvaluatedDictionaryByCopyKeys", "(", "copyKeys", ",", "elementNode", ")", ":", "evaluatedDictionary", "=", "{", "}", "for", "key", "in", "elementNode", ".", "attributes", ".", "keys", "(", ")", ":", "if", "(", "key", "in", "copyKeys", ")", ":", "evaluatedDictionary", "[", "key", "]", "=", "elementNode", ".", "attributes", "[", "key", "]", "else", ":", "addValueToEvaluatedDictionary", "(", "elementNode", ",", "evaluatedDictionary", ",", "key", ")", "return", "evaluatedDictionary" ]
get the evaluated dictionary by copykeys .
train
false
42,434
def _validate_validator(obj_type, validator):
    reference_dict = VALIDATOR_SPECS[obj_type]
    assert (('id' in validator) and (validator['id'] in reference_dict))
    customization_keys = validator.keys()
    customization_keys.remove('id')
    assert (set(customization_keys) == set(reference_dict[validator['id']].keys()))
    for key in customization_keys:
        value = validator[key]
        schema = reference_dict[validator['id']][key]
        try:
            schema_utils.normalize_against_schema(value, schema)
        except Exception as e:
            raise AssertionError(e)
    validator_fn = schema_utils._Validators.get(validator['id'])
    assert (set(inspect.getargspec(validator_fn).args) == set((customization_keys + ['obj'])))
[ "def", "_validate_validator", "(", "obj_type", ",", "validator", ")", ":", "reference_dict", "=", "VALIDATOR_SPECS", "[", "obj_type", "]", "assert", "(", "(", "'id'", "in", "validator", ")", "and", "(", "validator", "[", "'id'", "]", "in", "reference_dict", ")", ")", "customization_keys", "=", "validator", ".", "keys", "(", ")", "customization_keys", ".", "remove", "(", "'id'", ")", "assert", "(", "set", "(", "customization_keys", ")", "==", "set", "(", "reference_dict", "[", "validator", "[", "'id'", "]", "]", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "customization_keys", ":", "value", "=", "validator", "[", "key", "]", "schema", "=", "reference_dict", "[", "validator", "[", "'id'", "]", "]", "[", "key", "]", "try", ":", "schema_utils", ".", "normalize_against_schema", "(", "value", ",", "schema", ")", "except", "Exception", "as", "e", ":", "raise", "AssertionError", "(", "e", ")", "validator_fn", "=", "schema_utils", ".", "_Validators", ".", "get", "(", "validator", "[", "'id'", "]", ")", "assert", "(", "set", "(", "inspect", ".", "getargspec", "(", "validator_fn", ")", ".", "args", ")", "==", "set", "(", "(", "customization_keys", "+", "[", "'obj'", "]", ")", ")", ")" ]
validates the value of a validator field .
train
false
42,435
def connect_logs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    from boto.logs.layer1 import CloudWatchLogsConnection
    return CloudWatchLogsConnection(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs)
[ "def", "connect_logs", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "logs", ".", "layer1", "import", "CloudWatchLogsConnection", "return", "CloudWatchLogsConnection", "(", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "**", "kwargs", ")" ]
connect to amazon cloudwatch logs :type aws_access_key_id: string .
train
false
42,436
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
    if (not isinstance(codec_options, CodecOptions)):
        raise _CODEC_OPTIONS_TYPE_ERROR
    position = 0
    end = (len(data) - 1)
    while (position < end):
        obj_size = _UNPACK_INT(data[position:(position + 4)])[0]
        elements = data[position:(position + obj_size)]
        position += obj_size
        (yield _bson_to_dict(elements, codec_options))
[ "def", "decode_iter", "(", "data", ",", "codec_options", "=", "DEFAULT_CODEC_OPTIONS", ")", ":", "if", "(", "not", "isinstance", "(", "codec_options", ",", "CodecOptions", ")", ")", ":", "raise", "_CODEC_OPTIONS_TYPE_ERROR", "position", "=", "0", "end", "=", "(", "len", "(", "data", ")", "-", "1", ")", "while", "(", "position", "<", "end", ")", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", "position", ":", "(", "position", "+", "4", ")", "]", ")", "[", "0", "]", "elements", "=", "data", "[", "position", ":", "(", "position", "+", "obj_size", ")", "]", "position", "+=", "obj_size", "(", "yield", "_bson_to_dict", "(", "elements", ",", "codec_options", ")", ")" ]
decode bson data to multiple documents as a generator .
train
true
42,437
@register.filter('escape', is_safe=True)
@stringfilter
def escape_filter(value):
    return mark_for_escaping(value)
[ "@", "register", ".", "filter", "(", "'escape'", ",", "is_safe", "=", "True", ")", "@", "stringfilter", "def", "escape_filter", "(", "value", ")", ":", "return", "mark_for_escaping", "(", "value", ")" ]
marks the value as a string that should not be auto-escaped .
train
false
42,440
def _consume_entries(logger):
    return list(logger.list_entries())
[ "def", "_consume_entries", "(", "logger", ")", ":", "return", "list", "(", "logger", ".", "list_entries", "(", ")", ")" ]
consume all log entries from logger iterator .
train
false
42,441
@receiver(score_set)
def submissions_score_set_handler(sender, **kwargs):
    points_possible = kwargs['points_possible']
    points_earned = kwargs['points_earned']
    course_id = kwargs['course_id']
    usage_id = kwargs['item_id']
    user = user_by_anonymous_id(kwargs['anonymous_user_id'])
    if (user is None):
        return
    PROBLEM_WEIGHTED_SCORE_CHANGED.send(sender=None, weighted_earned=points_earned, weighted_possible=points_possible, user_id=user.id, anonymous_user_id=kwargs['anonymous_user_id'], course_id=course_id, usage_id=usage_id, modified=kwargs['created_at'], score_db_table=ScoreDatabaseTableEnum.submissions)
[ "@", "receiver", "(", "score_set", ")", "def", "submissions_score_set_handler", "(", "sender", ",", "**", "kwargs", ")", ":", "points_possible", "=", "kwargs", "[", "'points_possible'", "]", "points_earned", "=", "kwargs", "[", "'points_earned'", "]", "course_id", "=", "kwargs", "[", "'course_id'", "]", "usage_id", "=", "kwargs", "[", "'item_id'", "]", "user", "=", "user_by_anonymous_id", "(", "kwargs", "[", "'anonymous_user_id'", "]", ")", "if", "(", "user", "is", "None", ")", ":", "return", "PROBLEM_WEIGHTED_SCORE_CHANGED", ".", "send", "(", "sender", "=", "None", ",", "weighted_earned", "=", "points_earned", ",", "weighted_possible", "=", "points_possible", ",", "user_id", "=", "user", ".", "id", ",", "anonymous_user_id", "=", "kwargs", "[", "'anonymous_user_id'", "]", ",", "course_id", "=", "course_id", ",", "usage_id", "=", "usage_id", ",", "modified", "=", "kwargs", "[", "'created_at'", "]", ",", "score_db_table", "=", "ScoreDatabaseTableEnum", ".", "submissions", ")" ]
consume the score_set signal defined in the submissions api .
train
false
42,442
def is_class_sealed(klass):
    mro = inspect.getmro(klass)
    new = False
    if (mro[(-1)] is object):
        mro = mro[:(-1)]
        new = True
    for kls in mro:
        if (new and ('__dict__' in kls.__dict__)):
            return False
        if (not hasattr(kls, '__slots__')):
            return False
    return True
[ "def", "is_class_sealed", "(", "klass", ")", ":", "mro", "=", "inspect", ".", "getmro", "(", "klass", ")", "new", "=", "False", "if", "(", "mro", "[", "(", "-", "1", ")", "]", "is", "object", ")", ":", "mro", "=", "mro", "[", ":", "(", "-", "1", ")", "]", "new", "=", "True", "for", "kls", "in", "mro", ":", "if", "(", "new", "and", "(", "'__dict__'", "in", "kls", ".", "__dict__", ")", ")", ":", "return", "False", "if", "(", "not", "hasattr", "(", "kls", ",", "'__slots__'", ")", ")", ":", "return", "False", "return", "True" ]
whether or not the supplied class can accept dynamic properties .
train
true
42,443
@app.route('/get', methods=('GET',))
def view_get():
    return jsonify(get_dict('url', 'args', 'headers', 'origin'))
[ "@", "app", ".", "route", "(", "'/get'", ",", "methods", "=", "(", "'GET'", ",", ")", ")", "def", "view_get", "(", ")", ":", "return", "jsonify", "(", "get_dict", "(", "'url'", ",", "'args'", ",", "'headers'", ",", "'origin'", ")", ")" ]
returns get data .
train
false
42,444
def add_hook_log(node, github, action, path, date, committer, include_urls=False, sha=None, save=False):
    github_data = {'user': github.user, 'repo': github.repo}
    urls = {}
    if include_urls:
        url = node.web_url_for('addon_view_or_download_file', path=path, provider=SHORT_NAME)
        urls = {'view': '{0}?ref={1}'.format(url, sha), 'download': '{0}?action=download&ref={1}'.format(url, sha)}
    node.add_log(action=action, params={'project': node.parent_id, 'node': node._id, 'path': path, 'github': github_data, 'urls': urls}, auth=None, foreign_user=committer, log_date=date, save=save)
[ "def", "add_hook_log", "(", "node", ",", "github", ",", "action", ",", "path", ",", "date", ",", "committer", ",", "include_urls", "=", "False", ",", "sha", "=", "None", ",", "save", "=", "False", ")", ":", "github_data", "=", "{", "'user'", ":", "github", ".", "user", ",", "'repo'", ":", "github", ".", "repo", "}", "urls", "=", "{", "}", "if", "include_urls", ":", "url", "=", "node", ".", "web_url_for", "(", "'addon_view_or_download_file'", ",", "path", "=", "path", ",", "provider", "=", "SHORT_NAME", ")", "urls", "=", "{", "'view'", ":", "'{0}?ref={1}'", ".", "format", "(", "url", ",", "sha", ")", ",", "'download'", ":", "'{0}?action=download&ref={1}'", ".", "format", "(", "url", ",", "sha", ")", "}", "node", ".", "add_log", "(", "action", "=", "action", ",", "params", "=", "{", "'project'", ":", "node", ".", "parent_id", ",", "'node'", ":", "node", ".", "_id", ",", "'path'", ":", "path", ",", "'github'", ":", "github_data", ",", "'urls'", ":", "urls", "}", ",", "auth", "=", "None", ",", "foreign_user", "=", "committer", ",", "log_date", "=", "date", ",", "save", "=", "save", ")" ]
add log event for commit from webhook payload .
train
false
42,445
def RebuildProxy(func, token, serializer, kwds):
    server = getattr(process.current_process(), '_manager_server', None)
    if (server and (server.address == token.address)):
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if (token.id not in server.id_to_local_proxy_obj):
            server.id_to_local_proxy_obj[token.id] = server.id_to_obj[token.id]
    incref = (kwds.pop('incref', True) and (not getattr(process.current_process(), '_inheriting', False)))
    return func(token, serializer, incref=incref, **kwds)
[ "def", "RebuildProxy", "(", "func", ",", "token", ",", "serializer", ",", "kwds", ")", ":", "server", "=", "getattr", "(", "process", ".", "current_process", "(", ")", ",", "'_manager_server'", ",", "None", ")", "if", "(", "server", "and", "(", "server", ".", "address", "==", "token", ".", "address", ")", ")", ":", "util", ".", "debug", "(", "'Rebuild a proxy owned by manager, token=%r'", ",", "token", ")", "kwds", "[", "'manager_owned'", "]", "=", "True", "if", "(", "token", ".", "id", "not", "in", "server", ".", "id_to_local_proxy_obj", ")", ":", "server", ".", "id_to_local_proxy_obj", "[", "token", ".", "id", "]", "=", "server", ".", "id_to_obj", "[", "token", ".", "id", "]", "incref", "=", "(", "kwds", ".", "pop", "(", "'incref'", ",", "True", ")", "and", "(", "not", "getattr", "(", "process", ".", "current_process", "(", ")", ",", "'_inheriting'", ",", "False", ")", ")", ")", "return", "func", "(", "token", ",", "serializer", ",", "incref", "=", "incref", ",", "**", "kwds", ")" ]
function used for unpickling proxy objects .
train
false
42,446
@periodic_task(run_every=timedelta(hours=24))
def invalidate_group_membership():
    from mozillians.groups.models import Group, GroupMembership
    groups = Group.objects.filter(invalidation_days__isnull=False)
    for group in groups:
        curator_ids = group.curators.all().values_list('id', flat=True)
        memberships = group.groupmembership_set.filter(status=GroupMembership.MEMBER).exclude(userprofile__id__in=curator_ids)
        last_update = (datetime.now() - timedelta(days=group.invalidation_days))
        memberships = memberships.filter(updated_on__lte=last_update)
        for member in memberships:
            group.remove_member(member.userprofile)
[ "@", "periodic_task", "(", "run_every", "=", "timedelta", "(", "hours", "=", "24", ")", ")", "def", "invalidate_group_membership", "(", ")", ":", "from", "mozillians", ".", "groups", ".", "models", "import", "Group", ",", "GroupMembership", "groups", "=", "Group", ".", "objects", ".", "filter", "(", "invalidation_days__isnull", "=", "False", ")", "for", "group", "in", "groups", ":", "curator_ids", "=", "group", ".", "curators", ".", "all", "(", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", "memberships", "=", "group", ".", "groupmembership_set", ".", "filter", "(", "status", "=", "GroupMembership", ".", "MEMBER", ")", ".", "exclude", "(", "userprofile__id__in", "=", "curator_ids", ")", "last_update", "=", "(", "datetime", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "group", ".", "invalidation_days", ")", ")", "memberships", "=", "memberships", ".", "filter", "(", "updated_on__lte", "=", "last_update", ")", "for", "member", "in", "memberships", ":", "group", ".", "remove_member", "(", "member", ".", "userprofile", ")" ]
for groups with defined invalidation_days we need to invalidate user membership after timedelta .
train
false
42,447
def get_permission_message(permission_code):
    default_message = _('Insufficient rights to access this directory.')
    return {'suggest': _('Insufficient rights to access suggestion mode.'), 'translate': _('Insufficient rights to access translation mode.'), 'review': _('Insufficient rights to access review mode.')}.get(permission_code, default_message)
[ "def", "get_permission_message", "(", "permission_code", ")", ":", "default_message", "=", "_", "(", "'Insufficient rights to access this directory.'", ")", "return", "{", "'suggest'", ":", "_", "(", "'Insufficient rights to access suggestion mode.'", ")", ",", "'translate'", ":", "_", "(", "'Insufficient rights to access translation mode.'", ")", ",", "'review'", ":", "_", "(", "'Insufficient rights to access review mode.'", ")", "}", ".", "get", "(", "permission_code", ",", "default_message", ")" ]
returns a human-readable message when permission_code is not met by the current context .
train
false
42,448
def test_dummy_user_service_exception():
    user_service = UserService()
    with assert_raises(NotImplementedError):
        user_service.get_current_user()
[ "def", "test_dummy_user_service_exception", "(", ")", ":", "user_service", "=", "UserService", "(", ")", "with", "assert_raises", "(", "NotImplementedError", ")", ":", "user_service", ".", "get_current_user", "(", ")" ]
tests notimplemented error raised by userservice when not instantiated with kwarg get_current_user .
train
false
42,449
def iter_format(nitems, testobj='ndarray'):
    for t in iter_mode(nitems, testobj):
        (yield t)
    if (testobj != 'ndarray'):
        raise StopIteration
    (yield struct_items(nitems, testobj))
[ "def", "iter_format", "(", "nitems", ",", "testobj", "=", "'ndarray'", ")", ":", "for", "t", "in", "iter_mode", "(", "nitems", ",", "testobj", ")", ":", "(", "yield", "t", ")", "if", "(", "testobj", "!=", "'ndarray'", ")", ":", "raise", "StopIteration", "(", "yield", "struct_items", "(", "nitems", ",", "testobj", ")", ")" ]
yield for all possible modes and format characters plus one random compound format string .
train
false
42,451
def log_unsupported_driver_warning(driver):
    if (not driver.supported):
        LOG.warning(_LW('Volume driver (%(driver_name)s %(version)s) is currently unsupported and may be removed in the next release of OpenStack. Use at your own risk.'), {'driver_name': driver.__class__.__name__, 'version': driver.get_version()}, resource={'type': 'driver', 'id': driver.__class__.__name__})
[ "def", "log_unsupported_driver_warning", "(", "driver", ")", ":", "if", "(", "not", "driver", ".", "supported", ")", ":", "LOG", ".", "warning", "(", "_LW", "(", "'Volume driver (%(driver_name)s %(version)s) is currently unsupported and may be removed in the next release of OpenStack. Use at your own risk.'", ")", ",", "{", "'driver_name'", ":", "driver", ".", "__class__", ".", "__name__", ",", "'version'", ":", "driver", ".", "get_version", "(", ")", "}", ",", "resource", "=", "{", "'type'", ":", "'driver'", ",", "'id'", ":", "driver", ".", "__class__", ".", "__name__", "}", ")" ]
annoy the log about unsupported drivers .
train
false
42,452
def database_exists(name, **kwargs):
    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        res = query(("SHOW DATABASES LIKE '%(name)s';" % {'name': name}), **kwargs)
    return (res.succeeded and (res == name))
[ "def", "database_exists", "(", "name", ",", "**", "kwargs", ")", ":", "with", "settings", "(", "hide", "(", "'running'", ",", "'stdout'", ",", "'stderr'", ",", "'warnings'", ")", ",", "warn_only", "=", "True", ")", ":", "res", "=", "query", "(", "(", "\"SHOW DATABASES LIKE '%(name)s';\"", "%", "{", "'name'", ":", "name", "}", ")", ",", "**", "kwargs", ")", "return", "(", "res", ".", "succeeded", "and", "(", "res", "==", "name", ")", ")" ]
check if a mysql database exists .
train
true
42,453
def string_metadata(registry, xml_parent, data):
    pdef = base_metadata(registry, xml_parent, data, 'metadata-string')
    value = data.get('value', '')
    XML.SubElement(pdef, 'value').text = value
[ "def", "string_metadata", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "pdef", "=", "base_metadata", "(", "registry", ",", "xml_parent", ",", "data", ",", "'metadata-string'", ")", "value", "=", "data", ".", "get", "(", "'value'", ",", "''", ")", "XML", ".", "SubElement", "(", "pdef", ",", "'value'", ")", ".", "text", "=", "value" ]
yaml: string a string metadata .
train
false
42,454
def _do_studio_prompt_action(intent, action):
    assert (intent in ['warning', 'error', 'confirmation', 'announcement', 'step-required', 'help', 'mini'])
    assert (action in ['primary', 'secondary'])
    world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
    action_css = 'li.nav-item > button.action-{}'.format(action)
    world.trigger_event(action_css, event='focus')
    world.browser.execute_script("$('{}').click()".format(action_css))
    world.wait_for_ajax_complete()
    world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
[ "def", "_do_studio_prompt_action", "(", "intent", ",", "action", ")", ":", "assert", "(", "intent", "in", "[", "'warning'", ",", "'error'", ",", "'confirmation'", ",", "'announcement'", ",", "'step-required'", ",", "'help'", ",", "'mini'", "]", ")", "assert", "(", "action", "in", "[", "'primary'", ",", "'secondary'", "]", ")", "world", ".", "wait_for_present", "(", "'div.wrapper-prompt.is-shown#prompt-{}'", ".", "format", "(", "intent", ")", ")", "action_css", "=", "'li.nav-item > button.action-{}'", ".", "format", "(", "action", ")", "world", ".", "trigger_event", "(", "action_css", ",", "event", "=", "'focus'", ")", "world", ".", "browser", ".", "execute_script", "(", "\"$('{}').click()\"", ".", "format", "(", "action_css", ")", ")", "world", ".", "wait_for_ajax_complete", "(", ")", "world", ".", "wait_for_present", "(", "'div.wrapper-prompt.is-hiding#prompt-{}'", ".", "format", "(", "intent", ")", ")" ]
wait for a studio prompt to appear and press the specified action button see common/js/components/views/feedback_prompt .
train
false
42,456
def test_inputless_model():
    class TestModel(Model, ):
        inputs = ()
        outputs = (u'y',)
        a = Parameter()

        @staticmethod
        def evaluate(a):
            return a

    m = TestModel(1)
    assert (m.a == 1)
    assert (m() == 1)
    m = TestModel([1, 2, 3], model_set_axis=False)
    assert (len(m) == 1)
    assert np.all((m() == [1, 2, 3]))
    m = TestModel(a=[1, 2, 3], model_set_axis=0)
    assert (len(m) == 3)
    assert np.all((m() == [1, 2, 3]))
    m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
    assert (len(m) == 2)
    assert np.all((m() == [[1, 2, 3], [4, 5, 6]]))
[ "def", "test_inputless_model", "(", ")", ":", "class", "TestModel", "(", "Model", ",", ")", ":", "inputs", "=", "(", ")", "outputs", "=", "(", "u'y'", ",", ")", "a", "=", "Parameter", "(", ")", "@", "staticmethod", "def", "evaluate", "(", "a", ")", ":", "return", "a", "m", "=", "TestModel", "(", "1", ")", "assert", "(", "m", ".", "a", "==", "1", ")", "assert", "(", "m", "(", ")", "==", "1", ")", "m", "=", "TestModel", "(", "[", "1", ",", "2", ",", "3", "]", ",", "model_set_axis", "=", "False", ")", "assert", "(", "len", "(", "m", ")", "==", "1", ")", "assert", "np", ".", "all", "(", "(", "m", "(", ")", "==", "[", "1", ",", "2", ",", "3", "]", ")", ")", "m", "=", "TestModel", "(", "a", "=", "[", "1", ",", "2", ",", "3", "]", ",", "model_set_axis", "=", "0", ")", "assert", "(", "len", "(", "m", ")", "==", "3", ")", "assert", "np", ".", "all", "(", "(", "m", "(", ")", "==", "[", "1", ",", "2", ",", "3", "]", ")", ")", "m", "=", "TestModel", "(", "a", "=", "[", "[", "1", ",", "2", ",", "3", "]", ",", "[", "4", ",", "5", ",", "6", "]", "]", ",", "model_set_axis", "=", "0", ")", "assert", "(", "len", "(", "m", ")", "==", "2", ")", "assert", "np", ".", "all", "(", "(", "m", "(", ")", "==", "[", "[", "1", ",", "2", ",", "3", "]", ",", "[", "4", ",", "5", ",", "6", "]", "]", ")", ")" ]
regression test for URL#issuecomment-101821641 .
train
false
42,457
def test_same_but_different():
    AreEqual({(-10): 0, (-10L): 1}, {(-10): 1})
[ "def", "test_same_but_different", "(", ")", ":", "AreEqual", "(", "{", "(", "-", "10", ")", ":", "0", ",", "(", "-", "10", "L", ")", ":", "1", "}", ",", "{", "(", "-", "10", ")", ":", "1", "}", ")" ]
test case checks that when two values who are logically different but share hash code & equality result in only a single entry .
train
false
42,458
def get_embargo_response(request, course_id, user):
    redirect_url = redirect_if_blocked(course_id, user=user, ip_address=get_ip(request), url=request.path)
    if redirect_url:
        return Response(status=status.HTTP_403_FORBIDDEN, data={'message': u"Users from this location cannot access the course '{course_id}'.".format(course_id=course_id), 'user_message_url': request.build_absolute_uri(redirect_url)})
[ "def", "get_embargo_response", "(", "request", ",", "course_id", ",", "user", ")", ":", "redirect_url", "=", "redirect_if_blocked", "(", "course_id", ",", "user", "=", "user", ",", "ip_address", "=", "get_ip", "(", "request", ")", ",", "url", "=", "request", ".", "path", ")", "if", "redirect_url", ":", "return", "Response", "(", "status", "=", "status", ".", "HTTP_403_FORBIDDEN", ",", "data", "=", "{", "'message'", ":", "u\"Users from this location cannot access the course '{course_id}'.\"", ".", "format", "(", "course_id", "=", "course_id", ")", ",", "'user_message_url'", ":", "request", ".", "build_absolute_uri", "(", "redirect_url", ")", "}", ")" ]
check whether any country access rules block the user from enrollment .
train
false
42,459
def digit(uni_char, default_value=None):
    uni_char = unicod(uni_char)
    if (default_value is not None):
        return unicodedata.digit(uni_char, default_value)
    else:
        return unicodedata.digit(uni_char)
[ "def", "digit", "(", "uni_char", ",", "default_value", "=", "None", ")", ":", "uni_char", "=", "unicod", "(", "uni_char", ")", "if", "(", "default_value", "is", "not", "None", ")", ":", "return", "unicodedata", ".", "digit", "(", "uni_char", ",", "default_value", ")", "else", ":", "return", "unicodedata", ".", "digit", "(", "uni_char", ")" ]
returns the digit value assigned to the unicode character uni_char as integer .
train
true
42,460
def advance_some(clock):
    clock.advance(1)
[ "def", "advance_some", "(", "clock", ")", ":", "clock", ".", "advance", "(", "1", ")" ]
move the clock forward by a little time .
train
false
42,461
def plugin_aware_extension_middleware_factory(global_config, **local_config):
    def _factory(app):
        ext_mgr = PluginAwareExtensionManager.get_instance()
        return ExtensionMiddleware(app, ext_mgr=ext_mgr)
    return _factory
[ "def", "plugin_aware_extension_middleware_factory", "(", "global_config", ",", "**", "local_config", ")", ":", "def", "_factory", "(", "app", ")", ":", "ext_mgr", "=", "PluginAwareExtensionManager", ".", "get_instance", "(", ")", "return", "ExtensionMiddleware", "(", "app", ",", "ext_mgr", "=", "ext_mgr", ")", "return", "_factory" ]
paste factory .
train
false
42,462
def stubout_is_snapshot(stubs):
    def f(*args):
        return True
    stubs.Set(vm_utils, 'is_snapshot', f)
[ "def", "stubout_is_snapshot", "(", "stubs", ")", ":", "def", "f", "(", "*", "args", ")", ":", "return", "True", "stubs", ".", "Set", "(", "vm_utils", ",", "'is_snapshot'", ",", "f", ")" ]
always returns true xenapi fake driver does not create vmrefs for snapshots .
train
false
42,463
def item_condition(item):
    return u'new'
[ "def", "item_condition", "(", "item", ")", ":", "return", "u'new'" ]
allowed values: new .
train
false
42,465
def _get_user_via_api_key(api_key):
    user = User(Participant._from_thing('api_key', api_key))
    if user.participant:
        p = user.participant
        today = date.today()
        if (p.old_auth_usage != today):
            Participant.db.run('\n UPDATE participants\n SET old_auth_usage = %s\n WHERE id = %s\n ', (today, p.id))
    return user
[ "def", "_get_user_via_api_key", "(", "api_key", ")", ":", "user", "=", "User", "(", "Participant", ".", "_from_thing", "(", "'api_key'", ",", "api_key", ")", ")", "if", "user", ".", "participant", ":", "p", "=", "user", ".", "participant", "today", "=", "date", ".", "today", "(", ")", "if", "(", "p", ".", "old_auth_usage", "!=", "today", ")", ":", "Participant", ".", "db", ".", "run", "(", "'\\n UPDATE participants\\n SET old_auth_usage = %s\\n WHERE id = %s\\n '", ",", "(", "today", ",", "p", ".", "id", ")", ")", "return", "user" ]
given an api_key .
train
false
42,468
def module_manifest(path):
    if (not path):
        return None
    for manifest_name in MANIFEST_NAMES:
        if os.path.isfile(opj(path, manifest_name)):
            return opj(path, manifest_name)
[ "def", "module_manifest", "(", "path", ")", ":", "if", "(", "not", "path", ")", ":", "return", "None", "for", "manifest_name", "in", "MANIFEST_NAMES", ":", "if", "os", ".", "path", ".", "isfile", "(", "opj", "(", "path", ",", "manifest_name", ")", ")", ":", "return", "opj", "(", "path", ",", "manifest_name", ")" ]
returns path to module manifest if one can be found under path .
train
false
42,470
def invalid_args(func, argdict):
    (args, _, keywords, _) = inspect.getargspec(func)
    if keywords:
        return set()
    return (set(argdict) - set(args))
[ "def", "invalid_args", "(", "func", ",", "argdict", ")", ":", "(", "args", ",", "_", ",", "keywords", ",", "_", ")", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "keywords", ":", "return", "set", "(", ")", "return", "(", "set", "(", "argdict", ")", "-", "set", "(", "args", ")", ")" ]
given a function and a dictionary of arguments .
train
false
42,471
def is_special_file(path):
    mode = os.stat(path).st_mode
    if stat.S_ISCHR(mode):
        return True
    if stat.S_ISBLK(mode):
        return True
    if stat.S_ISFIFO(mode):
        return True
    if stat.S_ISSOCK(mode):
        return True
    return False
[ "def", "is_special_file", "(", "path", ")", ":", "mode", "=", "os", ".", "stat", "(", "path", ")", ".", "st_mode", "if", "stat", ".", "S_ISCHR", "(", "mode", ")", ":", "return", "True", "if", "stat", ".", "S_ISBLK", "(", "mode", ")", ":", "return", "True", "if", "stat", ".", "S_ISFIFO", "(", "mode", ")", ":", "return", "True", "if", "stat", ".", "S_ISSOCK", "(", "mode", ")", ":", "return", "True", "return", "False" ]
this function checks to see if a special file .
train
true
42,474
def _average_precision(y_true, y_score):
    pos_label = np.unique(y_true)[1]
    n_pos = np.sum((y_true == pos_label))
    order = np.argsort(y_score)[::(-1)]
    y_score = y_score[order]
    y_true = y_true[order]
    score = 0
    for i in range(len(y_score)):
        if (y_true[i] == pos_label):
            prec = 0
            for j in range(0, (i + 1)):
                if (y_true[j] == pos_label):
                    prec += 1.0
            prec /= (i + 1.0)
            score += prec
    return (score / n_pos)
[ "def", "_average_precision", "(", "y_true", ",", "y_score", ")", ":", "pos_label", "=", "np", ".", "unique", "(", "y_true", ")", "[", "1", "]", "n_pos", "=", "np", ".", "sum", "(", "(", "y_true", "==", "pos_label", ")", ")", "order", "=", "np", ".", "argsort", "(", "y_score", ")", "[", ":", ":", "(", "-", "1", ")", "]", "y_score", "=", "y_score", "[", "order", "]", "y_true", "=", "y_true", "[", "order", "]", "score", "=", "0", "for", "i", "in", "range", "(", "len", "(", "y_score", ")", ")", ":", "if", "(", "y_true", "[", "i", "]", "==", "pos_label", ")", ":", "prec", "=", "0", "for", "j", "in", "range", "(", "0", ",", "(", "i", "+", "1", ")", ")", ":", "if", "(", "y_true", "[", "j", "]", "==", "pos_label", ")", ":", "prec", "+=", "1.0", "prec", "/=", "(", "i", "+", "1.0", ")", "score", "+=", "prec", "return", "(", "score", "/", "n_pos", ")" ]
alternative implementation to check for correctness of average_precision_score .
train
false
42,476
def _as_dtype_value(tyargs, args):
    return [np.dtype(str(ty)).type(val) for (ty, val) in zip(tyargs, args)]
[ "def", "_as_dtype_value", "(", "tyargs", ",", "args", ")", ":", "return", "[", "np", ".", "dtype", "(", "str", "(", "ty", ")", ")", ".", "type", "(", "val", ")", "for", "(", "ty", ",", "val", ")", "in", "zip", "(", "tyargs", ",", "args", ")", "]" ]
convert python values into numpy scalar objects .
train
false
42,477
def format_s3_location(user, key, authurl, bucket, obj):
    scheme = 's3'
    if authurl.startswith('https://'):
        scheme = 's3+https'
        authurl = authurl[8:]
    elif authurl.startswith('http://'):
        authurl = authurl[7:]
    authurl = authurl.strip('/')
    return ('%s://%s:%s@%s/%s/%s' % (scheme, user, key, authurl, bucket, obj))
[ "def", "format_s3_location", "(", "user", ",", "key", ",", "authurl", ",", "bucket", ",", "obj", ")", ":", "scheme", "=", "'s3'", "if", "authurl", ".", "startswith", "(", "'https://'", ")", ":", "scheme", "=", "'s3+https'", "authurl", "=", "authurl", "[", "8", ":", "]", "elif", "authurl", ".", "startswith", "(", "'http://'", ")", ":", "authurl", "=", "authurl", "[", "7", ":", "]", "authurl", "=", "authurl", ".", "strip", "(", "'/'", ")", "return", "(", "'%s://%s:%s@%s/%s/%s'", "%", "(", "scheme", ",", "user", ",", "key", ",", "authurl", ",", "bucket", ",", "obj", ")", ")" ]
helper method that returns a s3 store uri given the component pieces .
train
false
42,478
def format_alignment(align1, align2, score, begin, end):
    s = []
    s.append(('%s\n' % align1))
    s.append(('%s%s\n' % ((' ' * begin), ('|' * (end - begin)))))
    s.append(('%s\n' % align2))
    s.append((' Score=%g\n' % score))
    return ''.join(s)
[ "def", "format_alignment", "(", "align1", ",", "align2", ",", "score", ",", "begin", ",", "end", ")", ":", "s", "=", "[", "]", "s", ".", "append", "(", "(", "'%s\\n'", "%", "align1", ")", ")", "s", ".", "append", "(", "(", "'%s%s\\n'", "%", "(", "(", "' '", "*", "begin", ")", ",", "(", "'|'", "*", "(", "end", "-", "begin", ")", ")", ")", ")", ")", "s", ".", "append", "(", "(", "'%s\\n'", "%", "align2", ")", ")", "s", ".", "append", "(", "(", "' Score=%g\\n'", "%", "score", ")", ")", "return", "''", ".", "join", "(", "s", ")" ]
format_alignment -> string : format the alignment prettily into a string .
train
false
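A small sketch of the pretty-printer above on a hypothetical pairwise alignment (the kind returned by Bio.pairwise2):

    align1, align2 = 'ACCGT', 'A-CGT'
    # begin/end delimit the aligned region; '|' marks each aligned column.
    print(format_alignment(align1, align2, 3.0, 0, 5))
    # ACCGT
    # |||||
    # A-CGT
    #   Score=3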
42,480
def read_int16(fid): return _unpack_simple(fid, '>i2', np.int16)
[ "def", "read_int16", "(", "fid", ")", ":", "return", "_unpack_simple", "(", "fid", ",", "'>i2'", ",", "np", ".", "int16", ")" ]
read a 16-bit integer from a bti file .
train
false
42,481
def perfTest(lossy=True): topo = SingleSwitchTopo(n=4, lossy=lossy) net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink, autoStaticArp=True) net.start() info('Dumping host connections\n') dumpNodeConnections(net.hosts) info('Testing bandwidth between h1 and h4\n') (h1, h4) = net.getNodeByName('h1', 'h4') net.iperf((h1, h4), l4Type='UDP') net.stop()
[ "def", "perfTest", "(", "lossy", "=", "True", ")", ":", "topo", "=", "SingleSwitchTopo", "(", "n", "=", "4", ",", "lossy", "=", "lossy", ")", "net", "=", "Mininet", "(", "topo", "=", "topo", ",", "host", "=", "CPULimitedHost", ",", "link", "=", "TCLink", ",", "autoStaticArp", "=", "True", ")", "net", ".", "start", "(", ")", "info", "(", "'Dumping host connections\\n'", ")", "dumpNodeConnections", "(", "net", ".", "hosts", ")", "info", "(", "'Testing bandwidth between h1 and h4\\n'", ")", "(", "h1", ",", "h4", ")", "=", "net", ".", "getNodeByName", "(", "'h1'", ",", "'h4'", ")", "net", ".", "iperf", "(", "(", "h1", ",", "h4", ")", ",", "l4Type", "=", "'UDP'", ")", "net", ".", "stop", "(", ")" ]
create network and run simple performance test .
train
false
42,482
def _new_mem_buf(buffer=None): if (buffer is None): bio = _lib.BIO_new(_lib.BIO_s_mem()) free = _lib.BIO_free else: data = _ffi.new('char[]', buffer) bio = _lib.BIO_new_mem_buf(data, len(buffer)) def free(bio, ref=data): return _lib.BIO_free(bio) if (bio == _ffi.NULL): _raise_current_error() bio = _ffi.gc(bio, free) return bio
[ "def", "_new_mem_buf", "(", "buffer", "=", "None", ")", ":", "if", "(", "buffer", "is", "None", ")", ":", "bio", "=", "_lib", ".", "BIO_new", "(", "_lib", ".", "BIO_s_mem", "(", ")", ")", "free", "=", "_lib", ".", "BIO_free", "else", ":", "data", "=", "_ffi", ".", "new", "(", "'char[]'", ",", "buffer", ")", "bio", "=", "_lib", ".", "BIO_new_mem_buf", "(", "data", ",", "len", "(", "buffer", ")", ")", "def", "free", "(", "bio", ",", "ref", "=", "data", ")", ":", "return", "_lib", ".", "BIO_free", "(", "bio", ")", "if", "(", "bio", "==", "_ffi", ".", "NULL", ")", ":", "_raise_current_error", "(", ")", "bio", "=", "_ffi", ".", "gc", "(", "bio", ",", "free", ")", "return", "bio" ]
allocate a new openssl memory bio .
train
true
42,483
def _replace_parenthesized_ambigs(seq, rev_ambig_values): opening = seq.find('(') while (opening > (-1)): closing = seq.find(')') if (closing < 0): raise NexusError(('Missing closing parenthesis in: ' + seq)) elif (closing < opening): raise NexusError(('Missing opening parenthesis in: ' + seq)) ambig = ''.join(sorted(seq[(opening + 1):closing])) ambig_code = rev_ambig_values[ambig.upper()] if (ambig != ambig.upper()): ambig_code = ambig_code.lower() seq = ((seq[:opening] + ambig_code) + seq[(closing + 1):]) opening = seq.find('(') return seq
[ "def", "_replace_parenthesized_ambigs", "(", "seq", ",", "rev_ambig_values", ")", ":", "opening", "=", "seq", ".", "find", "(", "'('", ")", "while", "(", "opening", ">", "(", "-", "1", ")", ")", ":", "closing", "=", "seq", ".", "find", "(", "')'", ")", "if", "(", "closing", "<", "0", ")", ":", "raise", "NexusError", "(", "(", "'Missing closing parenthesis in: '", "+", "seq", ")", ")", "elif", "(", "closing", "<", "opening", ")", ":", "raise", "NexusError", "(", "(", "'Missing opening parenthesis in: '", "+", "seq", ")", ")", "ambig", "=", "''", ".", "join", "(", "sorted", "(", "seq", "[", "(", "opening", "+", "1", ")", ":", "closing", "]", ")", ")", "ambig_code", "=", "rev_ambig_values", "[", "ambig", ".", "upper", "(", ")", "]", "if", "(", "ambig", "!=", "ambig", ".", "upper", "(", ")", ")", ":", "ambig_code", "=", "ambig_code", ".", "lower", "(", ")", "seq", "=", "(", "(", "seq", "[", ":", "opening", "]", "+", "ambig_code", ")", "+", "seq", "[", "(", "closing", "+", "1", ")", ":", "]", ")", "opening", "=", "seq", ".", "find", "(", "'('", ")", "return", "seq" ]
replaces parenthesized ambigs in xxx(acg)xxx format by the iupac ambiguity code .
train
false
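A toy invocation of the helper above; the reverse-ambiguity table here is a hand-rolled two-entry stand-in for the full iupac mapping that Bio.Nexus builds:

    # Maps a sorted run of bases to its IUPAC code (tiny illustrative subset).
    rev_ambig = {'AG': 'R', 'CT': 'Y'}
    # (AG) -> R, (TC) -> sorted to 'CT' -> Y
    print(_replace_parenthesized_ambigs('AC(AG)T(TC)A', rev_ambig))  # ACRTYA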
42,485
def _ask(fact, obj): assumptions = obj._assumptions handler_map = obj._prop_handler assumptions._tell(fact, None) try: evaluate = handler_map[fact] except KeyError: pass else: a = evaluate(obj) if (a is not None): assumptions.deduce_all_facts(((fact, a),)) return a prereq = list(_assume_rules.prereq[fact]) shuffle(prereq) for pk in prereq: if (pk in assumptions): continue if (pk in handler_map): _ask(pk, obj) ret_val = assumptions.get(fact) if (ret_val is not None): return ret_val return None
[ "def", "_ask", "(", "fact", ",", "obj", ")", ":", "assumptions", "=", "obj", ".", "_assumptions", "handler_map", "=", "obj", ".", "_prop_handler", "assumptions", ".", "_tell", "(", "fact", ",", "None", ")", "try", ":", "evaluate", "=", "handler_map", "[", "fact", "]", "except", "KeyError", ":", "pass", "else", ":", "a", "=", "evaluate", "(", "obj", ")", "if", "(", "a", "is", "not", "None", ")", ":", "assumptions", ".", "deduce_all_facts", "(", "(", "(", "fact", ",", "a", ")", ",", ")", ")", "return", "a", "prereq", "=", "list", "(", "_assume_rules", ".", "prereq", "[", "fact", "]", ")", "shuffle", "(", "prereq", ")", "for", "pk", "in", "prereq", ":", "if", "(", "pk", "in", "assumptions", ")", ":", "continue", "if", "(", "pk", "in", "handler_map", ")", ":", "_ask", "(", "pk", ",", "obj", ")", "ret_val", "=", "assumptions", ".", "get", "(", "fact", ")", "if", "(", "ret_val", "is", "not", "None", ")", ":", "return", "ret_val", "return", "None" ]
find the truth value for a property of an object .
train
false
42,486
def get_scale_docs(): docs = [] for name in get_scale_names(): scale_class = _scale_mapping[name] docs.append((" '%s'" % name)) docs.append('') class_docs = dedent(scale_class.__init__.__doc__) class_docs = ''.join([(' %s\n' % x) for x in class_docs.split('\n')]) docs.append(class_docs) docs.append('') return '\n'.join(docs)
[ "def", "get_scale_docs", "(", ")", ":", "docs", "=", "[", "]", "for", "name", "in", "get_scale_names", "(", ")", ":", "scale_class", "=", "_scale_mapping", "[", "name", "]", "docs", ".", "append", "(", "(", "\" '%s'\"", "%", "name", ")", ")", "docs", ".", "append", "(", "''", ")", "class_docs", "=", "dedent", "(", "scale_class", ".", "__init__", ".", "__doc__", ")", "class_docs", "=", "''", ".", "join", "(", "[", "(", "' %s\\n'", "%", "x", ")", "for", "x", "in", "class_docs", ".", "split", "(", "'\\n'", ")", "]", ")", "docs", ".", "append", "(", "class_docs", ")", "docs", ".", "append", "(", "''", ")", "return", "'\\n'", ".", "join", "(", "docs", ")" ]
helper function for generating docstrings related to scales .
train
false
42,487
def imread(fname, dtype=None, img_num=None, **kwargs): if isinstance(fname, string_types): with open(fname, 'rb') as f: im = Image.open(f) return pil_to_ndarray(im, dtype=dtype, img_num=img_num) else: im = Image.open(fname) return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
[ "def", "imread", "(", "fname", ",", "dtype", "=", "None", ",", "img_num", "=", "None", ",", "**", "kwargs", ")", ":", "if", "isinstance", "(", "fname", ",", "string_types", ")", ":", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "f", ":", "im", "=", "Image", ".", "open", "(", "f", ")", "return", "pil_to_ndarray", "(", "im", ",", "dtype", "=", "dtype", ",", "img_num", "=", "img_num", ")", "else", ":", "im", "=", "Image", ".", "open", "(", "fname", ")", "return", "pil_to_ndarray", "(", "im", ",", "dtype", "=", "dtype", ",", "img_num", "=", "img_num", ")" ]
return image file in *fname* as :class:numpy.ndarray .
train
false
42,488
def gen_items(n, fmt, obj): if (n == 0): return gen_item(fmt, obj) lst = ([0] * n) for i in range(n): lst[i] = gen_item(fmt, obj) return lst
[ "def", "gen_items", "(", "n", ",", "fmt", ",", "obj", ")", ":", "if", "(", "n", "==", "0", ")", ":", "return", "gen_item", "(", "fmt", ",", "obj", ")", "lst", "=", "(", "[", "0", "]", "*", "n", ")", "for", "i", "in", "range", "(", "n", ")", ":", "lst", "[", "i", "]", "=", "gen_item", "(", "fmt", ",", "obj", ")", "return", "lst" ]
return a list of random items .
train
false
42,489
def get_schema_documents(models, default_namespace=None): if (default_namespace is None): default_namespace = models[0].get_namespace() fake_app = FakeApplication() fake_app.tns = default_namespace fake_app.services = [] interface = Interface(fake_app) for m in models: m.resolve_namespace(m, default_namespace) interface.add_class(m) interface.populate_interface(fake_app) document = XmlSchema(interface) document.build_interface_document() return document.get_interface_document()
[ "def", "get_schema_documents", "(", "models", ",", "default_namespace", "=", "None", ")", ":", "if", "(", "default_namespace", "is", "None", ")", ":", "default_namespace", "=", "models", "[", "0", "]", ".", "get_namespace", "(", ")", "fake_app", "=", "FakeApplication", "(", ")", "fake_app", ".", "tns", "=", "default_namespace", "fake_app", ".", "services", "=", "[", "]", "interface", "=", "Interface", "(", "fake_app", ")", "for", "m", "in", "models", ":", "m", ".", "resolve_namespace", "(", "m", ",", "default_namespace", ")", "interface", ".", "add_class", "(", "m", ")", "interface", ".", "populate_interface", "(", "fake_app", ")", "document", "=", "XmlSchema", "(", "interface", ")", "document", ".", "build_interface_document", "(", ")", "return", "document", ".", "get_interface_document", "(", ")" ]
returns the schema documents in a dict whose keys are namespace prefixes and values are element objects .
train
false
42,490
def _inside_contour(pos, contour): npos = len(pos) (x, y) = pos[:, :2].T check_mask = np.ones(npos, dtype=bool) check_mask[((((x < np.min(x)) | (y < np.min(y))) | (x > np.max(x))) | (y > np.max(y)))] = False critval = 0.1 sel = np.where(check_mask)[0] for this_sel in sel: contourx = (contour[:, 0] - pos[(this_sel, 0)]) contoury = (contour[:, 1] - pos[(this_sel, 1)]) angle = np.arctan2(contoury, contourx) angle = np.unwrap(angle) total = np.sum(np.diff(angle)) check_mask[this_sel] = (np.abs(total) > critval) return check_mask
[ "def", "_inside_contour", "(", "pos", ",", "contour", ")", ":", "npos", "=", "len", "(", "pos", ")", "(", "x", ",", "y", ")", "=", "pos", "[", ":", ",", ":", "2", "]", ".", "T", "check_mask", "=", "np", ".", "ones", "(", "npos", ",", "dtype", "=", "bool", ")", "check_mask", "[", "(", "(", "(", "(", "x", "<", "np", ".", "min", "(", "x", ")", ")", "|", "(", "y", "<", "np", ".", "min", "(", "y", ")", ")", ")", "|", "(", "x", ">", "np", ".", "max", "(", "x", ")", ")", ")", "|", "(", "y", ">", "np", ".", "max", "(", "y", ")", ")", ")", "]", "=", "False", "critval", "=", "0.1", "sel", "=", "np", ".", "where", "(", "check_mask", ")", "[", "0", "]", "for", "this_sel", "in", "sel", ":", "contourx", "=", "(", "contour", "[", ":", ",", "0", "]", "-", "pos", "[", "(", "this_sel", ",", "0", ")", "]", ")", "contoury", "=", "(", "contour", "[", ":", ",", "1", "]", "-", "pos", "[", "(", "this_sel", ",", "1", ")", "]", ")", "angle", "=", "np", ".", "arctan2", "(", "contoury", ",", "contourx", ")", "angle", "=", "np", ".", "unwrap", "(", "angle", ")", "total", "=", "np", ".", "sum", "(", "np", ".", "diff", "(", "angle", ")", ")", "check_mask", "[", "this_sel", "]", "=", "(", "np", ".", "abs", "(", "total", ")", ">", "critval", ")", "return", "check_mask" ]
check if points are inside a contour .
train
false
42,492
def memorized_ttinfo(*args): try: return _ttinfo_cache[args] except KeyError: ttinfo = (memorized_timedelta(args[0]), memorized_timedelta(args[1]), args[2]) _ttinfo_cache[args] = ttinfo return ttinfo
[ "def", "memorized_ttinfo", "(", "*", "args", ")", ":", "try", ":", "return", "_ttinfo_cache", "[", "args", "]", "except", "KeyError", ":", "ttinfo", "=", "(", "memorized_timedelta", "(", "args", "[", "0", "]", ")", ",", "memorized_timedelta", "(", "args", "[", "1", "]", ")", ",", "args", "[", "2", "]", ")", "_ttinfo_cache", "[", "args", "]", "=", "ttinfo", "return", "ttinfo" ]
create only one instance of each distinct tuple .
train
true
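A sketch of the caching behaviour, assuming the pytz-style module context (the module-level _ttinfo_cache dict and the memorized_timedelta helper) is present:

    # Equal argument tuples yield the *same* ttinfo object, not equal copies.
    a = memorized_ttinfo(3600, 0, 'CET')
    b = memorized_ttinfo(3600, 0, 'CET')
    assert a is b  # served from _ttinfo_cache on the second call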
42,493
def worker_get(context, **filters): query = _worker_query(context, **filters) worker = (query.first() if query else None) if (not worker): raise exception.WorkerNotFound(**filters) return worker
[ "def", "worker_get", "(", "context", ",", "**", "filters", ")", ":", "query", "=", "_worker_query", "(", "context", ",", "**", "filters", ")", "worker", "=", "(", "query", ".", "first", "(", ")", "if", "query", "else", "None", ")", "if", "(", "not", "worker", ")", ":", "raise", "exception", ".", "WorkerNotFound", "(", "**", "filters", ")", "return", "worker" ]
get a worker or raise exception if it does not exist .
train
false
42,495
def get_systemmap(): map = ('/boot/System.map-%s' % os.uname()[2]) if os.path.isfile(map): return map map = ('/lib/modules/%s/build/System.map' % os.uname()[2]) if os.path.isfile(map): return map return None
[ "def", "get_systemmap", "(", ")", ":", "map", "=", "(", "'/boot/System.map-%s'", "%", "os", ".", "uname", "(", ")", "[", "2", "]", ")", "if", "os", ".", "path", ".", "isfile", "(", "map", ")", ":", "return", "map", "map", "=", "(", "'/lib/modules/%s/build/System.map'", "%", "os", ".", "uname", "(", ")", "[", "2", "]", ")", "if", "os", ".", "path", ".", "isfile", "(", "map", ")", ":", "return", "map", "return", "None" ]
return the full path to system.map for the running kernel .
train
false
42,496
def variables(i, o): return variables_and_orphans(i, o)[0]
[ "def", "variables", "(", "i", ",", "o", ")", ":", "return", "variables_and_orphans", "(", "i", ",", "o", ")", "[", "0", "]" ]
extracts list of variables within input and output nodes via dfs traversal . parameters : i - list of input variables .
train
false
42,497
def pages_to_show(paginator, page, pages_wanted=None, max_pages_wanted=9): page = int(page) page_precedence_order = [page, 1, paginator.num_pages, (page + 1), (page - 1), (page + 2), (page - 2), 2, (paginator.num_pages - 1)] if (pages_wanted is None): pages_wanted = [] pages_wanted = (set(pages_wanted) or set(page_precedence_order[:max_pages_wanted])) pages_to_show = set(paginator.page_range).intersection(pages_wanted) pages_to_show = sorted(pages_to_show) skip_pages = [x[1] for x in zip(pages_to_show[:(-1)], pages_to_show[1:]) if ((x[1] - x[0]) != 1)] for i in skip_pages: pages_to_show.insert(pages_to_show.index(i), (-1)) return pages_to_show
[ "def", "pages_to_show", "(", "paginator", ",", "page", ",", "pages_wanted", "=", "None", ",", "max_pages_wanted", "=", "9", ")", ":", "page", "=", "int", "(", "page", ")", "page_precedence_order", "=", "[", "page", ",", "1", ",", "paginator", ".", "num_pages", ",", "(", "page", "+", "1", ")", ",", "(", "page", "-", "1", ")", ",", "(", "page", "+", "2", ")", ",", "(", "page", "-", "2", ")", ",", "2", ",", "(", "paginator", ".", "num_pages", "-", "1", ")", "]", "if", "(", "pages_wanted", "is", "None", ")", ":", "pages_wanted", "=", "[", "]", "pages_wanted", "=", "(", "set", "(", "pages_wanted", ")", "or", "set", "(", "page_precedence_order", "[", ":", "max_pages_wanted", "]", ")", ")", "pages_to_show", "=", "set", "(", "paginator", ".", "page_range", ")", ".", "intersection", "(", "pages_wanted", ")", "pages_to_show", "=", "sorted", "(", "pages_to_show", ")", "skip_pages", "=", "[", "x", "[", "1", "]", "for", "x", "in", "zip", "(", "pages_to_show", "[", ":", "(", "-", "1", ")", "]", ",", "pages_to_show", "[", "1", ":", "]", ")", "if", "(", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ")", "!=", "1", ")", "]", "for", "i", "in", "skip_pages", ":", "pages_to_show", ".", "insert", "(", "pages_to_show", ".", "index", "(", "i", ")", ",", "(", "-", "1", ")", ")", "return", "pages_to_show" ]
function to select the pages to show in a pagination control : the current page and its neighbours , plus the first and last pages , with -1 marking skipped ranges .
train
false
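A worked example with Django's Paginator, assuming that import; the -1 entries mark gaps to render as ellipses:

    from django.core.paginator import Paginator

    paginator = Paginator(range(100), 10)  # 10 pages of 10 items
    print(pages_to_show(paginator, 5))
    # [1, 2, 3, 4, 5, 6, 7, -1, 9, 10]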
42,498
@logic.validate(logic.schema.default_autocomplete_schema) def user_autocomplete(context, data_dict): model = context['model'] user = context['user'] _check_access('user_autocomplete', context, data_dict) q = data_dict['q'] limit = data_dict.get('limit', 20) query = model.User.search(q) query = query.filter((model.User.state != model.State.DELETED)) query = query.limit(limit) user_list = [] for user in query.all(): result_dict = {} for k in ['id', 'name', 'fullname']: result_dict[k] = getattr(user, k) user_list.append(result_dict) return user_list
[ "@", "logic", ".", "validate", "(", "logic", ".", "schema", ".", "default_autocomplete_schema", ")", "def", "user_autocomplete", "(", "context", ",", "data_dict", ")", ":", "model", "=", "context", "[", "'model'", "]", "user", "=", "context", "[", "'user'", "]", "_check_access", "(", "'user_autocomplete'", ",", "context", ",", "data_dict", ")", "q", "=", "data_dict", "[", "'q'", "]", "limit", "=", "data_dict", ".", "get", "(", "'limit'", ",", "20", ")", "query", "=", "model", ".", "User", ".", "search", "(", "q", ")", "query", "=", "query", ".", "filter", "(", "(", "model", ".", "User", ".", "state", "!=", "model", ".", "State", ".", "DELETED", ")", ")", "query", "=", "query", ".", "limit", "(", "limit", ")", "user_list", "=", "[", "]", "for", "user", "in", "query", ".", "all", "(", ")", ":", "result_dict", "=", "{", "}", "for", "k", "in", "[", "'id'", ",", "'name'", ",", "'fullname'", "]", ":", "result_dict", "[", "k", "]", "=", "getattr", "(", "user", ",", "k", ")", "user_list", ".", "append", "(", "result_dict", ")", "return", "user_list" ]
return a list of user names that contain a string .
train
false
42,499
def _iter(root, term): if (sys.version_info < (2, 7)): return root.getiterator(term) else: return root.iter(term)
[ "def", "_iter", "(", "root", ",", "term", ")", ":", "if", "(", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ")", ":", "return", "root", ".", "getiterator", "(", "term", ")", "else", ":", "return", "root", ".", "iter", "(", "term", ")" ]
checks for python < 2.7 , using getiterator there and iter otherwise .
train
true
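A small sketch of the compatibility shim above using the standard library's ElementTree:

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<root><item/><sub><item/></sub></root>')
    # iter()/getiterator() both walk the subtree recursively.
    print(len(list(_iter(root, 'item'))))  # 2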
42,500
def cartcat(s_list1, s_list2): prod = itertools.product(s_list1, s_list2) return map(partial(apply, operator.add), prod)
[ "def", "cartcat", "(", "s_list1", ",", "s_list2", ")", ":", "prod", "=", "itertools", ".", "product", "(", "s_list1", ",", "s_list2", ")", "return", "map", "(", "partial", "(", "apply", ",", "operator", ".", "add", ")", ",", "prod", ")" ]
given two lists of strings , return the concatenation of every pair in their cartesian product .
train
false
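A sketch of the helper above; note it is Python 2 code (apply was removed and map became lazy in Python 3):

    print(cartcat(['a', 'b'], ['x', 'y']))
    # ['ax', 'ay', 'bx', 'by'] : every pairwise concatenation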
42,501
def rfftfreq(n, d=1.0): if ((not isinstance(n, int)) or (n < 0)): raise ValueError(('n = %s is not valid. n must be a nonnegative integer.' % n)) return ((arange(1, (n + 1), dtype=int) // 2) / float((n * d)))
[ "def", "rfftfreq", "(", "n", ",", "d", "=", "1.0", ")", ":", "if", "(", "(", "not", "isinstance", "(", "n", ",", "int", ")", ")", "or", "(", "n", "<", "0", ")", ")", ":", "raise", "ValueError", "(", "(", "'n = %s is not valid. n must be a nonnegative integer.'", "%", "n", ")", ")", "return", "(", "(", "arange", "(", "1", ",", "(", "n", "+", "1", ")", ",", "dtype", "=", "int", ")", "//", "2", ")", "/", "float", "(", "(", "n", "*", "d", ")", ")", ")" ]
dft sample frequencies .
train
false
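A worked example of the scipy.fftpack-style frequency layout this returns (each nonzero frequency appears twice, once for the real and once for the imaginary coefficient):

    # n=8 samples at spacing d=0.1 -> frequencies up to the Nyquist rate 5.0
    print(rfftfreq(8, d=0.1))
    # [0.    1.25  1.25  2.5   2.5   3.75  3.75  5.  ]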
42,502
def cm2deg(cm, monitor, correctFlat=False): if (not isinstance(monitor, monitors.Monitor)): msg = 'cm2deg requires a monitors.Monitor object as the second argument but received %s' raise ValueError((msg % str(type(monitor)))) dist = monitor.getDistance() if (dist is None): msg = 'Monitor %s has no known distance (SEE MONITOR CENTER)' raise ValueError((msg % monitor.name)) if correctFlat: return np.arctan(np.radians((cm / dist))) else: return (cm / (dist * 0.017455))
[ "def", "cm2deg", "(", "cm", ",", "monitor", ",", "correctFlat", "=", "False", ")", ":", "if", "(", "not", "isinstance", "(", "monitor", ",", "monitors", ".", "Monitor", ")", ")", ":", "msg", "=", "'cm2deg requires a monitors.Monitor object as the second argument but received %s'", "raise", "ValueError", "(", "(", "msg", "%", "str", "(", "type", "(", "monitor", ")", ")", ")", ")", "dist", "=", "monitor", ".", "getDistance", "(", ")", "if", "(", "dist", "is", "None", ")", ":", "msg", "=", "'Monitor %s has no known distance (SEE MONITOR CENTER)'", "raise", "ValueError", "(", "(", "msg", "%", "monitor", ".", "name", ")", ")", "if", "correctFlat", ":", "return", "np", ".", "arctan", "(", "np", ".", "radians", "(", "(", "cm", "/", "dist", ")", ")", ")", "else", ":", "return", "(", "cm", "/", "(", "dist", "*", "0.017455", ")", ")" ]
convert size in cm to size in degrees for a given monitor object .
train
false
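A quick sanity check of the conversion, assuming a PsychoPy Monitor object; at a 57 cm viewing distance, 1 cm subtends roughly 1 degree:

    from psychopy import monitors

    mon = monitors.Monitor('testMonitor')  # hypothetical calibration name
    mon.setDistance(57.0)                  # viewing distance in cm
    print(cm2deg(1.0, mon))                # ~1.005 degrees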
42,505
def fetch_gcs(uuid): fetch_url = constants.UrlfetchTestIdentifiers.GCS_URL return fetch(fetch_url)
[ "def", "fetch_gcs", "(", "uuid", ")", ":", "fetch_url", "=", "constants", ".", "UrlfetchTestIdentifiers", ".", "GCS_URL", "return", "fetch", "(", "fetch_url", ")" ]
fetches an image from google cloud storage .
train
false
42,509
def from64(number): if (not ((type(number) is types.LongType) or (type(number) is types.IntType))): raise TypeError('You must pass a long or an int') if (48 <= number <= 57): return (number - 48) if (65 <= number <= 90): return (number - 55) if (97 <= number <= 122): return (number - 61) if (number == 45): return 62 if (number == 95): return 63 raise ValueError(('Invalid Base64 value: %i' % number))
[ "def", "from64", "(", "number", ")", ":", "if", "(", "not", "(", "(", "type", "(", "number", ")", "is", "types", ".", "LongType", ")", "or", "(", "type", "(", "number", ")", "is", "types", ".", "IntType", ")", ")", ")", ":", "raise", "TypeError", "(", "'You must pass a long or an int'", ")", "if", "(", "48", "<=", "number", "<=", "57", ")", ":", "return", "(", "number", "-", "48", ")", "if", "(", "65", "<=", "number", "<=", "90", ")", ":", "return", "(", "number", "-", "55", ")", "if", "(", "97", "<=", "number", "<=", "122", ")", ":", "return", "(", "number", "-", "61", ")", "if", "(", "number", "==", "45", ")", ":", "return", "62", "if", "(", "number", "==", "95", ")", ":", "return", "63", "raise", "ValueError", "(", "(", "'Invalid Base64 value: %i'", "%", "number", ")", ")" ]
converts an ordinal character value in the range of 0-9 , A-Z , a-z , - or _ to a number in the range of 0-63 .
train
false
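The mapping the helper above implements, shown on a few characters (Python 2 code, given the types.LongType check):

    print(from64(ord('0')))  # 0
    print(from64(ord('A')))  # 10
    print(from64(ord('a')))  # 36
    print(from64(ord('_')))  # 63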
42,510
def create_course_enrollment(username, course_id, mode, is_active): course_key = CourseKey.from_string(course_id) try: user = User.objects.get(username=username) except User.DoesNotExist: msg = u"Not user with username '{username}' found.".format(username=username) log.warn(msg) raise UserNotFoundError(msg) try: enrollment = CourseEnrollment.enroll(user, course_key, check_access=True) return _update_enrollment(enrollment, is_active=is_active, mode=mode) except NonExistentCourseError as err: raise CourseNotFoundError(err.message) except EnrollmentClosedError as err: raise CourseEnrollmentClosedError(err.message) except CourseFullError as err: raise CourseEnrollmentFullError(err.message) except AlreadyEnrolledError as err: enrollment = get_course_enrollment(username, course_id) raise CourseEnrollmentExistsError(err.message, enrollment)
[ "def", "create_course_enrollment", "(", "username", ",", "course_id", ",", "mode", ",", "is_active", ")", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "try", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "except", "User", ".", "DoesNotExist", ":", "msg", "=", "u\"Not user with username '{username}' found.\"", ".", "format", "(", "username", "=", "username", ")", "log", ".", "warn", "(", "msg", ")", "raise", "UserNotFoundError", "(", "msg", ")", "try", ":", "enrollment", "=", "CourseEnrollment", ".", "enroll", "(", "user", ",", "course_key", ",", "check_access", "=", "True", ")", "return", "_update_enrollment", "(", "enrollment", ",", "is_active", "=", "is_active", ",", "mode", "=", "mode", ")", "except", "NonExistentCourseError", "as", "err", ":", "raise", "CourseNotFoundError", "(", "err", ".", "message", ")", "except", "EnrollmentClosedError", "as", "err", ":", "raise", "CourseEnrollmentClosedError", "(", "err", ".", "message", ")", "except", "CourseFullError", "as", "err", ":", "raise", "CourseEnrollmentFullError", "(", "err", ".", "message", ")", "except", "AlreadyEnrolledError", "as", "err", ":", "enrollment", "=", "get_course_enrollment", "(", "username", ",", "course_id", ")", "raise", "CourseEnrollmentExistsError", "(", "err", ".", "message", ",", "enrollment", ")" ]
create a new course enrollment for the given user .
train
false
42,511
def modify_profiler(id, **data): models.Profiler.smart_get(id).update_object(data)
[ "def", "modify_profiler", "(", "id", ",", "**", "data", ")", ":", "models", ".", "Profiler", ".", "smart_get", "(", "id", ")", ".", "update_object", "(", "data", ")" ]
modify profiler .
train
false
42,512
def _handle_sort_key(model_name, sort_key=None): sort_keys_extra = {'meter': ['user_id', 'project_id'], 'resource': ['user_id', 'project_id', 'timestamp']} sort_keys = sort_keys_extra[model_name] if (not sort_key): return sort_keys try: sort_keys.remove(sort_key) except ValueError: pass finally: sort_keys.insert(0, sort_key) return sort_keys
[ "def", "_handle_sort_key", "(", "model_name", ",", "sort_key", "=", "None", ")", ":", "sort_keys_extra", "=", "{", "'meter'", ":", "[", "'user_id'", ",", "'project_id'", "]", ",", "'resource'", ":", "[", "'user_id'", ",", "'project_id'", ",", "'timestamp'", "]", "}", "sort_keys", "=", "sort_keys_extra", "[", "model_name", "]", "if", "(", "not", "sort_key", ")", ":", "return", "sort_keys", "try", ":", "sort_keys", ".", "remove", "(", "sort_key", ")", "except", "ValueError", ":", "pass", "finally", ":", "sort_keys", ".", "insert", "(", "0", ",", "sort_key", ")", "return", "sort_keys" ]
generate sort keys according to the sort key passed in by the user , placing it first .
train
false
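A sketch of how the requested key is promoted to the front of the model's default key list:

    print(_handle_sort_key('meter'))                  # ['user_id', 'project_id']
    print(_handle_sort_key('meter', 'project_id'))    # ['project_id', 'user_id']
    print(_handle_sort_key('resource', 'timestamp'))  # ['timestamp', 'user_id', 'project_id']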
42,513
def cmp_structure(llst, rlst, lslices, rslices): lshape = slice_shape(llst, lslices) rshape = slice_shape(rlst, rslices) if (len(lshape) != len(rshape)): return (-1) for i in range(len(lshape)): if (lshape[i] != rshape[i]): return (-1) if (lshape[i] == 0): return 0 return 0
[ "def", "cmp_structure", "(", "llst", ",", "rlst", ",", "lslices", ",", "rslices", ")", ":", "lshape", "=", "slice_shape", "(", "llst", ",", "lslices", ")", "rshape", "=", "slice_shape", "(", "rlst", ",", "rslices", ")", "if", "(", "len", "(", "lshape", ")", "!=", "len", "(", "rshape", ")", ")", ":", "return", "(", "-", "1", ")", "for", "i", "in", "range", "(", "len", "(", "lshape", ")", ")", ":", "if", "(", "lshape", "[", "i", "]", "!=", "rshape", "[", "i", "]", ")", ":", "return", "(", "-", "1", ")", "if", "(", "lshape", "[", "i", "]", "==", "0", ")", ":", "return", "0", "return", "0" ]
compare the structure of llst[lslices] and rlst[rslices] .
train
false
42,515
def _ll_nbt(y, X, beta, alph, C=0): Q = 0 mu = np.exp(np.dot(X, beta)) size = ((1 / alph) * (mu ** Q)) prob = (size / (size + mu)) ll = (nbinom.logpmf(y, size, prob) - np.log((1 - nbinom.cdf(C, size, prob)))) return ll
[ "def", "_ll_nbt", "(", "y", ",", "X", ",", "beta", ",", "alph", ",", "C", "=", "0", ")", ":", "Q", "=", "0", "mu", "=", "np", ".", "exp", "(", "np", ".", "dot", "(", "X", ",", "beta", ")", ")", "size", "=", "(", "(", "1", "/", "alph", ")", "*", "(", "mu", "**", "Q", ")", ")", "prob", "=", "(", "size", "/", "(", "size", "+", "mu", ")", ")", "ll", "=", "(", "nbinom", ".", "logpmf", "(", "y", ",", "size", ",", "prob", ")", "-", "np", ".", "log", "(", "(", "1", "-", "nbinom", ".", "cdf", "(", "C", ",", "size", ",", "prob", ")", ")", ")", ")", "return", "ll" ]
negative binomial truncated densities for count models .
train
false
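A minimal sketch evaluating the truncated log-likelihood on toy data, assuming numpy and scipy.stats.nbinom are imported as in the snippet's module:

    import numpy as np
    from scipy.stats import nbinom

    y = np.array([1, 2, 3])   # zero-truncated counts (C=0 by default)
    X = np.ones((3, 1))       # intercept-only design matrix
    beta = np.array([0.5])
    print(_ll_nbt(y, X, beta, alph=1.0))  # per-observation log-likelihoods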
42,516
def get_daterange_or_404(start, end): dates = DateForm(data={'start': start, 'end': end}) if (not dates.is_valid()): logger.info('Dates parsed were not valid.') raise http.Http404 return (dates.cleaned_data['start'], dates.cleaned_data['end'])
[ "def", "get_daterange_or_404", "(", "start", ",", "end", ")", ":", "dates", "=", "DateForm", "(", "data", "=", "{", "'start'", ":", "start", ",", "'end'", ":", "end", "}", ")", "if", "(", "not", "dates", ".", "is_valid", "(", ")", ")", ":", "logger", ".", "info", "(", "'Dates parsed were not valid.'", ")", "raise", "http", ".", "Http404", "return", "(", "dates", ".", "cleaned_data", "[", "'start'", "]", ",", "dates", ".", "cleaned_data", "[", "'end'", "]", ")" ]
parse and validate a pair of yyyymmdd date strings .
train
false
42,517
def flag_default(name): return constants.CLI_DEFAULTS[name]
[ "def", "flag_default", "(", "name", ")", ":", "return", "constants", ".", "CLI_DEFAULTS", "[", "name", "]" ]
default value for cli flag .
train
false
42,518
def to_utc(time_str): return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
[ "def", "to_utc", "(", "time_str", ")", ":", "return", "pd", ".", "Timestamp", "(", "time_str", ",", "tz", "=", "'US/Eastern'", ")", ".", "tz_convert", "(", "'UTC'", ")" ]
convert a string in us/eastern time to utc .
train
false
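The conversion in action; in July, US/Eastern is UTC-4, so the wall-clock time shifts forward four hours:

    print(to_utc('2014-07-01 09:30'))
    # Timestamp('2014-07-01 13:30:00+0000', tz='UTC')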
42,521
def find_binary_iter(name, path_to_bin=None, env_vars=(), searchpath=(), binary_names=None, url=None, verbose=False): for file in find_file_iter((path_to_bin or name), env_vars, searchpath, binary_names, url, verbose): (yield file)
[ "def", "find_binary_iter", "(", "name", ",", "path_to_bin", "=", "None", ",", "env_vars", "=", "(", ")", ",", "searchpath", "=", "(", ")", ",", "binary_names", "=", "None", ",", "url", "=", "None", ",", "verbose", "=", "False", ")", ":", "for", "file", "in", "find_file_iter", "(", "(", "path_to_bin", "or", "name", ")", ",", "env_vars", ",", "searchpath", ",", "binary_names", ",", "url", ",", "verbose", ")", ":", "(", "yield", "file", ")" ]
search for a file to be used by nltk .
train
false
42,522
def permalink(func): from django.core.urlresolvers import reverse def inner(*args, **kwargs): bits = func(*args, **kwargs) return reverse(bits[0], None, *bits[1:3]) return inner
[ "def", "permalink", "(", "func", ")", ":", "from", "django", ".", "core", ".", "urlresolvers", "import", "reverse", "def", "inner", "(", "*", "args", ",", "**", "kwargs", ")", ":", "bits", "=", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "reverse", "(", "bits", "[", "0", "]", ",", "None", ",", "*", "bits", "[", "1", ":", "3", "]", ")", "return", "inner" ]
decorator that calls urlresolvers.reverse to return a url using parameters returned by the decorated function .
train
false
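The classic usage pattern for this decorator on a hypothetical Django model; the decorated method returns (view_name, args, kwargs) and gets back a resolved url. The model and the 'blog_post_detail' url pattern name are assumptions for illustration:

    from django.db import models

    class Post(models.Model):
        slug = models.SlugField()

        @permalink
        def get_absolute_url(self):
            # 'blog_post_detail' is an assumed url pattern name
            return ('blog_post_detail', (), {'slug': self.slug})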
42,523
def base_boxes(): return sorted(list(set([name for (name, provider) in _box_list()])))
[ "def", "base_boxes", "(", ")", ":", "return", "sorted", "(", "list", "(", "set", "(", "[", "name", "for", "(", "name", ",", "provider", ")", "in", "_box_list", "(", ")", "]", ")", ")", ")" ]
get the list of vagrant base boxes .
train
true
42,524
def get_cached_discussion_id_map(course, discussion_ids, user): try: entries = [] for discussion_id in discussion_ids: key = get_cached_discussion_key(course.id, discussion_id) if (not key): continue xblock = modulestore().get_item(key) if (not (has_required_keys(xblock) and has_access(user, 'load', xblock, course.id))): continue entries.append(get_discussion_id_map_entry(xblock)) return dict(entries) except DiscussionIdMapIsNotCached: return get_discussion_id_map(course, user)
[ "def", "get_cached_discussion_id_map", "(", "course", ",", "discussion_ids", ",", "user", ")", ":", "try", ":", "entries", "=", "[", "]", "for", "discussion_id", "in", "discussion_ids", ":", "key", "=", "get_cached_discussion_key", "(", "course", ".", "id", ",", "discussion_id", ")", "if", "(", "not", "key", ")", ":", "continue", "xblock", "=", "modulestore", "(", ")", ".", "get_item", "(", "key", ")", "if", "(", "not", "(", "has_required_keys", "(", "xblock", ")", "and", "has_access", "(", "user", ",", "'load'", ",", "xblock", ",", "course", ".", "id", ")", ")", ")", ":", "continue", "entries", ".", "append", "(", "get_discussion_id_map_entry", "(", "xblock", ")", ")", "return", "dict", "(", "entries", ")", "except", "DiscussionIdMapIsNotCached", ":", "return", "get_discussion_id_map", "(", "course", ",", "user", ")" ]
returns a dict mapping discussion_ids to respective discussion xblock metadata if it is cached and visible to the user .
train
false
42,525
@pytest.mark.cmd def test_pootle_version(capfd): call(['pootle', '--version']) (out, err) = capfd.readouterr() assert ('Pootle' in err) assert ('Django' in err) assert ('Translate Toolkit' in err)
[ "@", "pytest", ".", "mark", ".", "cmd", "def", "test_pootle_version", "(", "capfd", ")", ":", "call", "(", "[", "'pootle'", ",", "'--version'", "]", ")", "(", "out", ",", "err", ")", "=", "capfd", ".", "readouterr", "(", ")", "assert", "(", "'Pootle'", "in", "err", ")", "assert", "(", "'Django'", "in", "err", ")", "assert", "(", "'Translate Toolkit'", "in", "err", ")" ]
display pootle version info .
train
false
42,527
def find_external_links(url, page): for match in REL.finditer(page): (tag, rel) = match.groups() rels = map(str.strip, rel.lower().split(',')) if (('homepage' in rels) or ('download' in rels)): for match in HREF.finditer(tag): (yield urlparse.urljoin(url, htmldecode(match.group(1)))) for tag in ('<th>Home Page', '<th>Download URL'): pos = page.find(tag) if (pos != (-1)): match = HREF.search(page, pos) if match: (yield urlparse.urljoin(url, htmldecode(match.group(1))))
[ "def", "find_external_links", "(", "url", ",", "page", ")", ":", "for", "match", "in", "REL", ".", "finditer", "(", "page", ")", ":", "(", "tag", ",", "rel", ")", "=", "match", ".", "groups", "(", ")", "rels", "=", "map", "(", "str", ".", "strip", ",", "rel", ".", "lower", "(", ")", ".", "split", "(", "','", ")", ")", "if", "(", "(", "'homepage'", "in", "rels", ")", "or", "(", "'download'", "in", "rels", ")", ")", ":", "for", "match", "in", "HREF", ".", "finditer", "(", "tag", ")", ":", "(", "yield", "urlparse", ".", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")", ")", "for", "tag", "in", "(", "'<th>Home Page'", ",", "'<th>Download URL'", ")", ":", "pos", "=", "page", ".", "find", "(", "tag", ")", "if", "(", "pos", "!=", "(", "-", "1", ")", ")", ":", "match", "=", "HREF", ".", "search", "(", "page", ",", "pos", ")", "if", "match", ":", "(", "yield", "urlparse", ".", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")", ")" ]
find rel="homepage" and rel="download" links in page .
train
true
42,529
def featured_map_info(request, site): map_obj = resolve_object(request, Map, {'featuredurl': site}, permission='base.view_resourcebase', permission_msg=_PERMISSION_MSG_VIEW) return map_detail(request, str(map_obj.id))
[ "def", "featured_map_info", "(", "request", ",", "site", ")", ":", "map_obj", "=", "resolve_object", "(", "request", ",", "Map", ",", "{", "'featuredurl'", ":", "site", "}", ",", "permission", "=", "'base.view_resourcebase'", ",", "permission_msg", "=", "_PERMISSION_MSG_VIEW", ")", "return", "map_detail", "(", "request", ",", "str", "(", "map_obj", ".", "id", ")", ")" ]
main view for map resources .
train
false