Dataset columns (name, dtype, observed range):
  id_within_dataset     int64    1 to 55.5k
  snippet               string   lengths 19 to 14.2k
  tokens                list     lengths 6 to 1.63k
  nl                    string   lengths 6 to 352
  split_within_dataset  string   1 class
  is_duplicated         bool     2 classes
7,275
def test_string_literals(): source = dedent("\n x = ur''' \n\n def foo():\n pass\n ") script = jedi.Script(dedent(source)) (script._get_module().end_pos == (6, 0)) assert script.completions()
[ "def", "test_string_literals", "(", ")", ":", "source", "=", "dedent", "(", "\"\\n x = ur''' \\n\\n def foo():\\n pass\\n \"", ")", "script", "=", "jedi", ".", "Script", "(", "dedent", "(", "source", ")", ")", "(", "script", ".", "_get_module", "(", ")", ".", "end_pos", "==", "(", "6", ",", "0", ")", ")", "assert", "script", ".", "completions", "(", ")" ]
simplified case of jedi-vim#377 .
train
false
7,276
def patch_exploration_search_document(exp_id, update): doc = search_services.get_document_from_index(exp_id, SEARCH_INDEX_EXPLORATIONS) doc.update(update) search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
[ "def", "patch_exploration_search_document", "(", "exp_id", ",", "update", ")", ":", "doc", "=", "search_services", ".", "get_document_from_index", "(", "exp_id", ",", "SEARCH_INDEX_EXPLORATIONS", ")", "doc", ".", "update", "(", "update", ")", "search_services", ".", "add_documents_to_index", "(", "[", "doc", "]", ",", "SEARCH_INDEX_EXPLORATIONS", ")" ]
patches an exploration's current search document .
train
false
7,277
def _handle_post(gs_stub, filename, headers): content_type = _ContentType(headers) token = gs_stub.post_start_creation(filename, headers) response_headers = {'location': ('https://storage.googleapis.com/%s?%s' % (filename, urllib.urlencode({'upload_id': token}))), 'content-type': content_type.value, 'content-length': 0} return _FakeUrlFetchResult(201, response_headers, '')
[ "def", "_handle_post", "(", "gs_stub", ",", "filename", ",", "headers", ")", ":", "content_type", "=", "_ContentType", "(", "headers", ")", "token", "=", "gs_stub", ".", "post_start_creation", "(", "filename", ",", "headers", ")", "response_headers", "=", "{", "'location'", ":", "(", "'https://storage.googleapis.com/%s?%s'", "%", "(", "filename", ",", "urllib", ".", "urlencode", "(", "{", "'upload_id'", ":", "token", "}", ")", ")", ")", ",", "'content-type'", ":", "content_type", ".", "value", ",", "'content-length'", ":", "0", "}", "return", "_FakeUrlFetchResult", "(", "201", ",", "response_headers", ",", "''", ")" ]
handle post that starts object creation .
train
false
7,278
def getsitepackages(): sitepackages = [] seen = set() for prefix in PREFIXES: if ((not prefix) or (prefix in seen)): continue seen.add(prefix) if (sys.platform in ('os2emx', 'riscos')): sitepackages.append(os.path.join(prefix, 'Lib', 'site-packages')) elif (os.sep == '/'): sitepackages.append(os.path.join(prefix, 'lib', ('python' + sys.version[:3]), 'site-packages')) sitepackages.append(os.path.join(prefix, 'lib', 'site-python')) else: sitepackages.append(prefix) sitepackages.append(os.path.join(prefix, 'lib', 'site-packages')) if (sys.platform == 'darwin'): from sysconfig import get_config_var framework = get_config_var('PYTHONFRAMEWORK') if framework: sitepackages.append(os.path.join('/Library', framework, sys.version[:3], 'site-packages')) return sitepackages
[ "def", "getsitepackages", "(", ")", ":", "sitepackages", "=", "[", "]", "seen", "=", "set", "(", ")", "for", "prefix", "in", "PREFIXES", ":", "if", "(", "(", "not", "prefix", ")", "or", "(", "prefix", "in", "seen", ")", ")", ":", "continue", "seen", ".", "add", "(", "prefix", ")", "if", "(", "sys", ".", "platform", "in", "(", "'os2emx'", ",", "'riscos'", ")", ")", ":", "sitepackages", ".", "append", "(", "os", ".", "path", ".", "join", "(", "prefix", ",", "'Lib'", ",", "'site-packages'", ")", ")", "elif", "(", "os", ".", "sep", "==", "'/'", ")", ":", "sitepackages", ".", "append", "(", "os", ".", "path", ".", "join", "(", "prefix", ",", "'lib'", ",", "(", "'python'", "+", "sys", ".", "version", "[", ":", "3", "]", ")", ",", "'site-packages'", ")", ")", "sitepackages", ".", "append", "(", "os", ".", "path", ".", "join", "(", "prefix", ",", "'lib'", ",", "'site-python'", ")", ")", "else", ":", "sitepackages", ".", "append", "(", "prefix", ")", "sitepackages", ".", "append", "(", "os", ".", "path", ".", "join", "(", "prefix", ",", "'lib'", ",", "'site-packages'", ")", ")", "if", "(", "sys", ".", "platform", "==", "'darwin'", ")", ":", "from", "sysconfig", "import", "get_config_var", "framework", "=", "get_config_var", "(", "'PYTHONFRAMEWORK'", ")", "if", "framework", ":", "sitepackages", ".", "append", "(", "os", ".", "path", ".", "join", "(", "'/Library'", ",", "framework", ",", "sys", ".", "version", "[", ":", "3", "]", ",", "'site-packages'", ")", ")", "return", "sitepackages" ]
returns a list containing all global site-packages directories .
train
true
7,280
def step_1a(w): if w.endswith('s'): if w.endswith('sses'): return w[:(-2)] if w.endswith('ies'): return (((len(w) == 4) and w[:(-1)]) or w[:(-2)]) if w.endswith(('us', 'ss')): return w if (find_vowel(w) < (len(w) - 2)): return w[:(-1)] return w
[ "def", "step_1a", "(", "w", ")", ":", "if", "w", ".", "endswith", "(", "'s'", ")", ":", "if", "w", ".", "endswith", "(", "'sses'", ")", ":", "return", "w", "[", ":", "(", "-", "2", ")", "]", "if", "w", ".", "endswith", "(", "'ies'", ")", ":", "return", "(", "(", "(", "len", "(", "w", ")", "==", "4", ")", "and", "w", "[", ":", "(", "-", "1", ")", "]", ")", "or", "w", "[", ":", "(", "-", "2", ")", "]", ")", "if", "w", ".", "endswith", "(", "(", "'us'", ",", "'ss'", ")", ")", ":", "return", "w", "if", "(", "find_vowel", "(", "w", ")", "<", "(", "len", "(", "w", ")", "-", "2", ")", ")", ":", "return", "w", "[", ":", "(", "-", "1", ")", "]", "return", "w" ]
step 1a handles -s suffixes .
train
false
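For readers skimming the snippet above, a runnable sketch of this stemmer step. The find_vowel helper is not defined in the snippet, so a hypothetical stand-in (index of the first vowel, or len(w) if none) is assumed:

def find_vowel(w):
    # hypothetical stand-in: index of the first vowel, len(w) if none
    for i, ch in enumerate(w):
        if ch in 'aeiou':
            return i
    return len(w)

def step_1a(w):
    # same logic as the dataset snippet, reindented
    if w.endswith('s'):
        if w.endswith('sses'):
            return w[:-2]
        if w.endswith('ies'):
            return (len(w) == 4 and w[:-1]) or w[:-2]
        if w.endswith(('us', 'ss')):
            return w
        if find_vowel(w) < len(w) - 2:
            return w[:-1]
    return w

print(step_1a('caresses'))  # 'caress'
print(step_1a('ties'))      # 'tie'
print(step_1a('cats'))      # 'cat'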
7,281
def forms(): from s3 import S3XForms if (request.env.request_method != 'GET'): raise HTTP(405, current.ERROR.BAD_METHOD) args = request.args if len(args): tablename = args[0] if ('.' in tablename): (tablename, extension) = tablename.split('.', 1) else: extension = 'xhtml' try: (prefix, name) = tablename.split('_', 1) except ValueError: raise HTTP(404, current.error.BAD_RESOURCE) method = [('xform.%s' % extension)] if (len(args) > 1): method.insert(0, args[1]) r = s3_request(prefix, name, args=method, extension=None) r.set_handler('xform', S3XForms) output = r() else: xforms = S3XForms.formlist() if (not xforms): raise HTTP(404, current.T('No XForms configured on this server')) formlist = TAG.forms() for (url, title) in xforms: formlist.append(TAG.form(title, _url=url)) response.headers['Content-Type'] = 'text/xml' response.view = 'xforms/formlist.xml' output = {'formlist': formlist} return output
[ "def", "forms", "(", ")", ":", "from", "s3", "import", "S3XForms", "if", "(", "request", ".", "env", ".", "request_method", "!=", "'GET'", ")", ":", "raise", "HTTP", "(", "405", ",", "current", ".", "ERROR", ".", "BAD_METHOD", ")", "args", "=", "request", ".", "args", "if", "len", "(", "args", ")", ":", "tablename", "=", "args", "[", "0", "]", "if", "(", "'.'", "in", "tablename", ")", ":", "(", "tablename", ",", "extension", ")", "=", "tablename", ".", "split", "(", "'.'", ",", "1", ")", "else", ":", "extension", "=", "'xhtml'", "try", ":", "(", "prefix", ",", "name", ")", "=", "tablename", ".", "split", "(", "'_'", ",", "1", ")", "except", "ValueError", ":", "raise", "HTTP", "(", "404", ",", "current", ".", "error", ".", "BAD_RESOURCE", ")", "method", "=", "[", "(", "'xform.%s'", "%", "extension", ")", "]", "if", "(", "len", "(", "args", ")", ">", "1", ")", ":", "method", ".", "insert", "(", "0", ",", "args", "[", "1", "]", ")", "r", "=", "s3_request", "(", "prefix", ",", "name", ",", "args", "=", "method", ",", "extension", "=", "None", ")", "r", ".", "set_handler", "(", "'xform'", ",", "S3XForms", ")", "output", "=", "r", "(", ")", "else", ":", "xforms", "=", "S3XForms", ".", "formlist", "(", ")", "if", "(", "not", "xforms", ")", ":", "raise", "HTTP", "(", "404", ",", "current", ".", "T", "(", "'No XForms configured on this server'", ")", ")", "formlist", "=", "TAG", ".", "forms", "(", ")", "for", "(", "url", ",", "title", ")", "in", "xforms", ":", "formlist", ".", "append", "(", "TAG", ".", "form", "(", "title", ",", "_url", "=", "url", ")", ")", "response", ".", "headers", "[", "'Content-Type'", "]", "=", "'text/xml'", "response", ".", "view", "=", "'xforms/formlist.xml'", "output", "=", "{", "'formlist'", ":", "formlist", "}", "return", "output" ]
controller to download a list of available forms .
train
false
7,282
def MailEntryFromString(xml_string): return atom.CreateClassFromXMLString(MailEntry, xml_string)
[ "def", "MailEntryFromString", "(", "xml_string", ")", ":", "return", "atom", ".", "CreateClassFromXMLString", "(", "MailEntry", ",", "xml_string", ")" ]
parse the mailentry from the xml definition .
train
false
7,283
def google_art(album): if (not (album.albumartist and album.album)): return search_string = ((album.albumartist + ',') + album.album).encode('utf-8') response = requests_session.get(GOOGLE_URL, params={'v': '1.0', 'q': search_string, 'start': '0'}) try: results = response.json() data = results['responseData'] dataInfo = data['results'] for myUrl in dataInfo: (yield myUrl['unescapedUrl']) except: log.debug(u'fetchart: error scraping art page') return
[ "def", "google_art", "(", "album", ")", ":", "if", "(", "not", "(", "album", ".", "albumartist", "and", "album", ".", "album", ")", ")", ":", "return", "search_string", "=", "(", "(", "album", ".", "albumartist", "+", "','", ")", "+", "album", ".", "album", ")", ".", "encode", "(", "'utf-8'", ")", "response", "=", "requests_session", ".", "get", "(", "GOOGLE_URL", ",", "params", "=", "{", "'v'", ":", "'1.0'", ",", "'q'", ":", "search_string", ",", "'start'", ":", "'0'", "}", ")", "try", ":", "results", "=", "response", ".", "json", "(", ")", "data", "=", "results", "[", "'responseData'", "]", "dataInfo", "=", "data", "[", "'results'", "]", "for", "myUrl", "in", "dataInfo", ":", "(", "yield", "myUrl", "[", "'unescapedUrl'", "]", ")", "except", ":", "log", ".", "debug", "(", "u'fetchart: error scraping art page'", ")", "return" ]
return art url from google .
train
false
7,284
def collection_set_options(collection_name, options, **kwargs): for option in list(options.keys()): if (option not in CREATION_ONLY_OPTION): raise ValueError((('Option ' + option) + " can't be modified after collection creation.")) options_string = _validate_collection_options(options) _query((('admin/collections?action=MODIFYCOLLECTION&wt=json&collection=' + collection_name) + options_string), **kwargs)
[ "def", "collection_set_options", "(", "collection_name", ",", "options", ",", "**", "kwargs", ")", ":", "for", "option", "in", "list", "(", "options", ".", "keys", "(", ")", ")", ":", "if", "(", "option", "not", "in", "CREATION_ONLY_OPTION", ")", ":", "raise", "ValueError", "(", "(", "(", "'Option '", "+", "option", ")", "+", "\" can't be modified after collection creation.\"", ")", ")", "options_string", "=", "_validate_collection_options", "(", "options", ")", "_query", "(", "(", "(", "'admin/collections?action=MODIFYCOLLECTION&wt=json&collection='", "+", "collection_name", ")", "+", "options_string", ")", ",", "**", "kwargs", ")" ]
change collection options . additional parameters may be passed .
train
true
7,285
def get_coursetalk_course_key(course_key): return u'{0.org}_{0.course}'.format(course_key)
[ "def", "get_coursetalk_course_key", "(", "course_key", ")", ":", "return", "u'{0.org}_{0.course}'", ".", "format", "(", "course_key", ")" ]
return course key for coursetalk widget . coursetalk unique key for a course contains only organization and course code .
train
false
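A quick usage sketch for the snippet above; the namedtuple is a hypothetical stand-in for the course-key object the real function receives:

from collections import namedtuple

CourseKey = namedtuple('CourseKey', ['org', 'course'])  # hypothetical stand-in

def get_coursetalk_course_key(course_key):
    return u'{0.org}_{0.course}'.format(course_key)

print(get_coursetalk_course_key(CourseKey('MITx', '6.002x')))  # MITx_6.002x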
7,286
def sorter(default_sort_id, kwd): SortSpec = namedtuple('SortSpec', ['sort_id', 'order', 'arrow', 'exc_order']) sort_id = kwd.get('sort_id') order = kwd.get('order') if (sort_id == 'default'): sort_id = default_sort_id if (order == 'asc'): _order = sa.asc(sort_id) elif (order == 'desc'): _order = sa.desc(sort_id) else: order = 'desc' _order = sa.desc(sort_id) up_arrow = '&#x2191;' down_arrow = '&#x2193;' arrow = ' ' if (order == 'asc'): arrow += down_arrow else: arrow += up_arrow return SortSpec(sort_id, order, arrow, _order)
[ "def", "sorter", "(", "default_sort_id", ",", "kwd", ")", ":", "SortSpec", "=", "namedtuple", "(", "'SortSpec'", ",", "[", "'sort_id'", ",", "'order'", ",", "'arrow'", ",", "'exc_order'", "]", ")", "sort_id", "=", "kwd", ".", "get", "(", "'sort_id'", ")", "order", "=", "kwd", ".", "get", "(", "'order'", ")", "if", "(", "sort_id", "==", "'default'", ")", ":", "sort_id", "=", "default_sort_id", "if", "(", "order", "==", "'asc'", ")", ":", "_order", "=", "sa", ".", "asc", "(", "sort_id", ")", "elif", "(", "order", "==", "'desc'", ")", ":", "_order", "=", "sa", ".", "desc", "(", "sort_id", ")", "else", ":", "order", "=", "'desc'", "_order", "=", "sa", ".", "desc", "(", "sort_id", ")", "up_arrow", "=", "'&#x2191;'", "down_arrow", "=", "'&#x2193;'", "arrow", "=", "' '", "if", "(", "order", "==", "'asc'", ")", ":", "arrow", "+=", "down_arrow", "else", ":", "arrow", "+=", "up_arrow", "return", "SortSpec", "(", "sort_id", ",", "order", ",", "arrow", ",", "_order", ")" ]
initialize sorting variables .
train
false
7,288
def dmp_deflate(f, u, K): if dmp_zero_p(f, u): return (((1,) * (u + 1)), f) F = dmp_to_dict(f, u) B = ([0] * (u + 1)) for M in F.keys(): for (i, m) in enumerate(M): B[i] = igcd(B[i], m) for (i, b) in enumerate(B): if (not b): B[i] = 1 B = tuple(B) if all(((b == 1) for b in B)): return (B, f) H = {} for (A, coeff) in F.items(): N = [(a // b) for (a, b) in zip(A, B)] H[tuple(N)] = coeff return (B, dmp_from_dict(H, u, K))
[ "def", "dmp_deflate", "(", "f", ",", "u", ",", "K", ")", ":", "if", "dmp_zero_p", "(", "f", ",", "u", ")", ":", "return", "(", "(", "(", "1", ",", ")", "*", "(", "u", "+", "1", ")", ")", ",", "f", ")", "F", "=", "dmp_to_dict", "(", "f", ",", "u", ")", "B", "=", "(", "[", "0", "]", "*", "(", "u", "+", "1", ")", ")", "for", "M", "in", "F", ".", "keys", "(", ")", ":", "for", "(", "i", ",", "m", ")", "in", "enumerate", "(", "M", ")", ":", "B", "[", "i", "]", "=", "igcd", "(", "B", "[", "i", "]", ",", "m", ")", "for", "(", "i", ",", "b", ")", "in", "enumerate", "(", "B", ")", ":", "if", "(", "not", "b", ")", ":", "B", "[", "i", "]", "=", "1", "B", "=", "tuple", "(", "B", ")", "if", "all", "(", "(", "(", "b", "==", "1", ")", "for", "b", "in", "B", ")", ")", ":", "return", "(", "B", ",", "f", ")", "H", "=", "{", "}", "for", "(", "A", ",", "coeff", ")", "in", "F", ".", "items", "(", ")", ":", "N", "=", "[", "(", "a", "//", "b", ")", "for", "(", "a", ",", "b", ")", "in", "zip", "(", "A", ",", "B", ")", "]", "H", "[", "tuple", "(", "N", ")", "]", "=", "coeff", "return", "(", "B", ",", "dmp_from_dict", "(", "H", ",", "u", ",", "K", ")", ")" ]
map x_i**m_i to y_i in a polynomial in k[x] .
train
false
7,291
def monomial_deg(M): return sum(M)
[ "def", "monomial_deg", "(", "M", ")", ":", "return", "sum", "(", "M", ")" ]
returns the total degree of a monomial .
train
false
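Reading a monomial as its exponent tuple makes the one-liner above concrete; for example x*y**2 over (x, y, z) has exponents (1, 2, 0) and total degree 3:

def monomial_deg(M):
    # total degree = sum of the exponents
    return sum(M)

print(monomial_deg((1, 2, 0)))  # 3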
7,292
def _get_node_creation_time(node): date_string = node.extra.get('created', node.extra.get('launch_time')) if (date_string is None): return None else: return parse_date(date_string)
[ "def", "_get_node_creation_time", "(", "node", ")", ":", "date_string", "=", "node", ".", "extra", ".", "get", "(", "'created'", ",", "node", ".", "extra", ".", "get", "(", "'launch_time'", ")", ")", "if", "(", "date_string", "is", "None", ")", ":", "return", "None", "else", ":", "return", "parse_date", "(", "date_string", ")" ]
get the creation time of a libcloud node .
train
false
7,293
def _create_wx_app(): wxapp = wx.GetApp() if (wxapp is None): wxapp = wx.PySimpleApp() wxapp.SetExitOnFrameDelete(True) _create_wx_app.theWxApp = wxapp
[ "def", "_create_wx_app", "(", ")", ":", "wxapp", "=", "wx", ".", "GetApp", "(", ")", "if", "(", "wxapp", "is", "None", ")", ":", "wxapp", "=", "wx", ".", "PySimpleApp", "(", ")", "wxapp", ".", "SetExitOnFrameDelete", "(", "True", ")", "_create_wx_app", ".", "theWxApp", "=", "wxapp" ]
creates a wx app if one does not already exist .
train
false
7,294
def test_bc_fit_single_class(): ratio = 'auto' bc = BalanceCascade(ratio=ratio, random_state=RND_SEED) y_single_class = np.zeros((X.shape[0],)) assert_warns(UserWarning, bc.fit, X, y_single_class)
[ "def", "test_bc_fit_single_class", "(", ")", ":", "ratio", "=", "'auto'", "bc", "=", "BalanceCascade", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "y_single_class", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", ")", ")", "assert_warns", "(", "UserWarning", ",", "bc", ".", "fit", ",", "X", ",", "y_single_class", ")" ]
test that a warning is raised when fitting with a single class .
train
false
7,295
def propset_dict(propset): if (propset is None): return {} return {prop.name: prop.val for prop in propset}
[ "def", "propset_dict", "(", "propset", ")", ":", "if", "(", "propset", "is", "None", ")", ":", "return", "{", "}", "return", "{", "prop", ".", "name", ":", "prop", ".", "val", "for", "prop", "in", "propset", "}" ]
turn a propset list into a dictionary . propset is an optional attribute on objectcontent objects that are returned by the vmware api .
train
false
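A self-contained sketch of the conversion above; the namedtuple stands in for the vmware property objects (hypothetical, for illustration only):

from collections import namedtuple

Prop = namedtuple('Prop', ['name', 'val'])  # stand-in for a vmware property

def propset_dict(propset):
    if propset is None:
        return {}
    return {prop.name: prop.val for prop in propset}

print(propset_dict(None))  # {}
print(propset_dict([Prop('runtime.powerState', 'poweredOn')]))
# {'runtime.powerState': 'poweredOn'}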
7,296
def set_harddisk_sleep(minutes): value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated(str(value), get_harddisk_sleep)
[ "def", "set_harddisk_sleep", "(", "minutes", ")", ":", "value", "=", "_validate_sleep", "(", "minutes", ")", "cmd", "=", "'systemsetup -setharddisksleep {0}'", ".", "format", "(", "value", ")", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", "(", "cmd", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "confirm_updated", "(", "str", "(", "value", ")", ",", "get_harddisk_sleep", ")" ]
set the amount of idle time until the harddisk sleeps .
train
true
7,297
def _rec_eval_tail(g, i, A, u, K): if (i == u): return dup_eval(g, A[(-1)], K) else: h = [_rec_eval_tail(c, (i + 1), A, u, K) for c in g] if (i < ((u - len(A)) + 1)): return h else: return dup_eval(h, A[(((- u) + i) - 1)], K)
[ "def", "_rec_eval_tail", "(", "g", ",", "i", ",", "A", ",", "u", ",", "K", ")", ":", "if", "(", "i", "==", "u", ")", ":", "return", "dup_eval", "(", "g", ",", "A", "[", "(", "-", "1", ")", "]", ",", "K", ")", "else", ":", "h", "=", "[", "_rec_eval_tail", "(", "c", ",", "(", "i", "+", "1", ")", ",", "A", ",", "u", ",", "K", ")", "for", "c", "in", "g", "]", "if", "(", "i", "<", "(", "(", "u", "-", "len", "(", "A", ")", ")", "+", "1", ")", ")", ":", "return", "h", "else", ":", "return", "dup_eval", "(", "h", ",", "A", "[", "(", "(", "(", "-", "u", ")", "+", "i", ")", "-", "1", ")", "]", ",", "K", ")" ]
recursive helper for :func:dmp_eval_tail .
train
false
7,300
def _make_nxm_w(*args, **kw): t = _fix_types(kw.pop('type', _nxm_maskable_numeric_entry)) ok = False for tt in t: if _issubclass(tt, _nxm_maskable): ok = True break if (not ok): t.insert(0, _nxm_maskable) return _make_nxm(type=t, *args, **kw)
[ "def", "_make_nxm_w", "(", "*", "args", ",", "**", "kw", ")", ":", "t", "=", "_fix_types", "(", "kw", ".", "pop", "(", "'type'", ",", "_nxm_maskable_numeric_entry", ")", ")", "ok", "=", "False", "for", "tt", "in", "t", ":", "if", "_issubclass", "(", "tt", ",", "_nxm_maskable", ")", ":", "ok", "=", "True", "break", "if", "(", "not", "ok", ")", ":", "t", ".", "insert", "(", "0", ",", "_nxm_maskable", ")", "return", "_make_nxm", "(", "type", "=", "t", ",", "*", "args", ",", "**", "kw", ")" ]
make a simple wildcarded nxm entry class .
train
false
7,302
def _set_polling_backend(fn): global _polling_backend _polling_backend = fn
[ "def", "_set_polling_backend", "(", "fn", ")", ":", "global", "_polling_backend", "_polling_backend", "=", "fn" ]
internal api .
train
false
7,303
def test_array_input_assignment(line_builder_array): assert (line_builder_array.y.selection == 'value')
[ "def", "test_array_input_assignment", "(", "line_builder_array", ")", ":", "assert", "(", "line_builder_array", ".", "y", ".", "selection", "==", "'value'", ")" ]
make sure array input is mapped to a measurement name .
train
false
7,305
def can_represent_dtype(dtype): return ((dtype in REPRESENTABLE_DTYPES) or (dtype.kind in STRING_KINDS))
[ "def", "can_represent_dtype", "(", "dtype", ")", ":", "return", "(", "(", "dtype", "in", "REPRESENTABLE_DTYPES", ")", "or", "(", "dtype", ".", "kind", "in", "STRING_KINDS", ")", ")" ]
can we build an adjustedarray for a baseline of dtype? .
train
false
7,306
def _fastq_sanger_convert_fastq_illumina(in_handle, out_handle, alphabet=None): trunc_char = chr(1) mapping = ''.join(((([chr(0) for ascii in range(0, 33)] + [chr((64 + q)) for q in range(0, (62 + 1))]) + [trunc_char for ascii in range(96, 127)]) + [chr(0) for ascii in range(127, 256)])) assert (len(mapping) == 256) return _fastq_generic2(in_handle, out_handle, mapping, trunc_char, 'Data loss - max PHRED quality 62 in Illumina 1.3+ FASTQ')
[ "def", "_fastq_sanger_convert_fastq_illumina", "(", "in_handle", ",", "out_handle", ",", "alphabet", "=", "None", ")", ":", "trunc_char", "=", "chr", "(", "1", ")", "mapping", "=", "''", ".", "join", "(", "(", "(", "(", "[", "chr", "(", "0", ")", "for", "ascii", "in", "range", "(", "0", ",", "33", ")", "]", "+", "[", "chr", "(", "(", "64", "+", "q", ")", ")", "for", "q", "in", "range", "(", "0", ",", "(", "62", "+", "1", ")", ")", "]", ")", "+", "[", "trunc_char", "for", "ascii", "in", "range", "(", "96", ",", "127", ")", "]", ")", "+", "[", "chr", "(", "0", ")", "for", "ascii", "in", "range", "(", "127", ",", "256", ")", "]", ")", ")", "assert", "(", "len", "(", "mapping", ")", "==", "256", ")", "return", "_fastq_generic2", "(", "in_handle", ",", "out_handle", ",", "mapping", ",", "trunc_char", ",", "'Data loss - max PHRED quality 62 in Illumina 1.3+ FASTQ'", ")" ]
fast sanger fastq to illumina 1.3+ fastq conversion .
train
false
7,307
def dmg_name(fullversion, pyver, osxver=None): if (not osxver): osxver = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3') return ('scipy-%s-py%s-python.org-macosx%s.dmg' % (fullversion, pyver, osxver))
[ "def", "dmg_name", "(", "fullversion", ",", "pyver", ",", "osxver", "=", "None", ")", ":", "if", "(", "not", "osxver", ")", ":", "osxver", "=", "os", ".", "environ", ".", "get", "(", "'MACOSX_DEPLOYMENT_TARGET'", ",", "'10.3'", ")", "return", "(", "'scipy-%s-py%s-python.org-macosx%s.dmg'", "%", "(", "fullversion", ",", "pyver", ",", "osxver", ")", ")" ]
return name for dmg installer .
train
false
7,309
def guided_tour_finished(): if (request.ajax == True): utable = s3db.tour_user person_id = auth.s3_logged_in_person() query = ((utable.person_id == person_id) & (utable.tour_config_id == request.post_vars.tour_id)) db(query).update(resume='', completed=True, trip_counter=(utable.trip_counter + 1)) return json.dumps({}) else: redirect(URL(f='config'))
[ "def", "guided_tour_finished", "(", ")", ":", "if", "(", "request", ".", "ajax", "==", "True", ")", ":", "utable", "=", "s3db", ".", "tour_user", "person_id", "=", "auth", ".", "s3_logged_in_person", "(", ")", "query", "=", "(", "(", "utable", ".", "person_id", "==", "person_id", ")", "&", "(", "utable", ".", "tour_config_id", "==", "request", ".", "post_vars", ".", "tour_id", ")", ")", "db", "(", "query", ")", ".", "update", "(", "resume", "=", "''", ",", "completed", "=", "True", ",", "trip_counter", "=", "(", "utable", ".", "trip_counter", "+", "1", ")", ")", "return", "json", ".", "dumps", "(", "{", "}", ")", "else", ":", "redirect", "(", "URL", "(", "f", "=", "'config'", ")", ")" ]
update database when tour completed , otherwise redirect to tour/config .
train
false
7,310
def tilesetCoordinates(filename): coords = MBTiles.list_tiles(filename) count = len(coords) for (offset, coord) in enumerate(coords): (yield (offset, count, coord))
[ "def", "tilesetCoordinates", "(", "filename", ")", ":", "coords", "=", "MBTiles", ".", "list_tiles", "(", "filename", ")", "count", "=", "len", "(", "coords", ")", "for", "(", "offset", ",", "coord", ")", "in", "enumerate", "(", "coords", ")", ":", "(", "yield", "(", "offset", ",", "count", ",", "coord", ")", ")" ]
generate a stream of tuples for seeding .
train
false
7,311
def deliver(obj, conn): if isproxy(obj): raise TypeError("can't deliver proxies") if orig_isinstance(obj, function): globals = conn.remote_conn._local_namespace dumped = _dump_function(obj) return conn.modules[__name__]._load_function(dumped, globals) else: return conn.modules.cPickle.loads(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))
[ "def", "deliver", "(", "obj", ",", "conn", ")", ":", "if", "isproxy", "(", "obj", ")", ":", "raise", "TypeError", "(", "\"can't deliver proxies\"", ")", "if", "orig_isinstance", "(", "obj", ",", "function", ")", ":", "globals", "=", "conn", ".", "remote_conn", ".", "_local_namespace", "dumped", "=", "_dump_function", "(", "obj", ")", "return", "conn", ".", "modules", "[", "__name__", "]", ".", "_load_function", "(", "dumped", ",", "globals", ")", "else", ":", "return", "conn", ".", "modules", ".", "cPickle", ".", "loads", "(", "pickle", ".", "dumps", "(", "obj", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", ")" ]
delivers a local object to the other side of the connection .
train
false
7,312
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0, maxiter=5000): options = {'xtol': tol, 'maxiter': maxiter} res = _minimize_scalar_golden(func, brack, args, **options) if full_output: return (res['x'], res['fun'], res['nfev']) else: return res['x']
[ "def", "golden", "(", "func", ",", "args", "=", "(", ")", ",", "brack", "=", "None", ",", "tol", "=", "_epsilon", ",", "full_output", "=", "0", ",", "maxiter", "=", "5000", ")", ":", "options", "=", "{", "'xtol'", ":", "tol", ",", "'maxiter'", ":", "maxiter", "}", "res", "=", "_minimize_scalar_golden", "(", "func", ",", "brack", ",", "args", ",", "**", "options", ")", "if", "full_output", ":", "return", "(", "res", "[", "'x'", "]", ",", "res", "[", "'fun'", "]", ",", "res", "[", "'nfev'", "]", ")", "else", ":", "return", "res", "[", "'x'", "]" ]
return the minimum of a function of one variable .
train
false
7,314
def check_marshalled_restart_policy(test_case, policy_type, **attributes): expected_name = FLOCKER_RESTART_POLICY_POLICY_TO_NAME[policy_type] test_case.assertEqual(dict(name=expected_name, **attributes), marshalled_restart_policy(policy_type(**attributes)))
[ "def", "check_marshalled_restart_policy", "(", "test_case", ",", "policy_type", ",", "**", "attributes", ")", ":", "expected_name", "=", "FLOCKER_RESTART_POLICY_POLICY_TO_NAME", "[", "policy_type", "]", "test_case", ".", "assertEqual", "(", "dict", "(", "name", "=", "expected_name", ",", "**", "attributes", ")", ",", "marshalled_restart_policy", "(", "policy_type", "(", "**", "attributes", ")", ")", ")" ]
assert that the supplied policy_type can be marshalled to a dict and that the dict contains all the supplied policy attributes .
train
false
7,315
def OpenPathWithStub(path, stub): from six.moves import http_client if (not hasattr(stub, 'scheme')): raise vmodl.fault.NotSupported() elif (stub.scheme == http_client.HTTPConnection): protocol = 'http' elif (stub.scheme == http_client.HTTPSConnection): protocol = 'https' else: raise vmodl.fault.NotSupported() hostPort = stub.host url = ('%s://%s%s' % (protocol, hostPort, path)) headers = {} if stub.cookie: headers['Cookie'] = stub.cookie return requests.get(url, headers=headers, verify=False)
[ "def", "OpenPathWithStub", "(", "path", ",", "stub", ")", ":", "from", "six", ".", "moves", "import", "http_client", "if", "(", "not", "hasattr", "(", "stub", ",", "'scheme'", ")", ")", ":", "raise", "vmodl", ".", "fault", ".", "NotSupported", "(", ")", "elif", "(", "stub", ".", "scheme", "==", "http_client", ".", "HTTPConnection", ")", ":", "protocol", "=", "'http'", "elif", "(", "stub", ".", "scheme", "==", "http_client", ".", "HTTPSConnection", ")", ":", "protocol", "=", "'https'", "else", ":", "raise", "vmodl", ".", "fault", ".", "NotSupported", "(", ")", "hostPort", "=", "stub", ".", "host", "url", "=", "(", "'%s://%s%s'", "%", "(", "protocol", ",", "hostPort", ",", "path", ")", ")", "headers", "=", "{", "}", "if", "stub", ".", "cookie", ":", "headers", "[", "'Cookie'", "]", "=", "stub", ".", "cookie", "return", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "verify", "=", "False", ")" ]
open the specified path using http .
train
true
7,316
def read_token_file(filename): f = open(filename) return (f.readline().strip(), f.readline().strip())
[ "def", "read_token_file", "(", "filename", ")", ":", "f", "=", "open", "(", "filename", ")", "return", "(", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ",", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ")" ]
read a token file and return the oauth token and oauth token secret .
train
false
7,317
def count_ambig(curr_seq, valid_chars='ATCG'): up_seq = curr_seq.upper() total = 0 for vchar in valid_chars: total += up_seq.count(vchar) return (len(curr_seq) - total)
[ "def", "count_ambig", "(", "curr_seq", ",", "valid_chars", "=", "'ATCG'", ")", ":", "up_seq", "=", "curr_seq", ".", "upper", "(", ")", "total", "=", "0", "for", "vchar", "in", "valid_chars", ":", "total", "+=", "up_seq", ".", "count", "(", "vchar", ")", "return", "(", "len", "(", "curr_seq", ")", "-", "total", ")" ]
counts non-standard characters in seq .
train
false
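The counting in the snippet above is easy to verify by hand: everything outside valid_chars (case-insensitively) counts as ambiguous:

def count_ambig(curr_seq, valid_chars='ATCG'):
    up_seq = curr_seq.upper()
    total = 0
    for vchar in valid_chars:
        total += up_seq.count(vchar)
    return len(curr_seq) - total

print(count_ambig('ATCGN'))  # 1 (the 'N')
print(count_ambig('atcg'))   # 0 (case is normalized first)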
7,318
def _make_image_properties_from_pb(image_properties): return ImagePropertiesAnnotation.from_pb(image_properties)
[ "def", "_make_image_properties_from_pb", "(", "image_properties", ")", ":", "return", "ImagePropertiesAnnotation", ".", "from_pb", "(", "image_properties", ")" ]
create imageproperties object from a protobuf response .
train
false
7,319
def check_if_doc_is_linked(doc, method=u'Delete'): from frappe.model.rename_doc import get_link_fields link_fields = get_link_fields(doc.doctype) link_fields = [[lf[u'parent'], lf[u'fieldname'], lf[u'issingle']] for lf in link_fields] for (link_dt, link_field, issingle) in link_fields: if (not issingle): for item in frappe.db.get_values(link_dt, {link_field: doc.name}, [u'name', u'parent', u'parenttype', u'docstatus'], as_dict=True): if (item and ((item.parent or item.name) != doc.name) and (((method == u'Delete') and (item.docstatus < 2)) or ((method == u'Cancel') and (item.docstatus == 1)))): frappe.throw(_(u'Cannot delete or cancel because {0} <a href="#Form/{0}/{1}">{1}</a> is linked with {2} <a href="#Form/{2}/{3}">{3}</a>').format(doc.doctype, doc.name, (item.parenttype if item.parent else link_dt), (item.parent or item.name)), frappe.LinkExistsError)
[ "def", "check_if_doc_is_linked", "(", "doc", ",", "method", "=", "u'Delete'", ")", ":", "from", "frappe", ".", "model", ".", "rename_doc", "import", "get_link_fields", "link_fields", "=", "get_link_fields", "(", "doc", ".", "doctype", ")", "link_fields", "=", "[", "[", "lf", "[", "u'parent'", "]", ",", "lf", "[", "u'fieldname'", "]", ",", "lf", "[", "u'issingle'", "]", "]", "for", "lf", "in", "link_fields", "]", "for", "(", "link_dt", ",", "link_field", ",", "issingle", ")", "in", "link_fields", ":", "if", "(", "not", "issingle", ")", ":", "for", "item", "in", "frappe", ".", "db", ".", "get_values", "(", "link_dt", ",", "{", "link_field", ":", "doc", ".", "name", "}", ",", "[", "u'name'", ",", "u'parent'", ",", "u'parenttype'", ",", "u'docstatus'", "]", ",", "as_dict", "=", "True", ")", ":", "if", "(", "item", "and", "(", "(", "item", ".", "parent", "or", "item", ".", "name", ")", "!=", "doc", ".", "name", ")", "and", "(", "(", "(", "method", "==", "u'Delete'", ")", "and", "(", "item", ".", "docstatus", "<", "2", ")", ")", "or", "(", "(", "method", "==", "u'Cancel'", ")", "and", "(", "item", ".", "docstatus", "==", "1", ")", ")", ")", ")", ":", "frappe", ".", "throw", "(", "_", "(", "u'Cannot delete or cancel because {0} <a href=\"#Form/{0}/{1}\">{1}</a> is linked with {2} <a href=\"#Form/{2}/{3}\">{3}</a>'", ")", ".", "format", "(", "doc", ".", "doctype", ",", "doc", ".", "name", ",", "(", "item", ".", "parenttype", "if", "item", ".", "parent", "else", "link_dt", ")", ",", "(", "item", ".", "parent", "or", "item", ".", "name", ")", ")", ",", "frappe", ".", "LinkExistsError", ")" ]
raises an exception if the given doc is linked in another record .
train
false
7,321
def _read_3(fid): data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32) out = ((np.left_shift(data[0], 16) + np.left_shift(data[1], 8)) + data[2]) return out
[ "def", "_read_3", "(", "fid", ")", ":", "data", "=", "np", ".", "fromfile", "(", "fid", ",", "dtype", "=", "np", ".", "uint8", ",", "count", "=", "3", ")", ".", "astype", "(", "np", ".", "int32", ")", "out", "=", "(", "(", "np", ".", "left_shift", "(", "data", "[", "0", "]", ",", "16", ")", "+", "np", ".", "left_shift", "(", "data", "[", "1", "]", ",", "8", ")", ")", "+", "data", "[", "2", "]", ")", "return", "out" ]
read 3 byte integer from file .
train
false
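The same bit arithmetic as _read_3, shown on a bytes object so the example is self-contained (np.frombuffer replaces the snippet's np.fromfile, which needs a real file handle):

import numpy as np

def read_3(buf):
    data = np.frombuffer(buf, dtype=np.uint8, count=3).astype(np.int32)
    return (data[0] << 16) + (data[1] << 8) + data[2]

print(read_3(b'\x01\x02\x03'))  # 66051 == 0x010203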
7,322
def validate_qos_spec(qos_spec): if (qos_spec is None): return normalized_qos_keys = [key.lower() for key in QOS_KEYS] keylist = [] for (key, value) in qos_spec.items(): lower_case_key = key.lower() if (lower_case_key not in normalized_qos_keys): msg = (_('Unrecognized QOS keyword: "%s"') % key) raise exception.Invalid(msg) keylist.append(lower_case_key) if (len(keylist) > 1): msg = _('Only one limit can be set in a QoS spec.') raise exception.Invalid(msg)
[ "def", "validate_qos_spec", "(", "qos_spec", ")", ":", "if", "(", "qos_spec", "is", "None", ")", ":", "return", "normalized_qos_keys", "=", "[", "key", ".", "lower", "(", ")", "for", "key", "in", "QOS_KEYS", "]", "keylist", "=", "[", "]", "for", "(", "key", ",", "value", ")", "in", "qos_spec", ".", "items", "(", ")", ":", "lower_case_key", "=", "key", ".", "lower", "(", ")", "if", "(", "lower_case_key", "not", "in", "normalized_qos_keys", ")", ":", "msg", "=", "(", "_", "(", "'Unrecognized QOS keyword: \"%s\"'", ")", "%", "key", ")", "raise", "exception", ".", "Invalid", "(", "msg", ")", "keylist", ".", "append", "(", "lower_case_key", ")", "if", "(", "len", "(", "keylist", ")", ">", "1", ")", ":", "msg", "=", "_", "(", "'Only one limit can be set in a QoS spec.'", ")", "raise", "exception", ".", "Invalid", "(", "msg", ")" ]
check validity of cinder qos spec for our backend .
train
false
7,324
def _create_cache_timestamp(cache_path): access_path = os.path.join(cache_path, _CACHE_TIMESTAMP_FILE) if (not os.path.exists(access_path)): print u'Writing cache creation timestamp' created = long(time.time()) try: with open(access_path, 'w') as f: f.write(str(created)) except Exception as e: print u'Error occured writing cache creation timestamp' print e
[ "def", "_create_cache_timestamp", "(", "cache_path", ")", ":", "access_path", "=", "os", ".", "path", ".", "join", "(", "cache_path", ",", "_CACHE_TIMESTAMP_FILE", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "access_path", ")", ")", ":", "print", "u'Writing cache creation timestamp'", "created", "=", "long", "(", "time", ".", "time", "(", ")", ")", "try", ":", "with", "open", "(", "access_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "created", ")", ")", "except", "Exception", "as", "e", ":", "print", "u'Error occured writing cache creation timestamp'", "print", "e" ]
creates a cache creation timestamp file with the current time .
train
false
7,325
def serve_500_error(request, *args, **kwargs): try: exc_info = sys.exc_info() if exc_info: if (desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]): return django.views.debug.technical_500_response(request, *exc_info) else: return render('500.mako', request, {'traceback': traceback.extract_tb(exc_info[2])}) else: return render('500.mako', request, {}) finally: pass
[ "def", "serve_500_error", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "if", "exc_info", ":", "if", "(", "desktop", ".", "conf", ".", "HTTP_500_DEBUG_MODE", ".", "get", "(", ")", "and", "exc_info", "[", "0", "]", "and", "exc_info", "[", "1", "]", ")", ":", "return", "django", ".", "views", ".", "debug", ".", "technical_500_response", "(", "request", ",", "*", "exc_info", ")", "else", ":", "return", "render", "(", "'500.mako'", ",", "request", ",", "{", "'traceback'", ":", "traceback", ".", "extract_tb", "(", "exc_info", "[", "2", "]", ")", "}", ")", "else", ":", "return", "render", "(", "'500.mako'", ",", "request", ",", "{", "}", ")", "finally", ":", "pass" ]
registered handler for 500 .
train
false
7,326
def is_hexadecimal(string): return all(((ch in HEX_DIGITS) for ch in string))
[ "def", "is_hexadecimal", "(", "string", ")", ":", "return", "all", "(", "(", "(", "ch", "in", "HEX_DIGITS", ")", "for", "ch", "in", "string", ")", ")" ]
checks whether a string is hexadecimal .
train
false
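A runnable sketch of the check above; HEX_DIGITS is not shown in the snippet, so it is assumed here to be the set of 0-9/a-f/A-F:

import string

HEX_DIGITS = set(string.hexdigits)  # assumed definition

def is_hexadecimal(s):
    return all(ch in HEX_DIGITS for ch in s)

print(is_hexadecimal('deadBEEF42'))  # True
print(is_hexadecimal('0x1f'))        # False: 'x' is not a hex digit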
7,327
@_assure_identity def set_credential_file(cred_file, region=None, authenticate=True): global regions, services region = _safe_region(region) identity.set_credential_file(cred_file, region=region, authenticate=authenticate) regions = tuple(identity.regions) services = tuple(identity.services.keys()) connect_to_services(region=region)
[ "@", "_assure_identity", "def", "set_credential_file", "(", "cred_file", ",", "region", "=", "None", ",", "authenticate", "=", "True", ")", ":", "global", "regions", ",", "services", "region", "=", "_safe_region", "(", "region", ")", "identity", ".", "set_credential_file", "(", "cred_file", ",", "region", "=", "region", ",", "authenticate", "=", "authenticate", ")", "regions", "=", "tuple", "(", "identity", ".", "regions", ")", "services", "=", "tuple", "(", "identity", ".", "services", ".", "keys", "(", ")", ")", "connect_to_services", "(", "region", "=", "region", ")" ]
read in the credentials from the supplied file path .
train
true
7,328
def is_valid_bucket_name(bucket_name): regex = re.compile('[a-z0-9][a-z0-9\\._-]{2,254}$') if (not regex.match(bucket_name)): return False if iptools.ipv4.validate_ip(bucket_name): return False return True
[ "def", "is_valid_bucket_name", "(", "bucket_name", ")", ":", "regex", "=", "re", ".", "compile", "(", "'[a-z0-9][a-z0-9\\\\._-]{2,254}$'", ")", "if", "(", "not", "regex", ".", "match", "(", "bucket_name", ")", ")", ":", "return", "False", "if", "iptools", ".", "ipv4", ".", "validate_ip", "(", "bucket_name", ")", ":", "return", "False", "return", "True" ]
check if bucket_name is a valid s3 bucket name : 3-255 characters of lowercase letters , digits , dots , hyphens or underscores , and not an ip address .
train
false
7,329
def _property_name_to_values(entities): property_name_to_values = {} for entity in entities: for (property_name, value) in entity.iteritems(): property_name_to_values.setdefault(property_name, []).append(value) return property_name_to_values
[ "def", "_property_name_to_values", "(", "entities", ")", ":", "property_name_to_values", "=", "{", "}", "for", "entity", "in", "entities", ":", "for", "(", "property_name", ",", "value", ")", "in", "entity", ".", "iteritems", "(", ")", ":", "property_name_to_values", ".", "setdefault", "(", "property_name", ",", "[", "]", ")", ".", "append", "(", "value", ")", "return", "property_name_to_values" ]
returns a mapping of entity property names to a list of their values .
train
false
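The grouping above in a Python 3 sketch (the snippet uses the Python 2 iteritems()):

def property_name_to_values(entities):
    out = {}
    for entity in entities:
        for name, value in entity.items():
            out.setdefault(name, []).append(value)
    return out

print(property_name_to_values([{'color': 'red', 'size': 2}, {'color': 'blue'}]))
# {'color': ['red', 'blue'], 'size': [2]}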
7,331
def addFacesByConvexReversed(faces, indexedLoop): addFacesByConvex(faces, indexedLoop[::(-1)])
[ "def", "addFacesByConvexReversed", "(", "faces", ",", "indexedLoop", ")", ":", "addFacesByConvex", "(", "faces", ",", "indexedLoop", "[", ":", ":", "(", "-", "1", ")", "]", ")" ]
add faces from a reversed convex polygon .
train
false
7,332
def get_first_import(node, context, name, base, level): fullname = (('%s.%s' % (base, name)) if base else name) first = None found = False for first in context.body: if (first is node): continue if ((first.scope() is node.scope()) and (first.fromlineno > node.fromlineno)): continue if isinstance(first, astroid.Import): if any(((fullname == iname[0]) for iname in first.names)): found = True break elif isinstance(first, astroid.From): if ((level == first.level) and any(((fullname == ('%s.%s' % (first.modname, iname[0]))) for iname in first.names))): found = True break if (found and (not are_exclusive(first, node))): return first
[ "def", "get_first_import", "(", "node", ",", "context", ",", "name", ",", "base", ",", "level", ")", ":", "fullname", "=", "(", "(", "'%s.%s'", "%", "(", "base", ",", "name", ")", ")", "if", "base", "else", "name", ")", "first", "=", "None", "found", "=", "False", "for", "first", "in", "context", ".", "body", ":", "if", "(", "first", "is", "node", ")", ":", "continue", "if", "(", "(", "first", ".", "scope", "(", ")", "is", "node", ".", "scope", "(", ")", ")", "and", "(", "first", ".", "fromlineno", ">", "node", ".", "fromlineno", ")", ")", ":", "continue", "if", "isinstance", "(", "first", ",", "astroid", ".", "Import", ")", ":", "if", "any", "(", "(", "(", "fullname", "==", "iname", "[", "0", "]", ")", "for", "iname", "in", "first", ".", "names", ")", ")", ":", "found", "=", "True", "break", "elif", "isinstance", "(", "first", ",", "astroid", ".", "From", ")", ":", "if", "(", "(", "level", "==", "first", ".", "level", ")", "and", "any", "(", "(", "(", "fullname", "==", "(", "'%s.%s'", "%", "(", "first", ".", "modname", ",", "iname", "[", "0", "]", ")", ")", ")", "for", "iname", "in", "first", ".", "names", ")", ")", ")", ":", "found", "=", "True", "break", "if", "(", "found", "and", "(", "not", "are_exclusive", "(", "first", ",", "node", ")", ")", ")", ":", "return", "first" ]
return the first node where [base.]name is imported , or none if no such import is found .
train
false
7,333
def run_convert_command(args): if (not isinstance(args, list)): raise TypeError('args must be a list') convert_command = _get_convert_command() if (os.path.splitext(os.path.basename(convert_command))[0] == 'magick'): args.insert(0, convert_command) args.insert(1, 'convert') else: args.insert(0, convert_command) execute_command(args, shell=(sublime.platform() == 'windows'))
[ "def", "run_convert_command", "(", "args", ")", ":", "if", "(", "not", "isinstance", "(", "args", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "'args must be a list'", ")", "convert_command", "=", "_get_convert_command", "(", ")", "if", "(", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "convert_command", ")", ")", "[", "0", "]", "==", "'magick'", ")", ":", "args", ".", "insert", "(", "0", ",", "convert_command", ")", "args", ".", "insert", "(", "1", ",", "'convert'", ")", "else", ":", "args", ".", "insert", "(", "0", ",", "convert_command", ")", "execute_command", "(", "args", ",", "shell", "=", "(", "sublime", ".", "platform", "(", ")", "==", "'windows'", ")", ")" ]
executes imagemagick convert or magick command as appropriate with the given args .
train
false
7,334
def _autoregister(admin, model, follow=None): if model._meta.proxy: raise RegistrationError('Proxy models cannot be used with django-reversion, register the parent class instead') if (not is_registered(model)): follow = (follow or []) for (parent_cls, field) in model._meta.parents.items(): follow.append(field.name) _autoregister(admin, parent_cls) register(model, follow=follow, format=admin.reversion_format)
[ "def", "_autoregister", "(", "admin", ",", "model", ",", "follow", "=", "None", ")", ":", "if", "model", ".", "_meta", ".", "proxy", ":", "raise", "RegistrationError", "(", "'Proxy models cannot be used with django-reversion, register the parent class instead'", ")", "if", "(", "not", "is_registered", "(", "model", ")", ")", ":", "follow", "=", "(", "follow", "or", "[", "]", ")", "for", "(", "parent_cls", ",", "field", ")", "in", "model", ".", "_meta", ".", "parents", ".", "items", "(", ")", ":", "follow", ".", "append", "(", "field", ".", "name", ")", "_autoregister", "(", "admin", ",", "parent_cls", ")", "register", "(", "model", ",", "follow", "=", "follow", ",", "format", "=", "admin", ".", "reversion_format", ")" ]
registers a model with reversion .
train
false
7,335
def weighted_score(raw_earned, raw_possible, weight): assert (raw_possible is not None) cannot_compute_with_weight = ((weight is None) or (raw_possible == 0)) if cannot_compute_with_weight: return (raw_earned, raw_possible) else: return (((float(raw_earned) * weight) / raw_possible), float(weight))
[ "def", "weighted_score", "(", "raw_earned", ",", "raw_possible", ",", "weight", ")", ":", "assert", "(", "raw_possible", "is", "not", "None", ")", "cannot_compute_with_weight", "=", "(", "(", "weight", "is", "None", ")", "or", "(", "raw_possible", "==", "0", ")", ")", "if", "cannot_compute_with_weight", ":", "return", "(", "raw_earned", ",", "raw_possible", ")", "else", ":", "return", "(", "(", "(", "float", "(", "raw_earned", ")", "*", "weight", ")", "/", "raw_possible", ")", ",", "float", "(", "weight", ")", ")" ]
returns a tuple that represents the weighted score .
train
false
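A worked example of the scaling above: earning 2 of 4 raw points on a problem weighted 10 gives 2/4 * 10 = 5.0 of 10.0 weighted points, while a missing weight passes the raw score through:

def weighted_score(raw_earned, raw_possible, weight):
    assert raw_possible is not None
    if weight is None or raw_possible == 0:
        return raw_earned, raw_possible
    return float(raw_earned) * weight / raw_possible, float(weight)

print(weighted_score(2, 4, 10))    # (5.0, 10.0)
print(weighted_score(2, 4, None))  # (2, 4)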
7,336
def split_in_lines(text): return (line for line in map(str.strip, text.split('\n')) if line)
[ "def", "split_in_lines", "(", "text", ")", ":", "return", "(", "line", "for", "line", "in", "map", "(", "str", ".", "strip", ",", "text", ".", "split", "(", "'\\n'", ")", ")", "if", "line", ")" ]
split a block of text in lines removing unnecessary spaces from each line .
train
false
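Note that the function above returns a generator, so a list() call is needed to materialize the cleaned lines:

def split_in_lines(text):
    return (line for line in map(str.strip, text.split('\n')) if line)

print(list(split_in_lines('  first  \n\n  second\n')))  # ['first', 'second']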
7,337
def get_urlbase(url): parsed_uri = urlparse(url) return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
[ "def", "get_urlbase", "(", "url", ")", ":", "parsed_uri", "=", "urlparse", "(", "url", ")", "return", "'{uri.scheme}://{uri.netloc}/'", ".", "format", "(", "uri", "=", "parsed_uri", ")" ]
return the base url .
train
false
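The same logic in a self-contained Python 3 form (the snippet's bare urlparse name suggests a Python 2 or six-style import):

from urllib.parse import urlparse

def get_urlbase(url):
    parsed_uri = urlparse(url)
    return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)

print(get_urlbase('https://example.com/a/b?q=1'))  # https://example.com/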
7,339
def _check_key_type(key_str, key_type=None): value = __salt__['pillar.get'](key_str, None) if (value is None): return None elif ((type(value) is not key_type) and (key_type is not None)): return False else: return True
[ "def", "_check_key_type", "(", "key_str", ",", "key_type", "=", "None", ")", ":", "value", "=", "__salt__", "[", "'pillar.get'", "]", "(", "key_str", ",", "None", ")", "if", "(", "value", "is", "None", ")", ":", "return", "None", "elif", "(", "(", "type", "(", "value", ")", "is", "not", "key_type", ")", "and", "(", "key_type", "is", "not", "None", ")", ")", ":", "return", "False", "else", ":", "return", "True" ]
helper function to get pillar[key_str] and check if its type is key_type . returns none if the pillar key is missing .
train
false
7,341
def _AddPropertiesForField(field, cls): assert (_FieldDescriptor.MAX_CPPTYPE == 10) constant_name = (field.name.upper() + '_FIELD_NUMBER') setattr(cls, constant_name, field.number) if (field.label == _FieldDescriptor.LABEL_REPEATED): _AddPropertiesForRepeatedField(field, cls) elif (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE): _AddPropertiesForNonRepeatedCompositeField(field, cls) else: _AddPropertiesForNonRepeatedScalarField(field, cls)
[ "def", "_AddPropertiesForField", "(", "field", ",", "cls", ")", ":", "assert", "(", "_FieldDescriptor", ".", "MAX_CPPTYPE", "==", "10", ")", "constant_name", "=", "(", "field", ".", "name", ".", "upper", "(", ")", "+", "'_FIELD_NUMBER'", ")", "setattr", "(", "cls", ",", "constant_name", ",", "field", ".", "number", ")", "if", "(", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ")", ":", "_AddPropertiesForRepeatedField", "(", "field", ",", "cls", ")", "elif", "(", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ")", ":", "_AddPropertiesForNonRepeatedCompositeField", "(", "field", ",", "cls", ")", "else", ":", "_AddPropertiesForNonRepeatedScalarField", "(", "field", ",", "cls", ")" ]
adds a public property for a protocol message field .
train
true
7,342
@deprecated('low_pass_filter is deprecated and will be removed in 0.15, use filter_data instead.') @verbose def low_pass_filter(x, Fs, Fp, filter_length='auto', trans_bandwidth='auto', method='fir', iir_params=None, picks=None, n_jobs=1, copy=True, phase='zero', fir_window='hamming', verbose=None): return filter_data(x, Fs, None, Fp, picks, filter_length, 'auto', trans_bandwidth, n_jobs, method, iir_params, copy, phase, fir_window)
[ "@", "deprecated", "(", "'low_pass_filter is deprecated and will be removed in 0.15, use filter_data instead.'", ")", "@", "verbose", "def", "low_pass_filter", "(", "x", ",", "Fs", ",", "Fp", ",", "filter_length", "=", "'auto'", ",", "trans_bandwidth", "=", "'auto'", ",", "method", "=", "'fir'", ",", "iir_params", "=", "None", ",", "picks", "=", "None", ",", "n_jobs", "=", "1", ",", "copy", "=", "True", ",", "phase", "=", "'zero'", ",", "fir_window", "=", "'hamming'", ",", "verbose", "=", "None", ")", ":", "return", "filter_data", "(", "x", ",", "Fs", ",", "None", ",", "Fp", ",", "picks", ",", "filter_length", ",", "'auto'", ",", "trans_bandwidth", ",", "n_jobs", ",", "method", ",", "iir_params", ",", "copy", ",", "phase", ",", "fir_window", ")" ]
lowpass filter for the signal x .
train
false
7,343
def fListToString(a_list, a_precision=3): from numpy import around s_list = ', '.join((('%g' % around(x, a_precision)).ljust((a_precision + 3)) for x in a_list)) return ('[%s]' % s_list)
[ "def", "fListToString", "(", "a_list", ",", "a_precision", "=", "3", ")", ":", "from", "numpy", "import", "around", "s_list", "=", "', '", ".", "join", "(", "(", "(", "'%g'", "%", "around", "(", "x", ",", "a_precision", ")", ")", ".", "ljust", "(", "(", "a_precision", "+", "3", ")", ")", "for", "x", "in", "a_list", ")", ")", "return", "(", "'[%s]'", "%", "s_list", ")" ]
returns a string representing a list of floats with a given precision .
train
false
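Example output of the formatter above, each value rounded to 3 places and left-justified to precision + 3 characters:

from numpy import around

def fListToString(a_list, a_precision=3):
    s_list = ', '.join(('%g' % around(x, a_precision)).ljust(a_precision + 3)
                       for x in a_list)
    return '[%s]' % s_list

print(fListToString([3.14159, 2.71828]))  # [3.142 , 2.718 ]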
7,344
def dict_to_sequence(d): if hasattr(d, 'items'): d = d.items() return d
[ "def", "dict_to_sequence", "(", "d", ")", ":", "if", "hasattr", "(", "d", ",", "'items'", ")", ":", "d", "=", "d", ".", "items", "(", ")", "return", "d" ]
returns an internal sequence dictionary update .
train
false
7,345
def test_date_xrange(): datey = DateLine(truncate_label=1000) datey.add('dates', [(date(2013, 1, 2), 300), (date(2013, 1, 12), 412), (date(2013, 2, 2), 823), (date(2013, 2, 22), 672)]) datey.xrange = (date(2013, 1, 1), date(2013, 3, 1)) q = datey.render_pyquery() assert (list(map((lambda t: t.split(' ')[0]), q('.axis.x text').map(texts))) == ['2013-01-01', '2013-01-12', '2013-01-24', '2013-02-04', '2013-02-16', '2013-02-27'])
[ "def", "test_date_xrange", "(", ")", ":", "datey", "=", "DateLine", "(", "truncate_label", "=", "1000", ")", "datey", ".", "add", "(", "'dates'", ",", "[", "(", "date", "(", "2013", ",", "1", ",", "2", ")", ",", "300", ")", ",", "(", "date", "(", "2013", ",", "1", ",", "12", ")", ",", "412", ")", ",", "(", "date", "(", "2013", ",", "2", ",", "2", ")", ",", "823", ")", ",", "(", "date", "(", "2013", ",", "2", ",", "22", ")", ",", "672", ")", "]", ")", "datey", ".", "xrange", "=", "(", "date", "(", "2013", ",", "1", ",", "1", ")", ",", "date", "(", "2013", ",", "3", ",", "1", ")", ")", "q", "=", "datey", ".", "render_pyquery", "(", ")", "assert", "(", "list", "(", "map", "(", "(", "lambda", "t", ":", "t", ".", "split", "(", "' '", ")", "[", "0", "]", ")", ",", "q", "(", "'.axis.x text'", ")", ".", "map", "(", "texts", ")", ")", ")", "==", "[", "'2013-01-01'", ",", "'2013-01-12'", ",", "'2013-01-24'", ",", "'2013-02-04'", ",", "'2013-02-16'", ",", "'2013-02-27'", "]", ")" ]
test dateline with xrange .
train
false
7,346
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None, n_nodes=0, node_angles=None, ylim=[9, 10]): if (event.inaxes != axes): return if (event.button == 1): if (not (ylim[0] <= event.ydata <= ylim[1])): return node_angles = (node_angles % (np.pi * 2)) node = np.argmin(np.abs((event.xdata - node_angles))) patches = event.inaxes.patches for (ii, (x, y)) in enumerate(zip(indices[0], indices[1])): patches[ii].set_visible((node in [x, y])) fig.canvas.draw() elif (event.button == 3): patches = event.inaxes.patches for ii in range(np.size(indices, axis=1)): patches[ii].set_visible(True) fig.canvas.draw()
[ "def", "_plot_connectivity_circle_onpick", "(", "event", ",", "fig", "=", "None", ",", "axes", "=", "None", ",", "indices", "=", "None", ",", "n_nodes", "=", "0", ",", "node_angles", "=", "None", ",", "ylim", "=", "[", "9", ",", "10", "]", ")", ":", "if", "(", "event", ".", "inaxes", "!=", "axes", ")", ":", "return", "if", "(", "event", ".", "button", "==", "1", ")", ":", "if", "(", "not", "(", "ylim", "[", "0", "]", "<=", "event", ".", "ydata", "<=", "ylim", "[", "1", "]", ")", ")", ":", "return", "node_angles", "=", "(", "node_angles", "%", "(", "np", ".", "pi", "*", "2", ")", ")", "node", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "(", "event", ".", "xdata", "-", "node_angles", ")", ")", ")", "patches", "=", "event", ".", "inaxes", ".", "patches", "for", "(", "ii", ",", "(", "x", ",", "y", ")", ")", "in", "enumerate", "(", "zip", "(", "indices", "[", "0", "]", ",", "indices", "[", "1", "]", ")", ")", ":", "patches", "[", "ii", "]", ".", "set_visible", "(", "(", "node", "in", "[", "x", ",", "y", "]", ")", ")", "fig", ".", "canvas", ".", "draw", "(", ")", "elif", "(", "event", ".", "button", "==", "3", ")", ":", "patches", "=", "event", ".", "inaxes", ".", "patches", "for", "ii", "in", "range", "(", "np", ".", "size", "(", "indices", ",", "axis", "=", "1", ")", ")", ":", "patches", "[", "ii", "]", ".", "set_visible", "(", "True", ")", "fig", ".", "canvas", ".", "draw", "(", ")" ]
isolate connections around a single node when the user left-clicks a node .
train
false
7,347
def test_rsolve_bulk(): funcs = [n, (n + 1), (n ** 2), (n ** 3), (n ** 4), (n + (n ** 2)), (((((27 * n) + (52 * (n ** 2))) - (3 * (n ** 3))) + (12 * (n ** 4))) - (52 * (n ** 5)))] coeffs = [[(-2), 1], [(-2), (-1), 1], [(-1), 1, 1, (-1), 1], [(- n), 1], [(((n ** 2) - n) + 12), 1]] for p in funcs: for c in coeffs: q = recurrence_term(c, p) if p.is_polynomial(n): assert (rsolve_poly(c, q, n) == p)
[ "def", "test_rsolve_bulk", "(", ")", ":", "funcs", "=", "[", "n", ",", "(", "n", "+", "1", ")", ",", "(", "n", "**", "2", ")", ",", "(", "n", "**", "3", ")", ",", "(", "n", "**", "4", ")", ",", "(", "n", "+", "(", "n", "**", "2", ")", ")", ",", "(", "(", "(", "(", "(", "27", "*", "n", ")", "+", "(", "52", "*", "(", "n", "**", "2", ")", ")", ")", "-", "(", "3", "*", "(", "n", "**", "3", ")", ")", ")", "+", "(", "12", "*", "(", "n", "**", "4", ")", ")", ")", "-", "(", "52", "*", "(", "n", "**", "5", ")", ")", ")", "]", "coeffs", "=", "[", "[", "(", "-", "2", ")", ",", "1", "]", ",", "[", "(", "-", "2", ")", ",", "(", "-", "1", ")", ",", "1", "]", ",", "[", "(", "-", "1", ")", ",", "1", ",", "1", ",", "(", "-", "1", ")", ",", "1", "]", ",", "[", "(", "-", "n", ")", ",", "1", "]", ",", "[", "(", "(", "(", "n", "**", "2", ")", "-", "n", ")", "+", "12", ")", ",", "1", "]", "]", "for", "p", "in", "funcs", ":", "for", "c", "in", "coeffs", ":", "q", "=", "recurrence_term", "(", "c", ",", "p", ")", "if", "p", ".", "is_polynomial", "(", "n", ")", ":", "assert", "(", "rsolve_poly", "(", "c", ",", "q", ",", "n", ")", "==", "p", ")" ]
some bulk-generated tests .
train
false
7,348
def _b64(b): return base64.urlsafe_b64encode(b).decode('utf8').replace('=', '')
[ "def", "_b64", "(", "b", ")", ":", "return", "base64", ".", "urlsafe_b64encode", "(", "b", ")", ".", "decode", "(", "'utf8'", ")", ".", "replace", "(", "'='", ",", "''", ")" ]
helper function base64 encode for jose spec .
train
false
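The helper above is base64url without padding, as the JOSE spec requires; for example the bytes b'hello' encode to 'aGVsbG8' with the trailing '=' stripped:

import base64

def _b64(b):
    return base64.urlsafe_b64encode(b).decode('utf8').replace('=', '')

print(_b64(b'hello'))  # aGVsbG8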
7,349
@contextfunction def services_queue_list(context, queues, skip_group=False): request = context['request'] response_format = 'html' if ('response_format' in context): response_format = context['response_format'] return Markup(render_to_string('services/tags/queue_list', {'queues': queues, 'skip_group': skip_group}, context_instance=RequestContext(request), response_format=response_format))
[ "@", "contextfunction", "def", "services_queue_list", "(", "context", ",", "queues", ",", "skip_group", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'html'", "if", "(", "'response_format'", "in", "context", ")", ":", "response_format", "=", "context", "[", "'response_format'", "]", "return", "Markup", "(", "render_to_string", "(", "'services/tags/queue_list'", ",", "{", "'queues'", ":", "queues", ",", "'skip_group'", ":", "skip_group", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")", ")" ]
print a list of queues .
train
false
7,350
def parse_ttag(token, required_tags): if isinstance(token, template.Token): bits = token.split_contents() else: bits = token.split(' ') tags = {'tag_name': bits.pop(0)} for (index, bit) in enumerate(bits): bit = bit.strip() if (bit in required_tags): if (len(bits) != (index - 1)): tags[bit.strip()] = bits[(index + 1)] return tags
[ "def", "parse_ttag", "(", "token", ",", "required_tags", ")", ":", "if", "isinstance", "(", "token", ",", "template", ".", "Token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "else", ":", "bits", "=", "token", ".", "split", "(", "' '", ")", "tags", "=", "{", "'tag_name'", ":", "bits", ".", "pop", "(", "0", ")", "}", "for", "(", "index", ",", "bit", ")", "in", "enumerate", "(", "bits", ")", ":", "bit", "=", "bit", ".", "strip", "(", ")", "if", "(", "bit", "in", "required_tags", ")", ":", "if", "(", "len", "(", "bits", ")", "!=", "(", "index", "-", "1", ")", ")", ":", "tags", "[", "bit", ".", "strip", "(", ")", "]", "=", "bits", "[", "(", "index", "+", "1", ")", "]", "return", "tags" ]
a function to parse a template tag .
train
false
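a hedged sketch of calling parse_ttag on a plain string , assuming the surrounding module's django template import is in scope ; the tag text and required tags are made up for illustration .
# with a plain string the function splits on spaces and pairs each
# required tag with the word that follows it
tags = parse_ttag('featured_photo for user as photo', required_tags=['for', 'as'])
# -> {'tag_name': 'featured_photo', 'for': 'user', 'as': 'photo'}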
7,351
@task @needs('pavelib.prereqs.install_python_prereqs') @timed def run_complexity(): system_string = 'cms/ lms/ common/ openedx/' complexity_report_dir = (Env.REPORT_DIR / 'complexity') complexity_report = (complexity_report_dir / 'python_complexity.log') Env.METRICS_DIR.makedirs_p() _prepare_report_dir(complexity_report_dir) print '--> Calculating cyclomatic complexity of python files...' try: sh('radon cc {system_string} --total-average > {complexity_report}'.format(system_string=system_string, complexity_report=complexity_report)) complexity_metric = _get_count_from_last_line(complexity_report, 'python_complexity') _write_metric(complexity_metric, (Env.METRICS_DIR / 'python_complexity')) print '--> Python cyclomatic complexity report complete.' print 'radon cyclomatic complexity score: {metric}'.format(metric=str(complexity_metric)) except BuildFailure: print 'ERROR: Unable to calculate python-only code-complexity.'
[ "@", "task", "@", "needs", "(", "'pavelib.prereqs.install_python_prereqs'", ")", "@", "timed", "def", "run_complexity", "(", ")", ":", "system_string", "=", "'cms/ lms/ common/ openedx/'", "complexity_report_dir", "=", "(", "Env", ".", "REPORT_DIR", "/", "'complexity'", ")", "complexity_report", "=", "(", "complexity_report_dir", "/", "'python_complexity.log'", ")", "Env", ".", "METRICS_DIR", ".", "makedirs_p", "(", ")", "_prepare_report_dir", "(", "complexity_report_dir", ")", "print", "'--> Calculating cyclomatic complexity of python files...'", "try", ":", "sh", "(", "'radon cc {system_string} --total-average > {complexity_report}'", ".", "format", "(", "system_string", "=", "system_string", ",", "complexity_report", "=", "complexity_report", ")", ")", "complexity_metric", "=", "_get_count_from_last_line", "(", "complexity_report", ",", "'python_complexity'", ")", "_write_metric", "(", "complexity_metric", ",", "(", "Env", ".", "METRICS_DIR", "/", "'python_complexity'", ")", ")", "print", "'--> Python cyclomatic complexity report complete.'", "print", "'radon cyclomatic complexity score: {metric}'", ".", "format", "(", "metric", "=", "str", "(", "complexity_metric", ")", ")", "except", "BuildFailure", ":", "print", "'ERROR: Unable to calculate python-only code-complexity.'" ]
uses radon to examine cyclomatic complexity .
train
false
7,353
def create_local_pifs(): for host_ref in _db_content['host'].keys(): _create_local_pif(host_ref)
[ "def", "create_local_pifs", "(", ")", ":", "for", "host_ref", "in", "_db_content", "[", "'host'", "]", ".", "keys", "(", ")", ":", "_create_local_pif", "(", "host_ref", ")" ]
adds a pif for each host to the local database with vlan=-1 .
train
false
7,354
def split_on_comma(tokens): parts = [] this_part = [] for token in tokens: if ((token.type == u'DELIM') and (token.value == u',')): parts.append(this_part) this_part = [] else: this_part.append(token) parts.append(this_part) return parts
[ "def", "split_on_comma", "(", "tokens", ")", ":", "parts", "=", "[", "]", "this_part", "=", "[", "]", "for", "token", "in", "tokens", ":", "if", "(", "(", "token", ".", "type", "==", "u'DELIM'", ")", "and", "(", "token", ".", "value", "==", "u','", ")", ")", ":", "parts", ".", "append", "(", "this_part", ")", "this_part", "=", "[", "]", "else", ":", "this_part", ".", "append", "(", "token", ")", "parts", ".", "append", "(", "this_part", ")", "return", "parts" ]
split a list of tokens on commas .
train
false
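an illustrative run of split_on_comma using stand-in tokens ; the real token class comes from the css tokenizer , so only the .type and .value attributes the function inspects matter here .
from collections import namedtuple

Token = namedtuple('Token', 'type value')
tokens = [Token(u'IDENT', u'a'), Token(u'DELIM', u','), Token(u'IDENT', u'b')]
# two parts: everything before the comma, everything after it
assert split_on_comma(tokens) == [[tokens[0]], [tokens[2]]]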
7,355
def run_func_until_ret_arg(fun, kwargs, fun_call=None, argument_being_watched=None, required_argument_response=None): status = None while (status != required_argument_response): f_result = fun(kwargs, call=fun_call) r_set = {} for d in f_result: if isinstance(d, list): d0 = d[0] if isinstance(d0, dict): for (k, v) in six.iteritems(d0): r_set[k] = v status = _unwrap_dict(r_set, argument_being_watched) log.debug('Function: {0}, Watched arg: {1}, Response: {2}'.format(str(fun).split(' ')[1], argument_being_watched, status)) time.sleep(5) return True
[ "def", "run_func_until_ret_arg", "(", "fun", ",", "kwargs", ",", "fun_call", "=", "None", ",", "argument_being_watched", "=", "None", ",", "required_argument_response", "=", "None", ")", ":", "status", "=", "None", "while", "(", "status", "!=", "required_argument_response", ")", ":", "f_result", "=", "fun", "(", "kwargs", ",", "call", "=", "fun_call", ")", "r_set", "=", "{", "}", "for", "d", "in", "f_result", ":", "if", "isinstance", "(", "d", ",", "list", ")", ":", "d0", "=", "d", "[", "0", "]", "if", "isinstance", "(", "d0", ",", "dict", ")", ":", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "d0", ")", ":", "r_set", "[", "k", "]", "=", "v", "status", "=", "_unwrap_dict", "(", "r_set", ",", "argument_being_watched", ")", "log", ".", "debug", "(", "'Function: {0}, Watched arg: {1}, Response: {2}'", ".", "format", "(", "str", "(", "fun", ")", ".", "split", "(", "' '", ")", "[", "1", "]", ",", "argument_being_watched", ",", "status", ")", ")", "time", ".", "sleep", "(", "5", ")", "return", "True" ]
waits until the function retrieves some required argument .
train
true
7,356
def get_file_content(url, comes_from=None, session=None): if (session is None): raise TypeError("get_file_content() missing 1 required keyword argument: 'session'") match = _scheme_re.search(url) if match: scheme = match.group(1).lower() if ((scheme == 'file') and comes_from and comes_from.startswith('http')): raise InstallationError(('Requirements file %s references URL %s, which is local' % (comes_from, url))) if (scheme == 'file'): path = url.split(':', 1)[1] path = path.replace('\\', '/') match = _url_slash_drive_re.match(path) if match: path = ((match.group(1) + ':') + path.split('|', 1)[1]) path = urllib_parse.unquote(path) if path.startswith('/'): path = ('/' + path.lstrip('/')) url = path else: resp = session.get(url) resp.raise_for_status() return (resp.url, resp.text) try: with open(url, 'rb') as f: content = auto_decode(f.read()) except IOError as exc: raise InstallationError(('Could not open requirements file: %s' % str(exc))) return (url, content)
[ "def", "get_file_content", "(", "url", ",", "comes_from", "=", "None", ",", "session", "=", "None", ")", ":", "if", "(", "session", "is", "None", ")", ":", "raise", "TypeError", "(", "\"get_file_content() missing 1 required keyword argument: 'session'\"", ")", "match", "=", "_scheme_re", ".", "search", "(", "url", ")", "if", "match", ":", "scheme", "=", "match", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "if", "(", "(", "scheme", "==", "'file'", ")", "and", "comes_from", "and", "comes_from", ".", "startswith", "(", "'http'", ")", ")", ":", "raise", "InstallationError", "(", "(", "'Requirements file %s references URL %s, which is local'", "%", "(", "comes_from", ",", "url", ")", ")", ")", "if", "(", "scheme", "==", "'file'", ")", ":", "path", "=", "url", ".", "split", "(", "':'", ",", "1", ")", "[", "1", "]", "path", "=", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "match", "=", "_url_slash_drive_re", ".", "match", "(", "path", ")", "if", "match", ":", "path", "=", "(", "(", "match", ".", "group", "(", "1", ")", "+", "':'", ")", "+", "path", ".", "split", "(", "'|'", ",", "1", ")", "[", "1", "]", ")", "path", "=", "urllib_parse", ".", "unquote", "(", "path", ")", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "path", "=", "(", "'/'", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "url", "=", "path", "else", ":", "resp", "=", "session", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "return", "(", "resp", ".", "url", ",", "resp", ".", "text", ")", "try", ":", "with", "open", "(", "url", ",", "'rb'", ")", "as", "f", ":", "content", "=", "auto_decode", "(", "f", ".", "read", "(", ")", ")", "except", "IOError", "as", "exc", ":", "raise", "InstallationError", "(", "(", "'Could not open requirements file: %s'", "%", "str", "(", "exc", ")", ")", ")", "return", "(", "url", ",", "content", ")" ]
return the url and content of a file , which may be a local path or an http url .
train
true
7,357
def payloads(tracking_id, client_id, requestable, extra_info=None, extra_headers=None): extra_payload = {'v': '1', 'tid': tracking_id, 'cid': client_id, 'aip': '1'} if extra_info: for payload in extra_info: extra_payload.update(payload) for request_payload in requestable: final_payload = dict(request_payload) final_payload.update(extra_payload) (yield (final_payload, extra_headers))
[ "def", "payloads", "(", "tracking_id", ",", "client_id", ",", "requestable", ",", "extra_info", "=", "None", ",", "extra_headers", "=", "None", ")", ":", "extra_payload", "=", "{", "'v'", ":", "'1'", ",", "'tid'", ":", "tracking_id", ",", "'cid'", ":", "client_id", ",", "'aip'", ":", "'1'", "}", "if", "extra_info", ":", "for", "payload", "in", "extra_info", ":", "extra_payload", ".", "update", "(", "payload", ")", "for", "request_payload", "in", "requestable", ":", "final_payload", "=", "dict", "(", "request_payload", ")", "final_payload", ".", "update", "(", "extra_payload", ")", "(", "yield", "(", "final_payload", ",", "extra_headers", ")", ")" ]
get data and headers of api requests for google analytics .
train
false
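an illustrative call to the payloads generator ; the tracking id , client id and request payloads are invented for the example .
reqs = [{'t': 'pageview', 'dp': '/home'}]
gen = payloads('UA-XXXX-1', 'cid-123', reqs,
               extra_info=[{'an': 'myapp'}],
               extra_headers={'User-Agent': 'tests'})
for payload, headers in gen:
    # the base fields (v, tid, cid, aip) and extra_info are merged in
    print(payload['tid'], payload['dp'], headers)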
7,360
@dispatch(Join, Sequence, Sequence) def compute_up(t, lhs, rhs, **kwargs): if (lhs == rhs): (lhs, rhs) = itertools.tee(lhs, 2) on_left = [t.lhs.fields.index(col) for col in listpack(t.on_left)] on_right = [t.rhs.fields.index(col) for col in listpack(t.on_right)] left_default = (None if (t.how in ('right', 'outer')) else toolz.itertoolz.no_default) right_default = (None if (t.how in ('left', 'outer')) else toolz.itertoolz.no_default) pairs = toolz.join(on_left, lhs, on_right, rhs, left_default=left_default, right_default=right_default) assemble = pair_assemble(t, on_left, on_right) return map(assemble, pairs)
[ "@", "dispatch", "(", "Join", ",", "Sequence", ",", "Sequence", ")", "def", "compute_up", "(", "t", ",", "lhs", ",", "rhs", ",", "**", "kwargs", ")", ":", "if", "(", "lhs", "==", "rhs", ")", ":", "(", "lhs", ",", "rhs", ")", "=", "itertools", ".", "tee", "(", "lhs", ",", "2", ")", "on_left", "=", "[", "t", ".", "lhs", ".", "fields", ".", "index", "(", "col", ")", "for", "col", "in", "listpack", "(", "t", ".", "on_left", ")", "]", "on_right", "=", "[", "t", ".", "rhs", ".", "fields", ".", "index", "(", "col", ")", "for", "col", "in", "listpack", "(", "t", ".", "on_right", ")", "]", "left_default", "=", "(", "None", "if", "(", "t", ".", "how", "in", "(", "'right'", ",", "'outer'", ")", ")", "else", "toolz", ".", "itertoolz", ".", "no_default", ")", "right_default", "=", "(", "None", "if", "(", "t", ".", "how", "in", "(", "'left'", ",", "'outer'", ")", ")", "else", "toolz", ".", "itertoolz", ".", "no_default", ")", "pairs", "=", "toolz", ".", "join", "(", "on_left", ",", "lhs", ",", "on_right", ",", "rhs", ",", "left_default", "=", "left_default", ",", "right_default", "=", "right_default", ")", "assemble", "=", "pair_assemble", "(", "t", ",", "on_left", ",", "on_right", ")", "return", "map", "(", "assemble", ",", "pairs", ")" ]
join two sequences on arbitrary columns ; the approach taken here could probably be improved .
train
false
7,362
def compare_dataset_states(discovered_datasets, desired_datasets): for dataset_id in (set(discovered_datasets) | set(desired_datasets)): desired_dataset = desired_datasets.get(dataset_id) discovered_dataset = discovered_datasets.get(dataset_id) if (not compare_dataset_state(discovered_dataset=discovered_dataset, desired_dataset=desired_dataset)): return False return True
[ "def", "compare_dataset_states", "(", "discovered_datasets", ",", "desired_datasets", ")", ":", "for", "dataset_id", "in", "(", "set", "(", "discovered_datasets", ")", "|", "set", "(", "desired_datasets", ")", ")", ":", "desired_dataset", "=", "desired_datasets", ".", "get", "(", "dataset_id", ")", "discovered_dataset", "=", "discovered_datasets", ".", "get", "(", "dataset_id", ")", "if", "(", "not", "compare_dataset_state", "(", "discovered_dataset", "=", "discovered_dataset", ",", "desired_dataset", "=", "desired_dataset", ")", ")", ":", "return", "False", "return", "True" ]
compare discovered and desired state of datasets to determine if they have converged .
train
false
7,364
@profiler.trace def list_resources_with_long_filters(list_method, filter_attr, filter_values, **params): try: params[filter_attr] = filter_values return list_method(**params) except neutron_exc.RequestURITooLong as uri_len_exc: if (type(filter_values) != list): filter_values = [filter_values] all_filter_len = sum((((len(filter_attr) + len(val)) + 2) for val in filter_values)) allowed_filter_len = (all_filter_len - uri_len_exc.excess) val_maxlen = max((len(val) for val in filter_values)) filter_maxlen = ((len(filter_attr) + val_maxlen) + 2) chunk_size = (allowed_filter_len // filter_maxlen) resources = [] for i in range(0, len(filter_values), chunk_size): params[filter_attr] = filter_values[i:(i + chunk_size)] resources.extend(list_method(**params)) return resources
[ "@", "profiler", ".", "trace", "def", "list_resources_with_long_filters", "(", "list_method", ",", "filter_attr", ",", "filter_values", ",", "**", "params", ")", ":", "try", ":", "params", "[", "filter_attr", "]", "=", "filter_values", "return", "list_method", "(", "**", "params", ")", "except", "neutron_exc", ".", "RequestURITooLong", "as", "uri_len_exc", ":", "if", "(", "type", "(", "filter_values", ")", "!=", "list", ")", ":", "filter_values", "=", "[", "filter_values", "]", "all_filter_len", "=", "sum", "(", "(", "(", "(", "len", "(", "filter_attr", ")", "+", "len", "(", "val", ")", ")", "+", "2", ")", "for", "val", "in", "filter_values", ")", ")", "allowed_filter_len", "=", "(", "all_filter_len", "-", "uri_len_exc", ".", "excess", ")", "val_maxlen", "=", "max", "(", "(", "len", "(", "val", ")", "for", "val", "in", "filter_values", ")", ")", "filter_maxlen", "=", "(", "(", "len", "(", "filter_attr", ")", "+", "val_maxlen", ")", "+", "2", ")", "chunk_size", "=", "(", "allowed_filter_len", "//", "filter_maxlen", ")", "resources", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "filter_values", ")", ",", "chunk_size", ")", ":", "params", "[", "filter_attr", "]", "=", "filter_values", "[", "i", ":", "(", "i", "+", "chunk_size", ")", "]", "resources", ".", "extend", "(", "list_method", "(", "**", "params", ")", ")", "return", "resources" ]
list neutron resources , handling the requesturitoolong exception by splitting the filter values into chunks .
train
false
7,365
def is_aware(value): return ((value.tzinfo is not None) and (value.tzinfo.utcoffset(value) is not None))
[ "def", "is_aware", "(", "value", ")", ":", "return", "(", "(", "value", ".", "tzinfo", "is", "not", "None", ")", "and", "(", "value", ".", "tzinfo", ".", "utcoffset", "(", "value", ")", "is", "not", "None", ")", ")" ]
determines if a given datetime is timezone-aware .
train
false
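a quick check of is_aware with the standard library , shown in python 3 ; utc here is just an example tzinfo .
from datetime import datetime, timezone

print(is_aware(datetime(2020, 1, 1)))                       # False: naive
print(is_aware(datetime(2020, 1, 1, tzinfo=timezone.utc)))  # True: aware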
7,366
@jinja2.contextfunction @library.global_function def ga_push_attribute(context): request = context.get('request') ga_push = context.get('ga_push', []) if ((request.GET.get('fpa') == '1') and request.user.is_authenticated()): user = request.user group_names = user.groups.values_list('name', flat=True) if ('Administrators' in group_names): ga_push.append(['_setCustomVar', 1, 'User Type', 'Contributor - Admin', 1]) elif ('Contributors' in group_names): ga_push.append(['_setCustomVar', 1, 'User Type', 'Contributor', 1]) else: ga_push.append(['_setCustomVar', 1, 'User Type', 'Registered', 1]) return jsonlib.dumps(ga_push)
[ "@", "jinja2", ".", "contextfunction", "@", "library", ".", "global_function", "def", "ga_push_attribute", "(", "context", ")", ":", "request", "=", "context", ".", "get", "(", "'request'", ")", "ga_push", "=", "context", ".", "get", "(", "'ga_push'", ",", "[", "]", ")", "if", "(", "(", "request", ".", "GET", ".", "get", "(", "'fpa'", ")", "==", "'1'", ")", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "user", "=", "request", ".", "user", "group_names", "=", "user", ".", "groups", ".", "values_list", "(", "'name'", ",", "flat", "=", "True", ")", "if", "(", "'Administrators'", "in", "group_names", ")", ":", "ga_push", ".", "append", "(", "[", "'_setCustomVar'", ",", "1", ",", "'User Type'", ",", "'Contributor - Admin'", ",", "1", "]", ")", "elif", "(", "'Contributors'", "in", "group_names", ")", ":", "ga_push", ".", "append", "(", "[", "'_setCustomVar'", ",", "1", ",", "'User Type'", ",", "'Contributor'", ",", "1", "]", ")", "else", ":", "ga_push", ".", "append", "(", "[", "'_setCustomVar'", ",", "1", ",", "'User Type'", ",", "'Registered'", ",", "1", "]", ")", "return", "jsonlib", ".", "dumps", "(", "ga_push", ")" ]
return the json for the data-ga-push attribute .
train
false
7,368
def delete_message(queue, region, receipthandle, opts=None, user=None): queues = list_queues(region, opts, user) url_map = _parse_queue_list(queues) if (queue not in url_map): log.info('"{0}" queue does not exist.'.format(queue)) return False out = _run_aws('delete-message', region, opts, user, receipthandle=receipthandle, queue=url_map[queue]) return True
[ "def", "delete_message", "(", "queue", ",", "region", ",", "receipthandle", ",", "opts", "=", "None", ",", "user", "=", "None", ")", ":", "queues", "=", "list_queues", "(", "region", ",", "opts", ",", "user", ")", "url_map", "=", "_parse_queue_list", "(", "queues", ")", "if", "(", "queue", "not", "in", "url_map", ")", ":", "log", ".", "info", "(", "'\"{0}\" queue does not exist.'", ".", "format", "(", "queue", ")", ")", "return", "False", "out", "=", "_run_aws", "(", "'delete-message'", ",", "region", ",", "opts", ",", "user", ",", "receipthandle", "=", "receipthandle", ",", "queue", "=", "url_map", "[", "queue", "]", ")", "return", "True" ]
delete one or more messages from a queue in a region . queue : the name of the queue to delete messages from . region : region where sqs queues exist . receipthandle : the receipthandle of the message to delete .
train
true
7,369
def send_export_mail(event_id, result): job = DataGetter.get_export_jobs(event_id) if (not job): return event = EventModel.query.get(event_id) if (not event): event_name = '(Undefined)' else: event_name = event.name send_email_after_export(job.user_email, event_name, result) user = DataGetter.get_user_by_email(job.user_email) send_notif_after_export(user, event_name, result)
[ "def", "send_export_mail", "(", "event_id", ",", "result", ")", ":", "job", "=", "DataGetter", ".", "get_export_jobs", "(", "event_id", ")", "if", "(", "not", "job", ")", ":", "return", "event", "=", "EventModel", ".", "query", ".", "get", "(", "event_id", ")", "if", "(", "not", "event", ")", ":", "event_name", "=", "'(Undefined)'", "else", ":", "event_name", "=", "event", ".", "name", "send_email_after_export", "(", "job", ".", "user_email", ",", "event_name", ",", "result", ")", "user", "=", "DataGetter", ".", "get_user_by_email", "(", "job", ".", "user_email", ")", "send_notif_after_export", "(", "user", ",", "event_name", ",", "result", ")" ]
send export event mail after the process is complete .
train
false
7,370
def tally(zcontext, url): zsock = zcontext.socket(zmq.PULL) zsock.bind(url) p = q = 0 while True: decision = zsock.recv_string() q += 1 if (decision == 'Y'): p += 4 print (decision, (p / q))
[ "def", "tally", "(", "zcontext", ",", "url", ")", ":", "zsock", "=", "zcontext", ".", "socket", "(", "zmq", ".", "PULL", ")", "zsock", ".", "bind", "(", "url", ")", "p", "=", "q", "=", "0", "while", "True", ":", "decision", "=", "zsock", ".", "recv_string", "(", ")", "q", "+=", "1", "if", "(", "decision", "==", "'Y'", ")", ":", "p", "+=", "4", "print", "(", "decision", ",", "(", "p", "/", "q", ")", ")" ]
tally how many points fall within the unit circle , printing a running estimate of pi .
train
false
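the running ratio p/q converges to pi because a uniform point in the unit square lands inside the quarter circle with probability pi/4 ; below is a self-contained sketch of the same tally logic , with random decisions standing in for zmq messages .
import random

p = q = 0
for _ in range(100000):
    x, y = random.random(), random.random()
    decision = 'Y' if x * x + y * y <= 1.0 else 'N'
    q += 1
    if decision == 'Y':
        p += 4  # each hit adds 4 because the quarter circle covers pi/4
print(p / q)  # approaches 3.14159...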
7,372
def _convert_args(args): converted = [] for arg in args: if isinstance(arg, dict): for key in list(arg.keys()): if (key == '__kwarg__'): continue converted.append('{0}={1}'.format(key, arg[key])) else: converted.append(arg) return converted
[ "def", "_convert_args", "(", "args", ")", ":", "converted", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "dict", ")", ":", "for", "key", "in", "list", "(", "arg", ".", "keys", "(", ")", ")", ":", "if", "(", "key", "==", "'__kwarg__'", ")", ":", "continue", "converted", ".", "append", "(", "'{0}={1}'", ".", "format", "(", "key", ",", "arg", "[", "key", "]", ")", ")", "else", ":", "converted", ".", "append", "(", "arg", ")", "return", "converted" ]
convert a list of args , flattening kwarg dicts into 'key=value' strings while skipping the '__kwarg__' marker .
train
true
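a hypothetical input showing how _convert_args flattens kwarg dicts ; the '__kwarg__' marker key is skipped , matching the loop above .
args = ['pkg', {'version': '1.0', '__kwarg__': True}]
print(_convert_args(args))  # ['pkg', 'version=1.0']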
7,373
def add_local(f): if os.path.exists(f): fn = get_filename(f) if fn: if (get_ext(fn) in VALID_ARCHIVES): ProcessArchiveFile(fn, f, keep=True) elif (get_ext(fn) in ('.nzb', '.gz', '.bz2')): ProcessSingleFile(fn, f, keep=True) else: logging.error('Filename not found: %s', f) else: logging.error('File not found: %s', f)
[ "def", "add_local", "(", "f", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "fn", "=", "get_filename", "(", "f", ")", "if", "fn", ":", "if", "(", "get_ext", "(", "fn", ")", "in", "VALID_ARCHIVES", ")", ":", "ProcessArchiveFile", "(", "fn", ",", "f", ",", "keep", "=", "True", ")", "elif", "(", "get_ext", "(", "fn", ")", "in", "(", "'.nzb'", ",", "'.gz'", ",", "'.bz2'", ")", ")", ":", "ProcessSingleFile", "(", "fn", ",", "f", ",", "keep", "=", "True", ")", "else", ":", "logging", ".", "error", "(", "'Filename not found: %s'", ",", "f", ")", "else", ":", "logging", ".", "error", "(", "'File not found: %s'", ",", "f", ")" ]
add a local nzb or archive file to the queue for processing .
train
false
7,376
def _get_blob_metadata(blob_key): key = blobstore_stub.BlobstoreServiceStub.ToDatastoreBlobKey(blob_key) try: info = datastore.Get(key) return (info['size'], info['content_type'], blob_key) except datastore_errors.EntityNotFoundError: return (None, None, None)
[ "def", "_get_blob_metadata", "(", "blob_key", ")", ":", "key", "=", "blobstore_stub", ".", "BlobstoreServiceStub", ".", "ToDatastoreBlobKey", "(", "blob_key", ")", "try", ":", "info", "=", "datastore", ".", "Get", "(", "key", ")", "return", "(", "info", "[", "'size'", "]", ",", "info", "[", "'content_type'", "]", ",", "blob_key", ")", "except", "datastore_errors", ".", "EntityNotFoundError", ":", "return", "(", "None", ",", "None", ",", "None", ")" ]
retrieve the metadata about a blob from the blob_key .
train
false
7,377
def repo_refresh(m): retvals = {'rc': 0, 'stdout': '', 'stderr': ''} cmd = get_cmd(m, 'refresh') retvals['cmd'] = cmd (result, retvals['rc'], retvals['stdout'], retvals['stderr']) = parse_zypper_xml(m, cmd) return retvals
[ "def", "repo_refresh", "(", "m", ")", ":", "retvals", "=", "{", "'rc'", ":", "0", ",", "'stdout'", ":", "''", ",", "'stderr'", ":", "''", "}", "cmd", "=", "get_cmd", "(", "m", ",", "'refresh'", ")", "retvals", "[", "'cmd'", "]", "=", "cmd", "(", "result", ",", "retvals", "[", "'rc'", "]", ",", "retvals", "[", "'stdout'", "]", ",", "retvals", "[", "'stderr'", "]", ")", "=", "parse_zypper_xml", "(", "m", ",", "cmd", ")", "return", "retvals" ]
update the repositories .
train
false
7,378
def check_migration_histories(histories, delete_ghosts=False, ignore_ghosts=False): exists = SortedSet() ghosts = [] for h in histories: try: m = h.get_migration() m.migration() except exceptions.UnknownMigration: ghosts.append(h) except ImproperlyConfigured: pass else: exists.add(m) if ghosts: if delete_ghosts: for h in ghosts: h.delete() elif (not ignore_ghosts): raise exceptions.GhostMigrations(ghosts) return exists
[ "def", "check_migration_histories", "(", "histories", ",", "delete_ghosts", "=", "False", ",", "ignore_ghosts", "=", "False", ")", ":", "exists", "=", "SortedSet", "(", ")", "ghosts", "=", "[", "]", "for", "h", "in", "histories", ":", "try", ":", "m", "=", "h", ".", "get_migration", "(", ")", "m", ".", "migration", "(", ")", "except", "exceptions", ".", "UnknownMigration", ":", "ghosts", ".", "append", "(", "h", ")", "except", "ImproperlyConfigured", ":", "pass", "else", ":", "exists", ".", "add", "(", "m", ")", "if", "ghosts", ":", "if", "delete_ghosts", ":", "for", "h", "in", "ghosts", ":", "h", ".", "delete", "(", ")", "elif", "(", "not", "ignore_ghosts", ")", ":", "raise", "exceptions", ".", "GhostMigrations", "(", "ghosts", ")", "return", "exists" ]
checks that there are no ghost migrations in the database .
train
false
7,380
def installed(name, dir, pkgs=None, user=None, env=None): ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if (pkgs is not None): pkg_list = pkgs else: pkg_list = [name] try: installed_pkgs = __salt__['bower.list'](dir=dir, runas=user, env=env) except (CommandNotFoundError, CommandExecutionError) as err: ret['result'] = False ret['comment'] = "Error looking up '{0}': {1}".format(name, err) return ret else: installed_pkgs = dict(((p, info) for (p, info) in six.iteritems(installed_pkgs))) pkgs_satisfied = [] pkgs_to_install = [] for pkg in pkg_list: (pkg_name, _, pkg_ver) = pkg.partition('#') pkg_name = pkg_name.strip() if (pkg_name not in installed_pkgs): pkgs_to_install.append(pkg) continue if (pkg_name in installed_pkgs): installed_pkg = installed_pkgs[pkg_name] installed_pkg_ver = installed_pkg.get('pkgMeta').get('version') installed_name_ver = '{0}#{1}'.format(pkg_name, installed_pkg_ver) if pkg_ver: if (installed_pkg_ver != pkg_ver): pkgs_to_install.append(pkg) else: pkgs_satisfied.append(installed_name_ver) continue else: pkgs_satisfied.append(installed_name_ver) continue if __opts__['test']: ret['result'] = None comment_msg = [] if pkgs_to_install: comment_msg.append("Bower package(s) '{0}' are set to be installed".format(', '.join(pkgs_to_install))) ret['changes'] = {'old': [], 'new': pkgs_to_install} if pkgs_satisfied: comment_msg.append("Package(s) '{0}' satisfied by {1}".format(', '.join(pkg_list), ', '.join(pkgs_satisfied))) ret['comment'] = '. '.join(comment_msg) return ret if (not pkgs_to_install): ret['result'] = True ret['comment'] = "Package(s) '{0}' satisfied by {1}".format(', '.join(pkg_list), ', '.join(pkgs_satisfied)) return ret try: cmd_args = {'pkg': None, 'dir': dir, 'pkgs': None, 'runas': user, 'env': env} if (pkgs is not None): cmd_args['pkgs'] = pkgs else: cmd_args['pkg'] = pkg_name call = __salt__['bower.install'](**cmd_args) except (CommandNotFoundError, CommandExecutionError) as err: ret['result'] = False ret['comment'] = "Error installing '{0}': {1}".format(', '.join(pkg_list), err) return ret if call: ret['result'] = True ret['changes'] = {'old': [], 'new': pkgs_to_install} ret['comment'] = "Package(s) '{0}' successfully installed".format(', '.join(pkgs_to_install)) else: ret['result'] = False ret['comment'] = "Could not install package(s) '{0}'".format(', '.join(pkg_list)) return ret
[ "def", "installed", "(", "name", ",", "dir", ",", "pkgs", "=", "None", ",", "user", "=", "None", ",", "env", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "(", "pkgs", "is", "not", "None", ")", ":", "pkg_list", "=", "pkgs", "else", ":", "pkg_list", "=", "[", "name", "]", "try", ":", "installed_pkgs", "=", "__salt__", "[", "'bower.list'", "]", "(", "dir", "=", "dir", ",", "runas", "=", "user", ",", "env", "=", "env", ")", "except", "(", "CommandNotFoundError", ",", "CommandExecutionError", ")", "as", "err", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error looking up '{0}': {1}\"", ".", "format", "(", "name", ",", "err", ")", "return", "ret", "else", ":", "installed_pkgs", "=", "dict", "(", "(", "(", "p", ",", "info", ")", "for", "(", "p", ",", "info", ")", "in", "six", ".", "iteritems", "(", "installed_pkgs", ")", ")", ")", "pkgs_satisfied", "=", "[", "]", "pkgs_to_install", "=", "[", "]", "for", "pkg", "in", "pkg_list", ":", "(", "pkg_name", ",", "_", ",", "pkg_ver", ")", "=", "pkg", ".", "partition", "(", "'#'", ")", "pkg_name", "=", "pkg_name", ".", "strip", "(", ")", "if", "(", "pkg_name", "not", "in", "installed_pkgs", ")", ":", "pkgs_to_install", ".", "append", "(", "pkg", ")", "continue", "if", "(", "pkg_name", "in", "installed_pkgs", ")", ":", "installed_pkg", "=", "installed_pkgs", "[", "pkg_name", "]", "installed_pkg_ver", "=", "installed_pkg", ".", "get", "(", "'pkgMeta'", ")", ".", "get", "(", "'version'", ")", "installed_name_ver", "=", "'{0}#{1}'", ".", "format", "(", "pkg_name", ",", "installed_pkg_ver", ")", "if", "pkg_ver", ":", "if", "(", "installed_pkg_ver", "!=", "pkg_ver", ")", ":", "pkgs_to_install", ".", "append", "(", "pkg", ")", "else", ":", "pkgs_satisfied", ".", "append", "(", "installed_name_ver", ")", "continue", "else", ":", "pkgs_satisfied", ".", "append", "(", "installed_name_ver", ")", "continue", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "comment_msg", "=", "[", "]", "if", "pkgs_to_install", ":", "comment_msg", ".", "append", "(", "\"Bower package(s) '{0}' are set to be installed\"", ".", "format", "(", "', '", ".", "join", "(", "pkgs_to_install", ")", ")", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "[", "]", ",", "'new'", ":", "pkgs_to_install", "}", "if", "pkgs_satisfied", ":", "comment_msg", ".", "append", "(", "\"Package(s) '{0}' satisfied by {1}\"", ".", "format", "(", "', '", ".", "join", "(", "pkg_list", ")", ",", "', '", ".", "join", "(", "pkgs_satisfied", ")", ")", ")", "ret", "[", "'comment'", "]", "=", "'. '", ".", "join", "(", "comment_msg", ")", "return", "ret", "if", "(", "not", "pkgs_to_install", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "\"Package(s) '{0}' satisfied by {1}\"", ".", "format", "(", "', '", ".", "join", "(", "pkg_list", ")", ",", "', '", ".", "join", "(", "pkgs_satisfied", ")", ")", "return", "ret", "try", ":", "cmd_args", "=", "{", "'pkg'", ":", "None", ",", "'dir'", ":", "dir", ",", "'pkgs'", ":", "None", ",", "'runas'", ":", "user", ",", "'env'", ":", "env", "}", "if", "(", "pkgs", "is", "not", "None", ")", ":", "cmd_args", "[", "'pkgs'", "]", "=", "pkgs", "else", ":", "cmd_args", "[", "'pkg'", "]", "=", "pkg_name", "call", "=", "__salt__", "[", "'bower.install'", "]", "(", "**", "cmd_args", ")", "except", "(", "CommandNotFoundError", ",", "CommandExecutionError", ")", "as", "err", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error installing '{0}': {1}\"", ".", "format", "(", "', '", ".", "join", "(", "pkg_list", ")", ",", "err", ")", "return", "ret", "if", "call", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "[", "]", ",", "'new'", ":", "pkgs_to_install", "}", "ret", "[", "'comment'", "]", "=", "\"Package(s) '{0}' successfully installed\"", ".", "format", "(", "', '", ".", "join", "(", "pkgs_to_install", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Could not install package(s) '{0}'\"", ".", "format", "(", "', '", ".", "join", "(", "pkg_list", ")", ")", "return", "ret" ]
verify that the specified bower package(s) are installed in the given directory .
train
true
7,381
def get_default_actions(): return [CloseMenuAction([SubmitAction(), DiscardAction(), DeleteAction()]), UpdateMenuAction([UploadDiffAction(), UploadFileAction()]), DownloadDiffAction(), EditReviewAction(), AddGeneralCommentAction(), ShipItAction()]
[ "def", "get_default_actions", "(", ")", ":", "return", "[", "CloseMenuAction", "(", "[", "SubmitAction", "(", ")", ",", "DiscardAction", "(", ")", ",", "DeleteAction", "(", ")", "]", ")", ",", "UpdateMenuAction", "(", "[", "UploadDiffAction", "(", ")", ",", "UploadFileAction", "(", ")", "]", ")", ",", "DownloadDiffAction", "(", ")", ",", "EditReviewAction", "(", ")", ",", "AddGeneralCommentAction", "(", ")", ",", "ShipItAction", "(", ")", "]" ]
return the list of default review request actions .
train
false
7,382
def tgrep_compile(tgrep_string): parser = _build_tgrep_parser(True) if isinstance(tgrep_string, binary_type): tgrep_string = tgrep_string.decode() return list(parser.parseString(tgrep_string, parseAll=True))[0]
[ "def", "tgrep_compile", "(", "tgrep_string", ")", ":", "parser", "=", "_build_tgrep_parser", "(", "True", ")", "if", "isinstance", "(", "tgrep_string", ",", "binary_type", ")", ":", "tgrep_string", "=", "tgrep_string", ".", "decode", "(", ")", "return", "list", "(", "parser", ".", "parseString", "(", "tgrep_string", ",", "parseAll", "=", "True", ")", ")", "[", "0", "]" ]
parses a tgrep search string into a lambda function .
train
false
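a hedged usage sketch , assuming nltk's tgrep module and a parsed tree are available ; the pattern simply matches np nodes .
from nltk.tree import ParentedTree

tree = ParentedTree.fromstring('(S (NP (DT the) (NN dog)) (VP (VBD barked)))')
predicate = tgrep_compile('NP')
# the compiled pattern acts as a predicate over tree nodes
matches = [st for st in tree.subtrees() if predicate(st)]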
7,384
def set_nonblocking(fd): fl = (fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(fd, fcntl.F_SETFL, fl)
[ "def", "set_nonblocking", "(", "fd", ")", ":", "fl", "=", "(", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_GETFL", ")", "|", "os", ".", "O_NONBLOCK", ")", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETFL", ",", "fl", ")" ]
sets the given file descriptor to non-blocking mode .
train
false
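a minimal demonstration on a pipe : after set_nonblocking , a read with no data raises instead of hanging ( python 3 raises blockingioerror ; python 2 raises oserror with eagain ) .
import os

r, w = os.pipe()
set_nonblocking(r)
try:
    os.read(r, 1)
except BlockingIOError:
    print('no data yet, but the read did not block')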
7,385
def lookupAllRecords(name, timeout=None): return getResolver().lookupAllRecords(name, timeout)
[ "def", "lookupAllRecords", "(", "name", ",", "timeout", "=", "None", ")", ":", "return", "getResolver", "(", ")", ".", "lookupAllRecords", "(", "name", ",", "timeout", ")" ]
perform an all_records dns lookup for the given name .
train
false
7,387
def cors_validation(func): @functools.wraps(func) def wrapped(*a, **kw): controller = a[0] req = a[1] req_origin = req.headers.get('Origin', None) if req_origin: container_info = controller.container_info(controller.account_name, controller.container_name, req) cors_info = container_info.get('cors', {}) resp = func(*a, **kw) if (controller.app.strict_cors_mode and (not controller.is_origin_allowed(cors_info, req_origin))): return resp if ('Access-Control-Expose-Headers' not in resp.headers): expose_headers = set(['cache-control', 'content-language', 'content-type', 'expires', 'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id', 'x-openstack-request-id']) for header in resp.headers: if (header.startswith('X-Container-Meta') or header.startswith('X-Object-Meta')): expose_headers.add(header.lower()) if cors_info.get('expose_headers'): expose_headers = expose_headers.union([header_line.strip().lower() for header_line in cors_info['expose_headers'].split(' ') if header_line.strip()]) resp.headers['Access-Control-Expose-Headers'] = ', '.join(expose_headers) if ('Access-Control-Allow-Origin' not in resp.headers): if (cors_info['allow_origin'] and (cors_info['allow_origin'].strip() == '*')): resp.headers['Access-Control-Allow-Origin'] = '*' else: resp.headers['Access-Control-Allow-Origin'] = req_origin return resp else: return func(*a, **kw) return wrapped
[ "def", "cors_validation", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "a", ",", "**", "kw", ")", ":", "controller", "=", "a", "[", "0", "]", "req", "=", "a", "[", "1", "]", "req_origin", "=", "req", ".", "headers", ".", "get", "(", "'Origin'", ",", "None", ")", "if", "req_origin", ":", "container_info", "=", "controller", ".", "container_info", "(", "controller", ".", "account_name", ",", "controller", ".", "container_name", ",", "req", ")", "cors_info", "=", "container_info", ".", "get", "(", "'cors'", ",", "{", "}", ")", "resp", "=", "func", "(", "*", "a", ",", "**", "kw", ")", "if", "(", "controller", ".", "app", ".", "strict_cors_mode", "and", "(", "not", "controller", ".", "is_origin_allowed", "(", "cors_info", ",", "req_origin", ")", ")", ")", ":", "return", "resp", "if", "(", "'Access-Control-Expose-Headers'", "not", "in", "resp", ".", "headers", ")", ":", "expose_headers", "=", "set", "(", "[", "'cache-control'", ",", "'content-language'", ",", "'content-type'", ",", "'expires'", ",", "'last-modified'", ",", "'pragma'", ",", "'etag'", ",", "'x-timestamp'", ",", "'x-trans-id'", ",", "'x-openstack-request-id'", "]", ")", "for", "header", "in", "resp", ".", "headers", ":", "if", "(", "header", ".", "startswith", "(", "'X-Container-Meta'", ")", "or", "header", ".", "startswith", "(", "'X-Object-Meta'", ")", ")", ":", "expose_headers", ".", "add", "(", "header", ".", "lower", "(", ")", ")", "if", "cors_info", ".", "get", "(", "'expose_headers'", ")", ":", "expose_headers", "=", "expose_headers", ".", "union", "(", "[", "header_line", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "header_line", "in", "cors_info", "[", "'expose_headers'", "]", ".", "split", "(", "' '", ")", "if", "header_line", ".", "strip", "(", ")", "]", ")", "resp", ".", "headers", "[", "'Access-Control-Expose-Headers'", "]", "=", "', '", ".", "join", "(", "expose_headers", ")", "if", "(", "'Access-Control-Allow-Origin'", "not", "in", "resp", ".", "headers", ")", ":", "if", "(", "cors_info", "[", "'allow_origin'", "]", "and", "(", "cors_info", "[", "'allow_origin'", "]", ".", "strip", "(", ")", "==", "'*'", ")", ")", ":", "resp", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "else", ":", "resp", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "req_origin", "return", "resp", "else", ":", "return", "func", "(", "*", "a", ",", "**", "kw", ")", "return", "wrapped" ]
decorator to check if the request is a cors request and , if so , attach the appropriate cors headers to the response .
train
false
7,388
def is_hv_pool(metadata): return (POOL_FLAG in metadata.keys())
[ "def", "is_hv_pool", "(", "metadata", ")", ":", "return", "(", "POOL_FLAG", "in", "metadata", ".", "keys", "(", ")", ")" ]
checks if aggregate is a hypervisor_pool .
train
false
7,389
def rename_regkey(skey, ssubkey, dsubkey): res_handle = HANDLE() options = DWORD(0) res = RegOpenKeyExW(skey, ssubkey, options, _winreg.KEY_ALL_ACCESS, byref(res_handle)) if (not res): bsize = c_ushort((len(dsubkey) * 2)) us = UNICODE_STRING() us.Buffer = c_wchar_p(dsubkey) us.Length = bsize us.MaximumLength = bsize res = NtRenameKey(res_handle, pointer(us)) if res: log.warning('Error renaming %s\\%s to %s (0x%x)', skey, ssubkey, dsubkey, (res % (2 ** 32))) if res_handle: RegCloseKey(res_handle)
[ "def", "rename_regkey", "(", "skey", ",", "ssubkey", ",", "dsubkey", ")", ":", "res_handle", "=", "HANDLE", "(", ")", "options", "=", "DWORD", "(", "0", ")", "res", "=", "RegOpenKeyExW", "(", "skey", ",", "ssubkey", ",", "options", ",", "_winreg", ".", "KEY_ALL_ACCESS", ",", "byref", "(", "res_handle", ")", ")", "if", "(", "not", "res", ")", ":", "bsize", "=", "c_ushort", "(", "(", "len", "(", "dsubkey", ")", "*", "2", ")", ")", "us", "=", "UNICODE_STRING", "(", ")", "us", ".", "Buffer", "=", "c_wchar_p", "(", "dsubkey", ")", "us", ".", "Length", "=", "bsize", "us", ".", "MaximumLength", "=", "bsize", "res", "=", "NtRenameKey", "(", "res_handle", ",", "pointer", "(", "us", ")", ")", "if", "res", ":", "log", ".", "warning", "(", "'Error renaming %s\\\\%s to %s (0x%x)'", ",", "skey", ",", "ssubkey", ",", "dsubkey", ",", "(", "res", "%", "(", "2", "**", "32", ")", ")", ")", "if", "res_handle", ":", "RegCloseKey", "(", "res_handle", ")" ]
rename an entire tree of values in the registry .
train
false
7,391
def render_to_html(eq): def err(s): 'Render as an error span' return '<span class="inline-error inline">{0}</span>'.format(s) def render_arrow(arrow): 'Turn text arrows into pretty ones' if (arrow == '->'): return u'\u2192' if (arrow == '<->'): return u'\u2194' return arrow def render_expression(ex): '\n Render a chemical expression--no arrows.\n ' try: return _render_to_html(_get_final_tree(ex)) except ParseException: return err(ex) def spanify(s): return u'<span class="math">{0}</span>'.format(s) (left, arrow, right) = split_on_arrow(eq) if (arrow == ''): return spanify(render_expression(left)) return spanify(((render_expression(left) + render_arrow(arrow)) + render_expression(right)))
[ "def", "render_to_html", "(", "eq", ")", ":", "def", "err", "(", "s", ")", ":", "return", "'<span class=\"inline-error inline\">{0}</span>'", ".", "format", "(", "s", ")", "def", "render_arrow", "(", "arrow", ")", ":", "if", "(", "arrow", "==", "'->'", ")", ":", "return", "u'\\u2192'", "if", "(", "arrow", "==", "'<->'", ")", ":", "return", "u'\\u2194'", "return", "arrow", "def", "render_expression", "(", "ex", ")", ":", "try", ":", "return", "_render_to_html", "(", "_get_final_tree", "(", "ex", ")", ")", "except", "ParseException", ":", "return", "err", "(", "ex", ")", "def", "spanify", "(", "s", ")", ":", "return", "u'<span class=\"math\">{0}</span>'", ".", "format", "(", "s", ")", "(", "left", ",", "arrow", ",", "right", ")", "=", "split_on_arrow", "(", "eq", ")", "if", "(", "arrow", "==", "''", ")", ":", "return", "spanify", "(", "render_expression", "(", "left", ")", ")", "return", "spanify", "(", "(", "(", "render_expression", "(", "left", ")", "+", "render_arrow", "(", "arrow", ")", ")", "+", "render_expression", "(", "right", ")", ")", ")" ]
render a chemical equation string to html .
train
false
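a hedged usage sketch , assuming the module-level helpers ( split_on_arrow , _get_final_tree , _render_to_html ) are in scope ; the equation string is illustrative .
# '->' is replaced with a unicode right arrow and each side is rendered
# inside a <span class="math"> wrapper
html = render_to_html('H2 + O2 -> H2O')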
7,392
def nu2lambda(nu): return (c / _np.asanyarray(nu))
[ "def", "nu2lambda", "(", "nu", ")", ":", "return", "(", "c", "/", "_np", ".", "asanyarray", "(", "nu", ")", ")" ]
convert optical frequency to wavelength .
train
false
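a worked example , assuming c is the module's speed-of-light constant in m/s : 193.1 thz , a common optical-fiber channel , maps to roughly 1552.5 nm .
import numpy as _np

c = 299792458.0  # speed of light, m/s (assumed module constant)
print(nu2lambda(193.1e12))  # ~1.5525e-06 m, i.e. ~1552.5 nm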
7,393
def SplitBuffer(buff, index=0, length=None): buffer_len = (length or len(buff)) while (index < buffer_len): (encoded_tag, data_index) = ReadTag(buff, index) tag_type = (ORD_MAP[encoded_tag[0]] & TAG_TYPE_MASK) if (tag_type == WIRETYPE_VARINT): (_, new_index) = VarintReader(buff, data_index) (yield (encoded_tag, '', buff[data_index:new_index])) index = new_index elif (tag_type == WIRETYPE_FIXED64): (yield (encoded_tag, '', buff[data_index:(data_index + 8)])) index = (8 + data_index) elif (tag_type == WIRETYPE_FIXED32): (yield (encoded_tag, '', buff[data_index:(data_index + 4)])) index = (4 + data_index) elif (tag_type == WIRETYPE_LENGTH_DELIMITED): (length, start) = VarintReader(buff, data_index) (yield (encoded_tag, buff[data_index:start], buff[start:(start + length)])) index = (start + length) else: raise rdfvalue.DecodeError('Unexpected Tag.')
[ "def", "SplitBuffer", "(", "buff", ",", "index", "=", "0", ",", "length", "=", "None", ")", ":", "buffer_len", "=", "(", "length", "or", "len", "(", "buff", ")", ")", "while", "(", "index", "<", "buffer_len", ")", ":", "(", "encoded_tag", ",", "data_index", ")", "=", "ReadTag", "(", "buff", ",", "index", ")", "tag_type", "=", "(", "ORD_MAP", "[", "encoded_tag", "[", "0", "]", "]", "&", "TAG_TYPE_MASK", ")", "if", "(", "tag_type", "==", "WIRETYPE_VARINT", ")", ":", "(", "_", ",", "new_index", ")", "=", "VarintReader", "(", "buff", ",", "data_index", ")", "(", "yield", "(", "encoded_tag", ",", "''", ",", "buff", "[", "data_index", ":", "new_index", "]", ")", ")", "index", "=", "new_index", "elif", "(", "tag_type", "==", "WIRETYPE_FIXED64", ")", ":", "(", "yield", "(", "encoded_tag", ",", "''", ",", "buff", "[", "data_index", ":", "(", "data_index", "+", "8", ")", "]", ")", ")", "index", "=", "(", "8", "+", "data_index", ")", "elif", "(", "tag_type", "==", "WIRETYPE_FIXED32", ")", ":", "(", "yield", "(", "encoded_tag", ",", "''", ",", "buff", "[", "data_index", ":", "(", "data_index", "+", "4", ")", "]", ")", ")", "index", "=", "(", "4", "+", "data_index", ")", "elif", "(", "tag_type", "==", "WIRETYPE_LENGTH_DELIMITED", ")", ":", "(", "length", ",", "start", ")", "=", "VarintReader", "(", "buff", ",", "data_index", ")", "(", "yield", "(", "encoded_tag", ",", "buff", "[", "data_index", ":", "start", "]", ",", "buff", "[", "start", ":", "(", "start", "+", "length", ")", "]", ")", ")", "index", "=", "(", "start", "+", "length", ")", "else", ":", "raise", "rdfvalue", ".", "DecodeError", "(", "'Unexpected Tag.'", ")" ]
parses the buffer as a protobuf wire-format stream , yielding an (encoded tag , length , data) triple per field .
train
true
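an illustrative slice of protobuf wire format for the parser above , assuming the module helpers ( ord_map , varintreader ) are in scope : tag byte 0x08 is field 1 with wire type varint , and 0x96 0x01 is the varint encoding of 150 .
buff = b'\x08\x96\x01'
# SplitBuffer yields a single (encoded_tag, '', data) triple here,
# with data holding the varint bytes b'\x96\x01'
for encoded_tag, length, data in SplitBuffer(buff):
    print(repr(encoded_tag), repr(data))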
7,394
@utils.arg('network', metavar='<network>', help=_('UUID of network.')) @utils.arg('host', metavar='<host>', help=_('Name of host')) @deprecated_network def do_network_associate_host(cs, args): cs.networks.associate_host(args.network, args.host)
[ "@", "utils", ".", "arg", "(", "'network'", ",", "metavar", "=", "'<network>'", ",", "help", "=", "_", "(", "'UUID of network.'", ")", ")", "@", "utils", ".", "arg", "(", "'host'", ",", "metavar", "=", "'<host>'", ",", "help", "=", "_", "(", "'Name of host'", ")", ")", "@", "deprecated_network", "def", "do_network_associate_host", "(", "cs", ",", "args", ")", ":", "cs", ".", "networks", ".", "associate_host", "(", "args", ".", "network", ",", "args", ".", "host", ")" ]
associate host with network .
train
false
7,395
def get_new_exploration_id(): return exp_models.ExplorationModel.get_new_id('')
[ "def", "get_new_exploration_id", "(", ")", ":", "return", "exp_models", ".", "ExplorationModel", ".", "get_new_id", "(", "''", ")" ]
returns a new exploration id .
train
false
7,397
def _ls_history_logs(fs, log_dir_stream, job_id=None): return _ls_logs(fs, log_dir_stream, _match_history_log_path, job_id=job_id)
[ "def", "_ls_history_logs", "(", "fs", ",", "log_dir_stream", ",", "job_id", "=", "None", ")", ":", "return", "_ls_logs", "(", "fs", ",", "log_dir_stream", ",", "_match_history_log_path", ",", "job_id", "=", "job_id", ")" ]
yield matching files .
train
false
7,398
def run_pillar(pillar_name): data = _execute_pillar(pillar_name, run) return data
[ "def", "run_pillar", "(", "pillar_name", ")", ":", "data", "=", "_execute_pillar", "(", "pillar_name", ",", "run", ")", "return", "data" ]
run one or more nagios plugins from pillar data and get the result of cmd .
train
false
7,401
def _inject_key_into_fs(key, fs): LOG.debug((_('Inject key fs=%(fs)s key=%(key)s') % locals())) sshdir = os.path.join('root', '.ssh') fs.make_path(sshdir) fs.set_ownership(sshdir, 'root', 'root') fs.set_permissions(sshdir, 448) keyfile = os.path.join(sshdir, 'authorized_keys') key_data = ''.join(['\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n']) _inject_file_into_fs(fs, keyfile, key_data, append=True) fs.set_permissions(keyfile, 384) _setup_selinux_for_keys(fs, sshdir)
[ "def", "_inject_key_into_fs", "(", "key", ",", "fs", ")", ":", "LOG", ".", "debug", "(", "(", "_", "(", "'Inject key fs=%(fs)s key=%(key)s'", ")", "%", "locals", "(", ")", ")", ")", "sshdir", "=", "os", ".", "path", ".", "join", "(", "'root'", ",", "'.ssh'", ")", "fs", ".", "make_path", "(", "sshdir", ")", "fs", ".", "set_ownership", "(", "sshdir", ",", "'root'", ",", "'root'", ")", "fs", ".", "set_permissions", "(", "sshdir", ",", "448", ")", "keyfile", "=", "os", ".", "path", ".", "join", "(", "sshdir", ",", "'authorized_keys'", ")", "key_data", "=", "''", ".", "join", "(", "[", "'\\n'", ",", "'# The following ssh key was injected by Nova'", ",", "'\\n'", ",", "key", ".", "strip", "(", ")", ",", "'\\n'", "]", ")", "_inject_file_into_fs", "(", "fs", ",", "keyfile", ",", "key_data", ",", "append", "=", "True", ")", "fs", ".", "set_permissions", "(", "keyfile", ",", "384", ")", "_setup_selinux_for_keys", "(", "fs", ",", "sshdir", ")" ]
add the given public ssh key to root's authorized_keys .
train
false
7,402
def test_equal(): assert (Q.positive(x) == Q.positive(x)) assert (Q.positive(x) != (~ Q.positive(x))) assert ((~ Q.positive(x)) == (~ Q.positive(x)))
[ "def", "test_equal", "(", ")", ":", "assert", "(", "Q", ".", "positive", "(", "x", ")", "==", "Q", ".", "positive", "(", "x", ")", ")", "assert", "(", "Q", ".", "positive", "(", "x", ")", "!=", "(", "~", "Q", ".", "positive", "(", "x", ")", ")", ")", "assert", "(", "(", "~", "Q", ".", "positive", "(", "x", ")", ")", "==", "(", "~", "Q", ".", "positive", "(", "x", ")", ")", ")" ]
test for equality .
train
false
7,404
def get_bond(iface): path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path)
[ "def", "get_bond", "(", "iface", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "_RH_NETWORK_CONF_FILES", ",", "'{0}.conf'", ".", "format", "(", "iface", ")", ")", "return", "_read_file", "(", "path", ")" ]
return the content of a bond script .
train
true
7,406
def deg2pix(degrees, monitor, correctFlat=False): scrWidthCm = monitor.getWidth() scrSizePix = monitor.getSizePix() if (scrSizePix is None): msg = 'Monitor %s has no known size in pixels (SEE MONITOR CENTER)' raise ValueError((msg % monitor.name)) if (scrWidthCm is None): msg = 'Monitor %s has no known width in cm (SEE MONITOR CENTER)' raise ValueError((msg % monitor.name)) cmSize = deg2cm(degrees, monitor, correctFlat) return ((cmSize * scrSizePix[0]) / float(scrWidthCm))
[ "def", "deg2pix", "(", "degrees", ",", "monitor", ",", "correctFlat", "=", "False", ")", ":", "scrWidthCm", "=", "monitor", ".", "getWidth", "(", ")", "scrSizePix", "=", "monitor", ".", "getSizePix", "(", ")", "if", "(", "scrSizePix", "is", "None", ")", ":", "msg", "=", "'Monitor %s has no known size in pixels (SEE MONITOR CENTER)'", "raise", "ValueError", "(", "(", "msg", "%", "monitor", ".", "name", ")", ")", "if", "(", "scrWidthCm", "is", "None", ")", ":", "msg", "=", "'Monitor %s has no known width in cm (SEE MONITOR CENTER)'", "raise", "ValueError", "(", "(", "msg", "%", "monitor", ".", "name", ")", ")", "cmSize", "=", "deg2cm", "(", "degrees", ",", "monitor", ",", "correctFlat", ")", "return", "(", "(", "cmSize", "*", "scrSizePix", "[", "0", "]", ")", "/", "float", "(", "scrWidthCm", ")", ")" ]
convert size in degrees to size in pixels for a given monitor object .
train
false
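the conversion is pixels = cm_size * screen_width_px / screen_width_cm ; worked numbers for an illustrative monitor , using the rule of thumb that at about 57 cm viewing distance 1 degree subtends about 1 cm on screen .
cm_size = 1.0        # deg2cm(1, monitor) at ~57 cm viewing distance
scr_width_cm = 40.0  # hypothetical monitor width
scr_width_px = 1280  # hypothetical horizontal resolution
print(cm_size * scr_width_px / scr_width_cm)  # 32.0 pixels per degree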
7,407
def generate_conflicting_plot_options_in_signin(): def gen_test(plot_options): def test(self): py.sign_in('username', 'key', **plot_options) self.assertRaises(PlotlyError, py._plot_option_logic, {}) return test for (i, plot_options) in enumerate(TestPlotOptionLogic.conflicting_option_set): setattr(TestPlotOptionLogic, 'test_conflicting_plot_options_in_signin_{}'.format(i), gen_test(plot_options))
[ "def", "generate_conflicting_plot_options_in_signin", "(", ")", ":", "def", "gen_test", "(", "plot_options", ")", ":", "def", "test", "(", "self", ")", ":", "py", ".", "sign_in", "(", "'username'", ",", "'key'", ",", "**", "plot_options", ")", "self", ".", "assertRaises", "(", "PlotlyError", ",", "py", ".", "_plot_option_logic", ",", "{", "}", ")", "return", "test", "for", "(", "i", ",", "plot_options", ")", "in", "enumerate", "(", "TestPlotOptionLogic", ".", "conflicting_option_set", ")", ":", "setattr", "(", "TestPlotOptionLogic", ",", "'test_conflicting_plot_options_in_signin_{}'", ".", "format", "(", "i", ")", ",", "gen_test", "(", "plot_options", ")", ")" ]
generate tests checking that conflicting plot options passed to sign_in raise a plotlyerror .
train
false